/*	$NetBSD: subr_vmem.c,v 1.118 2024/12/06 19:17:59 riastradh Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 *
 * locking & the boundary tag pool:
 * -	A pool(9) is used for vmem boundary tags.
 * -	During a pool get call, the global vmem_btag_refill_lock is taken
 *	to serialize access to the allocation reserve, but no other
 *	vmem arena locks are held.
 * -	During pool_put calls, no vmem mutexes are locked.
 * -	pool_drain doesn't hold the pool's mutex while releasing memory to
 *	its backing arena, so it cannot interfere with any vmem mutexes.
 * -	The boundary tag pool is forced to put page headers into pool pages
 *	(PR_PHINPAGE) and not off page, to avoid pool recursion.
 *	(due to sizeof(bt_t) it should be the case anyway)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.118 2024/12/06 19:17:59 riastradh Exp $");

#if defined(_KERNEL) && defined(_KERNEL_OPT)
#include "opt_ddb.h"
#endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */

#include <sys/param.h>
#include <sys/types.h>

#include <sys/bitops.h>
#include <sys/hash.h>
#include <sys/queue.h>

#if defined(_KERNEL)

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/kernel.h>	/* hz */
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/sdt.h>
#include <sys/systm.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/workqueue.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_km.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pdaemon.h>

#else /* defined(_KERNEL) */

#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "../sys/vmem.h"
#include "../sys/vmem_impl.h"

#define SET_ERROR(E)	(E)

#endif /* defined(_KERNEL) */

#if defined(_KERNEL)

#include <sys/evcnt.h>

#define VMEM_EVCNT_DEFINE(name) \
struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "vmem", #name); \
EVCNT_ATTACH_STATIC(vmem_evcnt_##name)
#define VMEM_EVCNT_INCR(ev)	(vmem_evcnt_##ev.ev_count++)
#define VMEM_EVCNT_DECR(ev)	(vmem_evcnt_##ev.ev_count--)

VMEM_EVCNT_DEFINE(static_bt_count);
VMEM_EVCNT_DEFINE(static_bt_inuse);

#define VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#else /* defined(_KERNEL) */

#define VMEM_EVCNT_INCR(ev)	__nothing
#define VMEM_EVCNT_DECR(ev)	__nothing

#define VMEM_CONDVAR_INIT(vm, wchan)	__nothing
#define VMEM_CONDVAR_DESTROY(vm)	__nothing
#define VMEM_CONDVAR_WAIT(vm)		__nothing
#define VMEM_CONDVAR_BROADCAST(vm)	__nothing

#define UNITTEST
#define KASSERT(a)		assert(a)
#define KASSERTMSG(a, m, ...)	assert(a)
#define mutex_init(a, b, c)	__nothing
#define mutex_destroy(a)	__nothing
#define mutex_enter(a)		__nothing
#define mutex_tryenter(a)	true
#define mutex_exit(a)		__nothing
#define mutex_owned(a)		true
#define ASSERT_SLEEPABLE()	__nothing
#define panic(...)		(printf(__VA_ARGS__), abort())

#endif /* defined(_KERNEL) */

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define vmem_check(vm)	__nothing
#endif /* defined(VMEM_SANITY) */

#define VMEM_HASHSIZE_MIN	1	/* XXX */
#define VMEM_HASHSIZE_MAX	65536	/* XXX */
#define VMEM_HASHSIZE_INIT	1

#define VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

#if defined(_KERNEL)
static bool vmem_bootstrapped = false;
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

/* ---- misc */

#define VMEM_LOCK(vm)		mutex_enter(&(vm)->vm_lock)
#define VMEM_TRYLOCK(vm)	mutex_tryenter(&(vm)->vm_lock)
#define VMEM_UNLOCK(vm)		mutex_exit(&(vm)->vm_lock)
#define VMEM_LOCK_INIT(vm, ipl)	mutex_init(&(vm)->vm_lock, MUTEX_DEFAULT, (ipl))
#define VMEM_LOCK_DESTROY(vm)	mutex_destroy(&(vm)->vm_lock)
#define VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&(vm)->vm_lock))

#define VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

#define VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define SIZE2ORDER(size)	((int)ilog2(size))

static void
vmem_kick_pdaemon(void)
{
#if defined(_KERNEL)
	uvm_kick_pdaemon();
#endif
}

static void vmem_xfree_bt(vmem_t *, bt_t *);

#if !defined(_KERNEL)
#define xmalloc(sz, flags)	malloc(sz)
#define xfree(p, sz)		free(p)
#define bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define bt_free(vm, bt)		free(bt)
#define bt_freetrim(vm, l)	__nothing
#else /* defined(_KERNEL) */

#define xmalloc(sz, flags) \
    kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
#define xfree(p, sz)		kmem_free(p, sz);

/*
 * BT_RESERVE calculation:
 * we allocate memory for boundary tags with vmem; therefore we have
 * to keep a reserve of bts used to allocate memory for bts.
 * This reserve is 4 for each arena involved in allocating vmem's memory.
 * BT_MAXFREE: don't cache excessive counts of bts in arenas
 */
#define STATIC_BT_COUNT 200
#define BT_MINRESERVE 4
#define BT_MAXFREE 64

static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;

static struct vmem kmem_va_meta_arena_store;
vmem_t *kmem_va_meta_arena;
static struct vmem kmem_meta_arena_store;
vmem_t *kmem_meta_arena = NULL;

static kmutex_t vmem_btag_refill_lock;
static kmutex_t vmem_btag_lock;
static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
static size_t vmem_btag_freelist_count = 0;
static struct pool vmem_btag_pool;
static bool vmem_btag_pool_initialized __read_mostly;

/* ---- boundary tag */

static int bt_refill(vmem_t *vm);
static int bt_refill_locked(vmem_t *vm);

static void *
pool_page_alloc_vmem_meta(struct pool *pp, int flags)
{
	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
	vmem_addr_t va;
	int ret;

	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
	    (vflags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va);

	return ret ? NULL : (void *)va;
}

static void
pool_page_free_vmem_meta(struct pool *pp, void *v)
{

	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
}

/* allocator for vmem-pool metadata */
struct pool_allocator pool_allocator_vmem_meta = {
	.pa_alloc = pool_page_alloc_vmem_meta,
	.pa_free = pool_page_free_vmem_meta,
	.pa_pagesz = 0
};

static int
bt_refill_locked(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	if (vm->vm_nfreetags > BT_MINRESERVE) {
		return 0;
	}

	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE &&
	    (vm->vm_flags & VM_PRIVTAGS) == 0) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		bt->bt_flags = 0;
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
		VMEM_EVCNT_INCR(static_bt_inuse);
	}
	mutex_exit(&vmem_btag_lock);

	while (vm->vm_nfreetags <= BT_MINRESERVE) {
		VMEM_UNLOCK(vm);
		KASSERT(vmem_btag_pool_initialized);
		mutex_enter(&vmem_btag_refill_lock);
		bt = pool_get(&vmem_btag_pool, PR_NOWAIT);
		mutex_exit(&vmem_btag_refill_lock);
		VMEM_LOCK(vm);
		if (bt == NULL)
			break;
		bt->bt_flags = 0;
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags <= BT_MINRESERVE) {
		return SET_ERROR(ENOMEM);
	}

	if (kmem_meta_arena != NULL) {
		VMEM_UNLOCK(vm);
		(void)bt_refill(kmem_arena);
		(void)bt_refill(kmem_va_meta_arena);
		(void)bt_refill(kmem_meta_arena);
		VMEM_LOCK(vm);
	}

	return 0;
}

static int
bt_refill(vmem_t *vm)
{
	int rv;

	VMEM_LOCK(vm);
	rv = bt_refill_locked(vm);
	VMEM_UNLOCK(vm);
	return rv;
}

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	while (vm->vm_nfreetags <= BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		if (bt_refill_locked(vm)) {
			if ((flags & VM_NOSLEEP) != 0) {
				return NULL;
			}

			/*
			 * It would be nice to wait for something specific here
			 * but there are multiple ways that a retry could
			 * succeed and we can't wait for multiple things
			 * simultaneously.  So we'll just sleep for an arbitrary
			 * short period of time and retry regardless.
			 * This should be a very rare case.
			 */

			vmem_kick_pdaemon();
			kpause("btalloc", false, 1, &vm->vm_lock);
		}
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);

	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	bt_t *bt, *next_bt;
	LIST_HEAD(, vmem_btag) tofree;

	VMEM_ASSERT_LOCKED(vm);

	LIST_INIT(&tofree);

	LIST_FOREACH_SAFE(bt, &vm->vm_freetags, bt_freelist, next_bt) {
		if (vm->vm_nfreetags <= freelimit) {
			break;
		}
		if (bt->bt_flags & BT_F_PRIVATE) {
			continue;
		}
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		if (bt >= static_bts
		    && bt < &static_bts[STATIC_BT_COUNT]) {
			mutex_enter(&vmem_btag_lock);
			LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
			vmem_btag_freelist_count++;
			mutex_exit(&vmem_btag_lock);
			VMEM_EVCNT_DECR(static_bt_inuse);
		} else {
			LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		}
	}

	VMEM_UNLOCK(vm);
	while (!LIST_EMPTY(&tofree)) {
		bt = LIST_FIRST(&tofree);
		LIST_REMOVE(bt, bt_freelist);
		pool_put(&vmem_btag_pool, bt);
	}
}

/*
 * Add private boundary tags (statically-allocated by the caller)
 * to a vmem arena's free tag list.
 */
void
vmem_add_bts(vmem_t *vm, struct vmem_btag *bts, unsigned int nbts)
{
	VMEM_LOCK(vm);
	while (nbts != 0) {
		bts->bt_flags = BT_F_PRIVATE;
		LIST_INSERT_HEAD(&vm->vm_freetags, bts, bt_freelist);
		vm->vm_nfreetags++;
		bts++;
		nbts--;
	}
	VMEM_UNLOCK(vm);
}
#endif /* defined(_KERNEL) */
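
/*
 * Example: a minimal sketch (not compiled) of pre-seeding an arena with
 * private boundary tags via vmem_add_bts().  The arena name, tag count,
 * and span below are hypothetical; per the VM_PRIVTAGS assertion in
 * vmem_init(), the tags must be added before the first span.
 */
#if 0
static struct vmem_btag example_bts[8];		/* hypothetical static tags */

static void
example_privtags(void)
{
	vmem_t *vm;

	/* Created with no initial span (size 0) and VM_PRIVTAGS. */
	vm = vmem_create("example", 0, 0, 1, NULL, NULL, NULL, 0,
	    VM_NOSLEEP | VM_PRIVTAGS, IPL_NONE);
	if (vm == NULL)
		return;

	/* Seed the free tag list, then add the first span. */
	vmem_add_bts(vm, example_bts, __arraycount(example_bts));
	vmem_add(vm, 0x1000, 0x1000, VM_NOSLEEP);
}
#endif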

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0);
	KASSERT(qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * for VM_INSTANTFIT, return the list in which any blocks are large enough
 * for the requested size.  otherwise, return the list which can have blocks
 * large enough for the requested size.
 */

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0);
	KASSERT(qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
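
/*
 * Worked example (hypothetical numbers): with a quantum of 4 KiB, a
 * 24 KiB block has qsize = 6, so SIZE2ORDER(6) = 2 and it is freed onto
 * freelist[2] ([4, 7] quanta).  For a VM_INSTANTFIT allocation of the
 * same size, 6 is not a power of two, so bt_freehead_toalloc() bumps
 * the index to freelist[3], where every block holds at least 8 quanta
 * and is therefore guaranteed to be large enough.
 */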

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash & vm->vm_hashmask];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	if (++vm->vm_nbusytag > vm->vm_maxbusytag) {
		vm->vm_maxbusytag = vm->vm_nbusytag;
	}
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;
		pool_cache_t pc;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);

		pc = pool_cache_init(size,
		    ORDER2SIZE(vm->vm_quantum_shift), 0,
		    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
		    qc->qc_name, pa, ipl, NULL, NULL, NULL);

		KASSERT(pc);

		qc->qc_cache = pc;
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			pool_cache_destroy(qc->qc_cache);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}
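
/*
 * Sketch of the quantum cache in action (hypothetical numbers, assuming
 * a 4096-byte quantum): with qcache_max = 8 * 4096, a vmem_alloc() of up
 * to eight quanta is served from the per-size pool_cache set up above;
 * e.g. a three-quantum (12288-byte) request hits vm_qcache[2], whose
 * cache was named "<arena>-12288" by the snprintf() in qc_init().
 * Anything larger falls through to vmem_xalloc().
 */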

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}
#endif

#if defined(_KERNEL)
static void
vmem_bootstrap(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_refill_lock, MUTEX_DEFAULT, IPL_VM);

	while (static_bt_count-- > 0) {
		bt_t *bt = &static_bts[static_bt_count];
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		VMEM_EVCNT_INCR(static_bt_count);
		vmem_btag_freelist_count++;
	}
	vmem_bootstrapped = true;
}

void
vmem_subsystem_init(vmem_t *vm)
{

	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
	    IPL_VM);

	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
	    0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	pool_init(&vmem_btag_pool, sizeof(bt_t), coherency_unit, 0,
	    PR_PHINPAGE, "vmembt", &pool_allocator_vmem_meta, IPL_VM);
	vmem_btag_pool_initialized = true;
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	VMEM_ASSERT_LOCKED(vm);
	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return SET_ERROR(ENOMEM);
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return SET_ERROR(ENOMEM);
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;

	return 0;
}
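
/*
 * Sketch of the segment list after vmem_add1() adds a span at
 * [addr, addr + size): two tags covering the same range are queued,
 * a span marker followed by one free tag (hypothetical layout):
 *
 *	seglist:  [SPAN addr/size] -> [FREE addr/size]
 *
 * Allocations split the FREE tag into BUSY/FREE pieces; the SPAN tag
 * stays until the whole span can be handed back to vm_releasefn.
 */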

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	VMEM_LOCK(vm);

	for (int i = 0; i < vm->vm_hashsize; i++) {
		bt_t *bt;

		while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
			KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
			LIST_REMOVE(bt, bt_hashlist);
			bt_free(vm, bt);
		}
	}

	/* bt_freetrim() drops the lock. */
	bt_freetrim(vm, 0);
	if (vm->vm_hashlist != &vm->vm_hash0) {
		xfree(vm->vm_hashlist,
		    sizeof(struct vmem_hashlist) * vm->vm_hashsize);
	}

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	xfree(vm, sizeof(*vm));
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	VMEM_ASSERT_LOCKED(vm);

	if (vm->vm_importfn == NULL) {
		return SET_ERROR(EINVAL);
	}

	if (vm->vm_flags & VM_LARGEIMPORT) {
		size *= 16;
	}

	VMEM_UNLOCK(vm);
	if (vm->vm_flags & VM_XIMPORT) {
		rc = __FPTRCAST(vmem_ximport_t *, vm->vm_importfn)(vm->vm_arg,
		    size, &size, flags, &addr);
	} else {
		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	}
	VMEM_LOCK(vm);

	if (rc) {
		return SET_ERROR(ENOMEM);
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		VMEM_LOCK(vm);
		return SET_ERROR(ENOMEM);
	}

	return 0;
}
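
/*
 * Example: a minimal sketch (not compiled) of an import/release pair
 * for a nested arena, matching the shapes of vm_importfn/vm_releasefn
 * used above (vmem_subsystem_init() passes vmem_alloc/vmem_free the
 * same way).  The function names are hypothetical.
 */
#if 0
static int
example_import(vmem_t *parent, vmem_size_t size, vm_flag_t flags,
    vmem_addr_t *addrp)
{

	/* Carve a span out of the parent arena. */
	return vmem_alloc(parent, size,
	    (flags & ~VM_FITMASK) | VM_INSTANTFIT, addrp);
}

static void
example_release(vmem_t *parent, vmem_addr_t addr, vmem_size_t size)
{

	vmem_free(parent, addr, size);
}
#endif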

#if defined(_KERNEL)
static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	/* Round hash size up to a power of 2. */
	newhashsize = 1 << (ilog2(newhashsize) + 1);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist) * newhashsize, flags);
	if (newhashlist == NULL) {
		return SET_ERROR(ENOMEM);
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	/* Decay back to a small hash slowly. */
	if (vm->vm_maxbusytag >= 2) {
		vm->vm_maxbusytag = vm->vm_maxbusytag / 2 - 1;
		if (vm->vm_nbusytag > vm->vm_maxbusytag) {
			vm->vm_maxbusytag = vm->vm_nbusytag;
		}
	} else {
		vm->vm_maxbusytag = vm->vm_nbusytag;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	vm->vm_hashmask = newhashsize - 1;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);	/* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != &vm->vm_hash0) {
		xfree(oldhashlist,
		    sizeof(struct vmem_hashlist) * oldhashsize);
	}

	return 0;
}
#endif /* _KERNEL */

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it's a caller's responsibility to ensure the region is big enough
 * before calling us.
 */

static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size);	/* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return SET_ERROR(ENOMEM);
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return SET_ERROR(ENOMEM);
}
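
/*
 * Worked example (hypothetical values): for a block starting at 0x1234
 * with align = 0x1000 and phase = 0x80, VMEM_ALIGNUP(0x1234 - 0x80,
 * 0x1000) = 0x2000, so vmem_fit() proposes start = 0x2080, the first
 * address inside the block with (start & (align - 1)) == phase.
 */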

/* ---- vmem API */

/*
 * vmem_init: creates a vmem arena.
 */

vmem_t *
vmem_init(vmem_t *vm, const char *name,
    vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
    vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	int i;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);
	KASSERT(powerof2(quantum));

	/*
	 * If private tags are going to be used, they must
	 * be added to the arena before the first span is
	 * added.
	 */
	KASSERT((flags & VM_PRIVTAGS) == 0 || size == 0);

#if defined(_KERNEL)
	/* XXX: SMP, we get called early... */
	if (!vmem_bootstrapped) {
		vmem_bootstrap();
	}
#endif /* defined(_KERNEL) */

	if (vm == NULL) {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_maxbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = 1;
	vm->vm_hashmask = vm->vm_hashsize - 1;
	vm->vm_hashlist = &vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm);
	}

	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}
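
/*
 * Example: a minimal sketch (not compiled) of the usual round trip
 * through the public API, using vmem_create() (defined just below).
 * The arena name, span, and sizes are hypothetical.
 */
#if 0
static void
example_roundtrip(void)
{
	vmem_t *vm;
	vmem_addr_t addr;

	vm = vmem_create("example", 0x10000, 0x8000, PAGE_SIZE,
	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
	if (vm == NULL)
		return;
	if (vmem_alloc(vm, 2 * PAGE_SIZE, VM_SLEEP | VM_INSTANTFIT,
	    &addr) == 0) {
		vmem_free(vm, addr, 2 * PAGE_SIZE);
	}
	vmem_destroy(vm);
}
#endif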

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    importfn, releasefn, source, qcache_max, flags, ipl);
}

/*
 * vmem_xcreate: create an arena that takes an alternative import function.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_init(NULL, name, base, size, quantum,
	    __FPTRCAST(vmem_import_t *, importfn), releasefn, source,
	    qcache_max, flags | VM_XIMPORT, ipl);
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
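
/*
 * Worked example (hypothetical values): with quantum = 4096,
 * vmem_roundup_size() maps 1 -> 4096, 4096 -> 4096, and 4097 -> 8192,
 * i.e. sizes always round up to a whole number of quanta.
 */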

/*
 * vmem_alloc: allocate resource from the arena.
 */

int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __diagused = flags & VM_FITMASK;
	int error;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		error = (p == NULL) ? SET_ERROR(ENOMEM) : 0;
		goto out;
	}
#endif /* defined(QCACHE) */

	error = vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
#if defined(QCACHE)
out:
#endif /* defined(QCACHE) */
	KASSERTMSG(error || addrp == NULL ||
	    (*addrp & vm->vm_quantum_mask) == 0,
	    "vmem %s mask=0x%jx addr=0x%jx",
	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)*addrp);
	KASSERT(error == 0 || (flags & VM_SLEEP) == 0);
	return error;
}

int
vmem_xalloc_addr(vmem_t *vm, const vmem_addr_t addr, const vmem_size_t size,
    vm_flag_t flags)
{
	vmem_addr_t result;
	int error;

	KASSERT((addr & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	flags = (flags & ~VM_INSTANTFIT) | VM_BESTFIT;

	error = vmem_xalloc(vm, size, 0, 0, 0, addr, addr + size - 1,
	    flags, &result);

	KASSERT(error || result == addr);
	KASSERT(error == 0 || (flags & VM_SLEEP) == 0);
	return error;
}
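
/*
 * Example: a minimal sketch (not compiled) of claiming a specific,
 * already-known range with vmem_xalloc_addr(), e.g. for a fixed
 * hardware window.  The arena, address, and size are hypothetical;
 * the address is assumed to be quantum-aligned.
 */
#if 0
static int
example_claim_window(vmem_t *vm)
{

	/* Reserve exactly [0xd0000000, 0xd0010000) or fail. */
	return vmem_xalloc_addr(vm, 0xd0000000, 0x10000, VM_NOSLEEP);
}
#endif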

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
    vmem_addr_t *addrp)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	int rc;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT(align == 0 || phase < align);
	KASSERT(phase == 0 || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(minaddr <= maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}

	/*
	 * allocate boundary tags before searching for a free block.
	 */
	VMEM_LOCK(vm);
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		VMEM_UNLOCK(vm);
		return SET_ERROR(ENOMEM);
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		VMEM_UNLOCK(vm);
		return SET_ERROR(ENOMEM);
	}

	/*
	 * choose a free block from which we allocate.
	 */
retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		/*
		 * just choose the first block which satisfies our restrictions.
		 *
		 * note that we don't need to check the size of the blocks
		 * because any blocks found on these lists should be larger
		 * than the given size.
		 */
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				rc = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr, &start);
				if (rc == 0) {
					goto gotit;
				}
				/*
				 * don't bother to follow the bt_freelist link
				 * here.  the list can be very long and we are
				 * told to run fast.  blocks from the later free
				 * lists are larger and have better chances to
				 * satisfy our restrictions.
				 */
			}
		}
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we will start
		 * searching from a lower-order list than VM_INSTANTFIT does.
		 * however, don't bother to find the smallest block in a free
		 * list because the list can be very long.  we can revisit it
		 * if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller than
		 * the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */

	if ((flags & VM_SLEEP) != 0) {
		vmem_kick_pdaemon();
		VMEM_CONDVAR_WAIT(vm);
		goto retry;
	}
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	VMEM_UNLOCK(vm);
	return SET_ERROR(ENOMEM);

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;
	if (addrp != NULL)
		*addrp = btnew->bt_start;
	VMEM_UNLOCK(vm);
	KASSERTMSG(addrp == NULL ||
	    (*addrp & vm->vm_quantum_mask) == 0,
	    "vmem %s mask=0x%jx addr=0x%jx",
	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)*addrp);
	return 0;
}
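
/*
 * Example: a minimal sketch (not compiled) of a constrained allocation
 * with vmem_xalloc(): 64 KiB, aligned to 64 KiB, not crossing a 1 MiB
 * boundary, restricted to the low 16 MiB.  All values are hypothetical
 * and assume a page-sized quantum.
 */
#if 0
static int
example_constrained(vmem_t *vm, vmem_addr_t *addrp)
{

	return vmem_xalloc(vm, 0x10000, 0x10000, 0, 0x100000,
	    VMEM_ADDR_MIN, 0xffffff, VM_NOSLEEP | VM_BESTFIT, addrp);
}
#endif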

/*
 * vmem_free: free the resource to the arena.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);
	KASSERTMSG((addr & vm->vm_quantum_mask) == 0,
	    "vmem %s mask=0x%jx addr=0x%jx",
	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)addr);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;

	KASSERT(size > 0);
	KASSERTMSG((addr & vm->vm_quantum_mask) == 0,
	    "vmem %s mask=0x%jx addr=0x%jx",
	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)addr);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERTMSG(bt != NULL, "vmem %s addr 0x%jx size 0x%jx",
	    vm->vm_name, (uintmax_t)addr, (uintmax_t)size);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);

	/* vmem_xfree_bt() drops the lock. */
	vmem_xfree_bt(vm, bt);
}

void
vmem_xfreeall(vmem_t *vm)
{
	bt_t *bt;

#if defined(QCACHE)
	/* This can't be used if the arena has a quantum cache. */
	KASSERT(vm->vm_qcache_max == 0);
#endif /* defined(QCACHE) */

	for (;;) {
		VMEM_LOCK(vm);
		TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
			if (bt->bt_type == BT_TYPE_BUSY)
				break;
		}
		if (bt != NULL) {
			/* vmem_xfree_bt() drops the lock. */
			vmem_xfree_bt(vm, bt);
		} else {
			VMEM_UNLOCK(vm);
			return;
		}
	}
}

static void
vmem_xfree_bt(vmem_t *vm, bt_t *bt)
{
	bt_t *t;

	VMEM_ASSERT_LOCKED(vm);

	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt_free(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_free(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_free(vm, bt);
		bt_remseg(vm, t);
		bt_free(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		/* bt_freetrim() drops the lock. */
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		/* bt_freetrim() drops the lock. */
		bt_freetrim(vm, BT_MAXFREE);
	}
}
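
/*
 * Coalescing sketch (hypothetical layout): freeing the middle BUSY tag in
 *
 *	... [FREE 0x2000/0x1000] [BUSY 0x3000/0x1000] [FREE 0x4000/0x2000] ...
 *
 * merges both neighbours into a single [FREE 0x2000/0x4000] tag; if that
 * tag then covers its entire BT_TYPE_SPAN, the span is handed back to
 * vm_releasefn.
 */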

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{
	int rv;

	VMEM_LOCK(vm);
	rv = vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
	VMEM_UNLOCK(vm);

	return rv;
}

/*
 * vmem_size: information about an arena's size
 *
 * => return the free/allocated size of the arena
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}
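
/*
 * Example: a minimal sketch (not compiled) of querying an arena's
 * occupancy with vmem_size().  VMEM_FREE|VMEM_ALLOC yields the total
 * span size, so free + allocated == total always holds.
 */
#if 0
static void
example_usage_report(vmem_t *vm)
{

	printf("%zu free, %zu allocated, %zu total\n",
	    (size_t)vmem_size(vm, VMEM_FREE),
	    (size_t)vmem_size(vm, VMEM_ALLOC),
	    (size_t)vmem_size(vm, VMEM_FREE|VMEM_ALLOC));
}
#endif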
1612 1.1 yamt /* ---- debug */
1613 1.1 yamt 
1614 1.55 yamt #if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)
1615 1.55 yamt 
1616 1.82 christos static void bt_dump(const bt_t *, void (*)(const char *, ...)
1617 1.82 christos     __printflike(1, 2));
1618 1.55 yamt 
1619 1.55 yamt static const char *
1620 1.55 yamt bt_type_string(int type)
1621 1.55 yamt {
1622 1.55 yamt 	static const char * const table[] = {
1623 1.55 yamt 		[BT_TYPE_BUSY] = "busy",
1624 1.55 yamt 		[BT_TYPE_FREE] = "free",
1625 1.55 yamt 		[BT_TYPE_SPAN] = "span",
1626 1.55 yamt 		[BT_TYPE_SPAN_STATIC] = "static span",
1627 1.55 yamt 	};
1628 1.55 yamt 
1629 1.55 yamt 	if (type >= __arraycount(table)) {
1630 1.55 yamt 		return "BOGUS";
1631 1.55 yamt 	}
1632 1.55 yamt 	return table[type];
1633 1.55 yamt }
1634 1.55 yamt 
1635 1.55 yamt static void
1636 1.55 yamt bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
1637 1.55 yamt {
1638 1.55 yamt 
1639 1.55 yamt 	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
1640 1.55 yamt 	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
1641 1.55 yamt 	    bt->bt_type, bt_type_string(bt->bt_type));
1642 1.55 yamt }
1643 1.55 yamt 
1644 1.55 yamt static void
1645 1.82 christos vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...) __printflike(1, 2))
1646 1.55 yamt {
1647 1.55 yamt 	const bt_t *bt;
1648 1.55 yamt 	int i;
1649 1.55 yamt 
1650 1.55 yamt 	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1651 1.87 christos 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1652 1.55 yamt 		bt_dump(bt, pr);
1653 1.55 yamt 	}
1654 1.55 yamt 
1655 1.55 yamt 	for (i = 0; i < VMEM_MAXORDER; i++) {
1656 1.55 yamt 		const struct vmem_freelist *fl = &vm->vm_freelist[i];
1657 1.55 yamt 
1658 1.55 yamt 		if (LIST_EMPTY(fl)) {
1659 1.55 yamt 			continue;
1660 1.55 yamt 		}
1661 1.55 yamt 
1662 1.55 yamt 		(*pr)("freelist[%d]\n", i);
1663 1.55 yamt 		LIST_FOREACH(bt, fl, bt_freelist) {
1664 1.55 yamt 			bt_dump(bt, pr);
1665 1.55 yamt 		}
1666 1.55 yamt 	}
1667 1.55 yamt }
1668 1.55 yamt 
1669 1.55 yamt #endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */
1670 1.55 yamt 
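/*
 * Editor's illustration (hypothetical addresses, assumed arena state):
 * given the format strings above, dumping an arena holding one 64 KiB
 * statically added span with a 16 KiB allocation at its base prints
 * roughly:
 *
 *	vmem 0x... 'example'
 *		0x...: 0, 65536, N(static span)
 *		0x...: 0, 16384, N(busy)
 *		0x...: 16384, 49152, N(free)
 *	freelist[K]
 *		0x...: 16384, 49152, N(free)
 *
 * where N is the numeric bt_type and K is the free list order chosen
 * for the 48 KiB free tag; the exact values depend on the BT_TYPE_*
 * constants and the free-list indexing in vmem_impl.h.
 */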
"allocated" : "free"); 1705 1.37 yamt } 1706 1.37 yamt } 1707 1.43 cegger 1708 1.55 yamt void 1709 1.55 yamt vmem_printall(const char *modif, void (*pr)(const char *, ...)) 1710 1.43 cegger { 1711 1.55 yamt const vmem_t *vm; 1712 1.43 cegger 1713 1.47 cegger LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1714 1.55 yamt vmem_dump(vm, pr); 1715 1.43 cegger } 1716 1.43 cegger } 1717 1.43 cegger 1718 1.43 cegger void 1719 1.43 cegger vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...)) 1720 1.43 cegger { 1721 1.55 yamt const vmem_t *vm = (const void *)addr; 1722 1.43 cegger 1723 1.55 yamt vmem_dump(vm, pr); 1724 1.43 cegger } 1725 1.37 yamt #endif /* defined(DDB) */ 1726 1.37 yamt 1727 1.60 dyoung #if defined(_KERNEL) 1728 1.60 dyoung #define vmem_printf printf 1729 1.60 dyoung #else 1730 1.1 yamt #include <stdio.h> 1731 1.60 dyoung #include <stdarg.h> 1732 1.60 dyoung 1733 1.60 dyoung static void 1734 1.60 dyoung vmem_printf(const char *fmt, ...) 1735 1.60 dyoung { 1736 1.60 dyoung va_list ap; 1737 1.60 dyoung va_start(ap, fmt); 1738 1.60 dyoung vprintf(fmt, ap); 1739 1.60 dyoung va_end(ap); 1740 1.60 dyoung } 1741 1.60 dyoung #endif 1742 1.1 yamt 1743 1.55 yamt #if defined(VMEM_SANITY) 1744 1.1 yamt 1745 1.55 yamt static bool 1746 1.55 yamt vmem_check_sanity(vmem_t *vm) 1747 1.1 yamt { 1748 1.55 yamt const bt_t *bt, *bt2; 1749 1.1 yamt 1750 1.55 yamt KASSERT(vm != NULL); 1751 1.1 yamt 1752 1.87 christos TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1753 1.60 dyoung if (bt->bt_start > BT_END(bt)) { 1754 1.55 yamt printf("corrupted tag\n"); 1755 1.60 dyoung bt_dump(bt, vmem_printf); 1756 1.55 yamt return false; 1757 1.55 yamt } 1758 1.55 yamt } 1759 1.87 christos TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1760 1.87 christos TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) { 1761 1.55 yamt if (bt == bt2) { 1762 1.55 yamt continue; 1763 1.55 yamt } 1764 1.55 yamt if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) { 1765 1.55 yamt continue; 1766 1.55 yamt } 1767 1.60 dyoung if (bt->bt_start <= BT_END(bt2) && 1768 1.60 dyoung bt2->bt_start <= BT_END(bt)) { 1769 1.55 yamt printf("overwrapped tags\n"); 1770 1.60 dyoung bt_dump(bt, vmem_printf); 1771 1.60 dyoung bt_dump(bt2, vmem_printf); 1772 1.55 yamt return false; 1773 1.55 yamt } 1774 1.55 yamt } 1775 1.1 yamt } 1776 1.1 yamt 1777 1.55 yamt return true; 1778 1.55 yamt } 1779 1.1 yamt 1780 1.55 yamt static void 1781 1.55 yamt vmem_check(vmem_t *vm) 1782 1.55 yamt { 1783 1.1 yamt 1784 1.55 yamt if (!vmem_check_sanity(vm)) { 1785 1.55 yamt panic("insanity vmem %p", vm); 1786 1.1 yamt } 1787 1.1 yamt } 1788 1.1 yamt 1789 1.55 yamt #endif /* defined(VMEM_SANITY) */ 1790 1.1 yamt 1791 1.55 yamt #if defined(UNITTEST) 1792 1.1 yamt int 1793 1.57 cegger main(void) 1794 1.1 yamt { 1795 1.61 dyoung int rc; 1796 1.1 yamt vmem_t *vm; 1797 1.1 yamt vmem_addr_t p; 1798 1.1 yamt struct reg { 1799 1.1 yamt vmem_addr_t p; 1800 1.1 yamt vmem_size_t sz; 1801 1.25 thorpej bool x; 1802 1.1 yamt } *reg = NULL; 1803 1.1 yamt int nreg = 0; 1804 1.1 yamt int nalloc = 0; 1805 1.1 yamt int nfree = 0; 1806 1.1 yamt vmem_size_t total = 0; 1807 1.1 yamt #if 1 1808 1.1 yamt vm_flag_t strat = VM_INSTANTFIT; 1809 1.1 yamt #else 1810 1.1 yamt vm_flag_t strat = VM_BESTFIT; 1811 1.1 yamt #endif 1812 1.1 yamt 1813 1.61 dyoung vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP, 1814 1.61 dyoung #ifdef _KERNEL 1815 1.61 dyoung IPL_NONE 1816 1.61 dyoung #else 1817 1.61 dyoung 0 1818 1.61 dyoung #endif 1819 1.61 dyoung ); 1820 1.1 yamt if (vm == NULL) { 
1791 1.55 yamt #if defined(UNITTEST)
1792 1.1 yamt int
1793 1.57 cegger main(void)
1794 1.1 yamt {
1795 1.61 dyoung 	int rc;
1796 1.1 yamt 	vmem_t *vm;
1797 1.1 yamt 	vmem_addr_t p;
1798 1.1 yamt 	struct reg {
1799 1.1 yamt 		vmem_addr_t p;
1800 1.1 yamt 		vmem_size_t sz;
1801 1.25 thorpej 		bool x;
1802 1.1 yamt 	} *reg = NULL;
1803 1.1 yamt 	int nreg = 0;
1804 1.1 yamt 	int nalloc = 0;
1805 1.1 yamt 	int nfree = 0;
1806 1.1 yamt 	vmem_size_t total = 0;
1807 1.1 yamt #if 1
1808 1.1 yamt 	vm_flag_t strat = VM_INSTANTFIT;
1809 1.1 yamt #else
1810 1.1 yamt 	vm_flag_t strat = VM_BESTFIT;
1811 1.1 yamt #endif
1812 1.1 yamt 
1813 1.61 dyoung 	vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
1814 1.61 dyoung #ifdef _KERNEL
1815 1.61 dyoung 	    IPL_NONE
1816 1.61 dyoung #else
1817 1.61 dyoung 	    0
1818 1.61 dyoung #endif
1819 1.61 dyoung 	    );
1820 1.1 yamt 	if (vm == NULL) {
1821 1.1 yamt 		printf("vmem_create\n");
1822 1.1 yamt 		exit(EXIT_FAILURE);
1823 1.1 yamt 	}
1824 1.60 dyoung 	vmem_dump(vm, vmem_printf);
1825 1.1 yamt 
1826 1.61 dyoung 	rc = vmem_add(vm, 0, 50, VM_SLEEP);
1827 1.61 dyoung 	assert(rc == 0);
1828 1.61 dyoung 	rc = vmem_add(vm, 100, 200, VM_SLEEP);
1829 1.61 dyoung 	assert(rc == 0);
1830 1.61 dyoung 	rc = vmem_add(vm, 2000, 1, VM_SLEEP);
1831 1.61 dyoung 	assert(rc == 0);
1832 1.61 dyoung 	rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
1833 1.61 dyoung 	assert(rc == 0);
1834 1.61 dyoung 	rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
1835 1.61 dyoung 	assert(rc == 0);
1836 1.61 dyoung 	rc = vmem_add(vm, 500, 1000, VM_SLEEP);
1837 1.61 dyoung 	assert(rc == 0);
1838 1.61 dyoung 	rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
1839 1.61 dyoung 	assert(rc == 0);
1840 1.61 dyoung 	rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
1841 1.61 dyoung 	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
1842 1.61 dyoung 	assert(rc != 0);
1843 1.61 dyoung 	rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
1844 1.61 dyoung 	assert(rc == 0 && p == 0);
1845 1.61 dyoung 	vmem_xfree(vm, p, 50);
1846 1.61 dyoung 	rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
1847 1.61 dyoung 	assert(rc == 0 && p == 0);
1848 1.61 dyoung 	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1849 1.61 dyoung 	    0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
1850 1.61 dyoung 	assert(rc != 0);
1851 1.61 dyoung 	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1852 1.61 dyoung 	    0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
1853 1.61 dyoung 	assert(rc != 0);
1854 1.61 dyoung 	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1855 1.61 dyoung 	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
1856 1.61 dyoung 	assert(rc == 0);
1857 1.60 dyoung 	vmem_dump(vm, vmem_printf);
1858 1.1 yamt 	for (;;) {
1859 1.1 yamt 		struct reg *r;
1860 1.10 yamt 		int t = rand() % 100;
1861 1.1 yamt 
1862 1.10 yamt 		if (t > 45) {
1863 1.10 yamt 			/* alloc */
1864 1.1 yamt 			vmem_size_t sz = rand() % 500 + 1;
1865 1.25 thorpej 			bool x;
1866 1.10 yamt 			vmem_size_t align, phase, nocross;
1867 1.10 yamt 			vmem_addr_t minaddr, maxaddr;
1868 1.10 yamt 
1869 1.10 yamt 			if (t > 70) {
1870 1.26 thorpej 				x = true;
1871 1.10 yamt 				/* XXX */
1872 1.10 yamt 				align = 1 << (rand() % 15);
1873 1.10 yamt 				phase = rand() % 65536;
1874 1.10 yamt 				nocross = 1 << (rand() % 15);
1875 1.10 yamt 				if (align <= phase) {
1876 1.10 yamt 					phase = 0;
1877 1.10 yamt 				}
1878 1.19 yamt 				if (VMEM_CROSS_P(phase, phase + sz - 1,
1879 1.19 yamt 				    nocross)) {
1880 1.10 yamt 					nocross = 0;
1881 1.10 yamt 				}
1882 1.60 dyoung 				do {
1883 1.60 dyoung 					minaddr = rand() % 50000;
1884 1.60 dyoung 					maxaddr = rand() % 70000;
1885 1.60 dyoung 				} while (minaddr > maxaddr);
1886 1.10 yamt 				printf("=== xalloc %" PRIu64
1887 1.10 yamt 				    " align=%" PRIu64 ", phase=%" PRIu64
1888 1.10 yamt 				    ", nocross=%" PRIu64 ", min=%" PRIu64
1889 1.10 yamt 				    ", max=%" PRIu64 "\n",
1890 1.10 yamt 				    (uint64_t)sz,
1891 1.10 yamt 				    (uint64_t)align,
1892 1.10 yamt 				    (uint64_t)phase,
1893 1.10 yamt 				    (uint64_t)nocross,
1894 1.10 yamt 				    (uint64_t)minaddr,
1895 1.10 yamt 				    (uint64_t)maxaddr);
1896 1.61 dyoung 				rc = vmem_xalloc(vm, sz, align, phase, nocross,
1897 1.61 dyoung 				    minaddr, maxaddr, strat|VM_SLEEP, &p);
1898 1.10 yamt 			} else {
1899 1.26 thorpej 				x = false;
1900 1.10 yamt 				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
1901 1.61 dyoung 				rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
1902 1.10 yamt 			}
1903 1.1 yamt 			printf("-> %" PRIu64 "\n", (uint64_t)p);
1904 1.60 dyoung 			vmem_dump(vm, vmem_printf);
1905 1.61 dyoung 			if (rc != 0) {
1906 1.10 yamt 				if (x) {
1907 1.10 yamt 					continue;
1908 1.10 yamt 				}
1909 1.1 yamt 				break;
1910 1.1 yamt 			}
1911 1.1 yamt 			nreg++;
1912 1.1 yamt 			reg = realloc(reg, sizeof(*reg) * nreg);
1913 1.1 yamt 			r = &reg[nreg - 1];
1914 1.1 yamt 			r->p = p;
1915 1.1 yamt 			r->sz = sz;
1916 1.10 yamt 			r->x = x;
1917 1.1 yamt 			total += sz;
1918 1.1 yamt 			nalloc++;
1919 1.1 yamt 		} else if (nreg != 0) {
1920 1.10 yamt 			/* free */
1921 1.1 yamt 			r = &reg[rand() % nreg];
1922 1.1 yamt 			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
1923 1.1 yamt 			    (uint64_t)r->p, (uint64_t)r->sz);
1924 1.10 yamt 			if (r->x) {
1925 1.10 yamt 				vmem_xfree(vm, r->p, r->sz);
1926 1.10 yamt 			} else {
1927 1.10 yamt 				vmem_free(vm, r->p, r->sz);
1928 1.10 yamt 			}
1929 1.1 yamt 			total -= r->sz;
1930 1.60 dyoung 			vmem_dump(vm, vmem_printf);
1931 1.1 yamt 			*r = reg[nreg - 1];
1932 1.1 yamt 			nreg--;
1933 1.1 yamt 			nfree++;
1934 1.1 yamt 		}
1935 1.1 yamt 		printf("total=%" PRIu64 "\n", (uint64_t)total);
1936 1.1 yamt 	}
1937 1.1 yamt 	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
1938 1.1 yamt 	    (uint64_t)total, nalloc, nfree);
1939 1.1 yamt 	exit(EXIT_SUCCESS);
1940 1.1 yamt }
1941 1.55 yamt #endif /* defined(UNITTEST) */
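/*
 * Editor's note (assumed invocation, not from the original source): the
 * UNITTEST harness above is meant to be built as a userland program,
 * taking the #else branches that include ../sys/vmem.h and map
 * KASSERT() to assert().  Something along these lines, with include
 * paths adjusted to the source tree, should work:
 *
 *	cc -DUNITTEST -I. subr_vmem.c -o vmem_test
 *
 * The random alloc/free loop runs until a plain vmem_alloc() fails
 * (xalloc failures merely continue), printing each operation and the
 * running total as it goes.
 */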