subr_vmem.c revision 1.100.6.1
      1  1.100.6.1    bouyer /*	$NetBSD: subr_vmem.c,v 1.100.6.1 2020/04/20 11:29:10 bouyer Exp $	*/
      2        1.1      yamt 
      3        1.1      yamt /*-
      4       1.55      yamt  * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
      5        1.1      yamt  * All rights reserved.
      6        1.1      yamt  *
      7        1.1      yamt  * Redistribution and use in source and binary forms, with or without
      8        1.1      yamt  * modification, are permitted provided that the following conditions
      9        1.1      yamt  * are met:
     10        1.1      yamt  * 1. Redistributions of source code must retain the above copyright
     11        1.1      yamt  *    notice, this list of conditions and the following disclaimer.
     12        1.1      yamt  * 2. Redistributions in binary form must reproduce the above copyright
     13        1.1      yamt  *    notice, this list of conditions and the following disclaimer in the
     14        1.1      yamt  *    documentation and/or other materials provided with the distribution.
     15        1.1      yamt  *
     16        1.1      yamt  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     17        1.1      yamt  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     18        1.1      yamt  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     19        1.1      yamt  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     20        1.1      yamt  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     21        1.1      yamt  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     22        1.1      yamt  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     23        1.1      yamt  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     24        1.1      yamt  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     25        1.1      yamt  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26        1.1      yamt  * SUCH DAMAGE.
     27        1.1      yamt  */
     28        1.1      yamt 
     29        1.1      yamt /*
     30        1.1      yamt  * reference:
     31        1.1      yamt  * -	Magazines and Vmem: Extending the Slab Allocator
     32        1.1      yamt  *	to Many CPUs and Arbitrary Resources
     33        1.1      yamt  *	http://www.usenix.org/event/usenix01/bonwick.html
     34       1.88      para  *
     35       1.88      para  * locking & the boundary tag pool:
     36       1.88      para  * - 	A pool(9) is used for vmem boundary tags
     37       1.88      para  * - 	During a pool get call the global vmem_btag_refill_lock is taken,
     38       1.88      para  *	to serialize access to the allocation reserve, but no other
      39       1.88      para  *	vmem arena locks are held.
     40       1.88      para  * -	During pool_put calls no vmem mutexes are locked.
     41       1.88      para  * - 	pool_drain doesn't hold the pool's mutex while releasing memory to
      42       1.88      para  * 	its backing allocator, therefore no interference with any vmem mutexes.
     43       1.88      para  * -	The boundary tag pool is forced to put page headers into pool pages
      44       1.88      para  *  	(PR_PHINPAGE) rather than off-page, to avoid pool recursion.
      45       1.88      para  *  	(given sizeof(bt_t), that would be the case anyway)
     46        1.1      yamt  */
     47        1.1      yamt 
     48        1.1      yamt #include <sys/cdefs.h>
     49  1.100.6.1    bouyer __KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.100.6.1 2020/04/20 11:29:10 bouyer Exp $");
     50        1.1      yamt 
     51       1.93     pooka #if defined(_KERNEL) && defined(_KERNEL_OPT)
     52       1.37      yamt #include "opt_ddb.h"
     53       1.93     pooka #endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */
     54        1.1      yamt 
     55        1.1      yamt #include <sys/param.h>
     56        1.1      yamt #include <sys/hash.h>
     57        1.1      yamt #include <sys/queue.h>
     58       1.62     rmind #include <sys/bitops.h>
     59        1.1      yamt 
     60        1.1      yamt #if defined(_KERNEL)
     61        1.1      yamt #include <sys/systm.h>
     62       1.30      yamt #include <sys/kernel.h>	/* hz */
     63       1.30      yamt #include <sys/callout.h>
     64       1.66      para #include <sys/kmem.h>
     65        1.1      yamt #include <sys/pool.h>
     66        1.1      yamt #include <sys/vmem.h>
     67       1.80      para #include <sys/vmem_impl.h>
     68       1.30      yamt #include <sys/workqueue.h>
     69       1.66      para #include <sys/atomic.h>
     70       1.66      para #include <uvm/uvm.h>
     71       1.66      para #include <uvm/uvm_extern.h>
     72       1.66      para #include <uvm/uvm_km.h>
     73       1.66      para #include <uvm/uvm_page.h>
     74       1.66      para #include <uvm/uvm_pdaemon.h>
     75        1.1      yamt #else /* defined(_KERNEL) */
     76       1.80      para #include <stdio.h>
     77       1.80      para #include <errno.h>
     78       1.80      para #include <assert.h>
     79       1.80      para #include <stdlib.h>
     80       1.80      para #include <string.h>
     81        1.1      yamt #include "../sys/vmem.h"
     82       1.80      para #include "../sys/vmem_impl.h"
     83        1.1      yamt #endif /* defined(_KERNEL) */
     84        1.1      yamt 
     85       1.66      para 
     86        1.1      yamt #if defined(_KERNEL)
     87       1.66      para #include <sys/evcnt.h>
     88       1.66      para #define VMEM_EVCNT_DEFINE(name) \
     89       1.66      para struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
     90       1.88      para     "vmem", #name); \
     91       1.66      para EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
     92       1.66      para #define VMEM_EVCNT_INCR(ev)	vmem_evcnt_##ev.ev_count++
     93       1.66      para #define VMEM_EVCNT_DECR(ev)	vmem_evcnt_##ev.ev_count--
     94       1.66      para 
     95       1.88      para VMEM_EVCNT_DEFINE(static_bt_count)
     96       1.88      para VMEM_EVCNT_DEFINE(static_bt_inuse)
     97       1.66      para 
     98       1.80      para #define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
     99       1.80      para #define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
    100       1.80      para #define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
    101       1.80      para #define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
    102       1.66      para 
    103        1.1      yamt #else /* defined(_KERNEL) */
    104        1.1      yamt 
    105       1.66      para #define VMEM_EVCNT_INCR(ev)	/* nothing */
    106       1.66      para #define VMEM_EVCNT_DECR(ev)	/* nothing */
    107       1.66      para 
    108       1.80      para #define	VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
    109       1.80      para #define	VMEM_CONDVAR_DESTROY(vm)	/* nothing */
    110       1.80      para #define	VMEM_CONDVAR_WAIT(vm)		/* nothing */
    111       1.80      para #define	VMEM_CONDVAR_BROADCAST(vm)	/* nothing */
    112       1.80      para 
    113       1.79      para #define	UNITTEST
    114       1.79      para #define	KASSERT(a)		assert(a)
    115       1.31        ad #define	mutex_init(a, b, c)	/* nothing */
    116       1.31        ad #define	mutex_destroy(a)	/* nothing */
    117       1.31        ad #define	mutex_enter(a)		/* nothing */
    118       1.55      yamt #define	mutex_tryenter(a)	true
    119       1.31        ad #define	mutex_exit(a)		/* nothing */
     120       1.31        ad #define	mutex_owned(a)		true	/* so KASSERT(mutex_owned(..)) compiles */
    121       1.55      yamt #define	ASSERT_SLEEPABLE()	/* nothing */
     122       1.55      yamt #define	panic(...)		do { printf(__VA_ARGS__); abort(); } while (/*CONSTCOND*/0)
    123        1.1      yamt #endif /* defined(_KERNEL) */
    124        1.1      yamt 
    125       1.55      yamt #if defined(VMEM_SANITY)
    126       1.55      yamt static void vmem_check(vmem_t *);
    127       1.55      yamt #else /* defined(VMEM_SANITY) */
    128       1.55      yamt #define vmem_check(vm)	/* nothing */
    129       1.55      yamt #endif /* defined(VMEM_SANITY) */
    130        1.1      yamt 
    131       1.30      yamt #define	VMEM_HASHSIZE_MIN	1	/* XXX */
    132       1.54      yamt #define	VMEM_HASHSIZE_MAX	65536	/* XXX */
    133       1.66      para #define	VMEM_HASHSIZE_INIT	1
    134        1.1      yamt 
    135        1.1      yamt #define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)
    136        1.1      yamt 
    137       1.80      para #if defined(_KERNEL)
    138       1.80      para static bool vmem_bootstrapped = false;
    139       1.80      para static kmutex_t vmem_list_lock;
    140       1.80      para static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
    141       1.80      para #endif /* defined(_KERNEL) */
    142       1.79      para 
    143       1.80      para /* ---- misc */
    144        1.1      yamt 
    145       1.31        ad #define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
    146       1.31        ad #define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
    147       1.31        ad #define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
    148       1.36        ad #define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
    149       1.31        ad #define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
    150       1.31        ad #define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))
    151        1.1      yamt 
    152       1.19      yamt #define	VMEM_ALIGNUP(addr, align) \
    153       1.19      yamt 	(-(-(addr) & -(align)))
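                              /*
                               * Illustrative note (added, not in the original source): with align a
                               * power of two, -(-(addr) & -(align)) rounds addr up to the next
                               * multiple of align, e.g. VMEM_ALIGNUP(0x1234, 0x1000) == 0x2000 and
                               * VMEM_ALIGNUP(0x1000, 0x1000) == 0x1000.
                               */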
    154       1.62     rmind 
    155       1.19      yamt #define	VMEM_CROSS_P(addr1, addr2, boundary) \
    156       1.19      yamt 	((((addr1) ^ (addr2)) & -(boundary)) != 0)
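                              /*
                               * Likewise illustrative (added): VMEM_CROSS_P is true iff addr1 and
                               * addr2 fall in different boundary-aligned regions, e.g.
                               * VMEM_CROSS_P(0x0fff, 0x1000, 0x1000) is true, while
                               * VMEM_CROSS_P(0x1000, 0x1fff, 0x1000) is false.
                               */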
    157       1.19      yamt 
    158        1.4      yamt #define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
    159       1.62     rmind #define	SIZE2ORDER(size)	((int)ilog2(size))
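                              /*
                               * For example (added note): ORDER2SIZE(3) == 8 and SIZE2ORDER(8) == 3;
                               * SIZE2ORDER rounds down, so SIZE2ORDER(12) == 3 as well.
                               */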
    160        1.4      yamt 
    161       1.62     rmind #if !defined(_KERNEL)
    162       1.62     rmind #define	xmalloc(sz, flags)	malloc(sz)
    163       1.67     rmind #define	xfree(p, sz)		free(p)
    164       1.62     rmind #define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
    165       1.62     rmind #define	bt_free(vm, bt)		free(bt)
    166       1.66      para #else /* defined(_KERNEL) */
    167        1.1      yamt 
    168       1.67     rmind #define	xmalloc(sz, flags) \
    169       1.80      para     kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
    170       1.80      para #define	xfree(p, sz)		kmem_free(p, sz);
    171       1.66      para 
    172       1.75      para /*
    173       1.75      para  * BT_RESERVE calculation:
     174       1.97  dholland  * we allocate memory for boundary tags with vmem; therefore we have
     175       1.75      para  * to keep a reserve of bts used to allocate memory for bts.
     176       1.75      para  * This reserve is 4 for each arena involved in allocating vmem's own memory.
    177       1.75      para  * BT_MAXFREE: don't cache excessive counts of bts in arenas
    178       1.75      para  */
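                              /*
                               * For instance (an added explanatory note): refilling a tag list can
                               * recurse into vmem_alloc() on the meta arenas, and each span imported
                               * there consumes two further tags (one BT_TYPE_SPAN and one
                               * BT_TYPE_FREE, cf. vmem_add1()); the reserve keeps those available.
                               */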
    179       1.75      para #define STATIC_BT_COUNT 200
    180       1.75      para #define BT_MINRESERVE 4
    181       1.66      para #define BT_MAXFREE 64
    182       1.66      para 
    183       1.66      para static struct vmem_btag static_bts[STATIC_BT_COUNT];
    184       1.66      para static int static_bt_count = STATIC_BT_COUNT;
    185       1.66      para 
    186       1.80      para static struct vmem kmem_va_meta_arena_store;
    187       1.66      para vmem_t *kmem_va_meta_arena;
    188       1.80      para static struct vmem kmem_meta_arena_store;
    189       1.88      para vmem_t *kmem_meta_arena = NULL;
    190       1.66      para 
    191       1.88      para static kmutex_t vmem_btag_refill_lock;
    192       1.66      para static kmutex_t vmem_btag_lock;
    193       1.66      para static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
    194       1.66      para static size_t vmem_btag_freelist_count = 0;
    195       1.88      para static struct pool vmem_btag_pool;
    196       1.66      para 
    197       1.94       chs static void
    198       1.94       chs vmem_kick_pdaemon(void)
    199       1.94       chs {
    200       1.94       chs #if defined(_KERNEL)
    201       1.94       chs 	uvm_kick_pdaemon();
    202       1.94       chs #endif
    203       1.94       chs }
    204       1.94       chs 
    205        1.1      yamt /* ---- boundary tag */
    206        1.1      yamt 
    207       1.94       chs static int bt_refill(vmem_t *vm);
    208  1.100.6.1    bouyer static int bt_refill_locked(vmem_t *vm);
    209       1.66      para 
    210       1.88      para static void *
    211       1.88      para pool_page_alloc_vmem_meta(struct pool *pp, int flags)
    212       1.66      para {
     213       1.88      para 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
    214       1.66      para 	vmem_addr_t va;
    215       1.88      para 	int ret;
    216       1.66      para 
    217       1.88      para 	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
    218       1.88      para 	    (vflags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va);
    219       1.77      para 
    220       1.88      para 	return ret ? NULL : (void *)va;
    221       1.88      para }
    222       1.66      para 
    223       1.88      para static void
    224       1.88      para pool_page_free_vmem_meta(struct pool *pp, void *v)
    225       1.88      para {
    226       1.66      para 
    227       1.88      para 	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
    228       1.88      para }
    229       1.66      para 
    230       1.88      para /* allocator for vmem-pool metadata */
    231       1.88      para struct pool_allocator pool_allocator_vmem_meta = {
    232       1.88      para 	.pa_alloc = pool_page_alloc_vmem_meta,
    233       1.88      para 	.pa_free = pool_page_free_vmem_meta,
    234       1.88      para 	.pa_pagesz = 0
    235       1.88      para };
    236       1.66      para 
    237       1.66      para static int
    238  1.100.6.1    bouyer bt_refill_locked(vmem_t *vm)
    239       1.66      para {
    240       1.66      para 	bt_t *bt;
    241       1.66      para 
    242  1.100.6.1    bouyer 	VMEM_ASSERT_LOCKED(vm);
    243  1.100.6.1    bouyer 
    244       1.88      para 	if (vm->vm_nfreetags > BT_MINRESERVE) {
    245       1.88      para 		return 0;
    246       1.77      para 	}
    247       1.66      para 
    248       1.66      para 	mutex_enter(&vmem_btag_lock);
    249       1.66      para 	while (!LIST_EMPTY(&vmem_btag_freelist) &&
    250       1.75      para 	    vm->vm_nfreetags <= BT_MINRESERVE) {
    251       1.66      para 		bt = LIST_FIRST(&vmem_btag_freelist);
    252       1.66      para 		LIST_REMOVE(bt, bt_freelist);
    253       1.66      para 		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
    254       1.66      para 		vm->vm_nfreetags++;
    255       1.66      para 		vmem_btag_freelist_count--;
    256       1.88      para 		VMEM_EVCNT_INCR(static_bt_inuse);
    257       1.66      para 	}
    258       1.66      para 	mutex_exit(&vmem_btag_lock);
    259       1.66      para 
    260       1.88      para 	while (vm->vm_nfreetags <= BT_MINRESERVE) {
    261       1.88      para 		VMEM_UNLOCK(vm);
    262       1.88      para 		mutex_enter(&vmem_btag_refill_lock);
    263       1.91      para 		bt = pool_get(&vmem_btag_pool, PR_NOWAIT);
    264       1.88      para 		mutex_exit(&vmem_btag_refill_lock);
    265       1.88      para 		VMEM_LOCK(vm);
    266       1.91      para 		if (bt == NULL)
    267       1.88      para 			break;
    268       1.88      para 		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
    269       1.88      para 		vm->vm_nfreetags++;
    270       1.88      para 	}
    271       1.88      para 
    272       1.92      para 	if (vm->vm_nfreetags <= BT_MINRESERVE) {
    273       1.66      para 		return ENOMEM;
    274       1.66      para 	}
    275       1.88      para 
    276       1.88      para 	if (kmem_meta_arena != NULL) {
    277  1.100.6.1    bouyer 		VMEM_UNLOCK(vm);
    278       1.94       chs 		(void)bt_refill(kmem_arena);
    279       1.94       chs 		(void)bt_refill(kmem_va_meta_arena);
    280       1.94       chs 		(void)bt_refill(kmem_meta_arena);
    281  1.100.6.1    bouyer 		VMEM_LOCK(vm);
    282       1.88      para 	}
    283       1.66      para 
    284       1.66      para 	return 0;
    285       1.66      para }
    286        1.1      yamt 
    287  1.100.6.1    bouyer static int
    288  1.100.6.1    bouyer bt_refill(vmem_t *vm)
    289  1.100.6.1    bouyer {
    290  1.100.6.1    bouyer 	int rv;
    291  1.100.6.1    bouyer 
    292  1.100.6.1    bouyer 	VMEM_LOCK(vm);
    293  1.100.6.1    bouyer 	rv = bt_refill_locked(vm);
    294  1.100.6.1    bouyer 	VMEM_UNLOCK(vm);
    295  1.100.6.1    bouyer 	return rv;
    296  1.100.6.1    bouyer }
    297  1.100.6.1    bouyer 
    298       1.88      para static bt_t *
    299       1.17      yamt bt_alloc(vmem_t *vm, vm_flag_t flags)
    300        1.1      yamt {
    301       1.66      para 	bt_t *bt;
    302  1.100.6.1    bouyer 
    303  1.100.6.1    bouyer 	VMEM_ASSERT_LOCKED(vm);
    304  1.100.6.1    bouyer 
    305       1.88      para 	while (vm->vm_nfreetags <= BT_MINRESERVE && (flags & VM_POPULATING) == 0) {
    306  1.100.6.1    bouyer 		if (bt_refill_locked(vm)) {
    307       1.94       chs 			if ((flags & VM_NOSLEEP) != 0) {
    308       1.94       chs 				return NULL;
    309       1.94       chs 			}
    310       1.94       chs 
    311       1.94       chs 			/*
    312       1.94       chs 			 * It would be nice to wait for something specific here
    313       1.94       chs 			 * but there are multiple ways that a retry could
    314       1.94       chs 			 * succeed and we can't wait for multiple things
    315       1.94       chs 			 * simultaneously.  So we'll just sleep for an arbitrary
    316       1.94       chs 			 * short period of time and retry regardless.
    317       1.94       chs 			 * This should be a very rare case.
    318       1.94       chs 			 */
    319       1.94       chs 
    320       1.94       chs 			vmem_kick_pdaemon();
    321  1.100.6.1    bouyer 			kpause("btalloc", false, 1, &vm->vm_lock);
    322       1.66      para 		}
    323       1.66      para 	}
    324       1.66      para 	bt = LIST_FIRST(&vm->vm_freetags);
    325       1.66      para 	LIST_REMOVE(bt, bt_freelist);
    326       1.66      para 	vm->vm_nfreetags--;
    327       1.66      para 
    328       1.66      para 	return bt;
    329        1.1      yamt }
    330        1.1      yamt 
    331       1.88      para static void
    332       1.17      yamt bt_free(vmem_t *vm, bt_t *bt)
    333        1.1      yamt {
    334       1.66      para 
    335  1.100.6.1    bouyer 	VMEM_ASSERT_LOCKED(vm);
    336  1.100.6.1    bouyer 
    337       1.66      para 	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
    338       1.66      para 	vm->vm_nfreetags++;
    339       1.88      para }
    340       1.88      para 
    341       1.88      para static void
    342       1.88      para bt_freetrim(vmem_t *vm, int freelimit)
    343       1.88      para {
    344       1.88      para 	bt_t *t;
    345       1.88      para 	LIST_HEAD(, vmem_btag) tofree;
    346       1.88      para 
    347  1.100.6.1    bouyer 	VMEM_ASSERT_LOCKED(vm);
    348  1.100.6.1    bouyer 
    349       1.88      para 	LIST_INIT(&tofree);
    350       1.88      para 
    351       1.88      para 	while (vm->vm_nfreetags > freelimit) {
    352       1.88      para 		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
    353       1.66      para 		LIST_REMOVE(bt, bt_freelist);
    354       1.66      para 		vm->vm_nfreetags--;
    355       1.88      para 		if (bt >= static_bts
    356       1.90   mlelstv 		    && bt < &static_bts[STATIC_BT_COUNT]) {
    357       1.88      para 			mutex_enter(&vmem_btag_lock);
    358       1.88      para 			LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
    359       1.88      para 			vmem_btag_freelist_count++;
    360       1.88      para 			mutex_exit(&vmem_btag_lock);
    361       1.88      para 			VMEM_EVCNT_DECR(static_bt_inuse);
    362       1.88      para 		} else {
    363       1.88      para 			LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
    364       1.88      para 		}
    365       1.66      para 	}
    366       1.88      para 
    367       1.66      para 	VMEM_UNLOCK(vm);
    368       1.88      para 	while (!LIST_EMPTY(&tofree)) {
    369       1.88      para 		t = LIST_FIRST(&tofree);
    370       1.88      para 		LIST_REMOVE(t, bt_freelist);
    371       1.88      para 		pool_put(&vmem_btag_pool, t);
    372       1.88      para 	}
    373        1.1      yamt }
    374       1.67     rmind #endif	/* defined(_KERNEL) */
    375       1.62     rmind 
    376        1.1      yamt /*
    377       1.67     rmind  * freelist[0] ... [1, 1]
    378        1.1      yamt  * freelist[1] ... [2, 3]
    379        1.1      yamt  * freelist[2] ... [4, 7]
    380        1.1      yamt  * freelist[3] ... [8, 15]
    381        1.1      yamt  *  :
    382        1.1      yamt  * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
    383        1.1      yamt  *  :
    384        1.1      yamt  */
    385        1.1      yamt 
    386        1.1      yamt static struct vmem_freelist *
    387        1.1      yamt bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
    388        1.1      yamt {
    389        1.1      yamt 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
    390       1.62     rmind 	const int idx = SIZE2ORDER(qsize);
    391        1.1      yamt 
    392       1.62     rmind 	KASSERT(size != 0 && qsize != 0);
    393        1.1      yamt 	KASSERT((size & vm->vm_quantum_mask) == 0);
    394        1.1      yamt 	KASSERT(idx >= 0);
    395        1.1      yamt 	KASSERT(idx < VMEM_MAXORDER);
    396        1.1      yamt 
    397        1.1      yamt 	return &vm->vm_freelist[idx];
    398        1.1      yamt }
    399        1.1      yamt 
    400       1.59      yamt /*
    401       1.59      yamt  * bt_freehead_toalloc: return the freelist for the given size and allocation
    402       1.59      yamt  * strategy.
    403       1.59      yamt  *
    404       1.59      yamt  * for VM_INSTANTFIT, return the list in which any blocks are large enough
    405       1.59      yamt  * for the requested size.  otherwise, return the list which can have blocks
    406       1.59      yamt  * large enough for the requested size.
    407       1.59      yamt  */
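                              /*
                               * An illustrative example (added): for qsize == 3, freelist[1] holds
                               * blocks of size [2, 3].  VM_BESTFIT starts searching at freelist[1],
                               * which may contain blocks of size 2 that must be skipped;
                               * VM_INSTANTFIT starts at freelist[2], where every block is at least 4
                               * and therefore large enough.
                               */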
    408       1.59      yamt 
    409        1.1      yamt static struct vmem_freelist *
    410        1.1      yamt bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
    411        1.1      yamt {
    412        1.1      yamt 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
    413       1.62     rmind 	int idx = SIZE2ORDER(qsize);
    414        1.1      yamt 
    415       1.62     rmind 	KASSERT(size != 0 && qsize != 0);
    416        1.1      yamt 	KASSERT((size & vm->vm_quantum_mask) == 0);
    417        1.1      yamt 
    418        1.4      yamt 	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
    419        1.1      yamt 		idx++;
    420        1.1      yamt 		/* check too large request? */
    421        1.1      yamt 	}
    422        1.1      yamt 	KASSERT(idx >= 0);
    423        1.1      yamt 	KASSERT(idx < VMEM_MAXORDER);
    424        1.1      yamt 
    425        1.1      yamt 	return &vm->vm_freelist[idx];
    426        1.1      yamt }
    427        1.1      yamt 
    428        1.1      yamt /* ---- boundary tag hash */
    429        1.1      yamt 
    430        1.1      yamt static struct vmem_hashlist *
    431        1.1      yamt bt_hashhead(vmem_t *vm, vmem_addr_t addr)
    432        1.1      yamt {
    433        1.1      yamt 	struct vmem_hashlist *list;
    434        1.1      yamt 	unsigned int hash;
    435        1.1      yamt 
    436        1.1      yamt 	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
    437  1.100.6.1    bouyer 	list = &vm->vm_hashlist[hash & vm->vm_hashmask];
    438        1.1      yamt 
    439        1.1      yamt 	return list;
    440        1.1      yamt }
    441        1.1      yamt 
    442        1.1      yamt static bt_t *
    443        1.1      yamt bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
    444        1.1      yamt {
    445        1.1      yamt 	struct vmem_hashlist *list;
    446        1.1      yamt 	bt_t *bt;
    447        1.1      yamt 
    448       1.95   msaitoh 	list = bt_hashhead(vm, addr);
    449        1.1      yamt 	LIST_FOREACH(bt, list, bt_hashlist) {
    450        1.1      yamt 		if (bt->bt_start == addr) {
    451        1.1      yamt 			break;
    452        1.1      yamt 		}
    453        1.1      yamt 	}
    454        1.1      yamt 
    455        1.1      yamt 	return bt;
    456        1.1      yamt }
    457        1.1      yamt 
    458        1.1      yamt static void
    459        1.1      yamt bt_rembusy(vmem_t *vm, bt_t *bt)
    460        1.1      yamt {
    461        1.1      yamt 
    462        1.1      yamt 	KASSERT(vm->vm_nbusytag > 0);
    463       1.73      para 	vm->vm_inuse -= bt->bt_size;
    464        1.1      yamt 	vm->vm_nbusytag--;
    465        1.1      yamt 	LIST_REMOVE(bt, bt_hashlist);
    466        1.1      yamt }
    467        1.1      yamt 
    468        1.1      yamt static void
    469        1.1      yamt bt_insbusy(vmem_t *vm, bt_t *bt)
    470        1.1      yamt {
    471        1.1      yamt 	struct vmem_hashlist *list;
    472        1.1      yamt 
    473        1.1      yamt 	KASSERT(bt->bt_type == BT_TYPE_BUSY);
    474        1.1      yamt 
    475        1.1      yamt 	list = bt_hashhead(vm, bt->bt_start);
    476        1.1      yamt 	LIST_INSERT_HEAD(list, bt, bt_hashlist);
    477  1.100.6.1    bouyer 	if (++vm->vm_nbusytag > vm->vm_maxbusytag) {
    478  1.100.6.1    bouyer 		vm->vm_maxbusytag = vm->vm_nbusytag;
    479  1.100.6.1    bouyer 	}
    480       1.73      para 	vm->vm_inuse += bt->bt_size;
    481        1.1      yamt }
    482        1.1      yamt 
    483        1.1      yamt /* ---- boundary tag list */
    484        1.1      yamt 
    485        1.1      yamt static void
    486        1.1      yamt bt_remseg(vmem_t *vm, bt_t *bt)
    487        1.1      yamt {
    488        1.1      yamt 
    489       1.87  christos 	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
    490        1.1      yamt }
    491        1.1      yamt 
    492        1.1      yamt static void
    493        1.1      yamt bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
    494        1.1      yamt {
    495        1.1      yamt 
    496       1.87  christos 	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
    497        1.1      yamt }
    498        1.1      yamt 
    499        1.1      yamt static void
    500        1.1      yamt bt_insseg_tail(vmem_t *vm, bt_t *bt)
    501        1.1      yamt {
    502        1.1      yamt 
    503       1.87  christos 	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
    504        1.1      yamt }
    505        1.1      yamt 
    506        1.1      yamt static void
    507       1.17      yamt bt_remfree(vmem_t *vm, bt_t *bt)
    508        1.1      yamt {
    509        1.1      yamt 
    510        1.1      yamt 	KASSERT(bt->bt_type == BT_TYPE_FREE);
    511        1.1      yamt 
    512        1.1      yamt 	LIST_REMOVE(bt, bt_freelist);
    513        1.1      yamt }
    514        1.1      yamt 
    515        1.1      yamt static void
    516        1.1      yamt bt_insfree(vmem_t *vm, bt_t *bt)
    517        1.1      yamt {
    518        1.1      yamt 	struct vmem_freelist *list;
    519        1.1      yamt 
    520        1.1      yamt 	list = bt_freehead_tofree(vm, bt->bt_size);
    521        1.1      yamt 	LIST_INSERT_HEAD(list, bt, bt_freelist);
    522        1.1      yamt }
    523        1.1      yamt 
    524        1.1      yamt /* ---- vmem internal functions */
    525        1.1      yamt 
    526        1.5      yamt #if defined(QCACHE)
    527        1.5      yamt static inline vm_flag_t
    528        1.5      yamt prf_to_vmf(int prflags)
    529        1.5      yamt {
    530        1.5      yamt 	vm_flag_t vmflags;
    531        1.5      yamt 
    532        1.5      yamt 	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
    533        1.5      yamt 	if ((prflags & PR_WAITOK) != 0) {
    534        1.5      yamt 		vmflags = VM_SLEEP;
    535        1.5      yamt 	} else {
    536        1.5      yamt 		vmflags = VM_NOSLEEP;
    537        1.5      yamt 	}
    538        1.5      yamt 	return vmflags;
    539        1.5      yamt }
    540        1.5      yamt 
    541        1.5      yamt static inline int
    542        1.5      yamt vmf_to_prf(vm_flag_t vmflags)
    543        1.5      yamt {
    544        1.5      yamt 	int prflags;
    545        1.5      yamt 
    546        1.7      yamt 	if ((vmflags & VM_SLEEP) != 0) {
    547        1.5      yamt 		prflags = PR_WAITOK;
    548        1.7      yamt 	} else {
    549        1.5      yamt 		prflags = PR_NOWAIT;
    550        1.5      yamt 	}
    551        1.5      yamt 	return prflags;
    552        1.5      yamt }
    553        1.5      yamt 
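                              /*
                               * Illustrative note (added): qc_poolpage_size() below returns the
                               * smallest power of two greater than 3 * qcache_max, e.g. 4096 for a
                               * qcache_max of 1024, so one pool page fits several of the largest
                               * cached object sizes.
                               */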
    554        1.5      yamt static size_t
    555        1.5      yamt qc_poolpage_size(size_t qcache_max)
    556        1.5      yamt {
    557        1.5      yamt 	int i;
    558        1.5      yamt 
    559        1.5      yamt 	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
    560        1.5      yamt 		/* nothing */
    561        1.5      yamt 	}
    562        1.5      yamt 	return ORDER2SIZE(i);
    563        1.5      yamt }
    564        1.5      yamt 
    565        1.5      yamt static void *
    566        1.5      yamt qc_poolpage_alloc(struct pool *pool, int prflags)
    567        1.5      yamt {
    568        1.5      yamt 	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
    569        1.5      yamt 	vmem_t *vm = qc->qc_vmem;
    570       1.61    dyoung 	vmem_addr_t addr;
    571        1.5      yamt 
    572       1.61    dyoung 	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
    573       1.61    dyoung 	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
    574       1.61    dyoung 		return NULL;
    575       1.61    dyoung 	return (void *)addr;
    576        1.5      yamt }
    577        1.5      yamt 
    578        1.5      yamt static void
    579        1.5      yamt qc_poolpage_free(struct pool *pool, void *addr)
    580        1.5      yamt {
    581        1.5      yamt 	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
    582        1.5      yamt 	vmem_t *vm = qc->qc_vmem;
    583        1.5      yamt 
    584        1.5      yamt 	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
    585        1.5      yamt }
    586        1.5      yamt 
    587        1.5      yamt static void
    588       1.31        ad qc_init(vmem_t *vm, size_t qcache_max, int ipl)
    589        1.5      yamt {
    590       1.22      yamt 	qcache_t *prevqc;
    591        1.5      yamt 	struct pool_allocator *pa;
    592        1.5      yamt 	int qcache_idx_max;
    593        1.5      yamt 	int i;
    594        1.5      yamt 
    595        1.5      yamt 	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
    596        1.5      yamt 	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
    597        1.5      yamt 		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
    598        1.5      yamt 	}
    599        1.5      yamt 	vm->vm_qcache_max = qcache_max;
    600        1.5      yamt 	pa = &vm->vm_qcache_allocator;
    601        1.5      yamt 	memset(pa, 0, sizeof(*pa));
    602        1.5      yamt 	pa->pa_alloc = qc_poolpage_alloc;
    603        1.5      yamt 	pa->pa_free = qc_poolpage_free;
    604        1.5      yamt 	pa->pa_pagesz = qc_poolpage_size(qcache_max);
    605        1.5      yamt 
    606        1.5      yamt 	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
    607       1.22      yamt 	prevqc = NULL;
    608       1.22      yamt 	for (i = qcache_idx_max; i > 0; i--) {
    609       1.22      yamt 		qcache_t *qc = &vm->vm_qcache_store[i - 1];
    610        1.5      yamt 		size_t size = i << vm->vm_quantum_shift;
    611       1.66      para 		pool_cache_t pc;
    612        1.5      yamt 
    613        1.5      yamt 		qc->qc_vmem = vm;
    614        1.8    martin 		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
    615        1.5      yamt 		    vm->vm_name, size);
    616       1.66      para 
    617       1.80      para 		pc = pool_cache_init(size,
    618       1.80      para 		    ORDER2SIZE(vm->vm_quantum_shift), 0,
    619       1.80      para 		    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
    620       1.80      para 		    qc->qc_name, pa, ipl, NULL, NULL, NULL);
    621       1.80      para 
    622       1.80      para 		KASSERT(pc);
    623       1.80      para 
    624       1.66      para 		qc->qc_cache = pc;
    625       1.35        ad 		KASSERT(qc->qc_cache != NULL);	/* XXX */
    626       1.22      yamt 		if (prevqc != NULL &&
    627       1.35        ad 		    qc->qc_cache->pc_pool.pr_itemsperpage ==
    628       1.35        ad 		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
    629       1.80      para 			pool_cache_destroy(qc->qc_cache);
    630       1.22      yamt 			vm->vm_qcache[i - 1] = prevqc;
    631       1.27        ad 			continue;
    632       1.22      yamt 		}
    633       1.35        ad 		qc->qc_cache->pc_pool.pr_qcache = qc;
    634       1.22      yamt 		vm->vm_qcache[i - 1] = qc;
    635       1.22      yamt 		prevqc = qc;
    636        1.5      yamt 	}
    637        1.5      yamt }
    638        1.6      yamt 
    639       1.23      yamt static void
    640       1.23      yamt qc_destroy(vmem_t *vm)
    641       1.23      yamt {
    642       1.23      yamt 	const qcache_t *prevqc;
    643       1.23      yamt 	int i;
    644       1.23      yamt 	int qcache_idx_max;
    645       1.23      yamt 
    646       1.23      yamt 	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
    647       1.23      yamt 	prevqc = NULL;
    648       1.24      yamt 	for (i = 0; i < qcache_idx_max; i++) {
    649       1.24      yamt 		qcache_t *qc = vm->vm_qcache[i];
    650       1.23      yamt 
    651       1.23      yamt 		if (prevqc == qc) {
    652       1.23      yamt 			continue;
    653       1.23      yamt 		}
    654       1.80      para 		pool_cache_destroy(qc->qc_cache);
    655       1.23      yamt 		prevqc = qc;
    656       1.23      yamt 	}
    657       1.23      yamt }
    658       1.66      para #endif
    659       1.23      yamt 
    660       1.66      para #if defined(_KERNEL)
    661       1.80      para static void
    662       1.66      para vmem_bootstrap(void)
    663        1.6      yamt {
    664        1.6      yamt 
    665       1.66      para 	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM);
    666       1.66      para 	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);
    667       1.88      para 	mutex_init(&vmem_btag_refill_lock, MUTEX_DEFAULT, IPL_VM);
    668        1.6      yamt 
    669       1.66      para 	while (static_bt_count-- > 0) {
    670       1.66      para 		bt_t *bt = &static_bts[static_bt_count];
    671       1.66      para 		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
    672       1.88      para 		VMEM_EVCNT_INCR(static_bt_count);
    673       1.66      para 		vmem_btag_freelist_count++;
    674        1.6      yamt 	}
     675       1.80      para 	vmem_bootstrapped = true;
    676        1.6      yamt }
    677        1.5      yamt 
    678       1.66      para void
    679       1.80      para vmem_subsystem_init(vmem_t *vm)
    680        1.1      yamt {
    681        1.1      yamt 
    682       1.80      para 	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
    683       1.80      para 	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
    684       1.66      para 	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
    685       1.66      para 	    IPL_VM);
    686       1.66      para 
    687       1.80      para 	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
    688       1.80      para 	    0, 0, PAGE_SIZE,
    689       1.66      para 	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
    690       1.66      para 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
    691       1.88      para 
    692  1.100.6.1    bouyer 	pool_init(&vmem_btag_pool, sizeof(bt_t), coherency_unit, 0,
    693  1.100.6.1    bouyer 	    PR_PHINPAGE, "vmembt", &pool_allocator_vmem_meta, IPL_VM);
    694        1.1      yamt }
    695        1.1      yamt #endif /* defined(_KERNEL) */
    696        1.1      yamt 
    697       1.61    dyoung static int
    698        1.1      yamt vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    699        1.1      yamt     int spanbttype)
    700        1.1      yamt {
    701        1.1      yamt 	bt_t *btspan;
    702        1.1      yamt 	bt_t *btfree;
    703        1.1      yamt 
    704  1.100.6.1    bouyer 	VMEM_ASSERT_LOCKED(vm);
    705        1.1      yamt 	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
    706        1.1      yamt 	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
    707       1.58      yamt 	KASSERT(spanbttype == BT_TYPE_SPAN ||
    708       1.58      yamt 	    spanbttype == BT_TYPE_SPAN_STATIC);
    709        1.1      yamt 
    710        1.1      yamt 	btspan = bt_alloc(vm, flags);
    711        1.1      yamt 	if (btspan == NULL) {
    712       1.61    dyoung 		return ENOMEM;
    713        1.1      yamt 	}
    714        1.1      yamt 	btfree = bt_alloc(vm, flags);
    715        1.1      yamt 	if (btfree == NULL) {
    716        1.1      yamt 		bt_free(vm, btspan);
    717       1.61    dyoung 		return ENOMEM;
    718        1.1      yamt 	}
    719        1.1      yamt 
    720        1.1      yamt 	btspan->bt_type = spanbttype;
    721        1.1      yamt 	btspan->bt_start = addr;
    722        1.1      yamt 	btspan->bt_size = size;
    723        1.1      yamt 
    724        1.1      yamt 	btfree->bt_type = BT_TYPE_FREE;
    725        1.1      yamt 	btfree->bt_start = addr;
    726        1.1      yamt 	btfree->bt_size = size;
    727        1.1      yamt 
    728        1.1      yamt 	bt_insseg_tail(vm, btspan);
    729        1.1      yamt 	bt_insseg(vm, btfree, btspan);
    730        1.1      yamt 	bt_insfree(vm, btfree);
    731       1.66      para 	vm->vm_size += size;
    732        1.1      yamt 
    733       1.61    dyoung 	return 0;
    734        1.1      yamt }
    735        1.1      yamt 
    736       1.30      yamt static void
    737       1.30      yamt vmem_destroy1(vmem_t *vm)
    738       1.30      yamt {
    739       1.30      yamt 
    740       1.30      yamt #if defined(QCACHE)
    741       1.30      yamt 	qc_destroy(vm);
    742       1.30      yamt #endif /* defined(QCACHE) */
    743  1.100.6.1    bouyer 	VMEM_LOCK(vm);
    744       1.30      yamt 
    745  1.100.6.1    bouyer 	for (int i = 0; i < vm->vm_hashsize; i++) {
    746  1.100.6.1    bouyer 		bt_t *bt;
    747       1.30      yamt 
    748  1.100.6.1    bouyer 		while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
    749  1.100.6.1    bouyer 			KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
    750  1.100.6.1    bouyer 			LIST_REMOVE(bt, bt_hashlist);
    751  1.100.6.1    bouyer 			bt_free(vm, bt);
    752       1.66      para 		}
    753       1.66      para 	}
    754       1.66      para 
    755  1.100.6.1    bouyer 	/* bt_freetrim() drops the lock. */
    756       1.88      para 	bt_freetrim(vm, 0);
    757  1.100.6.1    bouyer 	if (vm->vm_hashlist != &vm->vm_hash0) {
    758  1.100.6.1    bouyer 		xfree(vm->vm_hashlist,
    759  1.100.6.1    bouyer 		    sizeof(struct vmem_hashlist) * vm->vm_hashsize);
    760  1.100.6.1    bouyer 	}
    761       1.66      para 
    762       1.80      para 	VMEM_CONDVAR_DESTROY(vm);
    763       1.31        ad 	VMEM_LOCK_DESTROY(vm);
    764       1.66      para 	xfree(vm, sizeof(*vm));
    765       1.30      yamt }
    766       1.30      yamt 
    767        1.1      yamt static int
    768        1.1      yamt vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
    769        1.1      yamt {
    770        1.1      yamt 	vmem_addr_t addr;
    771       1.61    dyoung 	int rc;
    772        1.1      yamt 
    773  1.100.6.1    bouyer 	VMEM_ASSERT_LOCKED(vm);
    774  1.100.6.1    bouyer 
    775       1.61    dyoung 	if (vm->vm_importfn == NULL) {
    776        1.1      yamt 		return EINVAL;
    777        1.1      yamt 	}
    778        1.1      yamt 
    779       1.66      para 	if (vm->vm_flags & VM_LARGEIMPORT) {
    780       1.80      para 		size *= 16;
    781       1.66      para 	}
    782       1.66      para 
    783  1.100.6.1    bouyer 	VMEM_UNLOCK(vm);
    784       1.66      para 	if (vm->vm_flags & VM_XIMPORT) {
    785       1.99  christos 		rc = __FPTRCAST(vmem_ximport_t *, vm->vm_importfn)(vm->vm_arg,
    786       1.98  christos 		    size, &size, flags, &addr);
    787       1.66      para 	} else {
    788       1.66      para 		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
    789       1.69     rmind 	}
    790  1.100.6.1    bouyer 	VMEM_LOCK(vm);
    791  1.100.6.1    bouyer 
    792       1.69     rmind 	if (rc) {
    793       1.69     rmind 		return ENOMEM;
    794        1.1      yamt 	}
    795        1.1      yamt 
    796       1.61    dyoung 	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
    797  1.100.6.1    bouyer 		VMEM_UNLOCK(vm);
    798       1.61    dyoung 		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
    799  1.100.6.1    bouyer 		VMEM_LOCK(vm);
    800        1.1      yamt 		return ENOMEM;
    801        1.1      yamt 	}
    802        1.1      yamt 
    803        1.1      yamt 	return 0;
    804        1.1      yamt }
    805        1.1      yamt 
    806        1.1      yamt static int
    807        1.1      yamt vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
    808        1.1      yamt {
    809        1.1      yamt 	bt_t *bt;
    810        1.1      yamt 	int i;
    811        1.1      yamt 	struct vmem_hashlist *newhashlist;
    812        1.1      yamt 	struct vmem_hashlist *oldhashlist;
    813        1.1      yamt 	size_t oldhashsize;
    814        1.1      yamt 
    815        1.1      yamt 	KASSERT(newhashsize > 0);
    816        1.1      yamt 
     817  1.100.6.1    bouyer 	/* Round hash size up to the next power of 2. */
    818  1.100.6.1    bouyer 	newhashsize = 1 << (ilog2(newhashsize) + 1);
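                              	/* e.g. (added note) 1 -> 2, 5 -> 8, and 8 -> 16. */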
    819  1.100.6.1    bouyer 
    820        1.1      yamt 	newhashlist =
    821  1.100.6.1    bouyer 	    xmalloc(sizeof(struct vmem_hashlist) * newhashsize, flags);
    822        1.1      yamt 	if (newhashlist == NULL) {
    823        1.1      yamt 		return ENOMEM;
    824        1.1      yamt 	}
    825        1.1      yamt 	for (i = 0; i < newhashsize; i++) {
    826        1.1      yamt 		LIST_INIT(&newhashlist[i]);
    827        1.1      yamt 	}
    828        1.1      yamt 
    829  1.100.6.1    bouyer 	VMEM_LOCK(vm);
    830  1.100.6.1    bouyer 	/* Decay back to a small hash slowly. */
    831  1.100.6.1    bouyer 	if (vm->vm_maxbusytag >= 2) {
    832  1.100.6.1    bouyer 		vm->vm_maxbusytag = vm->vm_maxbusytag / 2 - 1;
    833  1.100.6.1    bouyer 		if (vm->vm_nbusytag > vm->vm_maxbusytag) {
    834  1.100.6.1    bouyer 			vm->vm_maxbusytag = vm->vm_nbusytag;
    835  1.100.6.1    bouyer 		}
    836  1.100.6.1    bouyer 	} else {
    837  1.100.6.1    bouyer 		vm->vm_maxbusytag = vm->vm_nbusytag;
    838       1.30      yamt 	}
    839        1.1      yamt 	oldhashlist = vm->vm_hashlist;
    840        1.1      yamt 	oldhashsize = vm->vm_hashsize;
    841        1.1      yamt 	vm->vm_hashlist = newhashlist;
    842        1.1      yamt 	vm->vm_hashsize = newhashsize;
    843  1.100.6.1    bouyer 	vm->vm_hashmask = newhashsize - 1;
    844        1.1      yamt 	if (oldhashlist == NULL) {
    845        1.1      yamt 		VMEM_UNLOCK(vm);
    846        1.1      yamt 		return 0;
    847        1.1      yamt 	}
    848        1.1      yamt 	for (i = 0; i < oldhashsize; i++) {
    849        1.1      yamt 		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
    850        1.1      yamt 			bt_rembusy(vm, bt); /* XXX */
    851        1.1      yamt 			bt_insbusy(vm, bt);
    852        1.1      yamt 		}
    853        1.1      yamt 	}
    854        1.1      yamt 	VMEM_UNLOCK(vm);
    855        1.1      yamt 
    856       1.66      para 	if (oldhashlist != &vm->vm_hash0) {
    857       1.66      para 		xfree(oldhashlist,
    858  1.100.6.1    bouyer 		    sizeof(struct vmem_hashlist) * oldhashsize);
    859       1.66      para 	}
    860        1.1      yamt 
    861        1.1      yamt 	return 0;
    862        1.1      yamt }
    863        1.1      yamt 
    864       1.10      yamt /*
    865       1.10      yamt  * vmem_fit: check if a bt can satisfy the given restrictions.
    866       1.59      yamt  *
     867       1.59      yamt  * it is the caller's responsibility to ensure the region is big enough
    868       1.59      yamt  * before calling us.
    869       1.10      yamt  */
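                              /*
                               * An illustrative sketch of the parameters (added): align and phase
                               * request a start address with start % align == phase, e.g. align
                               * 0x100 with phase 0x10 yields 0x1010, 0x1110, ...; a nonzero nocross
                               * of 0x1000 additionally forbids [start, start + size) from straddling
                               * any 4096-byte boundary.
                               */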
    870       1.10      yamt 
    871       1.61    dyoung static int
    872       1.76     joerg vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    873       1.60    dyoung     vmem_size_t phase, vmem_size_t nocross,
    874       1.61    dyoung     vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
    875       1.10      yamt {
    876       1.10      yamt 	vmem_addr_t start;
    877       1.10      yamt 	vmem_addr_t end;
    878       1.10      yamt 
    879       1.60    dyoung 	KASSERT(size > 0);
    880       1.59      yamt 	KASSERT(bt->bt_size >= size); /* caller's responsibility */
    881       1.10      yamt 
    882       1.10      yamt 	/*
    883       1.10      yamt 	 * XXX assumption: vmem_addr_t and vmem_size_t are
     884       1.10      yamt 	 * unsigned integers of the same size.
    885       1.10      yamt 	 */
    886       1.10      yamt 
    887       1.10      yamt 	start = bt->bt_start;
    888       1.10      yamt 	if (start < minaddr) {
    889       1.10      yamt 		start = minaddr;
    890       1.10      yamt 	}
    891       1.10      yamt 	end = BT_END(bt);
    892       1.60    dyoung 	if (end > maxaddr) {
    893       1.60    dyoung 		end = maxaddr;
    894       1.10      yamt 	}
    895       1.60    dyoung 	if (start > end) {
    896       1.61    dyoung 		return ENOMEM;
    897       1.10      yamt 	}
    898       1.19      yamt 
    899       1.19      yamt 	start = VMEM_ALIGNUP(start - phase, align) + phase;
    900       1.10      yamt 	if (start < bt->bt_start) {
    901       1.10      yamt 		start += align;
    902       1.10      yamt 	}
    903       1.19      yamt 	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
    904       1.10      yamt 		KASSERT(align < nocross);
    905       1.19      yamt 		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
    906       1.10      yamt 	}
    907       1.60    dyoung 	if (start <= end && end - start >= size - 1) {
    908       1.10      yamt 		KASSERT((start & (align - 1)) == phase);
    909       1.19      yamt 		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
    910       1.10      yamt 		KASSERT(minaddr <= start);
    911       1.60    dyoung 		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
    912       1.10      yamt 		KASSERT(bt->bt_start <= start);
    913       1.60    dyoung 		KASSERT(BT_END(bt) - start >= size - 1);
    914       1.61    dyoung 		*addrp = start;
    915       1.61    dyoung 		return 0;
    916       1.10      yamt 	}
    917       1.61    dyoung 	return ENOMEM;
    918       1.10      yamt }
    919       1.10      yamt 
    920       1.80      para /* ---- vmem API */
    921        1.1      yamt 
    922        1.1      yamt /*
    923  1.100.6.1    bouyer  * vmem_init: creates a vmem arena.
    924        1.1      yamt  */
    925        1.1      yamt 
    926       1.80      para vmem_t *
    927       1.80      para vmem_init(vmem_t *vm, const char *name,
    928       1.80      para     vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
    929       1.80      para     vmem_import_t *importfn, vmem_release_t *releasefn,
    930       1.80      para     vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
    931        1.1      yamt {
    932        1.1      yamt 	int i;
    933        1.1      yamt 
    934        1.1      yamt 	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
    935        1.1      yamt 	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
    936       1.62     rmind 	KASSERT(quantum > 0);
    937        1.1      yamt 
    938        1.1      yamt #if defined(_KERNEL)
    939       1.80      para 	/* XXX: SMP, we get called early... */
    940       1.80      para 	if (!vmem_bootstrapped) {
    941       1.80      para 		vmem_bootstrap();
    942       1.80      para 	}
    943       1.66      para #endif /* defined(_KERNEL) */
    944       1.80      para 
    945       1.80      para 	if (vm == NULL) {
    946       1.66      para 		vm = xmalloc(sizeof(*vm), flags);
    947        1.1      yamt 	}
    948        1.1      yamt 	if (vm == NULL) {
    949        1.1      yamt 		return NULL;
    950        1.1      yamt 	}
    951        1.1      yamt 
    952       1.66      para 	VMEM_CONDVAR_INIT(vm, "vmem");
    953       1.31        ad 	VMEM_LOCK_INIT(vm, ipl);
    954       1.66      para 	vm->vm_flags = flags;
    955       1.66      para 	vm->vm_nfreetags = 0;
    956       1.66      para 	LIST_INIT(&vm->vm_freetags);
    957       1.64      yamt 	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
    958        1.1      yamt 	vm->vm_quantum_mask = quantum - 1;
    959       1.62     rmind 	vm->vm_quantum_shift = SIZE2ORDER(quantum);
    960        1.4      yamt 	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
    961       1.61    dyoung 	vm->vm_importfn = importfn;
    962       1.61    dyoung 	vm->vm_releasefn = releasefn;
    963       1.61    dyoung 	vm->vm_arg = arg;
    964        1.1      yamt 	vm->vm_nbusytag = 0;
    965  1.100.6.1    bouyer 	vm->vm_maxbusytag = 0;
    966       1.66      para 	vm->vm_size = 0;
    967       1.66      para 	vm->vm_inuse = 0;
    968        1.5      yamt #if defined(QCACHE)
    969       1.31        ad 	qc_init(vm, qcache_max, ipl);
    970        1.5      yamt #endif /* defined(QCACHE) */
    971        1.1      yamt 
    972       1.87  christos 	TAILQ_INIT(&vm->vm_seglist);
    973        1.1      yamt 	for (i = 0; i < VMEM_MAXORDER; i++) {
    974        1.1      yamt 		LIST_INIT(&vm->vm_freelist[i]);
    975        1.1      yamt 	}
    976  1.100.6.1    bouyer 	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
    977       1.80      para 	vm->vm_hashsize = 1;
    978  1.100.6.1    bouyer 	vm->vm_hashmask = vm->vm_hashsize - 1;
    979       1.80      para 	vm->vm_hashlist = &vm->vm_hash0;
    980        1.1      yamt 
    981        1.1      yamt 	if (size != 0) {
    982       1.61    dyoung 		if (vmem_add(vm, base, size, flags) != 0) {
    983       1.30      yamt 			vmem_destroy1(vm);
    984        1.1      yamt 			return NULL;
    985        1.1      yamt 		}
    986        1.1      yamt 	}
    987        1.1      yamt 
    988       1.30      yamt #if defined(_KERNEL)
    989       1.66      para 	if (flags & VM_BOOTSTRAP) {
    990       1.94       chs 		bt_refill(vm);
    991       1.66      para 	}
    992       1.66      para 
    993       1.30      yamt 	mutex_enter(&vmem_list_lock);
    994       1.30      yamt 	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
    995       1.30      yamt 	mutex_exit(&vmem_list_lock);
    996       1.30      yamt #endif /* defined(_KERNEL) */
    997       1.30      yamt 
    998        1.1      yamt 	return vm;
    999        1.1      yamt }
   1000        1.1      yamt 
   1001       1.66      para 
   1002       1.66      para 
   1003       1.66      para /*
   1004       1.66      para  * vmem_create: create an arena.
   1005       1.66      para  *
   1006       1.66      para  * => must not be called from interrupt context.
   1007       1.66      para  */
   1008       1.66      para 
   1009       1.66      para vmem_t *
   1010       1.66      para vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
   1011       1.66      para     vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
   1012       1.67     rmind     vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
   1013       1.66      para {
   1014       1.66      para 
   1015       1.66      para 	KASSERT((flags & (VM_XIMPORT)) == 0);
   1016       1.66      para 
   1017       1.80      para 	return vmem_init(NULL, name, base, size, quantum,
   1018       1.66      para 	    importfn, releasefn, source, qcache_max, flags, ipl);
   1019       1.66      para }
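                              /*
                               * A minimal usage sketch (illustrative only, not from this file),
                               * assuming a caller for whom IPL_NONE and sleeping are acceptable:
                               *
                               *	vmem_t *arena;
                               *	vmem_addr_t addr;
                               *
                               *	arena = vmem_create("example", 0x1000, 0x10000, 1,
                               *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
                               *	if (vmem_alloc(arena, 64, VM_SLEEP | VM_INSTANTFIT, &addr) == 0) {
                               *		... use [addr, addr + 64) ...
                               *		vmem_free(arena, addr, 64);
                               *	}
                               *	vmem_destroy(arena);
                               */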
   1020       1.66      para 
   1021       1.66      para /*
    1022       1.66      para  * vmem_xcreate: create an arena that takes an alternative import function.
   1023       1.66      para  *
   1024       1.66      para  * => must not be called from interrupt context.
   1025       1.66      para  */
   1026       1.66      para 
   1027       1.66      para vmem_t *
   1028       1.66      para vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
   1029       1.66      para     vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
   1030       1.67     rmind     vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
   1031       1.66      para {
   1032       1.66      para 
   1033       1.66      para 	KASSERT((flags & (VM_XIMPORT)) == 0);
   1034       1.66      para 
   1035       1.80      para 	return vmem_init(NULL, name, base, size, quantum,
   1036       1.99  christos 	    __FPTRCAST(vmem_import_t *, importfn), releasefn, source,
   1037       1.66      para 	    qcache_max, flags | VM_XIMPORT, ipl);
   1038       1.66      para }
   1039       1.66      para 
   1040        1.1      yamt void
   1041        1.1      yamt vmem_destroy(vmem_t *vm)
   1042        1.1      yamt {
   1043        1.1      yamt 
   1044       1.30      yamt #if defined(_KERNEL)
   1045       1.30      yamt 	mutex_enter(&vmem_list_lock);
   1046       1.30      yamt 	LIST_REMOVE(vm, vm_alllist);
   1047       1.30      yamt 	mutex_exit(&vmem_list_lock);
   1048       1.30      yamt #endif /* defined(_KERNEL) */
   1049        1.1      yamt 
   1050       1.30      yamt 	vmem_destroy1(vm);
   1051        1.1      yamt }
   1052        1.1      yamt 
   1053        1.1      yamt vmem_size_t
   1054        1.1      yamt vmem_roundup_size(vmem_t *vm, vmem_size_t size)
   1055        1.1      yamt {
   1056        1.1      yamt 
   1057        1.1      yamt 	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
   1058        1.1      yamt }
   1059        1.1      yamt 
   1060        1.1      yamt /*
   1061       1.83      yamt  * vmem_alloc: allocate resource from the arena.
   1062        1.1      yamt  */
   1063        1.1      yamt 
   1064       1.61    dyoung int
   1065       1.61    dyoung vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
   1066        1.1      yamt {
   1067       1.86    martin 	const vm_flag_t strat __diagused = flags & VM_FITMASK;
   1068       1.96       chs 	int error;
   1069        1.1      yamt 
   1070        1.1      yamt 	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
   1071        1.1      yamt 	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
   1072        1.1      yamt 
   1073        1.1      yamt 	KASSERT(size > 0);
   1074        1.1      yamt 	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
   1075        1.3      yamt 	if ((flags & VM_SLEEP) != 0) {
   1076       1.42      yamt 		ASSERT_SLEEPABLE();
   1077        1.3      yamt 	}
   1078        1.1      yamt 
   1079        1.5      yamt #if defined(QCACHE)
   1080        1.5      yamt 	if (size <= vm->vm_qcache_max) {
   1081       1.61    dyoung 		void *p;
   1082       1.38      yamt 		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
   1083       1.22      yamt 		qcache_t *qc = vm->vm_qcache[qidx - 1];
   1084        1.5      yamt 
   1085       1.61    dyoung 		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
   1086       1.61    dyoung 		if (addrp != NULL)
   1087       1.61    dyoung 			*addrp = (vmem_addr_t)p;
   1088       1.96       chs 		error = (p == NULL) ? ENOMEM : 0;
   1089       1.96       chs 		goto out;
   1090        1.5      yamt 	}
   1091        1.5      yamt #endif /* defined(QCACHE) */
   1092        1.5      yamt 
   1093       1.96       chs 	error = vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
   1094       1.61    dyoung 	    flags, addrp);
   1095       1.96       chs out:
   1096       1.96       chs 	KASSERT(error == 0 || (flags & VM_SLEEP) == 0);
   1097       1.96       chs 	return error;
   1098       1.10      yamt }
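
                              /*
                               * Example (an illustrative sketch, reusing the hypothetical
                               * "arena" from above): allocate a region of some size and
                               * release it with the matching size.
                               *
                               *	vmem_addr_t addr;
                               *	int error;
                               *
                               *	error = vmem_alloc(arena, size, VM_INSTANTFIT | VM_SLEEP,
                               *	    &addr);
                               *	if (error == 0) {
                               *		... use [addr, addr + size) ...
                               *		vmem_free(arena, addr, size);
                               *	}
                               */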
   1099       1.10      yamt 
   1100       1.61    dyoung int
   1101       1.60    dyoung vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
   1102       1.60    dyoung     const vmem_size_t phase, const vmem_size_t nocross,
   1103       1.61    dyoung     const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
   1104       1.61    dyoung     vmem_addr_t *addrp)
   1105       1.10      yamt {
   1106       1.10      yamt 	struct vmem_freelist *list;
   1107       1.10      yamt 	struct vmem_freelist *first;
   1108       1.10      yamt 	struct vmem_freelist *end;
   1109       1.10      yamt 	bt_t *bt;
   1110       1.10      yamt 	bt_t *btnew;
   1111       1.10      yamt 	bt_t *btnew2;
   1112       1.10      yamt 	const vmem_size_t size = vmem_roundup_size(vm, size0);
   1113       1.10      yamt 	vm_flag_t strat = flags & VM_FITMASK;
   1114       1.10      yamt 	vmem_addr_t start;
   1115       1.61    dyoung 	int rc;
   1116       1.10      yamt 
   1117       1.10      yamt 	KASSERT(size0 > 0);
   1118       1.10      yamt 	KASSERT(size > 0);
   1119       1.10      yamt 	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
   1120       1.10      yamt 	if ((flags & VM_SLEEP) != 0) {
   1121       1.42      yamt 		ASSERT_SLEEPABLE();
   1122       1.10      yamt 	}
   1123       1.10      yamt 	KASSERT((align & vm->vm_quantum_mask) == 0);
   1124       1.10      yamt 	KASSERT((align & (align - 1)) == 0);
   1125       1.10      yamt 	KASSERT((phase & vm->vm_quantum_mask) == 0);
   1126       1.10      yamt 	KASSERT((nocross & vm->vm_quantum_mask) == 0);
   1127       1.10      yamt 	KASSERT((nocross & (nocross - 1)) == 0);
   1128       1.10      yamt 	KASSERT((align == 0 && phase == 0) || phase < align);
   1129       1.10      yamt 	KASSERT(nocross == 0 || nocross >= size);
   1130       1.60    dyoung 	KASSERT(minaddr <= maxaddr);
   1131       1.19      yamt 	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
   1132       1.10      yamt 
   1133       1.10      yamt 	if (align == 0) {
   1134       1.10      yamt 		align = vm->vm_quantum_mask + 1;
   1135       1.10      yamt 	}
   1136       1.59      yamt 
   1137       1.59      yamt 	/*
    1138       1.59      yamt 	 * take the vmem lock and allocate boundary tags up front.
   1139       1.59      yamt 	 */
   1140  1.100.6.1    bouyer 	VMEM_LOCK(vm);
   1141        1.1      yamt 	btnew = bt_alloc(vm, flags);
   1142        1.1      yamt 	if (btnew == NULL) {
   1143  1.100.6.1    bouyer 		VMEM_UNLOCK(vm);
   1144       1.61    dyoung 		return ENOMEM;
   1145        1.1      yamt 	}
   1146       1.10      yamt 	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
   1147       1.10      yamt 	if (btnew2 == NULL) {
   1148       1.10      yamt 		bt_free(vm, btnew);
   1149  1.100.6.1    bouyer 		VMEM_UNLOCK(vm);
   1150       1.61    dyoung 		return ENOMEM;
   1151       1.10      yamt 	}
   1152        1.1      yamt 
   1153       1.59      yamt 	/*
   1154       1.59      yamt 	 * choose a free block from which we allocate.
   1155       1.59      yamt 	 */
   1156        1.1      yamt retry_strat:
   1157        1.1      yamt 	first = bt_freehead_toalloc(vm, size, strat);
   1158        1.1      yamt 	end = &vm->vm_freelist[VMEM_MAXORDER];
   1159        1.1      yamt retry:
   1160        1.1      yamt 	bt = NULL;
   1161       1.55      yamt 	vmem_check(vm);
   1162        1.2      yamt 	if (strat == VM_INSTANTFIT) {
   1163       1.59      yamt 		/*
   1164       1.59      yamt 		 * just choose the first block which satisfies our restrictions.
   1165       1.59      yamt 		 *
    1166       1.59      yamt 		 * note that we don't need to check the block sizes
    1167       1.59      yamt 		 * because any block found on these lists is at least
    1168       1.59      yamt 		 * as large as the given size.
   1169       1.59      yamt 		 */
   1170        1.2      yamt 		for (list = first; list < end; list++) {
   1171        1.2      yamt 			bt = LIST_FIRST(list);
   1172        1.2      yamt 			if (bt != NULL) {
   1173       1.61    dyoung 				rc = vmem_fit(bt, size, align, phase,
   1174       1.61    dyoung 				    nocross, minaddr, maxaddr, &start);
   1175       1.61    dyoung 				if (rc == 0) {
   1176       1.10      yamt 					goto gotit;
   1177       1.10      yamt 				}
   1178       1.59      yamt 				/*
   1179       1.59      yamt 				 * don't bother to follow the bt_freelist link
   1180       1.59      yamt 				 * here.  the list can be very long and we are
   1181       1.59      yamt 				 * told to run fast.  blocks from the later free
   1182       1.59      yamt 				 * lists are larger and have better chances to
   1183       1.59      yamt 				 * satisfy our restrictions.
   1184       1.59      yamt 				 */
   1185        1.2      yamt 			}
   1186        1.2      yamt 		}
   1187        1.2      yamt 	} else { /* VM_BESTFIT */
   1188       1.59      yamt 		/*
   1189       1.59      yamt 		 * we assume that, for space efficiency, it's better to
    1190       1.59      yamt 		 * from a lower-order list than VM_INSTANTFIT does.
   1191       1.59      yamt 		 * from the lower-order list than VM_INSTANTFIT.
   1192       1.59      yamt 		 * however, don't bother to find the smallest block in a free
   1193       1.59      yamt 		 * list because the list can be very long.  we can revisit it
   1194       1.59      yamt 		 * if/when it turns out to be a problem.
   1195       1.59      yamt 		 *
   1196       1.59      yamt 		 * note that the 'first' list can contain blocks smaller than
   1197       1.59      yamt 		 * the requested size.  thus we need to check bt_size.
   1198       1.59      yamt 		 */
   1199        1.2      yamt 		for (list = first; list < end; list++) {
   1200        1.2      yamt 			LIST_FOREACH(bt, list, bt_freelist) {
   1201        1.2      yamt 				if (bt->bt_size >= size) {
   1202       1.61    dyoung 					rc = vmem_fit(bt, size, align, phase,
   1203       1.61    dyoung 					    nocross, minaddr, maxaddr, &start);
   1204       1.61    dyoung 					if (rc == 0) {
   1205       1.10      yamt 						goto gotit;
   1206       1.10      yamt 					}
   1207        1.2      yamt 				}
   1208        1.1      yamt 			}
   1209        1.1      yamt 		}
   1210        1.1      yamt 	}
   1211        1.1      yamt #if 1
   1212        1.2      yamt 	if (strat == VM_INSTANTFIT) {
   1213        1.2      yamt 		strat = VM_BESTFIT;
   1214        1.2      yamt 		goto retry_strat;
   1215        1.2      yamt 	}
   1216        1.1      yamt #endif
   1217       1.69     rmind 	if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) {
   1218       1.10      yamt 
   1219       1.10      yamt 		/*
   1220       1.10      yamt 		 * XXX should try to import a region large enough to
   1221       1.10      yamt 		 * satisfy restrictions?
   1222       1.10      yamt 		 */
   1223       1.10      yamt 
   1224       1.20      yamt 		goto fail;
   1225       1.10      yamt 	}
   1226       1.60    dyoung 	/* XXX eeek, minaddr & maxaddr not respected */
   1227        1.2      yamt 	if (vmem_import(vm, size, flags) == 0) {
   1228        1.2      yamt 		goto retry;
   1229        1.1      yamt 	}
   1230        1.2      yamt 	/* XXX */
   1231       1.66      para 
   1232       1.68      para 	if ((flags & VM_SLEEP) != 0) {
   1233       1.94       chs 		vmem_kick_pdaemon();
   1234       1.68      para 		VMEM_CONDVAR_WAIT(vm);
   1235       1.68      para 		goto retry;
   1236       1.68      para 	}
   1237       1.20      yamt fail:
   1238       1.20      yamt 	bt_free(vm, btnew);
   1239       1.20      yamt 	bt_free(vm, btnew2);
   1240  1.100.6.1    bouyer 	VMEM_UNLOCK(vm);
   1241       1.61    dyoung 	return ENOMEM;
   1242        1.2      yamt 
   1243        1.2      yamt gotit:
   1244        1.1      yamt 	KASSERT(bt->bt_type == BT_TYPE_FREE);
   1245        1.1      yamt 	KASSERT(bt->bt_size >= size);
   1246        1.1      yamt 	bt_remfree(vm, bt);
   1247       1.55      yamt 	vmem_check(vm);
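                              	/*
                              	 * If the fitted start lies inside the chosen free block,
                              	 * split off the leading part as a new free tag (btnew2).
                              	 */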
   1248       1.10      yamt 	if (bt->bt_start != start) {
   1249       1.10      yamt 		btnew2->bt_type = BT_TYPE_FREE;
   1250       1.10      yamt 		btnew2->bt_start = bt->bt_start;
   1251       1.10      yamt 		btnew2->bt_size = start - bt->bt_start;
   1252       1.10      yamt 		bt->bt_start = start;
   1253       1.10      yamt 		bt->bt_size -= btnew2->bt_size;
   1254       1.10      yamt 		bt_insfree(vm, btnew2);
   1255       1.87  christos 		bt_insseg(vm, btnew2, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
   1256       1.10      yamt 		btnew2 = NULL;
   1257       1.55      yamt 		vmem_check(vm);
   1258       1.10      yamt 	}
   1259       1.10      yamt 	KASSERT(bt->bt_start == start);
   1260        1.1      yamt 	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
   1261        1.1      yamt 		/* split */
   1262        1.1      yamt 		btnew->bt_type = BT_TYPE_BUSY;
   1263        1.1      yamt 		btnew->bt_start = bt->bt_start;
   1264        1.1      yamt 		btnew->bt_size = size;
   1265        1.1      yamt 		bt->bt_start = bt->bt_start + size;
   1266        1.1      yamt 		bt->bt_size -= size;
   1267        1.1      yamt 		bt_insfree(vm, bt);
   1268       1.87  christos 		bt_insseg(vm, btnew, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
   1269        1.1      yamt 		bt_insbusy(vm, btnew);
   1270       1.55      yamt 		vmem_check(vm);
   1271        1.1      yamt 	} else {
   1272        1.1      yamt 		bt->bt_type = BT_TYPE_BUSY;
   1273        1.1      yamt 		bt_insbusy(vm, bt);
   1274       1.55      yamt 		vmem_check(vm);
   1275        1.1      yamt 		bt_free(vm, btnew);
   1276        1.1      yamt 		btnew = bt;
   1277        1.1      yamt 	}
   1278       1.10      yamt 	if (btnew2 != NULL) {
   1279       1.10      yamt 		bt_free(vm, btnew2);
   1280       1.10      yamt 	}
   1281        1.1      yamt 	KASSERT(btnew->bt_size >= size);
   1282        1.1      yamt 	btnew->bt_type = BT_TYPE_BUSY;
   1283       1.61    dyoung 	if (addrp != NULL)
   1284       1.61    dyoung 		*addrp = btnew->bt_start;
   1285  1.100.6.1    bouyer 	VMEM_UNLOCK(vm);
   1286       1.61    dyoung 	return 0;
   1287        1.1      yamt }
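
                              /*
                               * Example (an illustrative sketch): a constrained allocation,
                               * e.g. a buffer that must be 64KB-aligned and must not cross a
                               * 1MB boundary (assuming size <= 1MB, as nocross requires).
                               * Allocations made with vmem_xalloc() are released with
                               * vmem_xfree().
                               *
                               *	vmem_addr_t addr;
                               *	int error;
                               *
                               *	error = vmem_xalloc(arena, size, 64 * 1024, 0, 1024 * 1024,
                               *	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, VM_BESTFIT | VM_SLEEP,
                               *	    &addr);
                               *	if (error == 0)
                               *		vmem_xfree(arena, addr, size);
                               */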
   1288        1.1      yamt 
   1289        1.1      yamt /*
   1290       1.83      yamt  * vmem_free: free the resource to the arena.
   1291        1.1      yamt  */
   1292        1.1      yamt 
   1293        1.1      yamt void
   1294        1.1      yamt vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
   1295        1.1      yamt {
   1296        1.1      yamt 
   1297        1.1      yamt 	KASSERT(size > 0);
   1298        1.1      yamt 
   1299        1.5      yamt #if defined(QCACHE)
   1300        1.5      yamt 	if (size <= vm->vm_qcache_max) {
   1301        1.5      yamt 		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
   1302       1.22      yamt 		qcache_t *qc = vm->vm_qcache[qidx - 1];
   1303        1.5      yamt 
   1304       1.63     rmind 		pool_cache_put(qc->qc_cache, (void *)addr);
   1305       1.63     rmind 		return;
   1306        1.5      yamt 	}
   1307        1.5      yamt #endif /* defined(QCACHE) */
   1308        1.5      yamt 
   1309       1.10      yamt 	vmem_xfree(vm, addr, size);
   1310       1.10      yamt }
   1311       1.10      yamt 
   1312       1.10      yamt void
   1313       1.17      yamt vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
   1314       1.10      yamt {
   1315       1.10      yamt 	bt_t *bt;
   1316       1.10      yamt 	bt_t *t;
   1317       1.10      yamt 
   1318       1.10      yamt 	KASSERT(size > 0);
   1319       1.10      yamt 
   1320        1.1      yamt 	VMEM_LOCK(vm);
   1321        1.1      yamt 
   1322        1.1      yamt 	bt = bt_lookupbusy(vm, addr);
   1323        1.1      yamt 	KASSERT(bt != NULL);
   1324        1.1      yamt 	KASSERT(bt->bt_start == addr);
   1325        1.1      yamt 	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
   1326        1.1      yamt 	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
   1327        1.1      yamt 	KASSERT(bt->bt_type == BT_TYPE_BUSY);
   1328        1.1      yamt 	bt_rembusy(vm, bt);
   1329        1.1      yamt 	bt->bt_type = BT_TYPE_FREE;
   1330        1.1      yamt 
   1331        1.1      yamt 	/* coalesce */
   1332       1.87  christos 	t = TAILQ_NEXT(bt, bt_seglist);
   1333        1.1      yamt 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
   1334       1.60    dyoung 		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
   1335        1.1      yamt 		bt_remfree(vm, t);
   1336        1.1      yamt 		bt_remseg(vm, t);
   1337        1.1      yamt 		bt->bt_size += t->bt_size;
   1338  1.100.6.1    bouyer 		bt_free(vm, t);
   1339        1.1      yamt 	}
   1340       1.87  christos 	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
   1341        1.1      yamt 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
   1342       1.60    dyoung 		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
   1343        1.1      yamt 		bt_remfree(vm, t);
   1344        1.1      yamt 		bt_remseg(vm, t);
   1345        1.1      yamt 		bt->bt_size += t->bt_size;
   1346        1.1      yamt 		bt->bt_start = t->bt_start;
   1347  1.100.6.1    bouyer 		bt_free(vm, t);
   1348        1.1      yamt 	}
   1349        1.1      yamt 
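                              	/*
                              	 * If the merged free block now covers a dynamically imported
                              	 * span exactly, hand the whole span back to the backing source.
                              	 */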
   1350       1.87  christos 	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
   1351        1.1      yamt 	KASSERT(t != NULL);
   1352        1.1      yamt 	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
   1353       1.61    dyoung 	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
   1354        1.1      yamt 	    t->bt_size == bt->bt_size) {
   1355        1.1      yamt 		vmem_addr_t spanaddr;
   1356        1.1      yamt 		vmem_size_t spansize;
   1357        1.1      yamt 
   1358        1.1      yamt 		KASSERT(t->bt_start == bt->bt_start);
   1359        1.1      yamt 		spanaddr = bt->bt_start;
   1360        1.1      yamt 		spansize = bt->bt_size;
   1361        1.1      yamt 		bt_remseg(vm, bt);
   1362  1.100.6.1    bouyer 		bt_free(vm, bt);
   1363        1.1      yamt 		bt_remseg(vm, t);
   1364  1.100.6.1    bouyer 		bt_free(vm, t);
   1365       1.66      para 		vm->vm_size -= spansize;
   1366       1.68      para 		VMEM_CONDVAR_BROADCAST(vm);
   1367  1.100.6.1    bouyer 		/* bt_freetrim() drops the lock. */
   1368  1.100.6.1    bouyer 		bt_freetrim(vm, BT_MAXFREE);
   1369       1.61    dyoung 		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
   1370        1.1      yamt 	} else {
   1371        1.1      yamt 		bt_insfree(vm, bt);
   1372       1.68      para 		VMEM_CONDVAR_BROADCAST(vm);
   1373  1.100.6.1    bouyer 		/* bt_freetrim() drops the lock. */
   1374  1.100.6.1    bouyer 		bt_freetrim(vm, BT_MAXFREE);
   1375        1.1      yamt 	}
   1376        1.1      yamt }
   1377        1.1      yamt 
   1378        1.1      yamt /*
   1379        1.1      yamt  * vmem_add:
   1380        1.1      yamt  *
    1381        1.1      yamt  * => caller must ensure the appropriate spl
    1382        1.1      yamt  *    if the arena can be accessed from interrupt context.
   1383        1.1      yamt  */
   1384        1.1      yamt 
   1385       1.61    dyoung int
   1386        1.1      yamt vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
   1387        1.1      yamt {
   1388  1.100.6.1    bouyer 	int rv;
   1389        1.1      yamt 
   1390  1.100.6.1    bouyer 	VMEM_LOCK(vm);
   1391  1.100.6.1    bouyer 	rv = vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
   1392  1.100.6.1    bouyer 	VMEM_UNLOCK(vm);
   1393  1.100.6.1    bouyer 
   1394  1.100.6.1    bouyer 	return rv;
   1395        1.1      yamt }
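
                              /*
                               * Example (illustrative sketch): donate a further span to an
                               * existing arena at run time; the addresses are hypothetical.
                               *
                               *	if (vmem_add(arena, 0x200000, 0x10000, VM_NOSLEEP) != 0)
                               *		printf("could not add span to example arena\n");
                               */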
   1396        1.1      yamt 
   1397        1.6      yamt /*
    1398       1.66      para  * vmem_size: query an arena's size
    1399        1.6      yamt  *
    1400       1.66      para  * => returns the free and/or allocated size of the arena
   1401        1.6      yamt  */
   1402       1.66      para vmem_size_t
   1403       1.66      para vmem_size(vmem_t *vm, int typemask)
   1404        1.6      yamt {
   1405        1.6      yamt 
   1406       1.66      para 	switch (typemask) {
   1407       1.66      para 	case VMEM_ALLOC:
   1408       1.66      para 		return vm->vm_inuse;
   1409       1.66      para 	case VMEM_FREE:
   1410       1.66      para 		return vm->vm_size - vm->vm_inuse;
   1411       1.66      para 	case VMEM_FREE|VMEM_ALLOC:
   1412       1.66      para 		return vm->vm_size;
   1413       1.66      para 	default:
    1414       1.66      para 		panic("vmem_size: invalid typemask %d", typemask);
   1415       1.66      para 	}
   1416        1.6      yamt }
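
                              /*
                               * Example (illustrative): report arena utilization.
                               *
                               *	printf("example: %" PRIu64 " of %" PRIu64 " bytes in use\n",
                               *	    (uint64_t)vmem_size(arena, VMEM_ALLOC),
                               *	    (uint64_t)vmem_size(arena, VMEM_FREE | VMEM_ALLOC));
                               */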
   1417        1.6      yamt 
   1418       1.30      yamt /* ---- rehash */
   1419       1.30      yamt 
   1420       1.30      yamt #if defined(_KERNEL)
   1421       1.30      yamt static struct callout vmem_rehash_ch;
   1422       1.30      yamt static int vmem_rehash_interval;
   1423       1.30      yamt static struct workqueue *vmem_rehash_wq;
   1424       1.30      yamt static struct work vmem_rehash_wk;
   1425       1.30      yamt 
   1426       1.30      yamt static void
   1427       1.30      yamt vmem_rehash_all(struct work *wk, void *dummy)
   1428       1.30      yamt {
   1429       1.30      yamt 	vmem_t *vm;
   1430       1.30      yamt 
   1431       1.30      yamt 	KASSERT(wk == &vmem_rehash_wk);
   1432       1.30      yamt 	mutex_enter(&vmem_list_lock);
   1433       1.30      yamt 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
   1434       1.30      yamt 		size_t desired;
   1435       1.30      yamt 		size_t current;
   1436       1.30      yamt 
   1437  1.100.6.1    bouyer 		desired = atomic_load_relaxed(&vm->vm_maxbusytag);
   1438  1.100.6.1    bouyer 		current = atomic_load_relaxed(&vm->vm_hashsize);
   1439       1.30      yamt 
   1440       1.30      yamt 		if (desired > VMEM_HASHSIZE_MAX) {
   1441       1.30      yamt 			desired = VMEM_HASHSIZE_MAX;
   1442       1.30      yamt 		} else if (desired < VMEM_HASHSIZE_MIN) {
   1443       1.30      yamt 			desired = VMEM_HASHSIZE_MIN;
   1444       1.30      yamt 		}
   1445       1.30      yamt 		if (desired > current * 2 || desired * 2 < current) {
   1446       1.30      yamt 			vmem_rehash(vm, desired, VM_NOSLEEP);
   1447       1.30      yamt 		}
   1448       1.30      yamt 	}
   1449       1.30      yamt 	mutex_exit(&vmem_list_lock);
   1450       1.30      yamt 
   1451       1.30      yamt 	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
   1452       1.30      yamt }
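
                              /*
                               * Worked example (illustrative): with a 128-bucket hash table and
                               * 300 busy tags, desired = 300 > 2 * 128, so the arena is rehashed
                               * to about 300 buckets; with 200 busy tags (200 <= 2 * 128 and
                               * 2 * 200 >= 128) the table is left alone.
                               */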
   1453       1.30      yamt 
   1454       1.30      yamt static void
   1455       1.30      yamt vmem_rehash_all_kick(void *dummy)
   1456       1.30      yamt {
   1457       1.30      yamt 
   1458       1.32     rmind 	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
   1459       1.30      yamt }
   1460       1.30      yamt 
   1461       1.30      yamt void
   1462       1.30      yamt vmem_rehash_start(void)
   1463       1.30      yamt {
   1464       1.30      yamt 	int error;
   1465       1.30      yamt 
   1466       1.30      yamt 	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
   1467       1.41        ad 	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
   1468       1.30      yamt 	if (error) {
    1469       1.30      yamt 		panic("%s: workqueue_create %d", __func__, error);
   1470       1.30      yamt 	}
   1471       1.41        ad 	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
   1472       1.30      yamt 	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);
   1473       1.30      yamt 
   1474       1.30      yamt 	vmem_rehash_interval = hz * 10;
   1475       1.30      yamt 	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
   1476       1.30      yamt }
   1477       1.30      yamt #endif /* defined(_KERNEL) */
   1478       1.30      yamt 
   1479        1.1      yamt /* ---- debug */
   1480        1.1      yamt 
   1481       1.55      yamt #if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)
   1482       1.55      yamt 
   1483       1.82  christos static void bt_dump(const bt_t *, void (*)(const char *, ...)
   1484       1.82  christos     __printflike(1, 2));
   1485       1.55      yamt 
   1486       1.55      yamt static const char *
   1487       1.55      yamt bt_type_string(int type)
   1488       1.55      yamt {
   1489       1.55      yamt 	static const char * const table[] = {
   1490       1.55      yamt 		[BT_TYPE_BUSY] = "busy",
   1491       1.55      yamt 		[BT_TYPE_FREE] = "free",
   1492       1.55      yamt 		[BT_TYPE_SPAN] = "span",
   1493       1.55      yamt 		[BT_TYPE_SPAN_STATIC] = "static span",
   1494       1.55      yamt 	};
   1495       1.55      yamt 
   1496       1.55      yamt 	if (type >= __arraycount(table)) {
   1497       1.55      yamt 		return "BOGUS";
   1498       1.55      yamt 	}
   1499       1.55      yamt 	return table[type];
   1500       1.55      yamt }
   1501       1.55      yamt 
   1502       1.55      yamt static void
   1503       1.55      yamt bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
   1504       1.55      yamt {
   1505       1.55      yamt 
   1506       1.55      yamt 	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
   1507       1.55      yamt 	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
   1508       1.55      yamt 	    bt->bt_type, bt_type_string(bt->bt_type));
   1509       1.55      yamt }
   1510       1.55      yamt 
   1511       1.55      yamt static void
    1512       1.82  christos vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...) __printflike(1, 2))
   1513       1.55      yamt {
   1514       1.55      yamt 	const bt_t *bt;
   1515       1.55      yamt 	int i;
   1516       1.55      yamt 
   1517       1.55      yamt 	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
   1518       1.87  christos 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
   1519       1.55      yamt 		bt_dump(bt, pr);
   1520       1.55      yamt 	}
   1521       1.55      yamt 
   1522       1.55      yamt 	for (i = 0; i < VMEM_MAXORDER; i++) {
   1523       1.55      yamt 		const struct vmem_freelist *fl = &vm->vm_freelist[i];
   1524       1.55      yamt 
   1525       1.55      yamt 		if (LIST_EMPTY(fl)) {
   1526       1.55      yamt 			continue;
   1527       1.55      yamt 		}
   1528       1.55      yamt 
   1529       1.55      yamt 		(*pr)("freelist[%d]\n", i);
   1530       1.55      yamt 		LIST_FOREACH(bt, fl, bt_freelist) {
   1531       1.55      yamt 			bt_dump(bt, pr);
   1532       1.55      yamt 		}
   1533       1.55      yamt 	}
   1534       1.55      yamt }
   1535       1.55      yamt 
   1536       1.55      yamt #endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */
   1537       1.55      yamt 
   1538       1.37      yamt #if defined(DDB)
   1539       1.37      yamt static bt_t *
   1540       1.37      yamt vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
   1541       1.37      yamt {
   1542       1.39      yamt 	bt_t *bt;
   1543       1.37      yamt 
   1544       1.87  christos 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
   1545       1.39      yamt 		if (BT_ISSPAN_P(bt)) {
   1546       1.39      yamt 			continue;
   1547       1.39      yamt 		}
   1548       1.60    dyoung 		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
   1549       1.39      yamt 			return bt;
   1550       1.37      yamt 		}
   1551       1.37      yamt 	}
   1552       1.37      yamt 
   1553       1.37      yamt 	return NULL;
   1554       1.37      yamt }
   1555       1.37      yamt 
   1556       1.37      yamt void
   1557       1.37      yamt vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
   1558       1.37      yamt {
   1559       1.37      yamt 	vmem_t *vm;
   1560       1.37      yamt 
   1561       1.37      yamt 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
   1562       1.37      yamt 		bt_t *bt;
   1563       1.37      yamt 
   1564       1.37      yamt 		bt = vmem_whatis_lookup(vm, addr);
   1565       1.37      yamt 		if (bt == NULL) {
   1566       1.37      yamt 			continue;
   1567       1.37      yamt 		}
   1568       1.39      yamt 		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
   1569       1.37      yamt 		    (void *)addr, (void *)bt->bt_start,
   1570       1.39      yamt 		    (size_t)(addr - bt->bt_start), vm->vm_name,
   1571       1.39      yamt 		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
   1572       1.37      yamt 	}
   1573       1.37      yamt }
   1574       1.43    cegger 
   1575       1.55      yamt void
   1576       1.55      yamt vmem_printall(const char *modif, void (*pr)(const char *, ...))
   1577       1.43    cegger {
   1578       1.55      yamt 	const vmem_t *vm;
   1579       1.43    cegger 
   1580       1.47    cegger 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
   1581       1.55      yamt 		vmem_dump(vm, pr);
   1582       1.43    cegger 	}
   1583       1.43    cegger }
   1584       1.43    cegger 
   1585       1.43    cegger void
   1586       1.43    cegger vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
   1587       1.43    cegger {
   1588       1.55      yamt 	const vmem_t *vm = (const void *)addr;
   1589       1.43    cegger 
   1590       1.55      yamt 	vmem_dump(vm, pr);
   1591       1.43    cegger }
   1592       1.37      yamt #endif /* defined(DDB) */
   1593       1.37      yamt 
   1594       1.60    dyoung #if defined(_KERNEL)
   1595       1.60    dyoung #define vmem_printf printf
   1596       1.60    dyoung #else
   1597        1.1      yamt #include <stdio.h>
   1598       1.60    dyoung #include <stdarg.h>
   1599       1.60    dyoung 
   1600       1.60    dyoung static void
   1601       1.60    dyoung vmem_printf(const char *fmt, ...)
   1602       1.60    dyoung {
   1603       1.60    dyoung 	va_list ap;
   1604       1.60    dyoung 	va_start(ap, fmt);
   1605       1.60    dyoung 	vprintf(fmt, ap);
   1606       1.60    dyoung 	va_end(ap);
   1607       1.60    dyoung }
   1608       1.60    dyoung #endif
   1609        1.1      yamt 
   1610       1.55      yamt #if defined(VMEM_SANITY)
   1611        1.1      yamt 
   1612       1.55      yamt static bool
   1613       1.55      yamt vmem_check_sanity(vmem_t *vm)
   1614        1.1      yamt {
   1615       1.55      yamt 	const bt_t *bt, *bt2;
   1616        1.1      yamt 
   1617       1.55      yamt 	KASSERT(vm != NULL);
   1618        1.1      yamt 
   1619       1.87  christos 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
   1620       1.60    dyoung 		if (bt->bt_start > BT_END(bt)) {
   1621       1.55      yamt 			printf("corrupted tag\n");
   1622       1.60    dyoung 			bt_dump(bt, vmem_printf);
   1623       1.55      yamt 			return false;
   1624       1.55      yamt 		}
   1625       1.55      yamt 	}
   1626       1.87  christos 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
   1627       1.87  christos 		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
   1628       1.55      yamt 			if (bt == bt2) {
   1629       1.55      yamt 				continue;
   1630       1.55      yamt 			}
   1631       1.55      yamt 			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
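                              			/*
                              			 * Closed-interval overlap test: [a1, a2] and
                              			 * [b1, b2] intersect iff a1 <= b2 && b1 <= a2;
                              			 * e.g. [10, 19] and [15, 24] overlap because
                              			 * 10 <= 24 and 15 <= 19.
                              			 */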
   1632       1.55      yamt 				continue;
   1633       1.55      yamt 			}
   1634       1.60    dyoung 			if (bt->bt_start <= BT_END(bt2) &&
   1635       1.60    dyoung 			    bt2->bt_start <= BT_END(bt)) {
    1636       1.55      yamt 				printf("overlapping tags\n");
   1637       1.60    dyoung 				bt_dump(bt, vmem_printf);
   1638       1.60    dyoung 				bt_dump(bt2, vmem_printf);
   1639       1.55      yamt 				return false;
   1640       1.55      yamt 			}
   1641       1.55      yamt 		}
   1642        1.1      yamt 	}
   1643        1.1      yamt 
   1644       1.55      yamt 	return true;
   1645       1.55      yamt }
   1646        1.1      yamt 
   1647       1.55      yamt static void
   1648       1.55      yamt vmem_check(vmem_t *vm)
   1649       1.55      yamt {
   1650        1.1      yamt 
   1651       1.55      yamt 	if (!vmem_check_sanity(vm)) {
   1652       1.55      yamt 		panic("insanity vmem %p", vm);
   1653        1.1      yamt 	}
   1654        1.1      yamt }
   1655        1.1      yamt 
   1656       1.55      yamt #endif /* defined(VMEM_SANITY) */
   1657        1.1      yamt 
   1658       1.55      yamt #if defined(UNITTEST)
   1659        1.1      yamt int
   1660       1.57    cegger main(void)
   1661        1.1      yamt {
   1662       1.61    dyoung 	int rc;
   1663        1.1      yamt 	vmem_t *vm;
   1664        1.1      yamt 	vmem_addr_t p;
   1665        1.1      yamt 	struct reg {
   1666        1.1      yamt 		vmem_addr_t p;
   1667        1.1      yamt 		vmem_size_t sz;
   1668       1.25   thorpej 		bool x;
   1669        1.1      yamt 	} *reg = NULL;
   1670        1.1      yamt 	int nreg = 0;
   1671        1.1      yamt 	int nalloc = 0;
   1672        1.1      yamt 	int nfree = 0;
   1673        1.1      yamt 	vmem_size_t total = 0;
   1674        1.1      yamt #if 1
   1675        1.1      yamt 	vm_flag_t strat = VM_INSTANTFIT;
   1676        1.1      yamt #else
   1677        1.1      yamt 	vm_flag_t strat = VM_BESTFIT;
   1678        1.1      yamt #endif
   1679        1.1      yamt 
   1680       1.61    dyoung 	vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
   1681       1.61    dyoung #ifdef _KERNEL
   1682       1.61    dyoung 	    IPL_NONE
   1683       1.61    dyoung #else
   1684       1.61    dyoung 	    0
   1685       1.61    dyoung #endif
   1686       1.61    dyoung 	    );
   1687        1.1      yamt 	if (vm == NULL) {
   1688        1.1      yamt 		printf("vmem_create\n");
   1689        1.1      yamt 		exit(EXIT_FAILURE);
   1690        1.1      yamt 	}
   1691       1.60    dyoung 	vmem_dump(vm, vmem_printf);
   1692        1.1      yamt 
   1693       1.61    dyoung 	rc = vmem_add(vm, 0, 50, VM_SLEEP);
   1694       1.61    dyoung 	assert(rc == 0);
   1695       1.61    dyoung 	rc = vmem_add(vm, 100, 200, VM_SLEEP);
   1696       1.61    dyoung 	assert(rc == 0);
   1697       1.61    dyoung 	rc = vmem_add(vm, 2000, 1, VM_SLEEP);
   1698       1.61    dyoung 	assert(rc == 0);
   1699       1.61    dyoung 	rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
   1700       1.61    dyoung 	assert(rc == 0);
   1701       1.61    dyoung 	rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
   1702       1.61    dyoung 	assert(rc == 0);
   1703       1.61    dyoung 	rc = vmem_add(vm, 500, 1000, VM_SLEEP);
   1704       1.61    dyoung 	assert(rc == 0);
   1705       1.61    dyoung 	rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
   1706       1.61    dyoung 	assert(rc == 0);
   1707       1.61    dyoung 	rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
   1708       1.61    dyoung 	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
   1709       1.61    dyoung 	assert(rc != 0);
   1710       1.61    dyoung 	rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
   1711       1.61    dyoung 	assert(rc == 0 && p == 0);
   1712       1.61    dyoung 	vmem_xfree(vm, p, 50);
   1713       1.61    dyoung 	rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
   1714       1.61    dyoung 	assert(rc == 0 && p == 0);
   1715       1.61    dyoung 	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
   1716       1.61    dyoung 	    0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
   1717       1.61    dyoung 	assert(rc != 0);
   1718       1.61    dyoung 	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
   1719       1.61    dyoung 	    0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
   1720       1.61    dyoung 	assert(rc != 0);
   1721       1.61    dyoung 	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
   1722       1.61    dyoung 	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
   1723       1.61    dyoung 	assert(rc == 0);
   1724       1.60    dyoung 	vmem_dump(vm, vmem_printf);
   1725        1.1      yamt 	for (;;) {
   1726        1.1      yamt 		struct reg *r;
   1727       1.10      yamt 		int t = rand() % 100;
   1728        1.1      yamt 
   1729       1.10      yamt 		if (t > 45) {
   1730       1.10      yamt 			/* alloc */
   1731        1.1      yamt 			vmem_size_t sz = rand() % 500 + 1;
   1732       1.25   thorpej 			bool x;
   1733       1.10      yamt 			vmem_size_t align, phase, nocross;
   1734       1.10      yamt 			vmem_addr_t minaddr, maxaddr;
   1735       1.10      yamt 
   1736       1.10      yamt 			if (t > 70) {
   1737       1.26   thorpej 				x = true;
   1738       1.10      yamt 				/* XXX */
   1739       1.10      yamt 				align = 1 << (rand() % 15);
   1740       1.10      yamt 				phase = rand() % 65536;
   1741       1.10      yamt 				nocross = 1 << (rand() % 15);
   1742       1.10      yamt 				if (align <= phase) {
   1743       1.10      yamt 					phase = 0;
   1744       1.10      yamt 				}
   1745       1.19      yamt 				if (VMEM_CROSS_P(phase, phase + sz - 1,
   1746       1.19      yamt 				    nocross)) {
   1747       1.10      yamt 					nocross = 0;
   1748       1.10      yamt 				}
   1749       1.60    dyoung 				do {
   1750       1.60    dyoung 					minaddr = rand() % 50000;
   1751       1.60    dyoung 					maxaddr = rand() % 70000;
   1752       1.60    dyoung 				} while (minaddr > maxaddr);
   1753       1.10      yamt 				printf("=== xalloc %" PRIu64
   1754       1.10      yamt 				    " align=%" PRIu64 ", phase=%" PRIu64
   1755       1.10      yamt 				    ", nocross=%" PRIu64 ", min=%" PRIu64
   1756       1.10      yamt 				    ", max=%" PRIu64 "\n",
   1757       1.10      yamt 				    (uint64_t)sz,
   1758       1.10      yamt 				    (uint64_t)align,
   1759       1.10      yamt 				    (uint64_t)phase,
   1760       1.10      yamt 				    (uint64_t)nocross,
   1761       1.10      yamt 				    (uint64_t)minaddr,
   1762       1.10      yamt 				    (uint64_t)maxaddr);
   1763       1.61    dyoung 				rc = vmem_xalloc(vm, sz, align, phase, nocross,
   1764       1.61    dyoung 				    minaddr, maxaddr, strat|VM_SLEEP, &p);
   1765       1.10      yamt 			} else {
   1766       1.26   thorpej 				x = false;
   1767       1.10      yamt 				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
   1768       1.61    dyoung 				rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
   1769       1.10      yamt 			}
   1770        1.1      yamt 			printf("-> %" PRIu64 "\n", (uint64_t)p);
   1771       1.60    dyoung 			vmem_dump(vm, vmem_printf);
   1772       1.61    dyoung 			if (rc != 0) {
   1773       1.10      yamt 				if (x) {
   1774       1.10      yamt 					continue;
   1775       1.10      yamt 				}
   1776        1.1      yamt 				break;
   1777        1.1      yamt 			}
   1778        1.1      yamt 			nreg++;
   1779        1.1      yamt 			reg = realloc(reg, sizeof(*reg) * nreg);
   1780        1.1      yamt 			r = &reg[nreg - 1];
   1781        1.1      yamt 			r->p = p;
   1782        1.1      yamt 			r->sz = sz;
   1783       1.10      yamt 			r->x = x;
   1784        1.1      yamt 			total += sz;
   1785        1.1      yamt 			nalloc++;
   1786        1.1      yamt 		} else if (nreg != 0) {
   1787       1.10      yamt 			/* free */
   1788        1.1      yamt 			r = &reg[rand() % nreg];
   1789        1.1      yamt 			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
   1790        1.1      yamt 			    (uint64_t)r->p, (uint64_t)r->sz);
   1791       1.10      yamt 			if (r->x) {
   1792       1.10      yamt 				vmem_xfree(vm, r->p, r->sz);
   1793       1.10      yamt 			} else {
   1794       1.10      yamt 				vmem_free(vm, r->p, r->sz);
   1795       1.10      yamt 			}
   1796        1.1      yamt 			total -= r->sz;
   1797       1.60    dyoung 			vmem_dump(vm, vmem_printf);
   1798        1.1      yamt 			*r = reg[nreg - 1];
   1799        1.1      yamt 			nreg--;
   1800        1.1      yamt 			nfree++;
   1801        1.1      yamt 		}
   1802        1.1      yamt 		printf("total=%" PRIu64 "\n", (uint64_t)total);
   1803        1.1      yamt 	}
   1804        1.1      yamt 	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
   1805        1.1      yamt 	    (uint64_t)total, nalloc, nfree);
   1806        1.1      yamt 	exit(EXIT_SUCCESS);
   1807        1.1      yamt }
   1808       1.55      yamt #endif /* defined(UNITTEST) */
   1809