/*	$NetBSD: subr_vmem.c,v 1.36.2.2 2007/12/13 05:06:01 yamt Exp $	*/

/*-
 * Copyright (c)2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 *
 * todo:
 * -	decide how to import segments for vmem_xalloc.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.36.2.2 2007/12/13 05:06:01 yamt Exp $");

#define	VMEM_DEBUG
#if defined(_KERNEL)
#include "opt_ddb.h"
#define	QCACHE
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/lock.h>
#include <sys/once.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/vmem.h>
#include <sys/kmem.h>
#include <sys/workqueue.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>
#include <uvm/uvm_pdaemon.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#define	LOCK_DECL(name)		kmutex_t name
#else /* defined(_KERNEL) */
#include <errno.h>
#include <assert.h>
#include <stdlib.h>

#define	KASSERT(a)		assert(a)
#define	LOCK_DECL(name)		/* nothing */
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE(lk, msg) /* nothing */
#define	IPL_VM			0
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_DEBUG)
void vmem_dump(const vmem_t *);
void vmem_dump_seglist(const vmem_t *);
void vmem_dump_freelist(const vmem_t *);
#if defined(QCACHE)
void vmem_dump_qc(const vmem_t *);
#endif /* defined(QCACHE) */
#endif /* defined(VMEM_DEBUG) */

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	8192	/* XXX */
#define	VMEM_HASHSIZE_INIT	VMEM_HASHSIZE_MIN

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

/* vm_flag_t (internal uses) */
#define	VM_BTPAGE	0x00008000

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);
typedef struct vmem_hashlist vmem_hashlist_t;

#if defined(QCACHE)
#define	VMEM_QCACHE_IDX_MAX	32

#define	QC_NAME_MAX	16

struct qcache {
	pool_cache_t qc_cache;
	vmem_t *qc_vmem;
	char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)((pool)->pr_qcache))
#endif /* defined(QCACHE) */

/* vmem arena */
struct vmem {
	LOCK_DECL(vm_lock);
	vm_flag_t vm_flags;
	int vm_freetags;
	vmem_addr_t (*vm_allocfn)(vmem_t *, vmem_size_t, vmem_size_t *,
	    vm_flag_t);
	void (*vm_freefn)(vmem_t *, vmem_addr_t, vmem_size_t);
	vmem_t *vm_source;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	LIST_HEAD(, btpage_header) vm_btpagelist;
	size_t vm_hashsize;
	size_t vm_nbusytag;
	vmem_hashlist_t *vm_hashlist;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	const char *vm_name;
	LIST_ENTRY(vmem) vm_alllist;

#if defined(QCACHE)
	/* quantum cache */
	size_t vm_qcache_max;
	struct pool_allocator vm_qcache_allocator;
	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

#define	vmem_bootstrap_p(vm)	(((vm)->vm_flags & VMC_KVA) != 0)

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
		SLIST_ENTRY(vmem_btag) u_sfreelist; /* in btpage_header */
		SLIST_ENTRY(vmem_btag) u_tmplist; /* temp use in vmem_xfree */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
#define	bt_sfreelist	bt_u.u_sfreelist
#define	bt_tmplist	bt_u.u_tmplist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size)

typedef struct vmem_btag bt_t;

/* ---- misc */

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))
#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

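/*
 * Worked examples ("align" and "boundary" are powers of 2):
 *
 *	VMEM_ALIGNUP(0x123, 0x100) == 0x200
 *	VMEM_CROSS_P(0x0fff, 0x1000, 0x1000) != 0  (crosses a 0x1000 boundary)
 *	VMEM_CROSS_P(0x1000, 0x1fff, 0x1000) == 0  (stays within one)
 */
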
#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))

static int
calc_order(vmem_size_t size)
{
	vmem_size_t target;
	int i;

	KASSERT(size != 0);

	i = 0;
	target = size >> 1;
	while (ORDER2SIZE(i) <= target) {
		i++;
	}

	KASSERT(ORDER2SIZE(i) <= size);
	KASSERT(size < ORDER2SIZE(i + 1) || ORDER2SIZE(i + 1) < ORDER2SIZE(i));

	return i;
}
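
/*
 * i.e. calc_order(size) == floor(log2(size)); e.g. calc_order(1) == 0,
 * calc_order(7) == 2, calc_order(8) == 3.
 */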

static void *
xmalloc(size_t sz, vm_flag_t flags)
{

#if defined(_KERNEL)
	return kmem_alloc(sz, (flags & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
#else /* defined(_KERNEL) */
	return malloc(sz);
#endif /* defined(_KERNEL) */
}

static void
xfree(void *p, size_t sz)
{

#if defined(_KERNEL)
	kmem_free(p, sz);
#else /* defined(_KERNEL) */
	free(p);
#endif /* defined(_KERNEL) */
}

/* ---- static storage for bootstrap */

#define	STATIC_POOL_NAME(type)	static_ ## type
#define	STATIC_POOL_IDX(type)	static_ ## type ## _idx
#define	STATIC_POOL_DEFINE(type, n) \
	type STATIC_POOL_NAME(type)[(n)] __unused; \
	int STATIC_POOL_IDX(type) __unused
#define	STATIC_POOL_ALLOC(var, type) \
	(var) = &STATIC_POOL_NAME(type)[STATIC_POOL_IDX(type)++]; \
	KASSERT(STATIC_POOL_ELEM_P(type, var))
#define	STATIC_POOL_FREE(type, var) \
	KASSERT(STATIC_POOL_ELEM_P(type, var)); \
	KASSERT((var) == &STATIC_POOL_NAME(type)[STATIC_POOL_IDX(type)-1]); \
	STATIC_POOL_IDX(type)--
#define	STATIC_POOL_ELEM_P(type, var) \
	(&STATIC_POOL_NAME(type)[0] <= (var) && \
	(var) < &STATIC_POOL_NAME(type)[__arraycount(STATIC_POOL_NAME(type))])
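
/*
 * For illustration, STATIC_POOL_DEFINE(bt_t, 3) expands to
 *
 *	bt_t static_bt_t[3] __unused;
 *	int static_bt_t_idx __unused;
 *
 * STATIC_POOL_ALLOC hands out &static_bt_t[0], &static_bt_t[1], ...
 * in order; STATIC_POOL_FREE only accepts the most recently allocated
 * element, so these pools are strictly LIFO.
 */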

static STATIC_POOL_DEFINE(bt_t, 3);
static STATIC_POOL_DEFINE(vmem_t, 2);
static STATIC_POOL_DEFINE(vmem_hashlist_t, 2);
typedef struct pool_cache vmem_pool_cache_t; /* XXX */
static STATIC_POOL_DEFINE(vmem_pool_cache_t, VMEM_QCACHE_IDX_MAX+1);

/* ---- boundary tag */

#if defined(_KERNEL)
static struct pool_cache bt_cache;
#endif /* defined(_KERNEL) */

struct btpage_header {
	LIST_ENTRY(btpage_header) bh_q;
	int bh_nfree;
	SLIST_HEAD(, vmem_btag) bh_freelist;
	bt_t bh_bt[];
};
typedef struct btpage_header btpage_header_t;

#define	BT_PER_PAGE \
	((PAGE_SIZE - sizeof(btpage_header_t)) / sizeof(bt_t))
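
/*
 * A btpage is a single kernel page:
 *
 *	[ btpage_header | bt[0] | bt[1] | ... | bt[BT_PER_PAGE - 1] ]
 *
 * so btpage_lookup() can recover the header from any tag in the page
 * with trunc_page().  e.g. assuming PAGE_SIZE == 4096,
 * sizeof(btpage_header_t) == 16 and sizeof(bt_t) == 32 (hypothetical
 * 32-bit layout), BT_PER_PAGE == 127.
 */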

static int
btpage_alloc(vmem_t *vm, vm_flag_t flags)
{
	vmem_addr_t va;

	va = vmem_xalloc(vm, PAGE_SIZE, PAGE_SIZE, 0, 0, 0, 0,
	    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_BTPAGE);
	if (va == VMEM_ADDR_NULL) {
		return ENOMEM;
	}
	return 0;
}

static void
btpage_init(vmem_t *vm, struct vm_page *pg, vaddr_t va)
{
	btpage_header_t *bh;
	int i;

	VMEM_ASSERT_LOCKED(vm);
	KASSERT((va & PAGE_MASK) == 0);
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	bh = (void *)va;
	SLIST_INIT(&bh->bh_freelist);
	for (i = 0; i < BT_PER_PAGE; i++) {
		SLIST_INSERT_HEAD(&bh->bh_freelist, &bh->bh_bt[i],
		    bt_sfreelist);
	}
	LIST_INSERT_HEAD(&vm->vm_btpagelist, bh, bh_q);
	bh->bh_nfree = BT_PER_PAGE;
	vm->vm_freetags += bh->bh_nfree;
}

static void
btpage_free(vmem_t *vm, btpage_header_t *bh)
{

	KASSERT(vmem_bootstrap_p(vm));
	pmap_kremove((vaddr_t)bh, PAGE_SIZE);
	pmap_update(pmap_kernel());
	vmem_xfree(vm, (vmem_addr_t)bh, PAGE_SIZE);
}

static btpage_header_t *
btpage_lookup(bt_t *bt)
{

	return (void *)trunc_page((vaddr_t)bt);
}

static bt_t *
bt_alloc_bootstrap(vmem_t *vm)
{
	btpage_header_t *bh;
	bt_t *bt;

	KASSERT(vmem_bootstrap_p(vm));
	VMEM_ASSERT_LOCKED(vm);
	bh = LIST_FIRST(&vm->vm_btpagelist);
	if (__predict_false(bh == NULL)) {
		STATIC_POOL_ALLOC(bt, bt_t);
		return bt;
	}
	KASSERT(bh->bh_nfree > 0);
	bt = SLIST_FIRST(&bh->bh_freelist);
	KASSERT(bt != NULL);
	SLIST_REMOVE_HEAD(&bh->bh_freelist, bt_sfreelist);
	bh->bh_nfree--;
	vm->vm_freetags--;
	if (SLIST_EMPTY(&bh->bh_freelist)) {
		KASSERT(bh->bh_nfree == 0);
		LIST_REMOVE(bh, bh_q);
	}
	return bt;
}

#define	BT_MINRESERVE	1

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

#if defined(_KERNEL)
	if (vmem_bootstrap_p(vm)) {
again:
		VMEM_LOCK(vm);
		if (vm->vm_freetags <= BT_MINRESERVE &&
		    (flags & VM_BTPAGE) == 0) {
			VMEM_UNLOCK(vm);
			if (btpage_alloc(vm, flags)) {
				return NULL;
			}
			goto again;
		}
		bt = bt_alloc_bootstrap(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt = pool_cache_get(&bt_cache,
		    (flags & VM_SLEEP) != 0 ? PR_WAITOK : PR_NOWAIT);
	}
#else /* defined(_KERNEL) */
	bt = malloc(sizeof *bt);
#endif /* defined(_KERNEL) */

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt != NULL);
	KASSERT(!STATIC_POOL_ELEM_P(bt_t, bt));
#if defined(_KERNEL)
	if (vmem_bootstrap_p(vm)) {
		btpage_header_t *bh;

		bh = btpage_lookup(bt);
		VMEM_LOCK(vm);
		if (SLIST_EMPTY(&bh->bh_freelist)) {
			KASSERT(bh->bh_nfree == 0);
			LIST_INSERT_HEAD(&vm->vm_btpagelist, bh, bh_q);
		}
		SLIST_INSERT_HEAD(&bh->bh_freelist, bt, bt_sfreelist);
		bh->bh_nfree++;
		vm->vm_freetags++;
		if (vm->vm_freetags >= BT_PER_PAGE + BT_MINRESERVE &&
		    bh->bh_nfree == BT_PER_PAGE) {
			LIST_REMOVE(bh, bh_q);
			vm->vm_freetags -= BT_PER_PAGE;
			VMEM_UNLOCK(vm);
			btpage_free(vm, bh);
		} else {
			VMEM_UNLOCK(vm);
		}
	} else {
		pool_cache_put(&bt_cache, bt);
	}
#else /* defined(_KERNEL) */
	free(bt);
#endif /* defined(_KERNEL) */
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */
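
/*
 * e.g. with a quantum of 4096 (vm_quantum_shift == 12), a free segment
 * of 5 pages (qsize == 5, calc_order == 2) sits on freelist[2].
 * bt_freehead_toalloc() with VM_INSTANTFIT rounds the index up to
 * freelist[3] so that any tag found there is guaranteed large enough,
 * while VM_BESTFIT starts scanning at freelist[2].
 */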

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static vmem_hashlist_t *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	vmem_hashlist_t *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	vmem_hashlist_t *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	vmem_hashlist_t *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}
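
/*
 * e.g. qc_poolpage_size(0x8000) == 0x20000: the smallest power of 2
 * strictly greater than 3 * qcache_max, so that a pool page holds at
 * least a few items of the largest cached size.
 */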

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	return (void *)vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT);
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;
		pool_cache_t pc;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		if (!kmem_running_p()) {
			STATIC_POOL_ALLOC(pc, vmem_pool_cache_t);
			pool_cache_bootstrap(pc, size,
			    ORDER2SIZE(vm->vm_quantum_shift), 0,
			    PR_NOALIGN | PR_NOTOUCH /* XXX */,
			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		} else {
			pc = pool_cache_init(size,
			    ORDER2SIZE(vm->vm_quantum_shift), 0,
			    PR_NOALIGN | PR_NOTOUCH /* XXX */,
			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		}
		qc->qc_cache = pc;
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			if (!kmem_running_p()) {
				pool_cache_bootstrap_destroy(pc);
				STATIC_POOL_FREE(vmem_pool_cache_t, pc);
			} else {
				pool_cache_destroy(pc);
			}
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	KASSERT(!vmem_bootstrap_p(vm));
	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(qc->qc_cache);
		prevqc = qc;
	}
}

static bool
qc_reap(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;
	bool didsomething = false;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		if (pool_cache_reclaim(qc->qc_cache) != 0) {
			didsomething = true;
		}
		prevqc = qc;
	}

	return didsomething;
}
#endif /* defined(QCACHE) */

#if defined(_KERNEL)
static int
vmem_init(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_cache_bootstrap(&bt_cache, sizeof(bt_t), 0, 0, 0, "vmembt",
	    NULL, IPL_VM, NULL, NULL, NULL);
	return 0;
}
#endif /* defined(_KERNEL) */

static vmem_addr_t
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	if ((flags & VMC_KVA) != 0) {
		KASSERT(vmem_bootstrap_p(vm));
		KASSERT(CIRCLEQ_EMPTY(&vm->vm_seglist));
		STATIC_POOL_ALLOC(btspan, bt_t);
		STATIC_POOL_ALLOC(btfree, bt_t);
	} else {
		btspan = bt_alloc(vm, flags);
		if (btspan == NULL) {
			return VMEM_ADDR_NULL;
		}
		btfree = bt_alloc(vm, flags);
		if (btfree == NULL) {
			bt_free(vm, btspan);
			return VMEM_ADDR_NULL;
		}
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	VMEM_UNLOCK(vm);

	if ((flags & VMC_KVA) != 0) {
		bt_t *bt;

		/*
		 * leak a bt.  this ensures that btpage_alloc() runs
		 * and the arena is populated with boundary tags of
		 * its own before it is handed to callers.
		 */

		bt = bt_alloc(vm, VM_NOSLEEP);
		KASSERT(bt != NULL);

		/*
		 * don't leave "btfree" on the segment list because
		 * bt_free() doesn't expect static tags.
		 */

		bt = bt_alloc(vm, flags);
		VMEM_LOCK(vm);
		KASSERT(vm->vm_nbusytag == 1);
		bt->bt_start = btfree->bt_start;
		bt->bt_size = btfree->bt_size;
		bt->bt_type = btfree->bt_type;
		bt_insfree(vm, bt);
		bt_insseg(vm, bt, btfree);
		bt_remseg(vm, btfree);
		bt_remfree(vm, btfree);
		VMEM_UNLOCK(vm);
	}

	return addr;
}

static void
vmem_destroy1(vmem_t *vm)
{

	KASSERT(!vmem_bootstrap_p(vm));

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		xfree(vm->vm_hashlist,
		    sizeof(vmem_hashlist_t) * vm->vm_hashsize);
	}
	VMEM_LOCK_DESTROY(vm);
	xfree(vm, sizeof(*vm));
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;

	if (vm->vm_allocfn == NULL) {
		return EINVAL;
	}

	addr = (*vm->vm_allocfn)(vm->vm_source, size, &size, flags);
	if (addr == VMEM_ADDR_NULL) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) == VMEM_ADDR_NULL) {
		(*vm->vm_freefn)(vm->vm_source, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	vmem_hashlist_t *newhashlist;
	vmem_hashlist_t *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist = xmalloc(sizeof(vmem_hashlist_t) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist, sizeof(vmem_hashlist_t) * newhashsize);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (!STATIC_POOL_ELEM_P(vmem_hashlist_t, oldhashlist)) {
		xfree(oldhashlist, sizeof(vmem_hashlist_t) * oldhashsize);
	}

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 */
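
/*
 * Worked example (hypothetical numbers): for a free tag covering
 * [0x1234, 0x3234) and a request with size == 0x400, align == 0x1000,
 * phase == 0x100 and no other restrictions, vmem_fit returns 0x2100:
 * the lowest address within the tag with (addr & 0xfff) == 0x100 that
 * leaves room for 0x400 bytes.
 */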

static vmem_addr_t
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align, vmem_size_t phase,
    vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(bt->bt_size >= size);

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr - 1) {
		end = maxaddr - 1;
	}
	if (start >= end) {
		return VMEM_ADDR_NULL;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start < end && end - start >= size) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(start + size <= BT_END(bt));
		return start;
	}
	return VMEM_ADDR_NULL;
}

/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */
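
/*
 * Illustrative sketch of a caller (names and numbers hypothetical):
 *
 *	vmem_t *arena;
 *	vmem_addr_t va;
 *
 *	arena = vmem_create("example", 0x1000, 0x10000, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	va = vmem_alloc(arena, 64, VM_SLEEP | VM_INSTANTFIT);
 *	if (va != VMEM_ADDR_NULL) {
 *		...
 *		vmem_free(arena, va, 64);
 *	}
 *	vmem_destroy(arena);
 */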

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum,
    vmem_addr_t (*allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, vm_flag_t),
    void (*freefn)(vmem_t *, vmem_addr_t, vmem_size_t),
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags,
    int ipl)
{
	vmem_t *vm;
	int i;
#if defined(_KERNEL)
	static ONCE_DECL(control);
#endif /* defined(_KERNEL) */

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

#if defined(_KERNEL)
	if (RUN_ONCE(&control, vmem_init)) {
		return NULL;
	}
#endif /* defined(_KERNEL) */
	if ((flags & (VMC_KVA|VMC_KMEM)) != 0) {
		STATIC_POOL_ALLOC(vm, vmem_t);
	} else {
		vm = xmalloc(sizeof(*vm), flags);
		if (vm == NULL) {
			return NULL;
		}
	}

	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_name = name;
	vm->vm_flags = flags;
	vm->vm_freetags = 0;
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = calc_order(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_allocfn = allocfn;
	vm->vm_freefn = freefn;
	vm->vm_source = source;
	vm->vm_nbusytag = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if ((flags & (VMC_KVA|VMC_KMEM)) != 0) {
		STATIC_POOL_ALLOC(vm->vm_hashlist, vmem_hashlist_t);
		LIST_INIT(&vm->vm_hashlist[0]);
		vm->vm_hashsize = 1;
	} else if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy1(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) == VMEM_ADDR_NULL) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

#if 0
	if (vmem_bootstrap_p(vm)) {
		vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags);
	}
#endif

	return vm;
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

vmem_addr_t
vmem_alloc(vmem_t *vm, vmem_size_t size0, vm_flag_t flags)
{
	const vmem_size_t size __unused = vmem_roundup_size(vm, size0);
	const vm_flag_t strat __unused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE(NULL, __func__);
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = size >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		return (vmem_addr_t)pool_cache_get(qc->qc_cache,
		    vmf_to_prf(flags));
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size0, 0, 0, 0, 0, 0, flags);
}

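/*
 * vmem_xalloc: vmem_alloc with alignment, phase, boundary-crossing and
 * address range restrictions.
 *
 * Illustrative call (hypothetical values, assuming the arena quantum
 * divides them): allocate 8KB aligned to 8KB, below 16MB, not crossing
 * a 64KB boundary:
 *
 *	va = vmem_xalloc(vm, 8192, 8192, 0, 65536, 0, 16 * 1024 * 1024,
 *	    VM_SLEEP | VM_INSTANTFIT);
 */
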
vmem_addr_t
vmem_xalloc(vmem_t *vm, vmem_size_t size0, vmem_size_t align, vmem_size_t phase,
    vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr,
    vm_flag_t flags)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	struct vm_page *pg;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE(NULL, __func__);
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(maxaddr == 0 || minaddr < maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}
	pg = NULL;
	if ((flags & VM_BTPAGE) != 0) {
		KASSERT(size == PAGE_SIZE);
		KASSERT(align == PAGE_SIZE);
		while (pg == NULL) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL) {
				if ((flags & VM_NOSLEEP) != 0) {
					return VMEM_ADDR_NULL;
				}
				uvm_wait("btpage");
			}
		}
		btnew = NULL; /* XXX: gcc */
		btnew2 = NULL;
	} else {
		btnew = bt_alloc(vm, flags);
		if (btnew == NULL) {
			return VMEM_ADDR_NULL;
		}
		/* XXX not necessary if no restrictions */
		btnew2 = bt_alloc(vm, flags);
		if (btnew2 == NULL) {
			bt_free(vm, btnew);
			return VMEM_ADDR_NULL;
		}
	}

retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	if (strat == VM_INSTANTFIT) {
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				start = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr);
				if (start != VMEM_ADDR_NULL) {
					goto gotit;
				}
			}
		}
	} else { /* VM_BESTFIT */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					start = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr);
					if (start != VMEM_ADDR_NULL) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 ||
	    nocross != 0 || minaddr != 0 || maxaddr != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */
fail:
	if ((flags & VM_BTPAGE) != 0) {
		uvm_pagefree(pg);
	} else {
		bt_free(vm, btnew);
		bt_free(vm, btnew2);
	}
	return VMEM_ADDR_NULL;

gotit:
#if defined(PMAP_GROWKERNEL)
	if ((vm->vm_flags & VMC_KVA) != 0) {
		uvm_growkernel(start + size);
	}
#endif /* defined(PMAP_GROWKERNEL) */
	if ((flags & VM_BTPAGE) != 0) {
		vaddr_t va = (vaddr_t)start;

		KASSERT(bt->bt_start == start);
		btnew = bt_alloc_bootstrap(vm);
		btpage_init(vm, pg, va);
	}
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
		btnew2 = NULL;
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	return btnew->bt_start;
}

/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;
	SLIST_HEAD(, vmem_btag) tofree;

	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

	SLIST_INIT(&tofree);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) == t->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		SLIST_INSERT_HEAD(&tofree, t, bt_tmplist);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) == bt->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		SLIST_INSERT_HEAD(&tofree, t, bt_tmplist);
	}

	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_freefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		SLIST_INSERT_HEAD(&tofree, bt, bt_tmplist);
		bt_remseg(vm, t);
		SLIST_INSERT_HEAD(&tofree, t, bt_tmplist);
		VMEM_UNLOCK(vm);
		(*vm->vm_freefn)(vm->vm_source, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_UNLOCK(vm);
	}
	while ((t = SLIST_FIRST(&tofree)) != NULL) {
		SLIST_REMOVE_HEAD(&tofree, bt_tmplist);
		bt_free(vm, t);
	}
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

vmem_addr_t
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_reap: reap unused resources.
 *
 * => return true if we successfully reaped something.
 */

bool
vmem_reap(vmem_t *vm)
{
	bool didsomething = false;

#if defined(QCACHE)
	didsomething = qc_reap(vm);
#endif /* defined(QCACHE) */
	return didsomething;
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		if (!VMEM_TRYLOCK(vm)) {
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, 0);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, 0);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */

/* ---- debug */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	int i;

	for (i = 0; i < vm->vm_hashsize; i++) {
		bt_t *bt;

		LIST_FOREACH(bt, &vm->vm_hashlist[i], bt_hashlist) {
			if (bt->bt_start <= addr && addr < BT_END(bt)) {
				return bt;
			}
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu from VMEM '%s'\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name);
	}
}
#endif /* defined(DDB) */

#if defined(VMEM_DEBUG)

#if !defined(_KERNEL)
#include <stdio.h>
#endif /* !defined(_KERNEL) */

void bt_dump(const bt_t *);

void
bt_dump(const bt_t *bt)
{

	printf("\t%p: %" PRIu64 "(0x%" PRIx64 "), %" PRIu64 "(0x%" PRIx64
	    "), %d\n",
	    bt,
	    (uint64_t)bt->bt_start, (uint64_t)bt->bt_start,
	    (uint64_t)bt->bt_size, (uint64_t)bt->bt_size,
	    bt->bt_type);
}

void
vmem_dump_seglist(const vmem_t *vm)
{
	const bt_t *bt;

	printf("vmem %p '%s' SEGLIST\n", vm, vm->vm_name);

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt);
	}
}

void
vmem_dump_freelist(const vmem_t *vm)
{
	const bt_t *bt;
	int i;

	printf("vmem %p '%s' FREELIST\n", vm, vm->vm_name);

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		printf("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt);
		}
	}
}

#if defined(QCACHE)
void
vmem_dump_qc(const vmem_t *vm)
{
	int qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	int i;
	const qcache_t *prevqc;

	printf("qcache_max=%zu\n", vm->vm_qcache_max);

	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		const qcache_t *qc;

		qc = vm->vm_qcache[i];
		if (prevqc != qc) {
			printf("CACHE[%d] (%zu-) %p\n",
			    i, (size_t)i << vm->vm_quantum_shift, qc->qc_cache);
		}
		prevqc = qc;
	}
}
#endif /* defined(QCACHE) */

void
vmem_dump(const vmem_t *vm)
{

	printf("vmem %p '%s'\n", vm, vm->vm_name);
	vmem_dump_seglist(vm);
	vmem_dump_freelist(vm);
#if defined(QCACHE)
	vmem_dump_qc(vm);
#endif /* defined(QCACHE) */
}

#if !defined(_KERNEL)

int
main()
{
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", VMEM_ADDR_NULL, 0, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_VM);
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm);

	p = vmem_add(vm, 100, 200, VM_SLEEP);
	p = vmem_add(vm, 2000, 1, VM_SLEEP);
	p = vmem_add(vm, 40000, 0x10000000>>12, VM_SLEEP);
	p = vmem_add(vm, 10000, 10000, VM_SLEEP);
	p = vmem_add(vm, 500, 1000, VM_SLEEP);
	vmem_dump(vm);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				minaddr = rand() % 50000;
				maxaddr = rand() % 70000;
				if (minaddr > maxaddr) {
					minaddr = 0;
					maxaddr = 0;
				}
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				p = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				p = vmem_alloc(vm, sz, strat|VM_SLEEP);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm);
			if (p == VMEM_ADDR_NULL) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* !defined(_KERNEL) */
#endif /* defined(VMEM_DEBUG) */