/*	$NetBSD: subr_vmem.c,v 1.75 2012/09/01 12:28:58 para Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.75 2012/09/01 12:28:58 para Exp $");

#if defined(_KERNEL)
#include "opt_ddb.h"
#define	QCACHE
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>
#include <sys/bitops.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_km.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pdaemon.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */


#if defined(_KERNEL)
#include <sys/evcnt.h>
#define VMEM_EVCNT_DEFINE(name) \
struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "vmemev", #name); \
EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
#define VMEM_EVCNT_INCR(ev)	vmem_evcnt_##ev.ev_count++
#define VMEM_EVCNT_DECR(ev)	vmem_evcnt_##ev.ev_count--

VMEM_EVCNT_DEFINE(bt_pages)
VMEM_EVCNT_DEFINE(bt_count)
VMEM_EVCNT_DEFINE(bt_inuse)

#define	LOCK_DECL(name)		\
    kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]

#define CONDVAR_DECL(name)	\
    kcondvar_t name

#else /* defined(_KERNEL) */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define VMEM_EVCNT_INCR(ev)	/* nothing */
#define VMEM_EVCNT_DECR(ev)	/* nothing */

#define	UNITTEST
#define	KASSERT(a)		assert(a)
#define	LOCK_DECL(name)		/* nothing */
#define CONDVAR_DECL(name)	/* nothing */
#define VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
#define VMEM_CONDVAR_BROADCAST(vm)	/* nothing */
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	panic(...)	do { printf(__VA_ARGS__); abort(); } while (0)
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	1

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

#if defined(QCACHE)
#define	VMEM_QCACHE_IDX_MAX	32

#define	QC_NAME_MAX	16

struct qcache {
	pool_cache_t qc_cache;
	vmem_t *qc_vmem;
	char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
#endif /* defined(QCACHE) */

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	CONDVAR_DECL(vm_cv);
	LOCK_DECL(vm_lock);
	vm_flag_t vm_flags;
	vmem_import_t *vm_importfn;
	vmem_release_t *vm_releasefn;
	size_t vm_nfreetags;
	LIST_HEAD(, vmem_btag) vm_freetags;
	void *vm_arg;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	size_t vm_hashsize;
	size_t vm_nbusytag;
	struct vmem_hashlist *vm_hashlist;
	struct vmem_hashlist vm_hash0;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	size_t vm_size;
	size_t vm_inuse;
	char vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem) vm_alllist;

#if defined(QCACHE)
	/* quantum cache */
	size_t vm_qcache_max;
	struct pool_allocator vm_qcache_allocator;
	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

#if defined(_KERNEL)
#define VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
#endif /* defined(_KERNEL) */

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

typedef struct vmem_btag bt_t;

#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

/* ---- misc */

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)ilog2(size))
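
/*
 * Worked examples (illustrative, not in the original source):
 *	VMEM_ALIGNUP(0x123, 0x100) == 0x200; an already-aligned address
 *	is unchanged, e.g. VMEM_ALIGNUP(0x200, 0x100) == 0x200.
 *	VMEM_CROSS_P(0x1f0, 0x210, 0x100) is true because the two
 *	addresses lie in different 0x100-aligned chunks.
 *	SIZE2ORDER() rounds down: SIZE2ORDER(9) == 3, while
 *	ORDER2SIZE(3) == 8.
 */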

#if !defined(_KERNEL)
#define	xmalloc(sz, flags)	malloc(sz)
#define	xfree(p, sz)		free(p)
#define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define	bt_free(vm, bt)		free(bt)
#else /* defined(_KERNEL) */

#define	xmalloc(sz, flags) \
    kmem_intr_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP)
#define	xfree(p, sz)		kmem_intr_free(p, sz)

/*
 * Memory for arenas initialized during bootstrap.
 * There is memory for STATIC_VMEM_COUNT bootstrap arenas.
 *
 * BT_RESERVE calculation:
 * we allocate memory for boundary tags with vmem, therefore we have
 * to keep a reserve of bts used to allocate memory for bts.
 * This reserve is 4 for each arena involved in allocating vmem's memory.
 * BT_MAXFREE: don't cache excessive counts of bts in arenas
 */
#define STATIC_VMEM_COUNT 4
#define STATIC_BT_COUNT 200
#define BT_MINRESERVE 4
#define BT_MAXFREE 64
/* must be equal to or greater than the qcache multiplier for kmem_va_arena */
#define STATIC_QC_POOL_COUNT 8

static struct vmem static_vmems[STATIC_VMEM_COUNT];
static int static_vmem_count = STATIC_VMEM_COUNT;

static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;

static struct pool_cache static_qc_pools[STATIC_QC_POOL_COUNT];
static int static_qc_pool_count = STATIC_QC_POOL_COUNT;

vmem_t *kmem_va_meta_arena;
vmem_t *kmem_meta_arena;

static kmutex_t vmem_btag_lock;
static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
static size_t vmem_btag_freelist_count = 0;
static size_t vmem_btag_count = STATIC_BT_COUNT;

/* ---- boundary tag */

#define	BT_PER_PAGE	(PAGE_SIZE / sizeof(bt_t))

static int bt_refill(vmem_t *vm, vm_flag_t flags);

static int
bt_refillglobal(vm_flag_t flags)
{
	vmem_addr_t va;
	bt_t *btp;
	bt_t *bt;
	int i;

	mutex_enter(&vmem_btag_lock);
	if (vmem_btag_freelist_count > 0) {
		mutex_exit(&vmem_btag_lock);
		return 0;
	}

	if (vmem_alloc(kmem_meta_arena, PAGE_SIZE,
	    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va) != 0) {
		mutex_exit(&vmem_btag_lock);
		return ENOMEM;
	}
	VMEM_EVCNT_INCR(bt_pages);

	btp = (void *) va;
	for (i = 0; i < (BT_PER_PAGE); i++) {
		bt = btp;
		memset(bt, 0, sizeof(*bt));
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt,
		    bt_freelist);
		vmem_btag_freelist_count++;
		vmem_btag_count++;
		VMEM_EVCNT_INCR(bt_count);
		btp++;
	}
	mutex_exit(&vmem_btag_lock);

	bt_refill(kmem_arena, (flags & ~VM_FITMASK) | VM_INSTANTFIT);
	bt_refill(kmem_va_meta_arena, (flags & ~VM_FITMASK) | VM_INSTANTFIT);
	bt_refill(kmem_meta_arena, (flags & ~VM_FITMASK) | VM_INSTANTFIT);

	return 0;
}

static int
bt_refill(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	bt_refillglobal(flags);

	VMEM_LOCK(vm);
	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags <= BT_MINRESERVE) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
	}
	mutex_exit(&vmem_btag_lock);

	if (vm->vm_nfreetags == 0) {
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}
	VMEM_UNLOCK(vm);

	return 0;
}

static inline bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;
again:
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags <= BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		VMEM_UNLOCK(vm);
		if (bt_refill(vm, VM_NOSLEEP | VM_INSTANTFIT)) {
			return NULL;
		}
		goto again;
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);
	VMEM_EVCNT_INCR(bt_inuse);

	return bt;
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_LOCK(vm);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
	while (vm->vm_nfreetags > BT_MAXFREE) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		mutex_enter(&vmem_btag_lock);
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		vmem_btag_freelist_count++;
		mutex_exit(&vmem_btag_lock);
	}
	VMEM_UNLOCK(vm);
	VMEM_EVCNT_DECR(bt_inuse);
}

#endif /* defined(_KERNEL) */

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * for VM_INSTANTFIT, return the list in which any blocks are large enough
 * for the requested size.  otherwise, return the list which can have blocks
 * large enough for the requested size.
 */

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
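
/*
 * Illustration (not in the original source): for a request of 5 quanta
 * (SIZE2ORDER(5) == 2), bt_freehead_tofree() files a free block on
 * freelist[2] ([4, 7]).  bt_freehead_toalloc() with VM_INSTANTFIT
 * starts the search at freelist[3] ([8, 15]), where every block is
 * guaranteed to fit, while VM_BESTFIT starts at freelist[2], which may
 * also hold 4-quanta blocks that are too small and must be size-checked.
 */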

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}
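
/*
 * Example (not in the original source): for qcache_max == 1024 the loop
 * above stops at the first power of two greater than 3072, so the
 * quantum caches are backed by 4096-byte pool pages.
 */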

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;
		pool_cache_t pc;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);

		if (vm->vm_flags & VM_BOOTSTRAP) {
			KASSERT(static_qc_pool_count > 0);
			pc = &static_qc_pools[--static_qc_pool_count];
			pool_cache_bootstrap(pc, size,
			    ORDER2SIZE(vm->vm_quantum_shift), 0,
			    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		} else {
			pc = pool_cache_init(size,
			    ORDER2SIZE(vm->vm_quantum_shift), 0,
			    PR_NOALIGN | PR_NOTOUCH /* XXX */,
			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		}
		qc->qc_cache = pc;
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			if (vm->vm_flags & VM_BOOTSTRAP) {
				pool_cache_bootstrap_destroy(pc);
				//static_qc_pool_count++;
			} else {
				pool_cache_destroy(qc->qc_cache);
			}
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		if (vm->vm_flags & VM_BOOTSTRAP) {
			pool_cache_bootstrap_destroy(qc->qc_cache);
		} else {
			pool_cache_destroy(qc->qc_cache);
		}
		prevqc = qc;
	}
}
#endif

#if defined(_KERNEL)
void
vmem_bootstrap(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);

	while (static_bt_count-- > 0) {
		bt_t *bt = &static_bts[static_bt_count];
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		VMEM_EVCNT_INCR(bt_count);
		vmem_btag_freelist_count++;
	}
}

void
vmem_init(vmem_t *vm)
{

	kmem_va_meta_arena = vmem_create("vmem-va", 0, 0, PAGE_SIZE,
	    vmem_alloc, vmem_free, vm,
	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
	    IPL_VM);

	kmem_meta_arena = vmem_create("vmem-meta", 0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return ENOMEM;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;
	VMEM_UNLOCK(vm);

	return 0;
}

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		if (vm->vm_hashlist != &vm->vm_hash0) {
			xfree(vm->vm_hashlist,
			    sizeof(struct vmem_hashlist *) * vm->vm_hashsize);
		}
	}

	while (vm->vm_nfreetags > 0) {
		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		mutex_enter(&vmem_btag_lock);
#if defined(_KERNEL)
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		vmem_btag_freelist_count++;
#endif /* defined(_KERNEL) */
		mutex_exit(&vmem_btag_lock);
	}

	VMEM_LOCK_DESTROY(vm);
	xfree(vm, sizeof(*vm));
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	if (vm->vm_importfn == NULL) {
		return EINVAL;
	}

	if (vm->vm_flags & VM_LARGEIMPORT) {
		size *= 8;
	}

	if (vm->vm_flags & VM_XIMPORT) {
		rc = ((vmem_ximport_t *)vm->vm_importfn)(vm->vm_arg, size,
		    &size, flags, &addr);
	} else {
		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	}
	if (rc) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist,
		    sizeof(struct vmem_hashlist *) * newhashsize);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != &vm->vm_hash0) {
		xfree(oldhashlist,
		    sizeof(struct vmem_hashlist *) * oldhashsize);
	}

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it's the caller's responsibility to ensure the region is big enough
 * before calling us.
 */

static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return ENOMEM;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return ENOMEM;
}
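
/*
 * Example (not in the original source): given a free region
 * [0x1000, 0x2fff], a request with size 0x100, align 0x200, phase 0x80
 * and minaddr 0x1100 first clamps the candidate start to 0x1100 and
 * then rounds it up to the next address congruent to 0x80 modulo 0x200,
 * yielding *addrp == 0x1280.
 */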


/*
 * vmem_create_internal: creates a vmem arena.
 */

static vmem_t *
vmem_create_internal(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    void *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	vmem_t *vm = NULL;
	int i;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

	if (flags & VM_BOOTSTRAP) {
#if defined(_KERNEL)
		KASSERT(static_vmem_count > 0);
		vm = &static_vmems[--static_vmem_count];
#endif /* defined(_KERNEL) */
	} else {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if (flags & VM_BOOTSTRAP) {
		vm->vm_hashsize = 1;
		vm->vm_hashlist = &vm->vm_hash0;
	} else if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy1(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm, VM_NOSLEEP);
	}

	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}


/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_create_internal(name, base, size, quantum,
	    importfn, releasefn, source, qcache_max, flags, ipl);
}

/*
 * vmem_xcreate: create an arena that takes an alternative import function.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_create_internal(name, base, size, quantum,
	    (vmem_import_t *)importfn, releasefn, source,
	    qcache_max, flags | VM_XIMPORT, ipl);
}
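
/*
 * Usage sketch (illustrative only; the arena name and address range
 * are made up):
 *
 *	vmem_t *foo_arena;
 *	vmem_addr_t addr;
 *
 *	foo_arena = vmem_create("foo", 0x1000, 0x10000, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	if (vmem_alloc(foo_arena, 64, VM_SLEEP | VM_INSTANTFIT,
 *	    &addr) == 0)
 *		vmem_free(foo_arena, addr, 64);
 *	vmem_destroy(foo_arena);
 */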

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
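
/*
 * Example: with a quantum of 16, vmem_roundup_size() maps 17 to 32
 * and leaves 32 unchanged.
 */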

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __unused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		return (p == NULL) ? ENOMEM : 0;
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
    vmem_addr_t *addrp)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	int rc;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(minaddr <= maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}

	/*
	 * allocate boundary tags before acquiring the vmem lock.
	 */
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return ENOMEM;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return ENOMEM;
	}

	/*
	 * choose a free block from which we allocate.
	 */
retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		/*
		 * just choose the first block which satisfies our
		 * restrictions.
		 *
		 * note that we don't need to check the size of the blocks
		 * because any blocks found on these lists should be larger
		 * than the given size.
		 */
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				rc = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr, &start);
				if (rc == 0) {
					goto gotit;
				}
				/*
				 * don't bother to follow the bt_freelist link
				 * here.  the list can be very long and we are
				 * told to run fast.  blocks from the later free
				 * lists are larger and have better chances to
				 * satisfy our restrictions.
				 */
			}
		}
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we will start searching
		 * from a lower-order list than VM_INSTANTFIT would.
		 * however, don't bother to find the smallest block in a free
		 * list because the list can be very long.  we can revisit it
		 * if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller than
		 * the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */

	if ((flags & VM_SLEEP) != 0) {
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
		mutex_spin_enter(&uvm_fpageqlock);
		uvm_kick_pdaemon();
		mutex_spin_exit(&uvm_fpageqlock);
#endif
		VMEM_LOCK(vm);
		VMEM_CONDVAR_WAIT(vm);
		VMEM_UNLOCK(vm);
		goto retry;
	}
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return ENOMEM;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	if (addrp != NULL)
		*addrp = btnew->bt_start;
	return 0;
}

/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}

	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		bt_remseg(vm, t);
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
	}

	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		bt_free(vm, t);
	}
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_size: report an arena's size.
 *
 * => return the free and/or allocated size of the arena,
 *    depending on typemask.
 */
1442 1.66 para vmem_size_t
1443 1.66 para vmem_size(vmem_t *vm, int typemask)
1444 1.6 yamt {
1445 1.6 yamt
1446 1.66 para switch (typemask) {
1447 1.66 para case VMEM_ALLOC:
1448 1.66 para return vm->vm_inuse;
1449 1.66 para case VMEM_FREE:
1450 1.66 para return vm->vm_size - vm->vm_inuse;
1451 1.66 para case VMEM_FREE|VMEM_ALLOC:
1452 1.66 para return vm->vm_size;
1453 1.66 para default:
1454 1.66 para panic("vmem_size");
1455 1.66 para }
1456 1.6 yamt }
1457 1.6 yamt
1458 1.30 yamt /* ---- rehash */
1459 1.30 yamt
1460 1.30 yamt #if defined(_KERNEL)
1461 1.30 yamt static struct callout vmem_rehash_ch;
1462 1.30 yamt static int vmem_rehash_interval;
1463 1.30 yamt static struct workqueue *vmem_rehash_wq;
1464 1.30 yamt static struct work vmem_rehash_wk;
1465 1.30 yamt
1466 1.30 yamt static void
1467 1.30 yamt vmem_rehash_all(struct work *wk, void *dummy)
1468 1.30 yamt {
1469 1.30 yamt vmem_t *vm;
1470 1.30 yamt
1471 1.30 yamt KASSERT(wk == &vmem_rehash_wk);
1472 1.30 yamt mutex_enter(&vmem_list_lock);
1473 1.30 yamt LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1474 1.30 yamt size_t desired;
1475 1.30 yamt size_t current;
1476 1.30 yamt
1477 1.30 yamt if (!VMEM_TRYLOCK(vm)) {
1478 1.30 yamt continue;
1479 1.30 yamt }
1480 1.30 yamt desired = vm->vm_nbusytag;
1481 1.30 yamt current = vm->vm_hashsize;
1482 1.30 yamt VMEM_UNLOCK(vm);
1483 1.30 yamt
1484 1.30 yamt if (desired > VMEM_HASHSIZE_MAX) {
1485 1.30 yamt desired = VMEM_HASHSIZE_MAX;
1486 1.30 yamt } else if (desired < VMEM_HASHSIZE_MIN) {
1487 1.30 yamt desired = VMEM_HASHSIZE_MIN;
1488 1.30 yamt }
1489 1.30 yamt if (desired > current * 2 || desired * 2 < current) {
1490 1.30 yamt vmem_rehash(vm, desired, VM_NOSLEEP);
1491 1.30 yamt }
1492 1.30 yamt }
1493 1.30 yamt mutex_exit(&vmem_list_lock);
1494 1.30 yamt
1495 1.30 yamt callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1496 1.30 yamt }
1497 1.30 yamt
1498 1.30 yamt static void
1499 1.30 yamt vmem_rehash_all_kick(void *dummy)
1500 1.30 yamt {
1501 1.30 yamt
1502 1.32 rmind workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
1503 1.30 yamt }
1504 1.30 yamt
1505 1.30 yamt void
1506 1.30 yamt vmem_rehash_start(void)
1507 1.30 yamt {
1508 1.30 yamt int error;
1509 1.30 yamt
1510 1.30 yamt error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
1511 1.41 ad vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
1512 1.30 yamt if (error) {
1513 1.30 yamt panic("%s: workqueue_create %d\n", __func__, error);
1514 1.30 yamt }
1515 1.41 ad callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
1516 1.30 yamt callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);
1517 1.30 yamt
1518 1.30 yamt vmem_rehash_interval = hz * 10;
1519 1.30 yamt callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1520 1.30 yamt }
1521 1.30 yamt #endif /* defined(_KERNEL) */
1522 1.30 yamt
1523 1.1 yamt /* ---- debug */
1524 1.1 yamt
1525 1.55 yamt #if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)
1526 1.55 yamt
1527 1.55 yamt static void bt_dump(const bt_t *, void (*)(const char *, ...));
1528 1.55 yamt
1529 1.55 yamt static const char *
1530 1.55 yamt bt_type_string(int type)
1531 1.55 yamt {
1532 1.55 yamt static const char * const table[] = {
1533 1.55 yamt [BT_TYPE_BUSY] = "busy",
1534 1.55 yamt [BT_TYPE_FREE] = "free",
1535 1.55 yamt [BT_TYPE_SPAN] = "span",
1536 1.55 yamt [BT_TYPE_SPAN_STATIC] = "static span",
1537 1.55 yamt };
1538 1.55 yamt
1539 1.55 yamt if (type >= __arraycount(table)) {
1540 1.55 yamt return "BOGUS";
1541 1.55 yamt }
1542 1.55 yamt return table[type];
1543 1.55 yamt }
1544 1.55 yamt
1545 1.55 yamt static void
1546 1.55 yamt bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
1547 1.55 yamt {
1548 1.55 yamt
1549 1.55 yamt (*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
1550 1.55 yamt bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
1551 1.55 yamt bt->bt_type, bt_type_string(bt->bt_type));
1552 1.55 yamt }
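/*
 * bt_dump() output is one line per boundary tag:
 *
 *	<tag address>: <start>, <size>, <type>(<type name>)
 */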
1553 1.55 yamt
1554 1.55 yamt static void
1555 1.55 yamt vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...))
1556 1.55 yamt {
1557 1.55 yamt const bt_t *bt;
1558 1.55 yamt int i;
1559 1.55 yamt
1560 1.55 yamt (*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1561 1.55 yamt CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1562 1.55 yamt bt_dump(bt, pr);
1563 1.55 yamt }
1564 1.55 yamt
1565 1.55 yamt for (i = 0; i < VMEM_MAXORDER; i++) {
1566 1.55 yamt const struct vmem_freelist *fl = &vm->vm_freelist[i];
1567 1.55 yamt
1568 1.55 yamt if (LIST_EMPTY(fl)) {
1569 1.55 yamt continue;
1570 1.55 yamt }
1571 1.55 yamt
1572 1.55 yamt (*pr)("freelist[%d]\n", i);
1573 1.55 yamt LIST_FOREACH(bt, fl, bt_freelist) {
1574 1.55 yamt bt_dump(bt, pr);
1575 1.55 yamt }
1576 1.55 yamt }
1577 1.55 yamt }
1578 1.55 yamt
1579 1.55 yamt #endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */
1580 1.55 yamt
1581 1.37 yamt #if defined(DDB)
1582 1.37 yamt static bt_t *
1583 1.37 yamt vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
1584 1.37 yamt {
1585 1.39 yamt bt_t *bt;
1586 1.37 yamt
1587 1.39 yamt CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1588 1.39 yamt if (BT_ISSPAN_P(bt)) {
1589 1.39 yamt continue;
1590 1.39 yamt }
1591 1.60 dyoung if (bt->bt_start <= addr && addr <= BT_END(bt)) {
1592 1.39 yamt return bt;
1593 1.37 yamt }
1594 1.37 yamt }
1595 1.37 yamt
1596 1.37 yamt return NULL;
1597 1.37 yamt }
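/*
 * A linear walk of the segment list, rather than a busy-tag hash
 * lookup: "whatis" queries arbitrary addresses, while the hash is
 * keyed on exact segment start addresses only.
 */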
1598 1.37 yamt
1599 1.37 yamt void
1600 1.37 yamt vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
1601 1.37 yamt {
1602 1.37 yamt vmem_t *vm;
1603 1.37 yamt
1604 1.37 yamt LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1605 1.37 yamt bt_t *bt;
1606 1.37 yamt
1607 1.37 yamt bt = vmem_whatis_lookup(vm, addr);
1608 1.37 yamt if (bt == NULL) {
1609 1.37 yamt continue;
1610 1.37 yamt }
1611 1.39 yamt (*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1612 1.37 yamt (void *)addr, (void *)bt->bt_start,
1613 1.39 yamt (size_t)(addr - bt->bt_start), vm->vm_name,
1614 1.39 yamt (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1615 1.37 yamt }
1616 1.37 yamt }
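/*
 * Example output (addresses and arena name illustrative only):
 *
 *	0xdeadbeef is 0xdeadb000+3823 in VMEM 'kmem_va' (allocated)
 */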
1617 1.43 cegger
1618 1.55 yamt void
1619 1.55 yamt vmem_printall(const char *modif, void (*pr)(const char *, ...))
1620 1.43 cegger {
1621 1.55 yamt const vmem_t *vm;
1622 1.43 cegger
1623 1.47 cegger LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1624 1.55 yamt vmem_dump(vm, pr);
1625 1.43 cegger }
1626 1.43 cegger }
1627 1.43 cegger
1628 1.43 cegger void
1629 1.43 cegger vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
1630 1.43 cegger {
1631 1.55 yamt const vmem_t *vm = (const void *)addr;
1632 1.43 cegger
1633 1.55 yamt vmem_dump(vm, pr);
1634 1.43 cegger }
1635 1.37 yamt #endif /* defined(DDB) */
1636 1.37 yamt
1637 1.60 dyoung #if defined(_KERNEL)
1638 1.60 dyoung #define vmem_printf printf
1639 1.60 dyoung #else
1640 1.1 yamt #include <stdio.h>
1641 1.60 dyoung #include <stdarg.h>
1642 1.60 dyoung
1643 1.60 dyoung static void
1644 1.60 dyoung vmem_printf(const char *fmt, ...)
1645 1.60 dyoung {
1646 1.60 dyoung va_list ap;
1647 1.60 dyoung va_start(ap, fmt);
1648 1.60 dyoung vprintf(fmt, ap);
1649 1.60 dyoung va_end(ap);
1650 1.60 dyoung }
1651 1.60 dyoung #endif
1652 1.1 yamt
1653 1.55 yamt #if defined(VMEM_SANITY)
1654 1.1 yamt
1655 1.55 yamt static bool
1656 1.55 yamt vmem_check_sanity(vmem_t *vm)
1657 1.1 yamt {
1658 1.55 yamt const bt_t *bt, *bt2;
1659 1.1 yamt
1660 1.55 yamt KASSERT(vm != NULL);
1661 1.1 yamt
1662 1.1 yamt CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1663 1.60 dyoung if (bt->bt_start > BT_END(bt)) {
1664 1.55 yamt printf("corrupted tag\n");
1665 1.60 dyoung bt_dump(bt, vmem_printf);
1666 1.55 yamt return false;
1667 1.55 yamt }
1668 1.55 yamt }
1669 1.55 yamt CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1670 1.55 yamt CIRCLEQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1671 1.55 yamt if (bt == bt2) {
1672 1.55 yamt continue;
1673 1.55 yamt }
1674 1.55 yamt if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
1675 1.55 yamt continue;
1676 1.55 yamt }
1677 1.60 dyoung if (bt->bt_start <= BT_END(bt2) &&
1678 1.60 dyoung bt2->bt_start <= BT_END(bt)) {
1679 1.55 yamt printf("overlapping tags\n");
1680 1.60 dyoung bt_dump(bt, vmem_printf);
1681 1.60 dyoung bt_dump(bt2, vmem_printf);
1682 1.55 yamt return false;
1683 1.55 yamt }
1684 1.55 yamt }
1685 1.1 yamt }
1686 1.1 yamt
1687 1.55 yamt return true;
1688 1.55 yamt }
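/*
 * The overlap test above is the usual closed-interval intersection
 * check: [s1, e1] and [s2, e2] overlap iff s1 <= e2 && s2 <= e1.
 * The walk is O(n^2) in the number of tags, which is why it is
 * compiled in only under VMEM_SANITY.
 */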
1689 1.1 yamt
1690 1.55 yamt static void
1691 1.55 yamt vmem_check(vmem_t *vm)
1692 1.55 yamt {
1693 1.1 yamt
1694 1.55 yamt if (!vmem_check_sanity(vm)) {
1695 1.55 yamt panic("vmem %p: sanity check failed", vm);
1696 1.1 yamt }
1697 1.1 yamt }
1698 1.1 yamt
1699 1.55 yamt #endif /* defined(VMEM_SANITY) */
1700 1.1 yamt
1701 1.55 yamt #if defined(UNITTEST)

#include <assert.h>	/* assert() */
#include <stdlib.h>	/* exit(), rand(), realloc() */

1702 1.1 yamt int
1703 1.57 cegger main(void)
1704 1.1 yamt {
1705 1.61 dyoung int rc;
1706 1.1 yamt vmem_t *vm;
1707 1.1 yamt vmem_addr_t p;
1708 1.1 yamt struct reg {
1709 1.1 yamt vmem_addr_t p;
1710 1.1 yamt vmem_size_t sz;
1711 1.25 thorpej bool x;
1712 1.1 yamt } *reg = NULL;
1713 1.1 yamt int nreg = 0;
1714 1.1 yamt int nalloc = 0;
1715 1.1 yamt int nfree = 0;
1716 1.1 yamt vmem_size_t total = 0;
1717 1.1 yamt #if 1
1718 1.1 yamt vm_flag_t strat = VM_INSTANTFIT;
1719 1.1 yamt #else
1720 1.1 yamt vm_flag_t strat = VM_BESTFIT;
1721 1.1 yamt #endif
1722 1.1 yamt
1723 1.61 dyoung vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
1724 1.61 dyoung #ifdef _KERNEL
1725 1.61 dyoung IPL_NONE
1726 1.61 dyoung #else
1727 1.61 dyoung 0
1728 1.61 dyoung #endif
1729 1.61 dyoung );
1730 1.1 yamt if (vm == NULL) {
1731 1.1 yamt printf("vmem_create\n");
1732 1.1 yamt exit(EXIT_FAILURE);
1733 1.1 yamt }
1734 1.60 dyoung vmem_dump(vm, vmem_printf);
1735 1.1 yamt
1736 1.61 dyoung rc = vmem_add(vm, 0, 50, VM_SLEEP);
1737 1.61 dyoung assert(rc == 0);
1738 1.61 dyoung rc = vmem_add(vm, 100, 200, VM_SLEEP);
1739 1.61 dyoung assert(rc == 0);
1740 1.61 dyoung rc = vmem_add(vm, 2000, 1, VM_SLEEP);
1741 1.61 dyoung assert(rc == 0);
1742 1.61 dyoung rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
1743 1.61 dyoung assert(rc == 0);
1744 1.61 dyoung rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
1745 1.61 dyoung assert(rc == 0);
1746 1.61 dyoung rc = vmem_add(vm, 500, 1000, VM_SLEEP);
1747 1.61 dyoung assert(rc == 0);
1748 1.61 dyoung rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
1749 1.61 dyoung assert(rc == 0);
1750 1.61 dyoung rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
1751 1.61 dyoung 0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
1752 1.61 dyoung assert(rc != 0);
1753 1.61 dyoung rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
1754 1.61 dyoung assert(rc == 0 && p == 0);
1755 1.61 dyoung vmem_xfree(vm, p, 50);
1756 1.61 dyoung rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
1757 1.61 dyoung assert(rc == 0 && p == 0);
1758 1.61 dyoung rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1759 1.61 dyoung 0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
1760 1.61 dyoung assert(rc != 0);
1761 1.61 dyoung rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1762 1.61 dyoung 0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
1763 1.61 dyoung assert(rc != 0);
1764 1.61 dyoung rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1765 1.61 dyoung 0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
1766 1.61 dyoung assert(rc == 0);
1767 1.60 dyoung vmem_dump(vm, vmem_printf);
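/*
 * Random exerciser: each iteration draws t in [0, 100).  t > 45 means
 * allocate (with random alignment/phase/nocross/range constraints via
 * vmem_xalloc() when t > 70); otherwise free a randomly chosen earlier
 * allocation.  Every allocation is recorded in reg[] so that it can be
 * released with the matching vmem_xfree()/vmem_free().  The loop ends
 * when a plain vmem_alloc() fails, i.e. when the arena is exhausted.
 */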
1768 1.1 yamt for (;;) {
1769 1.1 yamt struct reg *r;
1770 1.10 yamt int t = rand() % 100;
1771 1.1 yamt
1772 1.10 yamt if (t > 45) {
1773 1.10 yamt /* alloc */
1774 1.1 yamt vmem_size_t sz = rand() % 500 + 1;
1775 1.25 thorpej bool x;
1776 1.10 yamt vmem_size_t align, phase, nocross;
1777 1.10 yamt vmem_addr_t minaddr, maxaddr;
1778 1.10 yamt
1779 1.10 yamt if (t > 70) {
1780 1.26 thorpej x = true;
1781 1.10 yamt /* XXX */
1782 1.10 yamt align = 1 << (rand() % 15);
1783 1.10 yamt phase = rand() % 65536;
1784 1.10 yamt nocross = 1 << (rand() % 15);
1785 1.10 yamt if (align <= phase) {
1786 1.10 yamt phase = 0;
1787 1.10 yamt }
1788 1.19 yamt if (VMEM_CROSS_P(phase, phase + sz - 1,
1789 1.19 yamt nocross)) {
1790 1.10 yamt nocross = 0;
1791 1.10 yamt }
1792 1.60 dyoung do {
1793 1.60 dyoung minaddr = rand() % 50000;
1794 1.60 dyoung maxaddr = rand() % 70000;
1795 1.60 dyoung } while (minaddr > maxaddr);
1796 1.10 yamt printf("=== xalloc %" PRIu64
1797 1.10 yamt " align=%" PRIu64 ", phase=%" PRIu64
1798 1.10 yamt ", nocross=%" PRIu64 ", min=%" PRIu64
1799 1.10 yamt ", max=%" PRIu64 "\n",
1800 1.10 yamt (uint64_t)sz,
1801 1.10 yamt (uint64_t)align,
1802 1.10 yamt (uint64_t)phase,
1803 1.10 yamt (uint64_t)nocross,
1804 1.10 yamt (uint64_t)minaddr,
1805 1.10 yamt (uint64_t)maxaddr);
1806 1.61 dyoung rc = vmem_xalloc(vm, sz, align, phase, nocross,
1807 1.61 dyoung minaddr, maxaddr, strat|VM_SLEEP, &p);
1808 1.10 yamt } else {
1809 1.26 thorpej x = false;
1810 1.10 yamt printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
1811 1.61 dyoung rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
1812 1.10 yamt }
1813 1.1 yamt if (rc == 0) {
	printf("-> %" PRIu64 "\n", (uint64_t)p);
} else {
	/* p is left unset when the allocation fails */
	printf("-> failed\n");
}
1814 1.60 dyoung vmem_dump(vm, vmem_printf);
1815 1.61 dyoung if (rc != 0) {
1816 1.10 yamt if (x) {
1817 1.10 yamt continue;
1818 1.10 yamt }
1819 1.1 yamt break;
1820 1.1 yamt }
1821 1.1 yamt nreg++;
1822 1.1 yamt reg = realloc(reg, sizeof(*reg) * nreg);
assert(reg != NULL);
1823 1.1 yamt r = &reg[nreg - 1];
1824 1.1 yamt r->p = p;
1825 1.1 yamt r->sz = sz;
1826 1.10 yamt r->x = x;
1827 1.1 yamt total += sz;
1828 1.1 yamt nalloc++;
1829 1.1 yamt } else if (nreg != 0) {
1830 1.10 yamt /* free */
1831 1.1 yamt r = &reg[rand() % nreg];
1832 1.1 yamt printf("=== free %" PRIu64 ", %" PRIu64 "\n",
1833 1.1 yamt (uint64_t)r->p, (uint64_t)r->sz);
1834 1.10 yamt if (r->x) {
1835 1.10 yamt vmem_xfree(vm, r->p, r->sz);
1836 1.10 yamt } else {
1837 1.10 yamt vmem_free(vm, r->p, r->sz);
1838 1.10 yamt }
1839 1.1 yamt total -= r->sz;
1840 1.60 dyoung vmem_dump(vm, vmem_printf);
1841 1.1 yamt *r = reg[nreg - 1];
1842 1.1 yamt nreg--;
1843 1.1 yamt nfree++;
1844 1.1 yamt }
1845 1.1 yamt printf("total=%" PRIu64 "\n", (uint64_t)total);
1846 1.1 yamt }
1847 1.1 yamt fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
1848 1.1 yamt (uint64_t)total, nalloc, nfree);
1849 1.1 yamt exit(EXIT_SUCCESS);
1850 1.1 yamt }
1851 1.55 yamt #endif /* defined(UNITTEST) */
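#if 0
/*
 * A minimal usage sketch of the arena API exercised above (not
 * compiled; "example", the base/size constants and example_grab()
 * are made up for illustration):
 */
static vmem_t *example_arena;

static void
example_init(void)
{

	/* manage the integer range [0x1000, 0x1000 + 0x10000) in units of 1 */
	example_arena = vmem_create("example", 0x1000, 0x10000, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
	KASSERT(example_arena != NULL);
}

static vmem_addr_t
example_grab(vmem_size_t sz)
{
	vmem_addr_t addr;

	if (vmem_alloc(example_arena, sz, VM_INSTANTFIT | VM_SLEEP,
	    &addr) != 0) {
		panic("example_grab");
	}
	return addr;
}
#endif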
1852