/*	$NetBSD: subr_vmem.c,v 1.68 2012/01/29 13:38:15 para Exp $	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.68 2012/01/29 13:38:15 para Exp $");

#if defined(_KERNEL)
#include "opt_ddb.h"
#define	QCACHE
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>
#include <sys/bitops.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_km.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pdaemon.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */


#if defined(_KERNEL)
#include <sys/evcnt.h>
#define VMEM_EVCNT_DEFINE(name) \
struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "vmemev", #name); \
EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
#define VMEM_EVCNT_INCR(ev)	vmem_evcnt_##ev.ev_count++
#define VMEM_EVCNT_DECR(ev)	vmem_evcnt_##ev.ev_count--

VMEM_EVCNT_DEFINE(bt_pages)
VMEM_EVCNT_DEFINE(bt_count)
VMEM_EVCNT_DEFINE(bt_inuse)

#define	LOCK_DECL(name)		\
    kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]

#define CONDVAR_DECL(name)	\
    kcondvar_t name;

#else /* defined(_KERNEL) */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define VMEM_EVCNT_INCR(ev)	/* nothing */
#define VMEM_EVCNT_DECR(ev)	/* nothing */

#define	UNITTEST
#define	KASSERT(a)		assert(a)
#define	LOCK_DECL(name)		/* nothing */
#define CONDVAR_DECL(name)	/* nothing */
#define VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
/*
 * no-op stubs for the userland test build; these symbols are
 * kernel-only (the kernel variants are defined below and in uvm),
 * and without stubs the unguarded uses in vmem_xalloc() would not
 * compile outside the kernel.
 */
#define VMEM_CONDVAR_DESTROY(vm)	/* nothing */
#define VMEM_CONDVAR_WAIT(vm)		/* nothing */
#define VMEM_CONDVAR_BROADCAST(vm)	/* nothing */
#define uvm_kick_pdaemon()		/* nothing */
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		/* nothing */
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	panic(...)	do { printf(__VA_ARGS__); abort(); } while (/*CONSTCOND*/0)
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	1

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

#if defined(QCACHE)
#define	VMEM_QCACHE_IDX_MAX	32

#define	QC_NAME_MAX	16

struct qcache {
	pool_cache_t qc_cache;
	vmem_t *qc_vmem;
	char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)((pool)->pr_qcache))
#endif /* defined(QCACHE) */

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	CONDVAR_DECL(vm_cv);
	LOCK_DECL(vm_lock);
	vm_flag_t vm_flags;
	vmem_import_t *vm_importfn;
	vmem_release_t *vm_releasefn;
	size_t vm_nfreetags;
	LIST_HEAD(, vmem_btag) vm_freetags;
	void *vm_arg;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	size_t vm_hashsize;
	size_t vm_nbusytag;
	struct vmem_hashlist *vm_hashlist;
	struct vmem_hashlist vm_hash0;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	size_t vm_size;
	size_t vm_inuse;
	char vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem) vm_alllist;

#if defined(QCACHE)
	/* quantum cache */
	size_t vm_qcache_max;
	struct pool_allocator vm_qcache_allocator;
	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

#if defined(_KERNEL)
#define VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
#endif /* defined(_KERNEL) */

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
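
/*
 * note that BT_END() is inclusive: with assumed values, a tag with
 * bt_start 0x1000 and bt_size 0x100 covers [0x1000, 0x10ff], so
 * BT_END() yields 0x10ff.  this is why the range checks below compare
 * with <= rather than <.
 */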

typedef struct vmem_btag bt_t;

#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

/* ---- misc */

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)ilog2(size))
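
/*
 * a quick illustration of the macros above (the values are assumptions
 * for the example, not from any particular platform):
 *
 *	VMEM_ALIGNUP(0x13, 0x10) == 0x20	(rounds up)
 *	VMEM_ALIGNUP(0x10, 0x10) == 0x10	(already aligned)
 *	VMEM_CROSS_P(0x0ff0, 0x1001, 0x1000)	(true: crosses 0x1000)
 *	ORDER2SIZE(3) == 8
 *	SIZE2ORDER(8) == 3
 *	SIZE2ORDER(5) == 2			(ilog2 rounds down)
 *
 * align and boundary must be powers of two for the bit tricks to work.
 */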

#if !defined(_KERNEL)
#define	xmalloc(sz, flags)	malloc(sz)
#define	xfree(p, sz)		free(p)
#define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define	bt_free(vm, bt)		free(bt)
#else /* defined(_KERNEL) */

#define	xmalloc(sz, flags) \
	kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP)
#define	xfree(p, sz)		kmem_free(p, sz)

#define BT_MINRESERVE 6
#define BT_MAXFREE 64
#define STATIC_VMEM_COUNT 5
#define STATIC_BT_COUNT 200
#define STATIC_QC_POOL_COUNT (VMEM_QCACHE_IDX_MAX + 1)

static struct vmem static_vmems[STATIC_VMEM_COUNT];
static int static_vmem_count = STATIC_VMEM_COUNT;

static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;

static struct pool_cache static_qc_pools[STATIC_QC_POOL_COUNT];
static int static_qc_pool_count = STATIC_QC_POOL_COUNT;

vmem_t *kmem_va_meta_arena;
vmem_t *kmem_meta_arena;

static kmutex_t vmem_btag_lock;
static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
static size_t vmem_btag_freelist_count = 0;
static size_t vmem_btag_count = STATIC_BT_COUNT;

/* ---- boundary tag */

#define	BT_PER_PAGE	(PAGE_SIZE / sizeof(bt_t))

static int bt_refill(vmem_t *vm, vm_flag_t flags);

static int
bt_refillglobal(vm_flag_t flags)
{
	vmem_addr_t va;
	bt_t *btp;
	bt_t *bt;
	int i;

	mutex_enter(&vmem_btag_lock);
	if (vmem_btag_freelist_count > (BT_MINRESERVE * 16)) {
		mutex_exit(&vmem_btag_lock);
		return 0;
	}

	if (vmem_alloc(kmem_meta_arena, PAGE_SIZE,
	    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va) != 0) {
		mutex_exit(&vmem_btag_lock);
		return ENOMEM;
	}
	VMEM_EVCNT_INCR(bt_pages);

	btp = (void *) va;
	for (i = 0; i < (BT_PER_PAGE); i++) {
		bt = btp;
		memset(bt, 0, sizeof(*bt));
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt,
		    bt_freelist);
		vmem_btag_freelist_count++;
		vmem_btag_count++;
		VMEM_EVCNT_INCR(bt_count);
		btp++;
	}
	mutex_exit(&vmem_btag_lock);

	bt_refill(kmem_arena, (flags & ~VM_FITMASK) | VM_INSTANTFIT);
	bt_refill(kmem_va_meta_arena, (flags & ~VM_FITMASK) | VM_INSTANTFIT);
	bt_refill(kmem_meta_arena, (flags & ~VM_FITMASK) | VM_INSTANTFIT);

	return 0;
}

static int
bt_refill(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	bt_refillglobal(flags);

	VMEM_LOCK(vm);
	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags < (BT_MINRESERVE * 2)) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
	}
	mutex_exit(&vmem_btag_lock);

	if (vm->vm_nfreetags == 0) {
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}
	VMEM_UNLOCK(vm);

	return 0;
}

static inline bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;
again:
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags < BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		VMEM_UNLOCK(vm);
		if (bt_refill(vm, VM_NOSLEEP | VM_INSTANTFIT)) {
			return NULL;
		}
		goto again;
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);
	VMEM_EVCNT_INCR(bt_inuse);

	return bt;
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_LOCK(vm);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
	while (vm->vm_nfreetags > BT_MAXFREE) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		mutex_enter(&vmem_btag_lock);
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		vmem_btag_freelist_count++;
		mutex_exit(&vmem_btag_lock);
	}
	VMEM_UNLOCK(vm);
	VMEM_EVCNT_DECR(bt_inuse);
}

#endif /* defined(_KERNEL) */

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */
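
/*
 * for example, with a quantum of 1, a free block of 5 quanta lives on
 * freelist[2] ([4, 7]) and a free block of 8 quanta on freelist[3]
 * ([8, 15]): the index is simply SIZE2ORDER() of the size in quantum
 * units, as computed by bt_freehead_tofree() below.
 */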

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * for VM_INSTANTFIT, return the list in which any block is large enough
 * for the requested size.  otherwise, return the list which can have blocks
 * large enough for the requested size.
 */

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
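
/*
 * a concrete example of the strategy above (quantum of 1 assumed):
 * for a request of 5 quanta, SIZE2ORDER(5) == 2, i.e. freelist[2],
 * which holds blocks of [4, 7] quanta.  VM_BESTFIT starts searching
 * there, since some of those blocks may fit.  VM_INSTANTFIT bumps the
 * index to freelist[3] ([8, 15]), because every block on that list is
 * guaranteed to be large enough and the first one found can be taken
 * without scanning.
 */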

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;
		pool_cache_t pc;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);

		if (vm->vm_flags & VM_BOOTSTRAP) {
			KASSERT(static_qc_pool_count > 0);
			pc = &static_qc_pools[--static_qc_pool_count];
			pool_cache_bootstrap(pc, size,
			    ORDER2SIZE(vm->vm_quantum_shift), 0,
			    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		} else {
			pc = pool_cache_init(size,
			    ORDER2SIZE(vm->vm_quantum_shift), 0,
			    PR_NOALIGN | PR_NOTOUCH /* XXX */,
			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		}
		qc->qc_cache = pc;
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			if (vm->vm_flags & VM_BOOTSTRAP) {
				pool_cache_bootstrap_destroy(pc);
				/* static_qc_pool_count++; */
			} else {
				pool_cache_destroy(qc->qc_cache);
			}
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}
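
/*
 * an illustration of the sizing above (assumed numbers, not from any
 * particular configuration): with a quantum of 256 and a qcache_max of
 * 1024, qcache_idx_max is 4 and caches for object sizes 256, 512, 768
 * and 1024 are created.  adjacent sizes whose pools would pack the
 * same number of items per page share a single pool_cache, and
 * vm_qcache[] maps a request's quantum index straight to its cache.
 */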

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		if (vm->vm_flags & VM_BOOTSTRAP) {
			pool_cache_bootstrap_destroy(qc->qc_cache);
		} else {
			pool_cache_destroy(qc->qc_cache);
		}
		prevqc = qc;
	}
}
#endif /* defined(QCACHE) */

#if defined(_KERNEL)
void
vmem_bootstrap(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);

	while (static_bt_count-- > 0) {
		bt_t *bt = &static_bts[static_bt_count];
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		VMEM_EVCNT_INCR(bt_count);
		vmem_btag_freelist_count++;
	}
}

void
vmem_init(vmem_t *vm)
{

	kmem_va_meta_arena = vmem_create("vmem-va", 0, 0, PAGE_SIZE,
	    vmem_alloc, vmem_free, vm,
	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
	    IPL_VM);

	kmem_meta_arena = vmem_create("vmem-meta", 0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return ENOMEM;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;
	VMEM_UNLOCK(vm);

	return 0;
}

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		if (vm->vm_hashlist != &vm->vm_hash0) {
			xfree(vm->vm_hashlist,
			    sizeof(struct vmem_hashlist *) * vm->vm_hashsize);
		}
	}

	while (vm->vm_nfreetags > 0) {
		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		mutex_enter(&vmem_btag_lock);
#if defined(_KERNEL)
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		vmem_btag_freelist_count++;
#endif /* defined(_KERNEL) */
		mutex_exit(&vmem_btag_lock);
	}

	VMEM_LOCK_DESTROY(vm);
	xfree(vm, sizeof(*vm));
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	if (vm->vm_importfn == NULL) {
		return EINVAL;
	}

	if (vm->vm_flags & VM_LARGEIMPORT) {
		size *= 32;
	}

	if (vm->vm_flags & VM_XIMPORT) {
		rc = ((vmem_ximport_t *)vm->vm_importfn)(vm->vm_arg, size,
		    &size, flags, &addr);
		if (rc != 0) {
			return ENOMEM;
		}
	} else {
		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
		if (rc != 0) {
			return ENOMEM;
		}
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist,
		    sizeof(struct vmem_hashlist *) * newhashsize);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != &vm->vm_hash0) {
		xfree(oldhashlist,
		    sizeof(struct vmem_hashlist *) * oldhashsize);
	}

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it's the caller's responsibility to ensure the region is big enough
 * before calling us.
 */

static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return ENOMEM;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return ENOMEM;
}
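
/*
 * a worked example of the fitting logic above (all numbers assumed):
 * given a free tag covering [0x1000, 0x1fff], size 0x200, align 0x400
 * and phase 0x100, the candidate start is
 * VMEM_ALIGNUP(0x1000 - 0x100, 0x400) + 0x100 == 0x1100, which leaves
 * [0x1100, 0x12ff] inside the tag, so the fit succeeds with 0x1100
 * (and indeed 0x1100 & (0x400 - 1) == 0x100, the requested phase).
 */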

/*
 * vmem_create_internal: creates a vmem arena.
 */

static vmem_t *
vmem_create_internal(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    void *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	vmem_t *vm = NULL;
	int i;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

	if (flags & VM_BOOTSTRAP) {
#if defined(_KERNEL)
		KASSERT(static_vmem_count > 0);
		vm = &static_vmems[--static_vmem_count];
#endif /* defined(_KERNEL) */
	} else {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if (flags & VM_BOOTSTRAP) {
		vm->vm_hashsize = 1;
		vm->vm_hashlist = &vm->vm_hash0;
	} else if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy1(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm, VM_NOSLEEP);
	}

	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}


/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_create_internal(name, base, size, quantum,
	    importfn, releasefn, source, qcache_max, flags, ipl);
}

/*
 * vmem_xcreate: create an arena with an alternative import function.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_create_internal(name, base, size, quantum,
	    (vmem_import_t *)importfn, releasefn, source,
	    qcache_max, flags | VM_XIMPORT, ipl);
}
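
/*
 * usage sketch (illustrative only; the arena name, base, size and
 * quantum below are assumptions, not from any real configuration):
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0x1000, 0x10000, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	if (vmem_alloc(arena, 64, VM_SLEEP | VM_INSTANTFIT, &addr) == 0) {
 *		... use [addr, addr + 63] ...
 *		vmem_free(arena, addr, 64);
 *	}
 *	vmem_destroy(arena);
 */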

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __unused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		return (p == NULL) ? ENOMEM : 0;
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
    vmem_addr_t *addrp)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	int rc;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(minaddr <= maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}

	/*
	 * allocate boundary tags before acquiring the vmem lock.
	 */
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return ENOMEM;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return ENOMEM;
	}

	/*
	 * choose a free block from which we allocate.
	 */
retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		/*
		 * just choose the first block which satisfies our restrictions.
		 *
		 * note that we don't need to check the size of the blocks
		 * because any block found on these lists should be larger
		 * than the given size.
		 */
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				rc = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr, &start);
				if (rc == 0) {
					goto gotit;
				}
				/*
				 * don't bother to follow the bt_freelist link
				 * here.  the list can be very long and we are
				 * told to run fast.  blocks from the later free
				 * lists are larger and have better chances to
				 * satisfy our restrictions.
				 */
			}
		}
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we will start searching
		 * from the lower-order list than VM_INSTANTFIT.
		 * however, don't bother to find the smallest block in a free
		 * list because the list can be very long.  we can revisit it
		 * if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller than
		 * the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 ||
	    nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */

	if ((flags & VM_SLEEP) != 0) {
		uvm_kick_pdaemon();
		VMEM_LOCK(vm);
		VMEM_CONDVAR_WAIT(vm);
		VMEM_UNLOCK(vm);
		goto retry;
	}
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return ENOMEM;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		/* keep vm_inuse in sync with the decrement in vmem_xfree() */
		vm->vm_inuse += btnew->bt_size;
		vmem_check(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		/* keep vm_inuse in sync with the decrement in vmem_xfree() */
		vm->vm_inuse += bt->bt_size;
		vmem_check(vm);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	if (addrp != NULL)
		*addrp = btnew->bt_start;
	return 0;
}
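
/*
 * example of a constrained allocation (the numbers are assumptions):
 *
 *	vmem_addr_t addr;
 *	int error;
 *
 *	error = vmem_xalloc(arena, 0x2000, 0x1000, 0, 0x10000,
 *	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, VM_NOSLEEP | VM_BESTFIT, &addr);
 *
 * this asks for 0x2000 bytes aligned to 0x1000 which do not cross a
 * 0x10000 boundary.  on success, the region should later be returned
 * with vmem_xfree(arena, addr, 0x2000) rather than vmem_free(), so
 * that it bypasses the quantum cache.
 */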

/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	vm->vm_inuse -= bt->bt_size;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}

	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		bt_remseg(vm, t);
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
		vm->vm_size -= spansize;
#if defined(_KERNEL)
		VMEM_CONDVAR_BROADCAST(vm);
#endif /* defined(_KERNEL) */
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
#if defined(_KERNEL)
		VMEM_CONDVAR_BROADCAST(vm);
#endif /* defined(_KERNEL) */
		VMEM_UNLOCK(vm);
	}

	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		bt_free(vm, t);
	}
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_size: report size information about the arena.
 *
 * => return the free, allocated, or total size, depending on typemask.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}
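
/*
 * for instance (hypothetical values): after adding a 0x10000-byte span
 * and allocating 0x4000 bytes from it,
 *
 *	vmem_size(vm, VMEM_ALLOC)           == 0x4000
 *	vmem_size(vm, VMEM_FREE)            == 0xc000
 *	vmem_size(vm, VMEM_FREE|VMEM_ALLOC) == 0x10000
 *
 * allocations served from the quantum cache are only reflected here
 * at pool-page granularity, when the cache imports or releases pages.
 */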
1449 1.6 yamt
1450 1.30 yamt /* ---- rehash */
1451 1.30 yamt
1452 1.30 yamt #if defined(_KERNEL)
1453 1.30 yamt static struct callout vmem_rehash_ch;
1454 1.30 yamt static int vmem_rehash_interval;
1455 1.30 yamt static struct workqueue *vmem_rehash_wq;
1456 1.30 yamt static struct work vmem_rehash_wk;
1457 1.30 yamt
1458 1.30 yamt static void
1459 1.30 yamt vmem_rehash_all(struct work *wk, void *dummy)
1460 1.30 yamt {
1461 1.30 yamt vmem_t *vm;
1462 1.30 yamt
1463 1.30 yamt KASSERT(wk == &vmem_rehash_wk);
1464 1.30 yamt mutex_enter(&vmem_list_lock);
1465 1.30 yamt LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1466 1.30 yamt size_t desired;
1467 1.30 yamt size_t current;
1468 1.30 yamt
1469 1.30 yamt if (!VMEM_TRYLOCK(vm)) {
1470 1.30 yamt continue;
1471 1.30 yamt }
1472 1.30 yamt desired = vm->vm_nbusytag;
1473 1.30 yamt current = vm->vm_hashsize;
1474 1.30 yamt VMEM_UNLOCK(vm);
1475 1.30 yamt
1476 1.30 yamt if (desired > VMEM_HASHSIZE_MAX) {
1477 1.30 yamt desired = VMEM_HASHSIZE_MAX;
1478 1.30 yamt } else if (desired < VMEM_HASHSIZE_MIN) {
1479 1.30 yamt desired = VMEM_HASHSIZE_MIN;
1480 1.30 yamt }
1481 1.30 yamt if (desired > current * 2 || desired * 2 < current) {
1482 1.30 yamt vmem_rehash(vm, desired, VM_NOSLEEP);
1483 1.30 yamt }
1484 1.30 yamt }
1485 1.30 yamt mutex_exit(&vmem_list_lock);
1486 1.30 yamt
1487 1.30 yamt callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1488 1.30 yamt }
1489 1.30 yamt
1490 1.30 yamt static void
1491 1.30 yamt vmem_rehash_all_kick(void *dummy)
1492 1.30 yamt {
1493 1.30 yamt
1494 1.32 rmind workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
1495 1.30 yamt }
1496 1.30 yamt
1497 1.30 yamt void
1498 1.30 yamt vmem_rehash_start(void)
1499 1.30 yamt {
1500 1.30 yamt int error;
1501 1.30 yamt
1502 1.30 yamt error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
1503 1.41 ad vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
1504 1.30 yamt if (error) {
1505 1.30 yamt panic("%s: workqueue_create %d\n", __func__, error);
1506 1.30 yamt }
1507 1.41 ad callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
1508 1.30 yamt callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);
1509 1.30 yamt
1510 1.30 yamt vmem_rehash_interval = hz * 10;
1511 1.30 yamt callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1512 1.30 yamt }
1513 1.30 yamt #endif /* defined(_KERNEL) */
1514 1.30 yamt
1515 1.1 yamt /* ---- debug */
1516 1.1 yamt
1517 1.55 yamt #if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)
1518 1.55 yamt
1519 1.55 yamt static void bt_dump(const bt_t *, void (*)(const char *, ...));
1520 1.55 yamt
1521 1.55 yamt static const char *
1522 1.55 yamt bt_type_string(int type)
1523 1.55 yamt {
1524 1.55 yamt static const char * const table[] = {
1525 1.55 yamt [BT_TYPE_BUSY] = "busy",
1526 1.55 yamt [BT_TYPE_FREE] = "free",
1527 1.55 yamt [BT_TYPE_SPAN] = "span",
1528 1.55 yamt [BT_TYPE_SPAN_STATIC] = "static span",
1529 1.55 yamt };
1530 1.55 yamt
1531 1.55 yamt if (type >= __arraycount(table)) {
1532 1.55 yamt return "BOGUS";
1533 1.55 yamt }
1534 1.55 yamt return table[type];
1535 1.55 yamt }
1536 1.55 yamt
static void
bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
{

	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */

#if defined(DDB)
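/*
 * ddb support: find the boundary tag, if any, that covers addr,
 * ignoring span markers (a span always covers its contents).
 */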
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	bt_t *bt;

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

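/*
 * Back end for the ddb "whatis" command: report which arena, if any,
 * contains the given address.  Illustrative output:
 *
 *	0xdeadbeef is 0xdeadb000+3823 in VMEM 'kmem' (allocated)
 */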
void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */

#if defined(_KERNEL)
#define vmem_printf printf
#else
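/*
 * The userland test build has no kernel printf; provide a vprintf
 * wrapper with the signature that the dump routines expect.
 */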
#include <stdio.h>
#include <stdarg.h>

static void
vmem_printf(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
#endif

#if defined(VMEM_SANITY)

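/*
 * Two passes over the segment list: first check that each tag is
 * well-formed (start <= end, i.e. the size does not wrap), then an
 * O(n^2) check that no two tags of the same class overlap.  Span
 * tags legitimately cover the busy/free tags carved out of them,
 * hence the BT_ISSPAN_P() filter.
 */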
static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	KASSERT(vm != NULL);

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		CIRCLEQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insane vmem %p", vm);
	}
}

#endif /* defined(VMEM_SANITY) */

#if defined(UNITTEST)

#include <assert.h>
#include <stdlib.h>
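
/*
 * Stand-alone exerciser for the userland build (compile the file with
 * -DUNITTEST; the exact incantation depends on the tree).  After a
 * handful of fixed boundary cases it loops, randomly allocating
 * (sometimes via vmem_xalloc with random constraints) and freeing,
 * dumping the arena as it goes, until an unconstrained allocation
 * fails.
 */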
int
main(void)
{
	int rc;
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
#ifdef _KERNEL
	    IPL_NONE
#else
	    0
#endif
	    );
	if (vm == NULL) {
		printf("vmem_create failed\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm, vmem_printf);

	rc = vmem_add(vm, 0, 50, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 100, 200, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 2000, 1, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 500, 1000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
	assert(rc == 0);
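	/*
	 * Window boundary cases: an exact fit in [min, max] must
	 * succeed, while a request one byte too large for the window,
	 * or an exact-size request with the window shifted or shrunk
	 * by one, must fail.
	 */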
	rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	vmem_xfree(vm, p, 50);
	rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc == 0);
	vmem_dump(vm, vmem_printf);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

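		/*
		 * t > 45 (54%): allocate, using vmem_xalloc with random
		 * constraints when t > 70; otherwise free a randomly
		 * chosen live region.
		 */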
		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
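				/*
				 * If even the ideal window [phase,
				 * phase + sz - 1] crosses a nocross
				 * boundary, the constraint is
				 * unsatisfiable; drop it.
				 */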
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				do {
					minaddr = rand() % 50000;
					maxaddr = rand() % 70000;
				} while (minaddr > maxaddr);
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				rc = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP, &p);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm, vmem_printf);
			if (rc != 0) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm, vmem_printf);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* defined(UNITTEST) */