/*	$NetBSD: subr_vmem.c,v 1.30 2007/06/17 13:34:43 yamt Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 *
 * todo:
 * -	decide how to import segments for vmem_xalloc.
 * -	don't rely on malloc(9).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.30 2007/06/17 13:34:43 yamt Exp $");

#define	VMEM_DEBUG
#if defined(_KERNEL)
#define	QCACHE
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/once.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/vmem.h>
#include <sys/workqueue.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#define	SIMPLELOCK_DECL(name)	struct simplelock name
#else /* defined(_KERNEL) */
#include <errno.h>
#include <assert.h>
#include <stdlib.h>

#define	KASSERT(a)		assert(a)
#define	SIMPLELOCK_DECL(name)	/* nothing */
#define	LOCK_ASSERT(a)		/* nothing */
#define	simple_lock_init(a)	/* nothing */
#define	simple_lock(a)		/* nothing */
#define	simple_lock_try(a)	1
#define	simple_unlock(a)	/* nothing */
#define	ASSERT_SLEEPABLE(lk, msg)	/* nothing */
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_DEBUG)
void vmem_dump(const vmem_t *);
#endif /* defined(VMEM_DEBUG) */

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	8192	/* XXX */
#define	VMEM_HASHSIZE_INIT	VMEM_HASHSIZE_MIN

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

#if defined(QCACHE)
#define	VMEM_QCACHE_IDX_MAX	32

#define	QC_NAME_MAX	16

struct qcache {
	struct pool qc_pool;
	struct pool_cache qc_cache;
	vmem_t *qc_vmem;
	char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool))
#endif /* defined(QCACHE) */

/* vmem arena */
struct vmem {
	SIMPLELOCK_DECL(vm_lock);
	vmem_addr_t (*vm_allocfn)(vmem_t *, vmem_size_t, vmem_size_t *,
	    vm_flag_t);
	void (*vm_freefn)(vmem_t *, vmem_addr_t, vmem_size_t);
	vmem_t *vm_source;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	size_t vm_hashsize;
	size_t vm_nbusytag;
	struct vmem_hashlist *vm_hashlist;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	const char *vm_name;
	LIST_ENTRY(vmem) vm_alllist;

#if defined(QCACHE)
	/* quantum cache */
	size_t vm_qcache_max;
	struct pool_allocator vm_qcache_allocator;
	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};

#define	VMEM_LOCK(vm)		simple_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	simple_lock_try(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		simple_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm)	simple_lock_init(&vm->vm_lock);
#define	VMEM_ASSERT_LOCKED(vm) \
	LOCK_ASSERT(simple_lock_held(&vm->vm_lock))
#define	VMEM_ASSERT_UNLOCKED(vm) \
	LOCK_ASSERT(!simple_lock_held(&vm->vm_lock))

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size)

typedef struct vmem_btag bt_t;

/* ---- misc */

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))
#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
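
/*
 * Illustrative notes (not part of the original source):
 *
 * VMEM_ALIGNUP rounds an address up to a power-of-two alignment using
 * two's complement arithmetic, e.g. VMEM_ALIGNUP(13, 8) == 16 and
 * VMEM_ALIGNUP(16, 8) == 16.
 *
 * VMEM_CROSS_P is true iff the two addresses fall into different
 * "boundary"-aligned blocks, e.g. VMEM_CROSS_P(14, 17, 16) is true
 * because 14 and 17 straddle the boundary at 16, while
 * VMEM_CROSS_P(17, 30, 16) is false.
 *
 * calc_order() below returns floor(log2(size)), so calc_order(1) == 0,
 * calc_order(8) == 3 and calc_order(9) == 3.
 */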

static int
calc_order(vmem_size_t size)
{
	vmem_size_t target;
	int i;

	KASSERT(size != 0);

	i = 0;
	target = size >> 1;
	while (ORDER2SIZE(i) <= target) {
		i++;
	}

	KASSERT(ORDER2SIZE(i) <= size);
	KASSERT(size < ORDER2SIZE(i + 1) || ORDER2SIZE(i + 1) < ORDER2SIZE(i));

	return i;
}

#if defined(_KERNEL)
static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");
#endif /* defined(_KERNEL) */

static void *
xmalloc(size_t sz, vm_flag_t flags)
{

#if defined(_KERNEL)
	return malloc(sz, M_VMEM,
	    M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
#else /* defined(_KERNEL) */
	return malloc(sz);
#endif /* defined(_KERNEL) */
}

static void
xfree(void *p)
{

#if defined(_KERNEL)
	return free(p, M_VMEM);
#else /* defined(_KERNEL) */
	return free(p);
#endif /* defined(_KERNEL) */
}

/* ---- boundary tag */

#if defined(_KERNEL)
static struct pool_cache bt_poolcache;
static POOL_INIT(bt_pool, sizeof(bt_t), 0, 0, 0, "vmembtpl", NULL, IPL_VM);
#endif /* defined(_KERNEL) */

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

#if defined(_KERNEL)
	int s;

	/* XXX bootstrap */
	s = splvm();
	bt = pool_cache_get(&bt_poolcache,
	    (flags & VM_SLEEP) != 0 ? PR_WAITOK : PR_NOWAIT);
	splx(s);
#else /* defined(_KERNEL) */
	bt = malloc(sizeof *bt);
#endif /* defined(_KERNEL) */

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

#if defined(_KERNEL)
	int s;

	/* XXX bootstrap */
	s = splvm();
	pool_cache_put(&bt_poolcache, bt);
	splx(s);
#else /* defined(_KERNEL) */
	free(bt);
#endif /* defined(_KERNEL) */
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */
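
/*
 * Worked example (added for illustration): a free segment of 5 quanta
 * sits on freelist[2] ([4, 7]).  bt_freehead_tofree() therefore files a
 * 5-quantum tag under index calc_order(5) == 2, while an instant-fit
 * allocation of 5 quanta starts searching at freelist[3], where every
 * tag is guaranteed to be at least 8 quanta and thus large enough.
 */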

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
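
/*
 * Note (added): rounding the index up for VM_INSTANTFIT means that any
 * tag found on a non-empty list is already big enough, giving a
 * constant-time allocation at the cost of some fragmentation.
 * VM_BESTFIT starts at the exact bucket, which may also hold smaller
 * tags, so vmem_xalloc() has to compare bt_size while walking the list.
 */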

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}
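
/*
 * Example (added): qc_poolpage_size() returns the smallest power of two
 * strictly greater than 3 * qcache_max, so at least three items of the
 * largest cached size fit on one pool page; e.g. qcache_max == 32768
 * yields a 131072-byte pool page.
 */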

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	return (void *)vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT);
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		pool_init(&qc->qc_pool, size, ORDER2SIZE(vm->vm_quantum_shift),
		    0, PR_NOALIGN | PR_NOTOUCH /* XXX */, qc->qc_name, pa,
		    IPL_NONE);
		if (prevqc != NULL &&
		    qc->qc_pool.pr_itemsperpage ==
		    prevqc->qc_pool.pr_itemsperpage) {
			pool_destroy(&qc->qc_pool);
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		pool_cache_init(&qc->qc_cache, &qc->qc_pool, NULL, NULL, NULL);
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		pool_cache_destroy(&qc->qc_cache);
		pool_destroy(&qc->qc_pool);
		prevqc = qc;
	}
}

static bool
qc_reap(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;
	bool didsomething = false;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		if (pool_reclaim(&qc->qc_pool) != 0) {
			didsomething = true;
		}
		prevqc = qc;
	}

	return didsomething;
}
#endif /* defined(QCACHE) */

#if defined(_KERNEL)
static int
vmem_init(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_cache_init(&bt_poolcache, &bt_pool, NULL, NULL, NULL);
	return 0;
}
#endif /* defined(_KERNEL) */

static vmem_addr_t
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	VMEM_ASSERT_UNLOCKED(vm);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return VMEM_ADDR_NULL;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return VMEM_ADDR_NULL;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	VMEM_UNLOCK(vm);

	return addr;
}

static void
vmem_destroy1(vmem_t *vm)
{

	VMEM_ASSERT_UNLOCKED(vm);

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		xfree(vm->vm_hashlist);
	}
	xfree(vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;

	VMEM_ASSERT_UNLOCKED(vm);

	if (vm->vm_allocfn == NULL) {
		return EINVAL;
	}

	addr = (*vm->vm_allocfn)(vm->vm_source, size, &size, flags);
	if (addr == VMEM_ADDR_NULL) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) == VMEM_ADDR_NULL) {
		(*vm->vm_freefn)(vm->vm_source, addr, size);
		return ENOMEM;
	}

	return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);
	VMEM_ASSERT_UNLOCKED(vm);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	xfree(oldhashlist);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 */

static vmem_addr_t
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align, vmem_size_t phase,
    vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(bt->bt_size >= size);

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr - 1) {
		end = maxaddr - 1;
	}
	if (start >= end) {
		return VMEM_ADDR_NULL;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start < end && end - start >= size) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(start + size <= BT_END(bt));
		return start;
	}
	return VMEM_ADDR_NULL;
}
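
/*
 * Worked example (added for illustration, with arbitrary numbers): for a
 * free tag covering [1000, 2000), vmem_fit(bt, size = 100, align = 256,
 * phase = 0, nocross = 0, minaddr = 0, maxaddr = 0) rounds the start up
 * to 1024 and, since 1024 + 100 <= 2000, returns 1024.  With phase = 16
 * it would return 1040 instead.
 */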

/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum,
    vmem_addr_t (*allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, vm_flag_t),
    void (*freefn)(vmem_t *, vmem_addr_t, vmem_size_t),
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags)
{
	vmem_t *vm;
	int i;
#if defined(_KERNEL)
	static ONCE_DECL(control);
#endif /* defined(_KERNEL) */

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

#if defined(_KERNEL)
	if (RUN_ONCE(&control, vmem_init)) {
		return NULL;
	}
#endif /* defined(_KERNEL) */
	vm = xmalloc(sizeof(*vm), flags);
	if (vm == NULL) {
		return NULL;
	}

	VMEM_LOCK_INIT(vm);
	vm->vm_name = name;
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = calc_order(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_allocfn = allocfn;
	vm->vm_freefn = freefn;
	vm->vm_source = source;
	vm->vm_nbusytag = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max);
#endif /* defined(QCACHE) */

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy1(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) == 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}
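
/*
 * Usage sketch (added for illustration only; the backing arena, callbacks,
 * quantum and sizes below are made up, not part of this file):
 *
 *	vmem_t *va = vmem_create("kva", 0, 0, PAGE_SIZE,
 *	    my_import, my_release, parent_arena, 8 * PAGE_SIZE, VM_SLEEP);
 *	vmem_addr_t addr = vmem_alloc(va, 3 * PAGE_SIZE,
 *	    VM_SLEEP | VM_INSTANTFIT);
 *	if (addr != VMEM_ADDR_NULL)
 *		vmem_free(va, addr, 3 * PAGE_SIZE);
 *	vmem_destroy(va);
 *
 * Passing size == 0 creates an empty arena that is populated later with
 * vmem_add() or on demand through the import callback.
 */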

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

vmem_addr_t
vmem_alloc(vmem_t *vm, vmem_size_t size0, vm_flag_t flags)
{
	const vmem_size_t size __unused = vmem_roundup_size(vm, size0);
	const vm_flag_t strat __unused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	VMEM_ASSERT_UNLOCKED(vm);

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE(NULL, __func__);
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = size >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		return (vmem_addr_t)pool_cache_get(&qc->qc_cache,
		    vmf_to_prf(flags));
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size0, 0, 0, 0, 0, 0, flags);
}
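
/*
 * Note (added): requests no larger than vm_qcache_max are satisfied from
 * a per-size pool_cache above, which amortizes boundary-tag work over
 * many small requests and only touches the arena's free lists when the
 * cache needs a new pool page; anything bigger falls through to
 * vmem_xalloc() with no extra restrictions.
 */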

vmem_addr_t
vmem_xalloc(vmem_t *vm, vmem_size_t size0, vmem_size_t align, vmem_size_t phase,
    vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr,
    vm_flag_t flags)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE(NULL, __func__);
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(maxaddr == 0 || minaddr < maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return VMEM_ADDR_NULL;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return VMEM_ADDR_NULL;
	}

retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	if (strat == VM_INSTANTFIT) {
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				start = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr);
				if (start != VMEM_ADDR_NULL) {
					goto gotit;
				}
			}
		}
	} else { /* VM_BESTFIT */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					start = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr);
					if (start != VMEM_ADDR_NULL) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 ||
	    nocross != 0 || minaddr != 0 || maxaddr != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return VMEM_ADDR_NULL;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
		btnew2 = NULL;
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	return btnew->bt_start;
}

/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	VMEM_ASSERT_UNLOCKED(vm);
	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		return pool_cache_put(&qc->qc_cache, (void *)addr);
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	VMEM_ASSERT_UNLOCKED(vm);
	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) == t->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt_free(vm, t);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) == bt->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_free(vm, t);
	}

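	/*
	 * Note (added): after coalescing, if the previous tag is a dynamic
	 * span header (BT_TYPE_SPAN) whose size equals the merged free tag,
	 * the whole imported span is now unused, so it is handed back to
	 * the source arena through vm_freefn instead of being kept on the
	 * free lists.  Statically added spans (BT_TYPE_SPAN_STATIC) are
	 * never returned.
	 */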
	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_freefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_free(vm, bt);
		bt_remseg(vm, t);
		bt_free(vm, t);
		VMEM_UNLOCK(vm);
		(*vm->vm_freefn)(vm->vm_source, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_UNLOCK(vm);
	}
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

vmem_addr_t
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_reap: reap unused resources.
 *
 * => return true if we successfully reaped something.
 */

bool
vmem_reap(vmem_t *vm)
{
	bool didsomething = false;

	VMEM_ASSERT_UNLOCKED(vm);

#if defined(QCACHE)
	didsomething = qc_reap(vm);
#endif /* defined(QCACHE) */
	return didsomething;
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;
		int s;

		s = splvm();
		if (!VMEM_TRYLOCK(vm)) {
			splx(s);
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);
		splx(s);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			s = splvm();
			vmem_rehash(vm, desired, VM_NOSLEEP);
			splx(s);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PVM, IPL_SOFTCLOCK, 0);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */

/* ---- debug */

#if defined(VMEM_DEBUG)

#if !defined(_KERNEL)
#include <stdio.h>
#endif /* !defined(_KERNEL) */

void bt_dump(const bt_t *);

void
bt_dump(const bt_t *bt)
{

	printf("\t%p: %" PRIu64 ", %" PRIu64 ", %d\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type);
}

void
vmem_dump(const vmem_t *vm)
{
	const bt_t *bt;
	int i;

	printf("vmem %p '%s'\n", vm, vm->vm_name);
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		printf("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt);
			if (bt->bt_size) {
			}
		}
	}
}

#if !defined(_KERNEL)

int
main()
{
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", VMEM_ADDR_NULL, 0, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP);
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm);

	p = vmem_add(vm, 100, 200, VM_SLEEP);
	p = vmem_add(vm, 2000, 1, VM_SLEEP);
	p = vmem_add(vm, 40000, 0x10000000>>12, VM_SLEEP);
	p = vmem_add(vm, 10000, 10000, VM_SLEEP);
	p = vmem_add(vm, 500, 1000, VM_SLEEP);
	vmem_dump(vm);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				minaddr = rand() % 50000;
				maxaddr = rand() % 70000;
				if (minaddr > maxaddr) {
					minaddr = 0;
					maxaddr = 0;
				}
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				p = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				p = vmem_alloc(vm, sz, strat|VM_SLEEP);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm);
			if (p == VMEM_ADDR_NULL) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* !defined(_KERNEL) */
#endif /* defined(VMEM_DEBUG) */