/*	$NetBSD: subr_vmem.c,v 1.4 2006/08/16 13:19:03 yamt Exp $	*/

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 *
 * TODO:
 * -	implement quantum cache
 * -	implement vmem_xalloc/vmem_xfree
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.4 2006/08/16 13:19:03 yamt Exp $");

#define	VMEM_DEBUG

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/once.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/vmem.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#define	SIMPLELOCK_DECL(name)	struct simplelock name
#else /* defined(_KERNEL) */
#include <errno.h>
#include <assert.h>
#include <stdlib.h>

#define	KASSERT(a)		assert(a)
#define	SIMPLELOCK_DECL(name)	/* nothing */
#define	LOCK_ASSERT(a)		/* nothing */
#define	simple_lock_init(a)	/* nothing */
#define	simple_lock(a)		/* nothing */
#define	simple_unlock(a)	/* nothing */
#define	ASSERT_SLEEPABLE(lk, msg) /* nothing */
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_DEBUG)
void vmem_dump(const vmem_t *);
#endif /* defined(VMEM_DEBUG) */

#define	VMEM_MAXORDER	(sizeof(vmem_size_t) * CHAR_BIT)
#define	VMEM_HASHSIZE_INIT	4096	/* XXX */

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

/* vmem arena */
struct vmem {
	SIMPLELOCK_DECL(vm_lock);
	vmem_addr_t (*vm_allocfn)(vmem_t *, vmem_size_t, vmem_size_t *,
	    vm_flag_t);
	void (*vm_freefn)(vmem_t *, vmem_addr_t, vmem_size_t);
	vmem_t *vm_source;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	size_t vm_hashsize;
	size_t vm_nbusytag;
	struct vmem_hashlist *vm_hashlist;
	size_t vm_qcache_max;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	/* XXX qcache */
	const char *vm_name;
};

#define	VMEM_LOCK(vm)	simple_lock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)	simple_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm)	simple_lock_init(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm) \
	LOCK_ASSERT(simple_lock_held(&vm->vm_lock))
#define	VMEM_ASSERT_UNLOCKED(vm) \
	LOCK_ASSERT(!simple_lock_held(&vm->vm_lock))

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size)

typedef struct vmem_btag bt_t;

/* ---- misc */

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))

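/*
 * calc_order: return the highest order i such that ORDER2SIZE(i) <= size,
 * i.e. floor(log2(size)).  Used to derive vm_quantum_shift and to pick
 * freelist buckets.
 */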
static int
calc_order(vmem_size_t size)
{
	vmem_size_t target;
	int i;

	KASSERT(size != 0);

	i = 0;
	target = size >> 1;
	while (ORDER2SIZE(i) <= target) {
		i++;
	}

	KASSERT(ORDER2SIZE(i) <= size);
	KASSERT(size < ORDER2SIZE(i + 1) || ORDER2SIZE(i + 1) < ORDER2SIZE(i));

	return i;
}

#if defined(_KERNEL)
static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");
#endif /* defined(_KERNEL) */

static void *
xmalloc(size_t sz, vm_flag_t flags)
{

#if defined(_KERNEL)
	return malloc(sz, M_VMEM,
	    M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
#else /* defined(_KERNEL) */
	return malloc(sz);
#endif /* defined(_KERNEL) */
}

static void
xfree(void *p)
{

#if defined(_KERNEL)
	return free(p, M_VMEM);
#else /* defined(_KERNEL) */
	return free(p);
#endif /* defined(_KERNEL) */
}

/* ---- boundary tag */

#if defined(_KERNEL)
static struct pool_cache bt_poolcache;
static POOL_INIT(bt_pool, sizeof(bt_t), 0, 0, 0, "vmembtpl", NULL);
#endif /* defined(_KERNEL) */

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

#if defined(_KERNEL)
	/* XXX bootstrap */
	bt = pool_cache_get(&bt_poolcache,
	    (flags & VM_SLEEP) != 0 ? PR_WAITOK : PR_NOWAIT);
#else /* defined(_KERNEL) */
	bt = malloc(sizeof *bt);
#endif /* defined(_KERNEL) */

	return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

#if defined(_KERNEL)
	/* XXX bootstrap */
	pool_cache_put(&bt_poolcache, bt);
#else /* defined(_KERNEL) */
	free(bt);
#endif /* defined(_KERNEL) */
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

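/*
 * bt_freehead_tofree: return the freelist a free segment of 'size' bytes
 * should be put on: the bucket (see the table above) whose range contains
 * its size in quantum units.
 */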
static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

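/*
 * bt_freehead_toalloc: return the first freelist to search for an
 * allocation of 'size' bytes.  For VM_INSTANTFIT the order is rounded up
 * (unless the size is an exact power of two) so that any tag on the list
 * is large enough; VM_BESTFIT starts at the bucket containing the size
 * and examines each tag.
 */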
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx;

	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(size != 0);

	idx = calc_order(qsize);
	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

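/*
 * Busy (allocated) boundary tags are hashed by their start address so
 * that vmem_free() can find the tag for an address without the caller
 * having to pass the tag back.
 */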
static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(_KERNEL)
static int
vmem_init(void)
{

	pool_cache_init(&bt_poolcache, &bt_pool, NULL, NULL, NULL);
	return 0;
}
#endif /* defined(_KERNEL) */

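/*
 * vmem_add1: add the span [addr, addr + size) to the arena.  Two boundary
 * tags are used: one records the span itself (spanbttype) and the other
 * marks the whole range free.
 */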
static vmem_addr_t
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	VMEM_ASSERT_UNLOCKED(vm);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return VMEM_ADDR_NULL;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return VMEM_ADDR_NULL;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	VMEM_UNLOCK(vm);

	return addr;
}

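/*
 * vmem_import: grow the arena by allocating a new span from the backing
 * source via vm_allocfn and adding it as a BT_TYPE_SPAN, which vmem_free
 * can later hand back through vm_freefn.
 */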
static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;

	VMEM_ASSERT_UNLOCKED(vm);

	if (vm->vm_allocfn == NULL) {
		return EINVAL;
	}

	addr = (*vm->vm_allocfn)(vm->vm_source, size, &size, flags);
	if (addr == VMEM_ADDR_NULL) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) == VMEM_ADDR_NULL) {
		(*vm->vm_freefn)(vm->vm_source, addr, size);
		return ENOMEM;
	}

	return 0;
}

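/*
 * vmem_rehash: replace the busy-tag hash table with one of 'newhashsize'
 * buckets and move every busy tag into it.  Also used by vmem_create to
 * set up the initial table.
 */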
static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);
	VMEM_ASSERT_UNLOCKED(vm);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	xfree(oldhashlist);

	return 0;
}

/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */
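/*
 * Minimal usage sketch (illustrative values; mirrors the test code at the
 * bottom of this file).  Pass a source arena and allocfn/freefn callbacks
 * to import spans on demand; quantum must be a power of two:
 *
 *	vm = vmem_create("example", VMEM_ADDR_NULL, 0, 1,
 *	    NULL, NULL, NULL, 0, VM_NOSLEEP);
 *	vmem_add(vm, 0x1000, 0x1000, VM_NOSLEEP);
 */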

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum,
    vmem_addr_t (*allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, vm_flag_t),
    void (*freefn)(vmem_t *, vmem_addr_t, vmem_size_t),
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags)
{
	vmem_t *vm;
	int i;
#if defined(_KERNEL)
	static ONCE_DECL(control);
#endif /* defined(_KERNEL) */

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

#if defined(_KERNEL)
	if (RUN_ONCE(&control, vmem_init)) {
		return NULL;
	}
#endif /* defined(_KERNEL) */
	vm = xmalloc(sizeof(*vm), flags);
	if (vm == NULL) {
		return NULL;
	}

	VMEM_LOCK_INIT(vm);
	vm->vm_name = name;
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = calc_order(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_allocfn = allocfn;
	vm->vm_freefn = freefn;
	vm->vm_source = source;
	vm->vm_qcache_max = qcache_max;
	vm->vm_nbusytag = 0;

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) == VMEM_ADDR_NULL) {
			vmem_destroy(vm);
			return NULL;
		}
	}

	return vm;
}

void
vmem_destroy(vmem_t *vm)
{

	VMEM_ASSERT_UNLOCKED(vm);

	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				LIST_REMOVE(bt, bt_hashlist);
				bt_free(vm, bt);
			}
		}
		xfree(vm->vm_hashlist);
	}
	xfree(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */
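/*
 * The strategy flag selects how the freelists are searched: VM_INSTANTFIT
 * takes the first tag on the lowest non-empty list guaranteed to satisfy
 * the request; VM_BESTFIT scans for the smallest tag that fits.  If the
 * search fails, vmem_alloc falls back to VM_BESTFIT and finally imports
 * more space from the source arena.
 *
 * Illustrative call/return convention (a sketch, not taken from a caller
 * in this file):
 *
 *	vmem_addr_t va;
 *
 *	va = vmem_alloc(vm, sz, VM_INSTANTFIT | VM_SLEEP);
 *	if (va == VMEM_ADDR_NULL)
 *		return ENOMEM;
 *	...
 *	vmem_free(vm, va, sz);
 */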

vmem_addr_t
vmem_alloc(vmem_t *vm, vmem_size_t size0, vm_flag_t flags)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	VMEM_ASSERT_UNLOCKED(vm);

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE(NULL, "vmem_alloc");
	}

	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return VMEM_ADDR_NULL;
	}

retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	if (strat == VM_INSTANTFIT) {
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				goto gotit;
			}
		}
	} else { /* VM_BESTFIT */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					goto gotit;
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */
	bt_free(vm, btnew);
	return VMEM_ADDR_NULL;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	return btnew->bt_start;
}

/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */
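/*
 * The busy tag for 'addr' is looked up in the hash, turned back into a
 * free tag and coalesced with neighbouring free tags; if the result
 * covers an entire imported (BT_TYPE_SPAN) span, the span is returned to
 * the source arena via vm_freefn instead of being put on a freelist.
 */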

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	VMEM_ASSERT_UNLOCKED(vm);

	KASSERT(addr != VMEM_ADDR_NULL);
	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) == t->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt_free(vm, t);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) == bt->bt_start);
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_free(vm, t);
	}

	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_freefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_free(vm, bt);
		bt_remseg(vm, t);
		bt_free(vm, t);
		VMEM_UNLOCK(vm);
		(*vm->vm_freefn)(vm->vm_source, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_UNLOCK(vm);
	}
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */
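/*
 * Unlike spans imported through vm_allocfn, spans added here are tagged
 * BT_TYPE_SPAN_STATIC and are never handed back to a source arena.
 */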

vmem_addr_t
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/* ---- debug */

#if defined(VMEM_DEBUG)

#if !defined(_KERNEL)
#include <inttypes.h>
#include <stdio.h>
#endif /* !defined(_KERNEL) */

void bt_dump(const bt_t *);

void
bt_dump(const bt_t *bt)
{

	printf("\t%p: %" PRIu64 ", %" PRIu64 ", %d\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type);
}

void
vmem_dump(const vmem_t *vm)
{
	const bt_t *bt;
	int i;

	printf("vmem %p '%s'\n", vm, vm->vm_name);
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		printf("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt);
			if (bt->bt_size) {
			}
		}
	}
}

#if !defined(_KERNEL)

#include <stdlib.h>

int
main()
{
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", VMEM_ADDR_NULL, 0, 1,
	    NULL, NULL, NULL, 0, VM_NOSLEEP);
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm);

	p = vmem_add(vm, 100, 200, VM_SLEEP);
	p = vmem_add(vm, 2000, 1, VM_SLEEP);
	p = vmem_add(vm, 40000, 0x10000000>>12, VM_SLEEP);
	p = vmem_add(vm, 10000, 10000, VM_SLEEP);
	p = vmem_add(vm, 500, 1000, VM_SLEEP);
	vmem_dump(vm);
	for (;;) {
		struct reg *r;

		if (rand() % 100 > 40) {
			vmem_size_t sz = rand() % 500 + 1;

			printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
			p = vmem_alloc(vm, sz, strat|VM_SLEEP);
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm);
			if (p == VMEM_ADDR_NULL) {
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			vmem_free(vm, r->p, r->sz);
			total -= r->sz;
			vmem_dump(vm);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* !defined(_KERNEL) */
#endif /* defined(VMEM_DEBUG) */