/* $NetBSD: subr_vmem.c,v 1.2.2.3 2006/08/11 15:45:46 yamt Exp $ */

/*-
 * Copyright (c)2006 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * - Magazines and Vmem: Extending the Slab Allocator
 *   to Many CPUs and Arbitrary Resources
 *   http://www.usenix.org/event/usenix01/bonwick.html
 *
 * TODO:
 * - implement quantum cache
 * - implement vmem_xalloc/vmem_xfree
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.2.2.3 2006/08/11 15:45:46 yamt Exp $");

#define VMEM_DEBUG

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/once.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/vmem.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */

#if defined(_KERNEL)
#define SIMPLELOCK_DECL(name)   struct simplelock name
#else /* defined(_KERNEL) */
#include <errno.h>
#include <assert.h>
#include <stdlib.h>

#define KASSERT(a)              assert(a)
#define SIMPLELOCK_DECL(name)   /* nothing */
#define LOCK_ASSERT(a)          /* nothing */
#define simple_lock_init(a)     /* nothing */
#define simple_lock(a)          /* nothing */
#define simple_unlock(a)        /* nothing */
#define ASSERT_SLEEPABLE(lk, msg) /* nothing */
#endif /* defined(_KERNEL) */

struct vmem;
struct vmem_btag;

#if defined(VMEM_DEBUG)
void vmem_dump(const vmem_t *);
#endif /* defined(VMEM_DEBUG) */

#define VMEM_MAXORDER           20
#define VMEM_HASHSIZE_INIT      4096    /* XXX */

#define VM_FITMASK              (VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

/* vmem arena */
struct vmem {
        SIMPLELOCK_DECL(vm_lock);
        vmem_addr_t (*vm_allocfn)(vmem_t *, vmem_size_t, vmem_size_t *,
            vm_flag_t);
        void (*vm_freefn)(vmem_t *, vmem_addr_t, vmem_size_t);
        vmem_t *vm_source;
        struct vmem_seglist vm_seglist;
        struct vmem_freelist vm_freelist[VMEM_MAXORDER];
        size_t vm_hashsize;
        size_t vm_nbusytag;
        struct vmem_hashlist *vm_hashlist;
        size_t vm_qcache_max;
        size_t vm_quantum_mask;
        int vm_quantum_shift;
        /* XXX qcache */
        const char *vm_name;
};

#define VMEM_LOCK(vm)           simple_lock(&vm->vm_lock)
#define VMEM_UNLOCK(vm)         simple_unlock(&vm->vm_lock)
#define VMEM_LOCK_INIT(vm)      simple_lock_init(&vm->vm_lock);
#define VMEM_ASSERT_LOCKED(vm) \
        LOCK_ASSERT(simple_lock_held(&vm->vm_lock))
#define VMEM_ASSERT_UNLOCKED(vm) \
        LOCK_ASSERT(!simple_lock_held(&vm->vm_lock))

/* boundary tag */
struct vmem_btag {
        CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
        union {
                LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
                LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
        } bt_u;
#define bt_hashlist     bt_u.u_hashlist
#define bt_freelist     bt_u.u_freelist
        vmem_addr_t bt_start;
        vmem_size_t bt_size;
        int bt_type;
};

#define BT_TYPE_SPAN            1
#define BT_TYPE_SPAN_STATIC     2
#define BT_TYPE_FREE            3
#define BT_TYPE_BUSY            4
#define BT_ISSPAN_P(bt)         ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define BT_END(bt)              ((bt)->bt_start + (bt)->bt_size)

typedef struct vmem_btag bt_t;

/* ---- misc */

static int
calc_order(vmem_size_t size)
{
        int i;

        KASSERT(size != 0);

        i = 0;
        while (1 << (i + 1) <= size) {
                i++;
        }

        KASSERT(1 << i <= size);
        KASSERT(size < 1 << (i + 1));

        return i;
}
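
/*
 * For example, calc_order(1) == 0, calc_order(7) == 2 and calc_order(8) == 3;
 * in other words, calc_order() computes floor(log2(size)).
 */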

#if defined(_KERNEL)
static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");
#endif /* defined(_KERNEL) */

static void *
xmalloc(size_t sz, vm_flag_t flags)
{

#if defined(_KERNEL)
        return malloc(sz, M_VMEM,
            M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
#else /* defined(_KERNEL) */
        return malloc(sz);
#endif /* defined(_KERNEL) */
}

static void
xfree(void *p)
{

#if defined(_KERNEL)
        return free(p, M_VMEM);
#else /* defined(_KERNEL) */
        return free(p);
#endif /* defined(_KERNEL) */
}

/* ---- boundary tag */

#if defined(_KERNEL)
static struct pool_cache bt_poolcache;
static POOL_INIT(bt_pool, sizeof(bt_t), 0, 0, 0, "vmembtpl", NULL);
#endif /* defined(_KERNEL) */

static bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
        bt_t *bt;

#if defined(_KERNEL)
        /* XXX bootstrap */
        bt = pool_cache_get(&bt_poolcache,
            (flags & VM_SLEEP) != 0 ? PR_WAITOK : PR_NOWAIT);
#else /* defined(_KERNEL) */
        bt = malloc(sizeof *bt);
#endif /* defined(_KERNEL) */

        return bt;
}

static void
bt_free(vmem_t *vm, bt_t *bt)
{

#if defined(_KERNEL)
        /* XXX bootstrap */
        pool_cache_put(&bt_poolcache, bt);
#else /* defined(_KERNEL) */
        free(bt);
#endif /* defined(_KERNEL) */
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */
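
/*
 * For example, with vm_quantum_shift == 0, a free segment of size 5 has
 * qsize == 5 and calc_order(5) == 2, so bt_freehead_tofree() puts it on
 * freelist[2].  For a VM_INSTANTFIT allocation of size 5,
 * bt_freehead_toalloc() starts the search at freelist[3], where any tag
 * is guaranteed to be large enough; for VM_BESTFIT it starts at
 * freelist[2] and compares each tag's size against the request.
 */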

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
        const vmem_size_t qsize = size >> vm->vm_quantum_shift;
        int idx;

        KASSERT((size & vm->vm_quantum_mask) == 0);
        KASSERT(size != 0);

        idx = calc_order(qsize);
        KASSERT(idx >= 0);
        KASSERT(idx < VMEM_MAXORDER);

        return &vm->vm_freelist[idx];
}

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
        const vmem_size_t qsize = size >> vm->vm_quantum_shift;
        int idx;

        KASSERT((size & vm->vm_quantum_mask) == 0);
        KASSERT(size != 0);

        idx = calc_order(qsize);
        if (strat == VM_INSTANTFIT && 1 << idx != qsize) {
                idx++;
                /* check too large request? */
        }
        KASSERT(idx >= 0);
        KASSERT(idx < VMEM_MAXORDER);

        return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
        struct vmem_hashlist *list;
        unsigned int hash;

        hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
        list = &vm->vm_hashlist[hash % vm->vm_hashsize];

        return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
        struct vmem_hashlist *list;
        bt_t *bt;

        list = bt_hashhead(vm, addr);
        LIST_FOREACH(bt, list, bt_hashlist) {
                if (bt->bt_start == addr) {
                        break;
                }
        }

        return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

        KASSERT(vm->vm_nbusytag > 0);
        vm->vm_nbusytag--;
        LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
        struct vmem_hashlist *list;

        KASSERT(bt->bt_type == BT_TYPE_BUSY);

        list = bt_hashhead(vm, bt->bt_start);
        LIST_INSERT_HEAD(list, bt, bt_hashlist);
        vm->vm_nbusytag++;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

        CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

        CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

        CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

        KASSERT(bt->bt_type == BT_TYPE_FREE);

        LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
        struct vmem_freelist *list;

        list = bt_freehead_tofree(vm, bt->bt_size);
        LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

#if defined(_KERNEL)
static int
vmem_init(void)
{

        pool_cache_init(&bt_poolcache, &bt_pool, NULL, NULL, NULL);
        return 0;
}
#endif /* defined(_KERNEL) */

static vmem_addr_t
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
        bt_t *btspan;
        bt_t *btfree;

        KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
        KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
        VMEM_ASSERT_UNLOCKED(vm);

        btspan = bt_alloc(vm, flags);
        if (btspan == NULL) {
                return VMEM_ADDR_NULL;
        }
        btfree = bt_alloc(vm, flags);
        if (btfree == NULL) {
                bt_free(vm, btspan);
                return VMEM_ADDR_NULL;
        }

        btspan->bt_type = spanbttype;
        btspan->bt_start = addr;
        btspan->bt_size = size;

        btfree->bt_type = BT_TYPE_FREE;
        btfree->bt_start = addr;
        btfree->bt_size = size;

        VMEM_LOCK(vm);
        bt_insseg_tail(vm, btspan);
        bt_insseg(vm, btfree, btspan);
        bt_insfree(vm, btfree);
        VMEM_UNLOCK(vm);

        return addr;
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
        vmem_addr_t addr;

        VMEM_ASSERT_UNLOCKED(vm);

        if (vm->vm_allocfn == NULL) {
                return EINVAL;
        }

        addr = (*vm->vm_allocfn)(vm->vm_source, size, &size, flags);
        if (addr == VMEM_ADDR_NULL) {
                return ENOMEM;
        }

        if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) == VMEM_ADDR_NULL) {
                (*vm->vm_freefn)(vm->vm_source, addr, size);
                return ENOMEM;
        }

        return 0;
}

static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
        bt_t *bt;
        int i;
        struct vmem_hashlist *newhashlist;
        struct vmem_hashlist *oldhashlist;
        size_t oldhashsize;

        KASSERT(newhashsize > 0);
        VMEM_ASSERT_UNLOCKED(vm);

        newhashlist =
            xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags);
        if (newhashlist == NULL) {
                return ENOMEM;
        }
        for (i = 0; i < newhashsize; i++) {
                LIST_INIT(&newhashlist[i]);
        }

        VMEM_LOCK(vm);
        oldhashlist = vm->vm_hashlist;
        oldhashsize = vm->vm_hashsize;
        vm->vm_hashlist = newhashlist;
        vm->vm_hashsize = newhashsize;
        if (oldhashlist == NULL) {
                VMEM_UNLOCK(vm);
                return 0;
        }
        for (i = 0; i < oldhashsize; i++) {
                while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
                        bt_rembusy(vm, bt); /* XXX */
                        bt_insbusy(vm, bt);
                }
        }
        VMEM_UNLOCK(vm);

        xfree(oldhashlist);

        return 0;
}

/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */
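
/*
 * For example (a sketch with hypothetical values, not a real
 * configuration), an arena with a quantum of 1 covering
 * [0x1000, 0x1000 + 0x10000) and no backing source could be created as:
 *
 *      vm = vmem_create("example", 0x1000, 0x10000, 1,
 *          NULL, NULL, NULL, 0, VM_SLEEP);
 */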

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum,
    vmem_addr_t (*allocfn)(vmem_t *, vmem_size_t, vmem_size_t *, vm_flag_t),
    void (*freefn)(vmem_t *, vmem_addr_t, vmem_size_t),
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags)
{
        vmem_t *vm;
        int i;
#if defined(_KERNEL)
        static ONCE_DECL(control);
#endif /* defined(_KERNEL) */

        KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
        KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

#if defined(_KERNEL)
        if (RUN_ONCE(&control, vmem_init)) {
                return NULL;
        }
#endif /* defined(_KERNEL) */
        vm = xmalloc(sizeof(*vm), flags);
        if (vm == NULL) {
                return NULL;
        }

        VMEM_LOCK_INIT(vm);
        vm->vm_name = name;
        vm->vm_quantum_mask = quantum - 1;
        vm->vm_quantum_shift = calc_order(quantum);
        KASSERT((1 << vm->vm_quantum_shift) == quantum);
        vm->vm_allocfn = allocfn;
        vm->vm_freefn = freefn;
        vm->vm_source = source;
        vm->vm_qcache_max = qcache_max;
        vm->vm_nbusytag = 0;

        CIRCLEQ_INIT(&vm->vm_seglist);
        for (i = 0; i < VMEM_MAXORDER; i++) {
                LIST_INIT(&vm->vm_freelist[i]);
        }
        vm->vm_hashlist = NULL;
        if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
                vmem_destroy(vm);
                return NULL;
        }

        if (size != 0) {
                if (vmem_add(vm, base, size, flags) == 0) {
                        vmem_destroy(vm);
                        return NULL;
                }
        }

        return vm;
}

void
vmem_destroy(vmem_t *vm)
{

        VMEM_ASSERT_UNLOCKED(vm);

        if (vm->vm_hashlist != NULL) {
                int i;

                for (i = 0; i < vm->vm_hashsize; i++) {
                        bt_t *bt;

                        while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
                                KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
                                bt_free(vm, bt);
                        }
                }
                xfree(vm->vm_hashlist);
        }
        xfree(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

        return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
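
/*
 * For example, with a quantum of 16 (vm_quantum_mask == 0xf),
 * vmem_roundup_size(vm, 17) == 32 and vmem_roundup_size(vm, 16) == 16.
 */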

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */
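
/*
 * For example (a sketch with hypothetical names and sizes), allocating
 * three pages from an arena whose quantum is PAGE_SIZE, and releasing
 * them again later, could look like:
 *
 *      vmem_addr_t va;
 *
 *      va = vmem_alloc(vm, 3 * PAGE_SIZE, VM_INSTANTFIT | VM_SLEEP);
 *      if (va == VMEM_ADDR_NULL)
 *              return ENOMEM;
 *      ...
 *      vmem_free(vm, va, 3 * PAGE_SIZE);
 */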

vmem_addr_t
vmem_alloc(vmem_t *vm, vmem_size_t size0, vm_flag_t flags)
{
        struct vmem_freelist *list;
        struct vmem_freelist *first;
        struct vmem_freelist *end;
        bt_t *bt;
        bt_t *btnew;
        const vmem_size_t size = vmem_roundup_size(vm, size0);
        vm_flag_t strat = flags & VM_FITMASK;

        KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
        KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
        VMEM_ASSERT_UNLOCKED(vm);

        KASSERT(size0 > 0);
        KASSERT(size > 0);
        KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
        if ((flags & VM_SLEEP) != 0) {
                ASSERT_SLEEPABLE(NULL, "vmem_alloc");
        }

        btnew = bt_alloc(vm, flags);
        if (btnew == NULL) {
                return VMEM_ADDR_NULL;
        }

retry_strat:
        first = bt_freehead_toalloc(vm, size, strat);
        end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
        bt = NULL;
        VMEM_LOCK(vm);
        if (strat == VM_INSTANTFIT) {
                for (list = first; list < end; list++) {
                        bt = LIST_FIRST(list);
                        if (bt != NULL) {
                                goto gotit;
                        }
                }
        } else { /* VM_BESTFIT */
                for (list = first; list < end; list++) {
                        LIST_FOREACH(bt, list, bt_freelist) {
                                if (bt->bt_size >= size) {
                                        goto gotit;
                                }
                        }
                }
        }
        VMEM_UNLOCK(vm);
#if 1
        if (strat == VM_INSTANTFIT) {
                strat = VM_BESTFIT;
                goto retry_strat;
        }
#endif
        if (vmem_import(vm, size, flags) == 0) {
                goto retry;
        }
        /* XXX */
        return VMEM_ADDR_NULL;

gotit:
        KASSERT(bt->bt_type == BT_TYPE_FREE);
        KASSERT(bt->bt_size >= size);
        bt_remfree(vm, bt);
        if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
                /* split */
                btnew->bt_type = BT_TYPE_BUSY;
                btnew->bt_start = bt->bt_start;
                btnew->bt_size = size;
                bt->bt_start = bt->bt_start + size;
                bt->bt_size -= size;
                bt_insfree(vm, bt);
                bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
                bt_insbusy(vm, btnew);
                VMEM_UNLOCK(vm);
        } else {
                bt->bt_type = BT_TYPE_BUSY;
                bt_insbusy(vm, bt);
                VMEM_UNLOCK(vm);
                bt_free(vm, btnew);
                btnew = bt;
        }
        KASSERT(btnew->bt_size >= size);
        btnew->bt_type = BT_TYPE_BUSY;

        return btnew->bt_start;
}

/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */
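
/*
 * The size passed to vmem_free() must correspond to the size passed to
 * the matching vmem_alloc(); for example, an allocation of size 17 from
 * a quantum-16 arena is released with vmem_free(vm, addr, 17) or
 * vmem_free(vm, addr, 32) (the rounded-up size), as the KASSERTs below
 * check.
 */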

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
        bt_t *bt;
        bt_t *t;

        VMEM_ASSERT_UNLOCKED(vm);

        KASSERT(addr != VMEM_ADDR_NULL);
        KASSERT(size > 0);

        VMEM_LOCK(vm);

        bt = bt_lookupbusy(vm, addr);
        KASSERT(bt != NULL);
        KASSERT(bt->bt_start == addr);
        KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
            bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
        KASSERT(bt->bt_type == BT_TYPE_BUSY);
        bt_rembusy(vm, bt);
        bt->bt_type = BT_TYPE_FREE;

        /* coalesce */
        t = CIRCLEQ_NEXT(bt, bt_seglist);
        if (t != NULL && t->bt_type == BT_TYPE_FREE) {
                KASSERT(BT_END(bt) == t->bt_start);
                bt_remfree(vm, t);
                bt_remseg(vm, t);
                bt->bt_size += t->bt_size;
                bt_free(vm, t);
        }
        t = CIRCLEQ_PREV(bt, bt_seglist);
        if (t != NULL && t->bt_type == BT_TYPE_FREE) {
                KASSERT(BT_END(t) == bt->bt_start);
                bt_remfree(vm, t);
                bt_remseg(vm, t);
                bt->bt_size += t->bt_size;
                bt->bt_start = t->bt_start;
                bt_free(vm, t);
        }

        t = CIRCLEQ_PREV(bt, bt_seglist);
        KASSERT(t != NULL);
        KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
        if (vm->vm_freefn != NULL && t->bt_type == BT_TYPE_SPAN &&
            t->bt_size == bt->bt_size) {
                vmem_addr_t spanaddr;
                vmem_size_t spansize;

                KASSERT(t->bt_start == bt->bt_start);
                spanaddr = bt->bt_start;
                spansize = bt->bt_size;
                bt_remseg(vm, bt);
                bt_free(vm, bt);
                bt_remseg(vm, t);
                bt_free(vm, t);
                VMEM_UNLOCK(vm);
                (*vm->vm_freefn)(vm->vm_source, spanaddr, spansize);
        } else {
                bt_insfree(vm, bt);
                VMEM_UNLOCK(vm);
        }
}

/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */
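
/*
 * For example, the self-test below calls vmem_add(vm, 100, 200, VM_SLEEP),
 * which makes the address range [100, 300) available for allocation from
 * the arena as a static span.
 */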

vmem_addr_t
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

        return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/* ---- debug */

#if defined(VMEM_DEBUG)

#if !defined(_KERNEL)
#include <stdio.h>
#endif /* !defined(_KERNEL) */

void bt_dump(const bt_t *);

void
bt_dump(const bt_t *bt)
{

        printf("\t%p: %" PRIu64 ", %" PRIu64 ", %d\n",
            bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
            bt->bt_type);
}

void
vmem_dump(const vmem_t *vm)
{
        const bt_t *bt;
        int i;

        printf("vmem %p '%s'\n", vm, vm->vm_name);
        CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
                bt_dump(bt);
        }

        for (i = 0; i < VMEM_MAXORDER; i++) {
                const struct vmem_freelist *fl = &vm->vm_freelist[i];

                if (LIST_EMPTY(fl)) {
                        continue;
                }

                printf("freelist[%d]\n", i);
                LIST_FOREACH(bt, fl, bt_freelist) {
                        bt_dump(bt);
                        if (bt->bt_size) {
                        }
                }
        }
}

#if !defined(_KERNEL)

#include <stdlib.h>

int
main()
{
        vmem_t *vm;
        vmem_addr_t p;
        struct reg {
                vmem_addr_t p;
                vmem_size_t sz;
        } *reg = NULL;
        int nreg = 0;
        int nalloc = 0;
        int nfree = 0;
        vmem_size_t total = 0;
#if 1
        vm_flag_t strat = VM_INSTANTFIT;
#else
        vm_flag_t strat = VM_BESTFIT;
#endif

        vm = vmem_create("test", VMEM_ADDR_NULL, 0, 1,
            NULL, NULL, NULL, 0, VM_NOSLEEP);
        if (vm == NULL) {
                printf("vmem_create\n");
                exit(EXIT_FAILURE);
        }
        vmem_dump(vm);

        p = vmem_add(vm, 100, 200, VM_SLEEP);
        p = vmem_add(vm, 2000, 1, VM_SLEEP);
        p = vmem_add(vm, 40000, 0x10000000>>12, VM_SLEEP);
        p = vmem_add(vm, 10000, 10000, VM_SLEEP);
        p = vmem_add(vm, 500, 1000, VM_SLEEP);
        vmem_dump(vm);
        for (;;) {
                struct reg *r;

                if (rand() % 100 > 40) {
                        vmem_size_t sz = rand() % 500 + 1;

                        printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
                        p = vmem_alloc(vm, sz, strat|VM_SLEEP);
                        printf("-> %" PRIu64 "\n", (uint64_t)p);
                        vmem_dump(vm);
                        if (p == VMEM_ADDR_NULL) {
                                break;
                        }
                        nreg++;
                        reg = realloc(reg, sizeof(*reg) * nreg);
                        r = &reg[nreg - 1];
                        r->p = p;
                        r->sz = sz;
                        total += sz;
                        nalloc++;
                } else if (nreg != 0) {
                        r = &reg[rand() % nreg];
                        printf("=== free %" PRIu64 ", %" PRIu64 "\n",
                            (uint64_t)r->p, (uint64_t)r->sz);
                        vmem_free(vm, r->p, r->sz);
                        total -= r->sz;
                        vmem_dump(vm);
                        *r = reg[nreg - 1];
                        nreg--;
                        nfree++;
                }
                printf("total=%" PRIu64 "\n", (uint64_t)total);
        }
        fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
            (uint64_t)total, nalloc, nfree);
        exit(EXIT_SUCCESS);
}
#endif /* !defined(_KERNEL) */
#endif /* defined(VMEM_DEBUG) */