1 1.14 andvar /* $NetBSD: subr_blist.c,v 1.14 2022/05/29 10:47:40 andvar Exp $ */
2 1.2 yamt
3 1.1 yamt /*-
4 1.1 yamt * Copyright (c) 1998 Matthew Dillon. All Rights Reserved.
5 1.1 yamt * Redistribution and use in source and binary forms, with or without
6 1.1 yamt * modification, are permitted provided that the following conditions
7 1.1 yamt * are met:
8 1.1 yamt * 1. Redistributions of source code must retain the above copyright
9 1.1 yamt * notice, this list of conditions and the following disclaimer.
10 1.1 yamt * 2. Redistributions in binary form must reproduce the above copyright
11 1.1 yamt * notice, this list of conditions and the following disclaimer in the
12 1.1 yamt * documentation and/or other materials provided with the distribution.
13 1.1 yamt * 4. Neither the name of the University nor the names of its contributors
14 1.1 yamt * may be used to endorse or promote products derived from this software
15 1.1 yamt * without specific prior written permission.
16 1.1 yamt *
17 1.1 yamt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18 1.1 yamt * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 1.1 yamt * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 1.1 yamt * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
21 1.1 yamt * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 1.1 yamt * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
23 1.1 yamt * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 1.1 yamt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 1.1 yamt * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
26 1.1 yamt * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27 1.1 yamt * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 1.1 yamt */
29 1.1 yamt /*
30 1.1 yamt * BLIST.C - Bitmap allocator/deallocator, using a radix tree with hinting
31 1.1 yamt *
32 1.1 yamt * This module implements a general bitmap allocator/deallocator. The
33 1.1 yamt * allocator eats around 2 bits per 'block'. The module does not
34 1.12 wiz * try to interpret the meaning of a 'block' other than to return
35 1.3 yamt * BLIST_NONE on an allocation failure.
36 1.1 yamt *
37 1.1 yamt * A radix tree is used to maintain the bitmap. Two radix constants are
38 1.1 yamt * involved: One for the bitmaps contained in the leaf nodes (typically
39 1.1 yamt * 32), and one for the meta nodes (typically 16). Both meta and leaf
40 1.1 yamt * nodes have a hint field. This field gives us a hint as to the largest
41 1.1 yamt * free contiguous range of blocks under the node. It may contain a
42 1.1 yamt * value that is too high, but will never contain a value that is too
43 1.1 yamt * low. When the radix tree is searched, allocation failures in subtrees
44 1.1 yamt * update the hint.
45 1.1 yamt *
46 1.1 yamt * The radix tree also implements two collapsed states for meta nodes:
47 1.1 yamt * the ALL-ALLOCATED state and the ALL-FREE state. If a meta node is
48 1.1 yamt * in either of these two states, all information contained underneath
49 1.1 yamt * the node is considered stale. These states are used to optimize
50 1.1 yamt * allocation and freeing operations.
51 1.1 yamt *
52 1.1 yamt * The hinting greatly increases code efficiency for allocations while
53 1.1 yamt * the general radix structure optimizes both allocations and frees. The
54 1.1 yamt * radix tree should be able to operate well no matter how much
55 1.1 yamt * fragmentation there is and no matter how large a bitmap is used.
56 1.1 yamt *
57 1.1 yamt * Unlike the rlist code, the blist code wires all necessary memory at
58 1.1 yamt * creation time. Neither allocations nor frees require interaction with
59 1.1 yamt * the memory subsystem. In contrast, the rlist code may allocate memory
60 1.1 yamt * on an rlist_free() call. The non-blocking features of the blist code
61 1.1 yamt * are used to great advantage in the swap code (vm/nswap_pager.c). The
62 1.12 wiz * rlist code uses a little less overall memory than the blist code (but
63 1.1 yamt * due to swap interleaving not all that much less), but the blist code
64 1.1 yamt * scales much, much better.
65 1.1 yamt *
66 1.14 andvar * LAYOUT: The radix tree is laid out recursively using a
67 1.14 andvar * linear array. Each meta node is immediately followed (laid out
68 1.1 yamt * sequentially in memory) by BLIST_META_RADIX lower level nodes. This
69 1.1 yamt * is a recursive structure but one that can be easily scanned through
70 1.1 yamt * a very simple 'skip' calculation. In order to support large radixes,
71 1.1 yamt * portions of the tree may reside outside our memory allocation. We
72 1.1 yamt * handle this with an early-termination optimization (when bighint is
73 1.1 yamt * set to -1) on the scan. The memory allocation is only large enough
74 1.1 yamt * to cover the number of blocks requested at creation time even if it
75 1.1 yamt * must be encompassed in a larger root-node radix.
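 *
 * For example (a sketch assuming BLIST_BMAP_RADIX = 32 and
 * BLIST_META_RADIX = 16, the typical values mentioned above), the
 * radix/skip pair computed by blist_create() grows as:
 *
 *	radix   32	skip   0	(a single leaf)
 *	radix  512	skip  16	(1 meta node + 16 leaves)
 *	radix 8192	skip 272	(1 meta node + 16 subtrees of
 *					 17 nodes each)
 *
 * That is, 'skip' is the total number of array entries occupied by the
 * subtrees below a meta node, and consecutive child subtrees start
 * every skip/BLIST_META_RADIX entries after the meta node itself.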
76 1.1 yamt *
77 1.12 wiz * NOTE: the allocator cannot currently allocate more than
78 1.1 yamt * BLIST_BMAP_RADIX blocks per call. It will panic with 'allocation too
79 1.1 yamt * large' if you try. This is an area that could use improvement. The
80 1.1 yamt * radix is large enough that this restriction does not affect the swap
81 1.1 yamt * system, though. Currently only the allocation code is affected by
82 1.1 yamt * this algorithmic unfeature. The freeing code can handle arbitrary
83 1.1 yamt * ranges.
84 1.1 yamt *
85 1.1 yamt * This code can be compiled stand-alone for debugging.
86 1.1 yamt */
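
/*
 * Example usage (an illustrative sketch only; it mirrors the
 * stand-alone debugging main() at the bottom of this file).  A newly
 * created blist has every block marked allocated, so the creator
 * normally frees the managed range first:
 *
 *	blist_t bl = blist_create(1024);	 (1024-block bitmap)
 *	blist_free(bl, 0, 1024);		 (mark the whole range free)
 *	blist_blkno_t blk = blist_alloc(bl, 16);
 *	if (blk != BLIST_NONE)			 (BLIST_NONE on failure)
 *		blist_free(bl, blk, 16);	 (give the blocks back)
 *	blist_destroy(bl);
 */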
87 1.1 yamt
88 1.1 yamt #include <sys/cdefs.h>
89 1.14 andvar __KERNEL_RCSID(0, "$NetBSD: subr_blist.c,v 1.14 2022/05/29 10:47:40 andvar Exp $");
90 1.2 yamt #if 0
91 1.1 yamt __FBSDID("$FreeBSD: src/sys/kern/subr_blist.c,v 1.17 2004/06/04 04:03:25 alc Exp $");
92 1.2 yamt #endif
93 1.1 yamt
94 1.1 yamt #ifdef _KERNEL
95 1.1 yamt
96 1.1 yamt #include <sys/param.h>
97 1.1 yamt #include <sys/systm.h>
98 1.1 yamt #include <sys/blist.h>
99 1.10 rmind #include <sys/kmem.h>
100 1.1 yamt
101 1.1 yamt #else
102 1.1 yamt
103 1.1 yamt #ifndef BLIST_NO_DEBUG
104 1.1 yamt #define BLIST_DEBUG
105 1.1 yamt #endif
106 1.1 yamt
107 1.1 yamt #include <sys/types.h>
108 1.1 yamt #include <stdio.h>
109 1.1 yamt #include <string.h>
110 1.1 yamt #include <stdlib.h>
111 1.1 yamt #include <stdarg.h>
112 1.3 yamt #include <inttypes.h>
113 1.1 yamt
114 1.10 rmind #define KM_SLEEP 1
115 1.13 zafer #define kmem_zalloc(a,b) calloc(1, (a))
116 1.13 zafer #define kmem_alloc(a,b) malloc(a)
117 1.10 rmind #define kmem_free(a,b) free(a)
118 1.1 yamt
119 1.3 yamt #include "../sys/blist.h"
120 1.1 yamt
121 1.11 christos void panic(const char *ctl, ...) __printflike(1, 2);
122 1.1 yamt
123 1.1 yamt #endif
124 1.1 yamt
125 1.1 yamt /*
126 1.4 yamt * blmeta and blist_bitmap_t MUST be a power of 2 in size.
127 1.4 yamt */
128 1.4 yamt
129 1.4 yamt typedef struct blmeta {
130 1.4 yamt union {
131 1.5 yamt blist_blkno_t bmu_avail; /* space available under us */
132 1.5 yamt blist_bitmap_t bmu_bitmap; /* bitmap if we are a leaf */
133 1.4 yamt } u;
134 1.5 yamt blist_blkno_t bm_bighint; /* biggest contiguous block hint*/
135 1.4 yamt } blmeta_t;
136 1.4 yamt
137 1.4 yamt struct blist {
138 1.5 yamt blist_blkno_t bl_blocks; /* area of coverage */
139 1.5 yamt blist_blkno_t bl_radix; /* coverage radix */
140 1.5 yamt blist_blkno_t bl_skip; /* starting skip */
141 1.5 yamt blist_blkno_t bl_free; /* number of free blocks */
142 1.4 yamt blmeta_t *bl_root; /* root of radix tree */
143 1.5 yamt blist_blkno_t bl_rootblks; /* blks allocated for tree */
144 1.4 yamt };
145 1.4 yamt
146 1.4 yamt #define BLIST_META_RADIX 16
147 1.4 yamt
148 1.4 yamt /*
149 1.1 yamt * static support functions
150 1.1 yamt */
151 1.1 yamt
152 1.5 yamt static blist_blkno_t blst_leaf_alloc(blmeta_t *scan, blist_blkno_t blk,
153 1.5 yamt int count);
154 1.5 yamt static blist_blkno_t blst_meta_alloc(blmeta_t *scan, blist_blkno_t blk,
155 1.5 yamt blist_blkno_t count, blist_blkno_t radix, blist_blkno_t skip);
156 1.5 yamt static void blst_leaf_free(blmeta_t *scan, blist_blkno_t relblk, int count);
157 1.5 yamt static void blst_meta_free(blmeta_t *scan, blist_blkno_t freeBlk,
158 1.5 yamt blist_blkno_t count, blist_blkno_t radix, blist_blkno_t skip,
159 1.5 yamt blist_blkno_t blk);
160 1.5 yamt static void blst_copy(blmeta_t *scan, blist_blkno_t blk, blist_blkno_t radix,
161 1.5 yamt blist_blkno_t skip, blist_t dest, blist_blkno_t count);
162 1.5 yamt static int blst_leaf_fill(blmeta_t *scan, blist_blkno_t blk, int count);
163 1.5 yamt static blist_blkno_t blst_meta_fill(blmeta_t *scan, blist_blkno_t allocBlk,
164 1.5 yamt blist_blkno_t count, blist_blkno_t radix, blist_blkno_t skip,
165 1.5 yamt blist_blkno_t blk);
166 1.5 yamt static blist_blkno_t blst_radix_init(blmeta_t *scan, blist_blkno_t radix,
167 1.5 yamt blist_blkno_t skip, blist_blkno_t count);
168 1.1 yamt #ifndef _KERNEL
169 1.5 yamt static void blst_radix_print(blmeta_t *scan, blist_blkno_t blk,
170 1.5 yamt blist_blkno_t radix, blist_blkno_t skip, int tab);
171 1.1 yamt #endif
172 1.1 yamt
173 1.1 yamt /*
174 1.1 yamt * blist_create() - create a blist capable of handling up to the specified
175 1.1 yamt * number of blocks
176 1.1 yamt *
177 1.12 wiz * blocks must be greater than 0
178 1.1 yamt *
179 1.1 yamt * The smallest blist consists of a single leaf node capable of
180 1.1 yamt * managing BLIST_BMAP_RADIX blocks.
181 1.1 yamt */
182 1.1 yamt
183 1.1 yamt blist_t
184 1.5 yamt blist_create(blist_blkno_t blocks)
185 1.1 yamt {
186 1.1 yamt blist_t bl;
187 1.5 yamt blist_blkno_t radix;
188 1.5 yamt blist_blkno_t skip = 0;
189 1.1 yamt
190 1.1 yamt /*
191 1.1 yamt * Calculate radix and skip field used for scanning.
192 1.5 yamt *
193 1.5 yamt * XXX check overflow
194 1.1 yamt */
195 1.1 yamt radix = BLIST_BMAP_RADIX;
196 1.1 yamt
197 1.1 yamt while (radix < blocks) {
198 1.1 yamt radix *= BLIST_META_RADIX;
199 1.1 yamt skip = (skip + 1) * BLIST_META_RADIX;
200 1.1 yamt }
201 1.1 yamt
202 1.10 rmind bl = kmem_zalloc(sizeof(struct blist), KM_SLEEP);
203 1.1 yamt
204 1.1 yamt bl->bl_blocks = blocks;
205 1.1 yamt bl->bl_radix = radix;
206 1.1 yamt bl->bl_skip = skip;
207 1.1 yamt bl->bl_rootblks = 1 +
208 1.1 yamt blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
209 1.10 rmind bl->bl_root = kmem_alloc(sizeof(blmeta_t) * bl->bl_rootblks, KM_SLEEP);
210 1.1 yamt
211 1.1 yamt #if defined(BLIST_DEBUG)
212 1.1 yamt printf(
213 1.3 yamt "BLIST representing %" PRIu64 " blocks (%" PRIu64 " MB of swap)"
214 1.3 yamt ", requiring %" PRIu64 "K of ram\n",
215 1.5 yamt (uint64_t)bl->bl_blocks,
216 1.5 yamt (uint64_t)bl->bl_blocks * 4 / 1024,
217 1.5 yamt ((uint64_t)bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024
218 1.1 yamt );
219 1.3 yamt printf("BLIST raw radix tree contains %" PRIu64 " records\n",
220 1.5 yamt (uint64_t)bl->bl_rootblks);
221 1.1 yamt #endif
222 1.1 yamt blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks);
223 1.1 yamt
224 1.1 yamt return(bl);
225 1.1 yamt }
226 1.1 yamt
227 1.1 yamt void
228 1.1 yamt blist_destroy(blist_t bl)
229 1.1 yamt {
230 1.10 rmind
231 1.10 rmind kmem_free(bl->bl_root, sizeof(blmeta_t) * bl->bl_rootblks);
232 1.10 rmind kmem_free(bl, sizeof(struct blist));
233 1.1 yamt }
234 1.1 yamt
235 1.1 yamt /*
236 1.1 yamt * blist_alloc() - reserve space in the block bitmap. Return the base
237 1.3 yamt * of a contiguous region or BLIST_NONE if space could
238 1.1 yamt * not be allocated.
239 1.1 yamt */
240 1.1 yamt
241 1.5 yamt blist_blkno_t
242 1.5 yamt blist_alloc(blist_t bl, blist_blkno_t count)
243 1.1 yamt {
244 1.5 yamt blist_blkno_t blk = BLIST_NONE;
245 1.1 yamt
246 1.1 yamt if (bl) {
247 1.1 yamt if (bl->bl_radix == BLIST_BMAP_RADIX)
248 1.1 yamt blk = blst_leaf_alloc(bl->bl_root, 0, count);
249 1.1 yamt else
250 1.1 yamt blk = blst_meta_alloc(bl->bl_root, 0, count, bl->bl_radix, bl->bl_skip);
251 1.3 yamt if (blk != BLIST_NONE)
252 1.1 yamt bl->bl_free -= count;
253 1.1 yamt }
254 1.1 yamt return(blk);
255 1.1 yamt }
256 1.1 yamt
257 1.1 yamt /*
258 1.1 yamt * blist_free() - free up space in the block bitmap. Panic if an
259 1.1 yamt * inconsistency is found.
261 1.1 yamt */
262 1.1 yamt
263 1.1 yamt void
264 1.5 yamt blist_free(blist_t bl, blist_blkno_t blkno, blist_blkno_t count)
265 1.1 yamt {
266 1.1 yamt if (bl) {
267 1.1 yamt if (bl->bl_radix == BLIST_BMAP_RADIX)
268 1.1 yamt blst_leaf_free(bl->bl_root, blkno, count);
269 1.1 yamt else
270 1.1 yamt blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix, bl->bl_skip, 0);
271 1.1 yamt bl->bl_free += count;
272 1.1 yamt }
273 1.1 yamt }
274 1.1 yamt
275 1.1 yamt /*
276 1.1 yamt * blist_fill() - mark a region in the block bitmap as off-limits
277 1.1 yamt * to the allocator (i.e. allocate it), ignoring any
278 1.1 yamt * existing allocations. Return the number of blocks
279 1.1 yamt * actually filled that were free before the call.
280 1.1 yamt */
281 1.1 yamt
282 1.5 yamt blist_blkno_t
283 1.5 yamt blist_fill(blist_t bl, blist_blkno_t blkno, blist_blkno_t count)
284 1.1 yamt {
285 1.5 yamt blist_blkno_t filled;
286 1.1 yamt
287 1.1 yamt if (bl) {
288 1.1 yamt if (bl->bl_radix == BLIST_BMAP_RADIX)
289 1.1 yamt filled = blst_leaf_fill(bl->bl_root, blkno, count);
290 1.1 yamt else
291 1.1 yamt filled = blst_meta_fill(bl->bl_root, blkno, count,
292 1.1 yamt bl->bl_radix, bl->bl_skip, 0);
293 1.1 yamt bl->bl_free -= filled;
294 1.1 yamt return filled;
295 1.1 yamt } else
296 1.1 yamt return 0;
297 1.1 yamt }
298 1.1 yamt
299 1.1 yamt /*
300 1.1 yamt * blist_resize() - resize an existing radix tree to handle the
301 1.1 yamt * specified number of blocks. This will reallocate
302 1.1 yamt * the tree and transfer the previous bitmap to the new
303 1.1 yamt * one. When extending the tree you can specify whether
304 1.1 yamt * the new blocks are to be left allocated or freed.
305 1.1 yamt */
306 1.1 yamt
307 1.1 yamt void
308 1.5 yamt blist_resize(blist_t *pbl, blist_blkno_t count, int freenew)
309 1.1 yamt {
310 1.1 yamt blist_t newbl = blist_create(count);
311 1.1 yamt blist_t save = *pbl;
312 1.1 yamt
313 1.1 yamt *pbl = newbl;
314 1.1 yamt if (count > save->bl_blocks)
315 1.1 yamt count = save->bl_blocks;
316 1.1 yamt blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count);
317 1.1 yamt
318 1.1 yamt /*
319 1.1 yamt * If resizing upwards, should we free the new space or not?
320 1.1 yamt */
321 1.1 yamt if (freenew && count < newbl->bl_blocks) {
322 1.1 yamt blist_free(newbl, count, newbl->bl_blocks - count);
323 1.1 yamt }
324 1.1 yamt blist_destroy(save);
325 1.1 yamt }
326 1.1 yamt
327 1.1 yamt #ifdef BLIST_DEBUG
328 1.1 yamt
329 1.1 yamt /*
330 1.1 yamt * blist_print() - dump radix tree
331 1.1 yamt */
332 1.1 yamt
333 1.1 yamt void
334 1.1 yamt blist_print(blist_t bl)
335 1.1 yamt {
336 1.1 yamt printf("BLIST {\n");
337 1.1 yamt blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
338 1.1 yamt printf("}\n");
339 1.1 yamt }
340 1.1 yamt
341 1.1 yamt #endif
342 1.1 yamt
343 1.1 yamt /************************************************************************
344 1.1 yamt * ALLOCATION SUPPORT FUNCTIONS *
345 1.1 yamt ************************************************************************
346 1.1 yamt *
347 1.1 yamt * These support functions do all the actual work. They may seem
348 1.1 yamt * rather longish, but that's because I've commented them up. The
349 1.1 yamt * actual code is straightforward.
350 1.1 yamt *
351 1.1 yamt */
352 1.1 yamt
353 1.1 yamt /*
354 1.1 yamt * blist_leaf_alloc() - allocate at a leaf in the radix tree (a bitmap).
355 1.1 yamt *
356 1.1 yamt * This is the core of the allocator and is optimized for the 1 block
357 1.1 yamt * and the BLIST_BMAP_RADIX block allocation cases. Other cases are
358 1.1 yamt * somewhat slower. The 1 block allocation case is log2 and extremely
359 1.1 yamt * quick.
360 1.1 yamt */
361 1.1 yamt
362 1.5 yamt static blist_blkno_t
363 1.1 yamt blst_leaf_alloc(
364 1.1 yamt blmeta_t *scan,
365 1.5 yamt blist_blkno_t blk,
366 1.1 yamt int count
367 1.1 yamt ) {
368 1.5 yamt blist_bitmap_t orig = scan->u.bmu_bitmap;
369 1.1 yamt
370 1.1 yamt if (orig == 0) {
371 1.1 yamt /*
372 1.1 yamt * Optimize bitmap all-allocated case. Also, count = 1
373 1.1 yamt * case assumes at least 1 bit is free in the bitmap, so
374 1.1 yamt * we have to take care of this case here.
375 1.1 yamt */
376 1.1 yamt scan->bm_bighint = 0;
377 1.3 yamt return(BLIST_NONE);
378 1.1 yamt }
379 1.1 yamt if (count == 1) {
380 1.1 yamt /*
381 1.1 yamt * Optimized code to allocate one bit out of the bitmap
382 1.1 yamt */
383 1.5 yamt blist_bitmap_t mask;
384 1.1 yamt int j = BLIST_BMAP_RADIX/2;
385 1.1 yamt int r = 0;
386 1.1 yamt
387 1.5 yamt mask = (blist_bitmap_t)-1 >> (BLIST_BMAP_RADIX/2);
388 1.1 yamt
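		/*
		 * Binary search for the lowest set (i.e. free) bit:
		 * each pass halves the window, so the loop takes
		 * log2(BLIST_BMAP_RADIX) iterations.
		 */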
389 1.1 yamt while (j) {
390 1.1 yamt if ((orig & mask) == 0) {
391 1.1 yamt r += j;
392 1.1 yamt orig >>= j;
393 1.1 yamt }
394 1.1 yamt j >>= 1;
395 1.1 yamt mask >>= j;
396 1.1 yamt }
397 1.5 yamt scan->u.bmu_bitmap &= ~((blist_bitmap_t)1 << r);
398 1.1 yamt return(blk + r);
399 1.1 yamt }
400 1.1 yamt if (count <= BLIST_BMAP_RADIX) {
401 1.1 yamt /*
402 1.1 yamt * non-optimized code to allocate N bits out of the bitmap.
403 1.1 yamt * The more bits, the faster the code runs. It will run
404 1.1 yamt * the slowest allocating 2 bits, but since there aren't any
405 1.1 yamt * memory ops in the core loop (or shouldn't be, anyway),
406 1.1 yamt * you probably won't notice the difference.
407 1.1 yamt */
408 1.1 yamt int j;
409 1.1 yamt int n = BLIST_BMAP_RADIX - count;
410 1.5 yamt blist_bitmap_t mask;
411 1.1 yamt
412 1.5 yamt mask = (blist_bitmap_t)-1 >> n;
413 1.1 yamt
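		/*
		 * Slide the 'count'-bit window across the leaf; a full
		 * match means every block in the window is free.
		 */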
414 1.1 yamt for (j = 0; j <= n; ++j) {
415 1.1 yamt if ((orig & mask) == mask) {
416 1.1 yamt scan->u.bmu_bitmap &= ~mask;
417 1.1 yamt return(blk + j);
418 1.1 yamt }
419 1.1 yamt mask = (mask << 1);
420 1.1 yamt }
421 1.1 yamt }
422 1.1 yamt /*
423 1.1 yamt * We couldn't allocate count in this subtree, update bighint.
424 1.1 yamt */
425 1.1 yamt scan->bm_bighint = count - 1;
426 1.3 yamt return(BLIST_NONE);
427 1.1 yamt }
428 1.1 yamt
429 1.1 yamt /*
430 1.1 yamt * blist_meta_alloc() - allocate at a meta in the radix tree.
431 1.1 yamt *
432 1.1 yamt * Attempt to allocate at a meta node. If we can't, we update
433 1.1 yamt * bighint and return a failure. Updating bighint optimizes future
434 1.1 yamt * calls that hit this node. We have to check for our collapse cases
435 1.1 yamt * and we have a few optimizations strewn in as well.
436 1.1 yamt */
437 1.1 yamt
438 1.5 yamt static blist_blkno_t
439 1.1 yamt blst_meta_alloc(
440 1.1 yamt blmeta_t *scan,
441 1.5 yamt blist_blkno_t blk,
442 1.5 yamt blist_blkno_t count,
443 1.5 yamt blist_blkno_t radix,
444 1.5 yamt blist_blkno_t skip
445 1.1 yamt ) {
446 1.5 yamt blist_blkno_t i;
447 1.5 yamt blist_blkno_t next_skip = (skip / BLIST_META_RADIX);
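	/*
	 * next_skip is the array stride from one child subtree to the
	 * next: each of the BLIST_META_RADIX children occupies next_skip
	 * blmeta_t entries (itself plus its own skip).
	 */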
448 1.1 yamt
449 1.1 yamt if (scan->u.bmu_avail == 0) {
450 1.1 yamt /*
451 1.1 yamt * ALL-ALLOCATED special case
452 1.1 yamt */
453 1.1 yamt scan->bm_bighint = count;
454 1.3 yamt return(BLIST_NONE);
455 1.1 yamt }
456 1.1 yamt
457 1.1 yamt if (scan->u.bmu_avail == radix) {
458 1.1 yamt radix /= BLIST_META_RADIX;
459 1.1 yamt
460 1.1 yamt /*
461 1.1 yamt * ALL-FREE special case, initialize uninitialized
462 1.1 yamt * sublevel.
463 1.1 yamt */
464 1.1 yamt for (i = 1; i <= skip; i += next_skip) {
465 1.5 yamt if (scan[i].bm_bighint == (blist_blkno_t)-1)
466 1.1 yamt break;
467 1.1 yamt if (next_skip == 1) {
468 1.5 yamt scan[i].u.bmu_bitmap = (blist_bitmap_t)-1;
469 1.1 yamt scan[i].bm_bighint = BLIST_BMAP_RADIX;
470 1.1 yamt } else {
471 1.1 yamt scan[i].bm_bighint = radix;
472 1.1 yamt scan[i].u.bmu_avail = radix;
473 1.1 yamt }
474 1.1 yamt }
475 1.1 yamt } else {
476 1.1 yamt radix /= BLIST_META_RADIX;
477 1.1 yamt }
478 1.1 yamt
479 1.1 yamt for (i = 1; i <= skip; i += next_skip) {
480 1.5 yamt if (scan[i].bm_bighint == (blist_blkno_t)-1) {
481 1.3 yamt /*
482 1.3 yamt * Terminator
483 1.3 yamt */
484 1.3 yamt break;
485 1.3 yamt } else if (count <= scan[i].bm_bighint) {
486 1.1 yamt /*
487 1.1 yamt * count fits in object
488 1.1 yamt */
489 1.5 yamt blist_blkno_t r;
490 1.1 yamt if (next_skip == 1) {
491 1.1 yamt r = blst_leaf_alloc(&scan[i], blk, count);
492 1.1 yamt } else {
493 1.1 yamt r = blst_meta_alloc(&scan[i], blk, count, radix, next_skip - 1);
494 1.1 yamt }
495 1.3 yamt if (r != BLIST_NONE) {
496 1.1 yamt scan->u.bmu_avail -= count;
497 1.1 yamt if (scan->bm_bighint > scan->u.bmu_avail)
498 1.1 yamt scan->bm_bighint = scan->u.bmu_avail;
499 1.1 yamt return(r);
500 1.1 yamt }
501 1.1 yamt } else if (count > radix) {
502 1.1 yamt /*
503 1.1 yamt * count does not fit in this object even if it were
504 1.1 yamt * completely free.
505 1.1 yamt */
506 1.1 yamt panic("blist_meta_alloc: allocation too large");
507 1.1 yamt }
508 1.1 yamt blk += radix;
509 1.1 yamt }
510 1.1 yamt
511 1.1 yamt /*
512 1.1 yamt * We couldn't allocate count in this subtree, update bighint.
513 1.1 yamt */
514 1.1 yamt if (scan->bm_bighint >= count)
515 1.1 yamt scan->bm_bighint = count - 1;
516 1.3 yamt return(BLIST_NONE);
517 1.1 yamt }
518 1.1 yamt
519 1.1 yamt /*
520 1.1 yamt * BLST_LEAF_FREE() - free allocated block from leaf bitmap
521 1.1 yamt *
522 1.1 yamt */
523 1.1 yamt
524 1.1 yamt static void
525 1.1 yamt blst_leaf_free(
526 1.1 yamt blmeta_t *scan,
527 1.5 yamt blist_blkno_t blk,
528 1.1 yamt int count
529 1.1 yamt ) {
530 1.1 yamt /*
531 1.1 yamt * free some data in this bitmap
532 1.1 yamt *
533 1.1 yamt * e.g.
534 1.1 yamt * 0000111111111110000
535 1.1 yamt * \_________/\__/
536 1.1 yamt * v n
537 1.1 yamt */
538 1.1 yamt int n = blk & (BLIST_BMAP_RADIX - 1);
539 1.5 yamt blist_bitmap_t mask;
540 1.1 yamt
541 1.5 yamt mask = ((blist_bitmap_t)-1 << n) &
542 1.5 yamt ((blist_bitmap_t)-1 >> (BLIST_BMAP_RADIX - count - n));
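	/* mask now covers bits n..n+count-1, i.e. the blocks being freed */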
543 1.1 yamt
544 1.1 yamt if (scan->u.bmu_bitmap & mask)
545 1.1 yamt panic("blst_radix_free: freeing free block");
546 1.1 yamt scan->u.bmu_bitmap |= mask;
547 1.1 yamt
548 1.1 yamt /*
549 1.1 yamt * We could probably do a better job here. We are required to make
550 1.1 yamt * bighint at least as large as the biggest contiguous block of
551 1.1 yamt * data. If we just shoehorn it, a little extra overhead will
552 1.1 yamt * be incurred on the next allocation (but only that one typically).
553 1.1 yamt */
554 1.1 yamt scan->bm_bighint = BLIST_BMAP_RADIX;
555 1.1 yamt }
556 1.1 yamt
557 1.1 yamt /*
558 1.1 yamt * BLST_META_FREE() - free allocated blocks from radix tree meta info
559 1.1 yamt *
560 1.1 yamt * This support routine frees a range of blocks from the bitmap.
561 1.1 yamt * The range must be entirely enclosed by this radix node. If a
562 1.1 yamt * meta node, we break the range down recursively to free blocks
563 1.1 yamt * in subnodes (which means that this code can free an arbitrary
564 1.1 yamt * range whereas the allocation code cannot allocate an arbitrary
565 1.1 yamt * range).
566 1.1 yamt */
567 1.1 yamt
568 1.1 yamt static void
569 1.1 yamt blst_meta_free(
570 1.1 yamt blmeta_t *scan,
571 1.5 yamt blist_blkno_t freeBlk,
572 1.5 yamt blist_blkno_t count,
573 1.5 yamt blist_blkno_t radix,
574 1.5 yamt blist_blkno_t skip,
575 1.5 yamt blist_blkno_t blk
576 1.1 yamt ) {
577 1.5 yamt blist_blkno_t i;
578 1.5 yamt blist_blkno_t next_skip = (skip / BLIST_META_RADIX);
579 1.1 yamt
580 1.1 yamt #if 0
581 1.3 yamt printf("FREE (%" PRIx64 ",%" PRIu64
582 1.3 yamt ") FROM (%" PRIx64 ",%" PRIu64 ")\n",
583 1.5 yamt (uint64_t)freeBlk, (uint64_t)count,
584 1.5 yamt (uint64_t)blk, (uint64_t)radix
585 1.1 yamt );
586 1.1 yamt #endif
587 1.1 yamt
588 1.1 yamt if (scan->u.bmu_avail == 0) {
589 1.1 yamt /*
590 1.1 yamt * ALL-ALLOCATED special case, with possible
591 1.1 yamt * shortcut to ALL-FREE special case.
592 1.1 yamt */
593 1.1 yamt scan->u.bmu_avail = count;
594 1.1 yamt scan->bm_bighint = count;
595 1.1 yamt
596 1.1 yamt if (count != radix) {
597 1.1 yamt for (i = 1; i <= skip; i += next_skip) {
598 1.5 yamt if (scan[i].bm_bighint == (blist_blkno_t)-1)
599 1.1 yamt break;
600 1.1 yamt scan[i].bm_bighint = 0;
601 1.1 yamt if (next_skip == 1) {
602 1.1 yamt scan[i].u.bmu_bitmap = 0;
603 1.1 yamt } else {
604 1.1 yamt scan[i].u.bmu_avail = 0;
605 1.1 yamt }
606 1.1 yamt }
607 1.1 yamt /* fall through */
608 1.1 yamt }
609 1.1 yamt } else {
610 1.1 yamt scan->u.bmu_avail += count;
611 1.1 yamt /* scan->bm_bighint = radix; */
612 1.1 yamt }
613 1.1 yamt
614 1.1 yamt /*
615 1.1 yamt * ALL-FREE special case.
616 1.1 yamt */
617 1.1 yamt
618 1.1 yamt if (scan->u.bmu_avail == radix)
619 1.1 yamt return;
620 1.1 yamt if (scan->u.bmu_avail > radix)
621 1.3 yamt panic("blst_meta_free: freeing already free blocks (%"
622 1.3 yamt PRIu64 ") %" PRIu64 "/%" PRIu64,
623 1.5 yamt (uint64_t)count,
624 1.5 yamt (uint64_t)scan->u.bmu_avail,
625 1.5 yamt (uint64_t)radix);
626 1.1 yamt
627 1.1 yamt /*
628 1.1 yamt * Break the free down into its components
629 1.1 yamt */
630 1.1 yamt
631 1.1 yamt radix /= BLIST_META_RADIX;
632 1.1 yamt
633 1.1 yamt i = (freeBlk - blk) / radix;
634 1.1 yamt blk += i * radix;
635 1.1 yamt i = i * next_skip + 1;
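	/*
	 * i now indexes the first child subtree containing freeBlk,
	 * and blk has been advanced to that child's first block.
	 */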
636 1.1 yamt
637 1.1 yamt while (i <= skip && blk < freeBlk + count) {
638 1.5 yamt blist_blkno_t v;
639 1.1 yamt
640 1.1 yamt v = blk + radix - freeBlk;
641 1.1 yamt if (v > count)
642 1.1 yamt v = count;
643 1.1 yamt
644 1.5 yamt if (scan->bm_bighint == (blist_blkno_t)-1)
645 1.1 yamt panic("blst_meta_free: freeing unexpected range");
646 1.1 yamt
647 1.1 yamt if (next_skip == 1) {
648 1.1 yamt blst_leaf_free(&scan[i], freeBlk, v);
649 1.1 yamt } else {
650 1.1 yamt blst_meta_free(&scan[i], freeBlk, v, radix, next_skip - 1, blk);
651 1.1 yamt }
652 1.1 yamt if (scan->bm_bighint < scan[i].bm_bighint)
653 1.1 yamt scan->bm_bighint = scan[i].bm_bighint;
654 1.1 yamt count -= v;
655 1.1 yamt freeBlk += v;
656 1.1 yamt blk += radix;
657 1.1 yamt i += next_skip;
658 1.1 yamt }
659 1.1 yamt }
660 1.1 yamt
661 1.1 yamt /*
662 1.1 yamt * BLST_COPY() - copy one radix tree to another
663 1.1 yamt *
664 1.1 yamt * Locates free space in the source tree and frees it in the destination
665 1.1 yamt * tree. The space may not already be free in the destination.
666 1.1 yamt */
667 1.1 yamt
668 1.1 yamt static void blst_copy(
669 1.1 yamt blmeta_t *scan,
670 1.5 yamt blist_blkno_t blk,
671 1.5 yamt blist_blkno_t radix,
672 1.5 yamt blist_blkno_t skip,
673 1.1 yamt blist_t dest,
674 1.5 yamt blist_blkno_t count
675 1.1 yamt ) {
676 1.5 yamt blist_blkno_t next_skip;
677 1.5 yamt blist_blkno_t i;
678 1.1 yamt
679 1.1 yamt /*
680 1.1 yamt * Leaf node
681 1.1 yamt */
682 1.1 yamt
683 1.1 yamt if (radix == BLIST_BMAP_RADIX) {
684 1.5 yamt blist_bitmap_t v = scan->u.bmu_bitmap;
685 1.1 yamt
686 1.5 yamt if (v == (blist_bitmap_t)-1) {
687 1.1 yamt blist_free(dest, blk, count);
688 1.1 yamt } else if (v != 0) {
689 1.7 christos int j;
690 1.1 yamt
691 1.7 christos for (j = 0; j < BLIST_BMAP_RADIX && j < count; ++j) {
692 1.7 christos if (v & ((blist_bitmap_t)1 << j))
693 1.7 christos blist_free(dest, blk + j, 1);
694 1.1 yamt }
695 1.1 yamt }
696 1.1 yamt return;
697 1.1 yamt }
698 1.1 yamt
699 1.1 yamt /*
700 1.1 yamt * Meta node
701 1.1 yamt */
702 1.1 yamt
703 1.1 yamt if (scan->u.bmu_avail == 0) {
704 1.1 yamt /*
705 1.1 yamt * Source all allocated, leave dest allocated
706 1.1 yamt */
707 1.1 yamt return;
708 1.1 yamt }
709 1.1 yamt if (scan->u.bmu_avail == radix) {
710 1.1 yamt /*
711 1.1 yamt * Source all free, free entire dest
712 1.1 yamt */
713 1.1 yamt if (count < radix)
714 1.1 yamt blist_free(dest, blk, count);
715 1.1 yamt else
716 1.1 yamt blist_free(dest, blk, radix);
717 1.1 yamt return;
718 1.1 yamt }
719 1.1 yamt
720 1.1 yamt
721 1.1 yamt radix /= BLIST_META_RADIX;
722 1.5 yamt next_skip = (skip / BLIST_META_RADIX);
723 1.1 yamt
724 1.1 yamt for (i = 1; count && i <= skip; i += next_skip) {
725 1.5 yamt if (scan[i].bm_bighint == (blist_blkno_t)-1)
726 1.1 yamt break;
727 1.1 yamt
728 1.1 yamt if (count >= radix) {
729 1.1 yamt blst_copy(
730 1.1 yamt &scan[i],
731 1.1 yamt blk,
732 1.1 yamt radix,
733 1.1 yamt next_skip - 1,
734 1.1 yamt dest,
735 1.1 yamt radix
736 1.1 yamt );
737 1.1 yamt count -= radix;
738 1.1 yamt } else {
739 1.1 yamt if (count) {
740 1.1 yamt blst_copy(
741 1.1 yamt &scan[i],
742 1.1 yamt blk,
743 1.1 yamt radix,
744 1.1 yamt next_skip - 1,
745 1.1 yamt dest,
746 1.1 yamt count
747 1.1 yamt );
748 1.1 yamt }
749 1.1 yamt count = 0;
750 1.1 yamt }
751 1.1 yamt blk += radix;
752 1.1 yamt }
753 1.1 yamt }
754 1.1 yamt
755 1.1 yamt /*
756 1.1 yamt * BLST_LEAF_FILL() - allocate specific blocks in leaf bitmap
757 1.1 yamt *
758 1.1 yamt * This routine allocates all blocks in the specified range
759 1.1 yamt * regardless of any existing allocations in that range. Returns
760 1.1 yamt * the number of blocks allocated by the call.
761 1.1 yamt */
762 1.1 yamt
763 1.1 yamt static int
764 1.5 yamt blst_leaf_fill(blmeta_t *scan, blist_blkno_t blk, int count)
765 1.1 yamt {
766 1.1 yamt int n = blk & (BLIST_BMAP_RADIX - 1);
767 1.1 yamt int nblks;
768 1.5 yamt blist_bitmap_t mask, bitmap;
769 1.1 yamt
770 1.5 yamt mask = ((blist_bitmap_t)-1 << n) &
771 1.5 yamt ((blist_bitmap_t)-1 >> (BLIST_BMAP_RADIX - count - n));
772 1.1 yamt
773 1.1 yamt /* Count the number of blocks we're about to allocate */
774 1.1 yamt bitmap = scan->u.bmu_bitmap & mask;
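	/* Kernighan's trick: each pass clears the lowest set bit. */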
775 1.1 yamt for (nblks = 0; bitmap != 0; nblks++)
776 1.1 yamt bitmap &= bitmap - 1;
777 1.1 yamt
778 1.1 yamt scan->u.bmu_bitmap &= ~mask;
779 1.1 yamt return nblks;
780 1.1 yamt }
781 1.1 yamt
782 1.1 yamt /*
783 1.1 yamt * BLST_META_FILL() - allocate specific blocks at a meta node
784 1.1 yamt *
785 1.1 yamt * This routine allocates the specified range of blocks,
786 1.1 yamt * regardless of any existing allocations in the range. The
787 1.1 yamt * range must be within the extent of this node. Returns the
788 1.1 yamt * number of blocks allocated by the call.
789 1.1 yamt */
790 1.5 yamt static blist_blkno_t
791 1.1 yamt blst_meta_fill(
792 1.1 yamt blmeta_t *scan,
793 1.5 yamt blist_blkno_t allocBlk,
794 1.5 yamt blist_blkno_t count,
795 1.5 yamt blist_blkno_t radix,
796 1.5 yamt blist_blkno_t skip,
797 1.5 yamt blist_blkno_t blk
798 1.1 yamt ) {
799 1.5 yamt blist_blkno_t i;
800 1.5 yamt blist_blkno_t next_skip = (skip / BLIST_META_RADIX);
801 1.5 yamt blist_blkno_t nblks = 0;
802 1.1 yamt
803 1.1 yamt if (count == radix || scan->u.bmu_avail == 0) {
804 1.1 yamt /*
805 1.1 yamt * ALL-ALLOCATED special case
806 1.1 yamt */
807 1.1 yamt nblks = scan->u.bmu_avail;
808 1.1 yamt scan->u.bmu_avail = 0;
809 1.1 yamt scan->bm_bighint = count;
810 1.1 yamt return nblks;
811 1.1 yamt }
812 1.1 yamt
813 1.9 yamt if (count > radix)
814 1.9 yamt panic("blist_meta_fill: allocation too large");
815 1.9 yamt
816 1.1 yamt if (scan->u.bmu_avail == radix) {
817 1.1 yamt radix /= BLIST_META_RADIX;
818 1.1 yamt
819 1.1 yamt /*
820 1.1 yamt * ALL-FREE special case, initialize sublevel
821 1.1 yamt */
822 1.1 yamt for (i = 1; i <= skip; i += next_skip) {
823 1.5 yamt if (scan[i].bm_bighint == (blist_blkno_t)-1)
824 1.1 yamt break;
825 1.1 yamt if (next_skip == 1) {
826 1.5 yamt scan[i].u.bmu_bitmap = (blist_bitmap_t)-1;
827 1.1 yamt scan[i].bm_bighint = BLIST_BMAP_RADIX;
828 1.1 yamt } else {
829 1.1 yamt scan[i].bm_bighint = radix;
830 1.1 yamt scan[i].u.bmu_avail = radix;
831 1.1 yamt }
832 1.1 yamt }
833 1.1 yamt } else {
834 1.1 yamt radix /= BLIST_META_RADIX;
835 1.1 yamt }
836 1.1 yamt
837 1.1 yamt i = (allocBlk - blk) / radix;
838 1.1 yamt blk += i * radix;
839 1.1 yamt i = i * next_skip + 1;
840 1.1 yamt
841 1.1 yamt while (i <= skip && blk < allocBlk + count) {
842 1.5 yamt blist_blkno_t v;
843 1.1 yamt
844 1.1 yamt v = blk + radix - allocBlk;
845 1.1 yamt if (v > count)
846 1.1 yamt v = count;
847 1.1 yamt
848 1.5 yamt if (scan->bm_bighint == (blist_blkno_t)-1)
849 1.1 yamt panic("blst_meta_fill: filling unexpected range");
850 1.1 yamt
851 1.1 yamt if (next_skip == 1) {
852 1.1 yamt nblks += blst_leaf_fill(&scan[i], allocBlk, v);
853 1.1 yamt } else {
854 1.1 yamt nblks += blst_meta_fill(&scan[i], allocBlk, v,
855 1.1 yamt radix, next_skip - 1, blk);
856 1.1 yamt }
857 1.1 yamt count -= v;
858 1.1 yamt allocBlk += v;
859 1.1 yamt blk += radix;
860 1.1 yamt i += next_skip;
861 1.1 yamt }
862 1.1 yamt scan->u.bmu_avail -= nblks;
863 1.1 yamt return nblks;
864 1.1 yamt }
865 1.1 yamt
866 1.1 yamt /*
867 1.1 yamt * BLST_RADIX_INIT() - initialize radix tree
868 1.1 yamt *
869 1.1 yamt * Initialize our meta structures and bitmaps and calculate the exact
870 1.1 yamt * amount of space required to manage 'count' blocks - this space may
871 1.12 wiz * be considerably less than the calculated radix due to the large
872 1.1 yamt * RADIX values we use.
873 1.1 yamt */
874 1.1 yamt
875 1.5 yamt static blist_blkno_t
876 1.5 yamt blst_radix_init(blmeta_t *scan, blist_blkno_t radix, blist_blkno_t skip,
877 1.5 yamt blist_blkno_t count)
878 1.1 yamt {
879 1.5 yamt blist_blkno_t i;
880 1.5 yamt blist_blkno_t next_skip;
881 1.5 yamt blist_blkno_t memindex = 0;
882 1.1 yamt
883 1.1 yamt /*
884 1.1 yamt * Leaf node
885 1.1 yamt */
886 1.1 yamt
887 1.1 yamt if (radix == BLIST_BMAP_RADIX) {
888 1.1 yamt if (scan) {
889 1.1 yamt scan->bm_bighint = 0;
890 1.1 yamt scan->u.bmu_bitmap = 0;
891 1.1 yamt }
892 1.1 yamt return(memindex);
893 1.1 yamt }
894 1.1 yamt
895 1.1 yamt /*
896 1.1 yamt * Meta node. If allocating the entire object we can special
897 1.1 yamt * case it. However, we need to figure out how much memory
898 1.1 yamt * is required to manage 'count' blocks, so we continue on anyway.
899 1.1 yamt */
900 1.1 yamt
901 1.1 yamt if (scan) {
902 1.1 yamt scan->bm_bighint = 0;
903 1.1 yamt scan->u.bmu_avail = 0;
904 1.1 yamt }
905 1.1 yamt
906 1.1 yamt radix /= BLIST_META_RADIX;
907 1.5 yamt next_skip = (skip / BLIST_META_RADIX);
908 1.1 yamt
909 1.1 yamt for (i = 1; i <= skip; i += next_skip) {
910 1.1 yamt if (count >= radix) {
911 1.1 yamt /*
912 1.1 yamt * Allocate the entire object
913 1.1 yamt */
914 1.1 yamt memindex = i + blst_radix_init(
915 1.1 yamt ((scan) ? &scan[i] : NULL),
916 1.1 yamt radix,
917 1.1 yamt next_skip - 1,
918 1.1 yamt radix
919 1.1 yamt );
920 1.1 yamt count -= radix;
921 1.1 yamt } else if (count > 0) {
922 1.1 yamt /*
923 1.1 yamt * Allocate a partial object
924 1.1 yamt */
925 1.1 yamt memindex = i + blst_radix_init(
926 1.1 yamt ((scan) ? &scan[i] : NULL),
927 1.1 yamt radix,
928 1.1 yamt next_skip - 1,
929 1.1 yamt count
930 1.1 yamt );
931 1.1 yamt count = 0;
932 1.1 yamt } else {
933 1.1 yamt /*
934 1.1 yamt * Add terminator and break out
935 1.1 yamt */
936 1.1 yamt if (scan)
937 1.5 yamt scan[i].bm_bighint = (blist_blkno_t)-1;
938 1.1 yamt break;
939 1.1 yamt }
940 1.1 yamt }
941 1.1 yamt if (memindex < i)
942 1.1 yamt memindex = i;
943 1.1 yamt return(memindex);
944 1.1 yamt }
945 1.1 yamt
946 1.1 yamt #ifdef BLIST_DEBUG
947 1.1 yamt
948 1.1 yamt static void
949 1.5 yamt blst_radix_print(blmeta_t *scan, blist_blkno_t blk, blist_blkno_t radix,
950 1.5 yamt blist_blkno_t skip, int tab)
951 1.1 yamt {
952 1.5 yamt blist_blkno_t i;
953 1.5 yamt blist_blkno_t next_skip;
954 1.1 yamt int lastState = 0;
955 1.1 yamt
956 1.1 yamt if (radix == BLIST_BMAP_RADIX) {
957 1.1 yamt printf(
958 1.5 yamt "%*.*s(%0*" PRIx64 ",%" PRIu64
959 1.5 yamt "): bitmap %0*" PRIx64 " big=%" PRIu64 "\n",
960 1.1 yamt tab, tab, "",
961 1.5 yamt (int)(sizeof(blk) * 2),
962 1.5 yamt (uint64_t)blk,
963 1.5 yamt (uint64_t)radix,
964 1.5 yamt (int)(sizeof(scan->u.bmu_bitmap) * 2),
965 1.5 yamt (uint64_t)scan->u.bmu_bitmap,
966 1.5 yamt (uint64_t)scan->bm_bighint
967 1.1 yamt );
968 1.1 yamt return;
969 1.1 yamt }
970 1.1 yamt
971 1.1 yamt if (scan->u.bmu_avail == 0) {
972 1.1 yamt printf(
973 1.5 yamt "%*.*s(%0*" PRIx64 ",%" PRIu64") ALL ALLOCATED\n",
974 1.5 yamt tab, tab, "",
975 1.5 yamt (int)(sizeof(blk) * 2),
976 1.5 yamt (uint64_t)blk,
977 1.5 yamt (uint64_t)radix
978 1.1 yamt );
979 1.1 yamt return;
980 1.1 yamt }
981 1.1 yamt if (scan->u.bmu_avail == radix) {
982 1.1 yamt printf(
983 1.5 yamt "%*.*s(%0*" PRIx64 ",%" PRIu64 ") ALL FREE\n",
984 1.5 yamt tab, tab, "",
985 1.5 yamt (int)(sizeof(blk) * 2),
986 1.5 yamt (uint64_t)blk,
987 1.5 yamt (uint64_t)radix
988 1.1 yamt );
989 1.1 yamt return;
990 1.1 yamt }
991 1.1 yamt
992 1.1 yamt printf(
993 1.5 yamt "%*.*s(%0*" PRIx64 ",%" PRIu64 "): subtree (%" PRIu64 "/%"
994 1.3 yamt PRIu64 ") big=%" PRIu64 " {\n",
995 1.1 yamt tab, tab, "",
996 1.5 yamt (int)(sizeof(blk) * 2),
997 1.5 yamt (uint64_t)blk,
998 1.5 yamt (uint64_t)radix,
999 1.5 yamt (uint64_t)scan->u.bmu_avail,
1000 1.5 yamt (uint64_t)radix,
1001 1.5 yamt (uint64_t)scan->bm_bighint
1002 1.1 yamt );
1003 1.1 yamt
1004 1.1 yamt radix /= BLIST_META_RADIX;
1005 1.5 yamt next_skip = (skip / BLIST_META_RADIX);
1006 1.1 yamt tab += 4;
1007 1.1 yamt
1008 1.1 yamt for (i = 1; i <= skip; i += next_skip) {
1009 1.5 yamt if (scan[i].bm_bighint == (blist_blkno_t)-1) {
1010 1.1 yamt printf(
1011 1.5 yamt "%*.*s(%0*" PRIx64 ",%" PRIu64 "): Terminator\n",
1012 1.1 yamt tab, tab, "",
1013 1.5 yamt (int)(sizeof(blk) * 2),
1014 1.5 yamt (uint64_t)blk,
1015 1.5 yamt (uint64_t)radix
1016 1.1 yamt );
1017 1.1 yamt lastState = 0;
1018 1.1 yamt break;
1019 1.1 yamt }
1020 1.1 yamt blst_radix_print(
1021 1.1 yamt &scan[i],
1022 1.1 yamt blk,
1023 1.1 yamt radix,
1024 1.1 yamt next_skip - 1,
1025 1.1 yamt tab
1026 1.1 yamt );
1027 1.1 yamt blk += radix;
1028 1.1 yamt }
1029 1.1 yamt tab -= 4;
1030 1.1 yamt
1031 1.1 yamt printf(
1032 1.1 yamt "%*.*s}\n",
1033 1.1 yamt tab, tab, ""
1034 1.1 yamt );
1035 1.1 yamt }
1036 1.1 yamt
1037 1.1 yamt #endif
1038 1.1 yamt
1039 1.1 yamt #ifdef BLIST_DEBUG
1040 1.1 yamt
1041 1.1 yamt int
1042 1.1 yamt main(int ac, char **av)
1043 1.1 yamt {
1044 1.5 yamt blist_blkno_t size = 1024;
1045 1.1 yamt int i;
1046 1.1 yamt blist_t bl;
1047 1.1 yamt
1048 1.1 yamt for (i = 1; i < ac; ++i) {
1049 1.1 yamt const char *ptr = av[i];
1050 1.1 yamt if (*ptr != '-') {
1051 1.1 yamt size = strtol(ptr, NULL, 0);
1052 1.1 yamt continue;
1053 1.1 yamt }
1054 1.1 yamt ptr += 2;
1055 1.1 yamt fprintf(stderr, "Bad option: %s\n", ptr - 2);
1056 1.1 yamt exit(1);
1057 1.1 yamt }
1058 1.1 yamt bl = blist_create(size);
1059 1.1 yamt blist_free(bl, 0, size);
1060 1.1 yamt
1061 1.1 yamt for (;;) {
1062 1.1 yamt char buf[1024];
1063 1.3 yamt uint64_t da = 0;
1064 1.3 yamt uint64_t count = 0;
1065 1.1 yamt
1066 1.3 yamt printf("%" PRIu64 "/%" PRIu64 "/%" PRIu64 "> ",
1067 1.5 yamt (uint64_t)bl->bl_free,
1068 1.5 yamt (uint64_t)size,
1069 1.5 yamt (uint64_t)bl->bl_radix);
1070 1.1 yamt fflush(stdout);
1071 1.1 yamt if (fgets(buf, sizeof(buf), stdin) == NULL)
1072 1.1 yamt break;
1073 1.1 yamt switch(buf[0]) {
1074 1.1 yamt case 'r':
1075 1.3 yamt if (sscanf(buf + 1, "%" SCNu64, &count) == 1) {
1076 1.1 yamt blist_resize(&bl, count, 1);
1077 1.1 yamt } else {
1078 1.1 yamt printf("?\n");
1079 1.1 yamt }
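			/* FALLTHROUGH */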
1080 1.1 yamt case 'p':
1081 1.1 yamt blist_print(bl);
1082 1.1 yamt break;
1083 1.1 yamt case 'a':
1084 1.3 yamt if (sscanf(buf + 1, "%" SCNu64, &count) == 1) {
1085 1.5 yamt blist_blkno_t blk = blist_alloc(bl, count);
1086 1.5 yamt printf(" R=%0*" PRIx64 "\n",
1087 1.5 yamt (int)(sizeof(blk) * 2),
1088 1.5 yamt (uint64_t)blk);
1089 1.1 yamt } else {
1090 1.1 yamt printf("?\n");
1091 1.1 yamt }
1092 1.1 yamt break;
1093 1.1 yamt case 'f':
1094 1.3 yamt if (sscanf(buf + 1, "%" SCNx64 " %" SCNu64,
1095 1.3 yamt &da, &count) == 2) {
1096 1.1 yamt blist_free(bl, da, count);
1097 1.1 yamt } else {
1098 1.1 yamt printf("?\n");
1099 1.1 yamt }
1100 1.1 yamt break;
1101 1.1 yamt case 'l':
1102 1.3 yamt if (sscanf(buf + 1, "%" SCNx64 " %" SCNu64,
1103 1.3 yamt &da, &count) == 2) {
1104 1.5 yamt printf(" n=%" PRIu64 "\n",
1105 1.5 yamt (uint64_t)blist_fill(bl, da, count));
1106 1.1 yamt } else {
1107 1.1 yamt printf("?\n");
1108 1.1 yamt }
1109 1.1 yamt break;
1110 1.1 yamt case '?':
1111 1.1 yamt case 'h':
1112 1.1 yamt puts(
1113 1.1 yamt "p -print\n"
1114 1.1 yamt "a %d -allocate\n"
1115 1.1 yamt "f %x %d -free\n"
1116 1.1 yamt "l %x %d -fill\n"
1117 1.1 yamt "r %d -resize\n"
1118 1.1 yamt "h/? -help"
1119 1.1 yamt );
1120 1.1 yamt break;
1121 1.1 yamt default:
1122 1.1 yamt printf("?\n");
1123 1.1 yamt break;
1124 1.1 yamt }
1125 1.1 yamt }
1126 1.1 yamt return(0);
1127 1.1 yamt }
1128 1.1 yamt
1129 1.1 yamt void
1130 1.1 yamt panic(const char *ctl, ...)
1131 1.1 yamt {
1132 1.1 yamt va_list va;
1133 1.1 yamt
1134 1.1 yamt va_start(va, ctl);
1135 1.1 yamt vfprintf(stderr, ctl, va);
1136 1.1 yamt fprintf(stderr, "\n");
1137 1.1 yamt va_end(va);
1138 1.1 yamt exit(1);
1139 1.1 yamt }
1140 1.1 yamt
1141 1.1 yamt #endif
1142 1.1 yamt