/*	$NetBSD: subr_blist.c,v 1.5 2005/04/06 13:09:10 yamt Exp $	*/

/*-
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * BLIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting
 *
 *	This module implements a general bitmap allocator/deallocator.  The
 *	allocator eats around 2 bits per 'block'.  The module does not
 *	try to interpret the meaning of a 'block' other than to return
 *	BLIST_NONE on an allocation failure.
 *
 *	A radix tree is used to maintain the bitmap.  Two radix constants are
 *	involved:  One for the bitmaps contained in the leaf nodes (typically
 *	32), and one for the meta nodes (typically 16).  Both meta and leaf
 *	nodes have a hint field.  This field gives us a hint as to the largest
 *	free contiguous range of blocks under the node.  It may contain a
 *	value that is too high, but will never contain a value that is too
 *	low.  When the radix tree is searched, allocation failures in subtrees
 *	update the hint.
 *
 *	The radix tree also implements two collapsed states for meta nodes:
 *	the ALL-ALLOCATED state and the ALL-FREE state.  If a meta node is
 *	in either of these two states, all information contained underneath
 *	the node is considered stale.  These states are used to optimize
 *	allocation and freeing operations.
 *
 *	The hinting greatly increases code efficiency for allocations while
 *	the general radix structure optimizes both allocations and frees.  The
 *	radix tree should be able to operate well no matter how much
 *	fragmentation there is and no matter how large a bitmap is used.
 *
 *	Unlike the rlist code, the blist code wires all necessary memory at
 *	creation time.  Neither allocations nor frees require interaction with
 *	the memory subsystem.  In contrast, the rlist code may allocate memory
 *	on an rlist_free() call.  The non-blocking features of the blist code
 *	are used to great advantage in the swap code (vm/nswap_pager.c).  The
 *	rlist code uses a little less overall memory than the blist code (but
 *	due to swap interleaving not all that much less), but the blist code
 *	scales much, much better.
 *
 *	LAYOUT: The radix tree is laid out recursively using a
 *	linear array.  Each meta node is immediately followed (laid out
 *	sequentially in memory) by BLIST_META_RADIX lower level nodes.  This
 *	is a recursive structure but one that can be easily scanned through
 *	a very simple 'skip' calculation.  In order to support large radixes,
 *	portions of the tree may reside outside our memory allocation.  We
 *	handle this with an early-termination optimization (when bighint is
 *	set to -1) on the scan.  The memory allocation is only large enough
 *	to cover the number of blocks requested at creation time even if it
 *	must be encompassed in a larger root-node radix.
 *
 *	NOTE: the allocator cannot currently allocate more than
 *	BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 *	large' if you try.  This is an area that could use improvement.  The
 *	radix is large enough that this restriction does not affect the swap
 *	system, though.  Currently only the allocation code is affected by
 *	this algorithmic unfeature.  The freeing code can handle arbitrary
 *	ranges.
 *
 *	This code can be compiled stand-alone for debugging.
 */
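
/*
 * Illustrative sketch (not part of this module): how blist_create()
 * below derives its 'radix' and 'skip' values, assuming the typical
 * constants named above (32-block leaf bitmaps, 16-way meta nodes).
 * 'skip' is the number of array slots occupied by one node's entire
 * subtree, which is what makes the linear layout walkable.
 */
#if 0
static void
blist_layout_sketch(uint64_t blocks)
{
	uint64_t radix = 32;		/* BLIST_BMAP_RADIX (assumed) */
	uint64_t skip = 0;

	while (radix < blocks) {
		radix *= 16;		/* BLIST_META_RADIX */
		skip = (skip + 1) * 16;
	}
	/* e.g. blocks = 2048 ends with radix = 8192 and skip = 272 */
}
#endif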

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_blist.c,v 1.5 2005/04/06 13:09:10 yamt Exp $");
#if 0
__FBSDID("$FreeBSD: src/sys/kern/subr_blist.c,v 1.17 2004/06/04 04:03:25 alc Exp $");
#endif

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/blist.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#else

#ifndef BLIST_NO_DEBUG
#define BLIST_DEBUG
#endif

#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <inttypes.h>

#define malloc(a,b,c)	calloc(a, 1)
#define free(a,b)	free(a)

#include "../sys/blist.h"

void panic(const char *ctl, ...);

#endif

/*
 * blmeta and blist_bitmap_t MUST be a power of 2 in size.
 */

typedef struct blmeta {
	union {
		blist_blkno_t	bmu_avail;	/* space available under us */
		blist_bitmap_t	bmu_bitmap;	/* bitmap if we are a leaf */
	} u;
	blist_blkno_t	bm_bighint;	/* biggest contiguous block hint */
} blmeta_t;

struct blist {
	blist_blkno_t	bl_blocks;	/* area of coverage */
	blist_blkno_t	bl_radix;	/* coverage radix */
	blist_blkno_t	bl_skip;	/* starting skip */
	blist_blkno_t	bl_free;	/* number of free blocks */
	blmeta_t	*bl_root;	/* root of radix tree */
	blist_blkno_t	bl_rootblks;	/* blks allocated for tree */
};

#define BLIST_META_RADIX	16

/*
 * static support functions
 */

static blist_blkno_t blst_leaf_alloc(blmeta_t *scan, blist_blkno_t blk,
    int count);
static blist_blkno_t blst_meta_alloc(blmeta_t *scan, blist_blkno_t blk,
    blist_blkno_t count, blist_blkno_t radix, blist_blkno_t skip);
static void blst_leaf_free(blmeta_t *scan, blist_blkno_t relblk, int count);
static void blst_meta_free(blmeta_t *scan, blist_blkno_t freeBlk,
    blist_blkno_t count, blist_blkno_t radix, blist_blkno_t skip,
    blist_blkno_t blk);
static void blst_copy(blmeta_t *scan, blist_blkno_t blk, blist_blkno_t radix,
    blist_blkno_t skip, blist_t dest, blist_blkno_t count);
static int blst_leaf_fill(blmeta_t *scan, blist_blkno_t blk, int count);
static blist_blkno_t blst_meta_fill(blmeta_t *scan, blist_blkno_t allocBlk,
    blist_blkno_t count, blist_blkno_t radix, blist_blkno_t skip,
    blist_blkno_t blk);
static blist_blkno_t blst_radix_init(blmeta_t *scan, blist_blkno_t radix,
    blist_blkno_t skip, blist_blkno_t count);
#ifndef _KERNEL
static void blst_radix_print(blmeta_t *scan, blist_blkno_t blk,
    blist_blkno_t radix, blist_blkno_t skip, int tab);
#endif

#ifdef _KERNEL
static MALLOC_DEFINE(M_BLIST, "blist", "Bitmap allocator");
#endif

/*
 * blist_create() - create a blist capable of handling up to the specified
 *		    number of blocks
 *
 *	blocks must be greater than 0
 *
 *	The smallest blist consists of a single leaf node capable of
 *	managing BLIST_BMAP_RADIX blocks.
 */

blist_t
blist_create(blist_blkno_t blocks)
{
	blist_t bl;
	blist_blkno_t radix;
	blist_blkno_t skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 *
	 * XXX check overflow
	 */
	radix = BLIST_BMAP_RADIX;

	while (radix < blocks) {
		radix *= BLIST_META_RADIX;
		skip = (skip + 1) * BLIST_META_RADIX;
	}

	bl = malloc(sizeof(struct blist), M_BLIST, M_WAITOK | M_ZERO);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 +
	    blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
	bl->bl_root = malloc(sizeof(blmeta_t) * bl->bl_rootblks, M_BLIST, M_WAITOK);

#if defined(BLIST_DEBUG)
	printf(
	    "BLIST representing %" PRIu64 " blocks (%" PRIu64 " MB of swap)"
	    ", requiring %" PRIu64 "K of ram\n",
	    (uint64_t)bl->bl_blocks,
	    (uint64_t)bl->bl_blocks * 4 / 1024,
	    ((uint64_t)bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024
	);
	printf("BLIST raw radix tree contains %" PRIu64 " records\n",
	    (uint64_t)bl->bl_rootblks);
#endif
	blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks);

	return(bl);
}
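
/*
 * Minimal usage sketch (mirrors the stand-alone main() at the bottom
 * of this file): a fresh blist starts fully allocated, so the caller
 * frees the usable range before carving blocks out of it.
 */
#if 0
static void
blist_usage_sketch(void)
{
	blist_t bl = blist_create(1024);	/* all 1024 blocks allocated */
	blist_blkno_t blk;

	blist_free(bl, 0, 1024);		/* mark the whole range free */
	blk = blist_alloc(bl, 16);		/* 16 contiguous blocks */
	if (blk != BLIST_NONE)
		blist_free(bl, blk, 16);	/* return them */
	blist_destroy(bl);
}
#endif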

void
blist_destroy(blist_t bl)
{
	free(bl->bl_root, M_BLIST);
	free(bl, M_BLIST);
}

/*
 * blist_alloc() -	reserve space in the block bitmap.  Return the base
 *			of a contiguous region or BLIST_NONE if space could
 *			not be allocated.
 */

blist_blkno_t
blist_alloc(blist_t bl, blist_blkno_t count)
{
	blist_blkno_t blk = BLIST_NONE;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blk = blst_leaf_alloc(bl->bl_root, 0, count);
		else
			blk = blst_meta_alloc(bl->bl_root, 0, count, bl->bl_radix, bl->bl_skip);
		if (blk != BLIST_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

/*
 * blist_free() -	free up space in the block bitmap.  Return the base
 *			of a contiguous region.  Panic if an inconsistency is
 *			found.
 */

void
blist_free(blist_t bl, blist_blkno_t blkno, blist_blkno_t count)
{
	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blst_leaf_free(bl->bl_root, blkno, count);
		else
			blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free += count;
	}
}

/*
 * blist_fill() -	mark a region in the block bitmap as off-limits
 *			to the allocator (i.e. allocate it), ignoring any
 *			existing allocations.  Return the number of blocks
 *			actually filled that were free before the call.
 */

blist_blkno_t
blist_fill(blist_t bl, blist_blkno_t blkno, blist_blkno_t count)
{
	blist_blkno_t filled;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			filled = blst_leaf_fill(bl->bl_root, blkno, count);
		else
			filled = blst_meta_fill(bl->bl_root, blkno, count,
			    bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free -= filled;
		return filled;
	} else
		return 0;
}

/*
 * blist_resize() -	resize an existing radix tree to handle the
 *			specified number of blocks.  This will reallocate
 *			the tree and transfer the previous bitmap to the new
 *			one.  When extending the tree you can specify whether
 *			the new blocks are to be left allocated or freed.
 */

void
blist_resize(blist_t *pbl, blist_blkno_t count, int freenew)
{
	blist_t newbl = blist_create(count);
	blist_t save = *pbl;

	*pbl = newbl;
	if (count > save->bl_blocks)
		count = save->bl_blocks;
	blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count);

	/*
	 * If resizing upwards, should we free the new space or not?
	 */
	if (freenew && count < newbl->bl_blocks) {
		blist_free(newbl, count, newbl->bl_blocks - count);
	}
	blist_destroy(save);
}

#ifdef BLIST_DEBUG

/*
 * blist_print()    - dump radix tree
 */

void
blist_print(blist_t bl)
{
	printf("BLIST {\n");
	blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
	printf("}\n");
}

#endif

/************************************************************************
 *			ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 *
 */

/*
 * blst_leaf_alloc() -	allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the BLIST_BMAP_RADIX block allocation cases.  Other cases are
 *	somewhat slower.  The 1 block allocation case is log2 and extremely
 *	quick.
 */

static blist_blkno_t
blst_leaf_alloc(
	blmeta_t *scan,
	blist_blkno_t blk,
	int count
) {
	blist_bitmap_t orig = scan->u.bmu_bitmap;

	if (orig == 0) {
		/*
		 * Optimize bitmap all-allocated case.  Also, count = 1
		 * case assumes at least 1 bit is free in the bitmap, so
		 * we have to take care of this case here.
		 */
		scan->bm_bighint = 0;
		return(BLIST_NONE);
	}
	if (count == 1) {
		/*
		 * Optimized code to allocate one bit out of the bitmap
		 */
		blist_bitmap_t mask;
		int j = BLIST_BMAP_RADIX/2;
		int r = 0;

		mask = (blist_bitmap_t)-1 >> (BLIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
				r += j;
				orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
		scan->u.bmu_bitmap &= ~((blist_bitmap_t)1 << r);
		return(blk + r);
	}
	if (count <= BLIST_BMAP_RADIX) {
		/*
		 * non-optimized code to allocate N bits out of the bitmap.
		 * The more bits, the faster the code runs.  It will run
		 * the slowest allocating 2 bits, but since there aren't any
		 * memory ops in the core loop (or shouldn't be, anyway),
		 * you probably won't notice the difference.
		 */
		int j;
		int n = BLIST_BMAP_RADIX - count;
		blist_bitmap_t mask;

		mask = (blist_bitmap_t)-1 >> n;

		for (j = 0; j <= n; ++j) {
			if ((orig & mask) == mask) {
				scan->u.bmu_bitmap &= ~mask;
				return(blk + j);
			}
			mask = (mask << 1);
		}
	}
	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;
	return(BLIST_NONE);
}
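
/*
 * Worked example of the count == 1 scan above (a sketch, assuming a
 * 32-bit leaf): the loop is a binary search for the lowest set bit,
 * which is why the single-block case is log2 time.
 */
#if 0
static int
lowest_set_bit_sketch(uint32_t orig)
{
	uint32_t mask = (uint32_t)-1 >> 16;	/* covers the low half */
	int j = 16;
	int r = 0;

	while (j) {
		if ((orig & mask) == 0) {	/* low half empty: skip it */
			r += j;
			orig >>= j;
		}
		j >>= 1;
		mask >>= j;			/* halve the search window */
	}
	return r;	/* e.g. orig = 0x00010000 yields r = 16 */
}
#endif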

/*
 * blst_meta_alloc() -	allocate at a meta in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */

static blist_blkno_t
blst_meta_alloc(
	blmeta_t *scan,
	blist_blkno_t blk,
	blist_blkno_t count,
	blist_blkno_t radix,
	blist_blkno_t skip
) {
	blist_blkno_t i;
	blist_blkno_t next_skip = (skip / BLIST_META_RADIX);

	if (scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case
		 */
		scan->bm_bighint = count;
		return(BLIST_NONE);
	}

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize uninitialized
		 * sublevel.
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (blist_blkno_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (blist_bitmap_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = radix;
				scan[i].u.bmu_avail = radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (blist_blkno_t)-1) {
			/*
			 * Terminator
			 */
			break;
		} else if (count <= scan[i].bm_bighint) {
			/*
			 * count fits in object
			 */
			blist_blkno_t r;
			if (next_skip == 1) {
				r = blst_leaf_alloc(&scan[i], blk, count);
			} else {
				r = blst_meta_alloc(&scan[i], blk, count, radix, next_skip - 1);
			}
			if (r != BLIST_NONE) {
				scan->u.bmu_avail -= count;
				if (scan->bm_bighint > scan->u.bmu_avail)
					scan->bm_bighint = scan->u.bmu_avail;
				return(r);
			}
		} else if (count > radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */
			panic("blist_meta_alloc: allocation too large");
		}
		blk += radix;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (scan->bm_bighint >= count)
		scan->bm_bighint = count - 1;
	return(BLIST_NONE);
}

/*
 * BLST_LEAF_FREE() -	free allocated block from leaf bitmap
 *
 */

static void
blst_leaf_free(
	blmeta_t *scan,
	blist_blkno_t blk,
	int count
) {
	/*
	 * free some data in this bitmap
	 *
	 * e.g.
	 *	0000111111111110000
	 *	\_________/\__/
	 *	v        n
	 */
	int n = blk & (BLIST_BMAP_RADIX - 1);
	blist_bitmap_t mask;

	mask = ((blist_bitmap_t)-1 << n) &
	    ((blist_bitmap_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	if (scan->u.bmu_bitmap & mask)
		panic("blst_radix_free: freeing free block");
	scan->u.bmu_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = BLIST_BMAP_RADIX;
}
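
/*
 * Sketch of the mask construction used above and in blst_leaf_fill():
 * for count = 3 blocks at leaf offset n = 4 in an assumed 32-bit
 * bitmap, intersecting 'ones shifted up past n' with 'ones truncated
 * above n + count' leaves exactly bits 4..6 set.
 */
#if 0
uint32_t example_mask =
    ((uint32_t)-1 << 4) &		/* 0xfffffff0 */
    ((uint32_t)-1 >> (32 - 3 - 4));	/* 0x0000007f */
					/* result: 0x00000070 */
#endif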

/*
 * BLST_META_FREE() - free allocated blocks from radix tree meta info
 *
 *	This support routine frees a range of blocks from the bitmap.
 *	The range must be entirely enclosed by this radix node.  If a
 *	meta node, we break the range down recursively to free blocks
 *	in subnodes (which means that this code can free an arbitrary
 *	range whereas the allocation code cannot allocate an arbitrary
 *	range).
 */

static void
blst_meta_free(
	blmeta_t *scan,
	blist_blkno_t freeBlk,
	blist_blkno_t count,
	blist_blkno_t radix,
	blist_blkno_t skip,
	blist_blkno_t blk
) {
	blist_blkno_t i;
	blist_blkno_t next_skip = (skip / BLIST_META_RADIX);

#if 0
	printf("FREE (%" PRIx64 ",%" PRIu64
	    ") FROM (%" PRIx64 ",%" PRIu64 ")\n",
	    (uint64_t)freeBlk, (uint64_t)count,
	    (uint64_t)blk, (uint64_t)radix
	);
#endif

	if (scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case, with possible
		 * shortcut to ALL-FREE special case.
		 */
		scan->u.bmu_avail = count;
		scan->bm_bighint = count;

		if (count != radix) {
			for (i = 1; i <= skip; i += next_skip) {
				if (scan[i].bm_bighint == (blist_blkno_t)-1)
					break;
				scan[i].bm_bighint = 0;
				if (next_skip == 1) {
					scan[i].u.bmu_bitmap = 0;
				} else {
					scan[i].u.bmu_avail = 0;
				}
			}
			/* fall through */
		}
	} else {
		scan->u.bmu_avail += count;
		/* scan->bm_bighint = radix; */
	}

	/*
	 * ALL-FREE special case.
	 */

	if (scan->u.bmu_avail == radix)
		return;
	if (scan->u.bmu_avail > radix)
		panic("blst_meta_free: freeing already free blocks (%"
		    PRIu64 ") %" PRIu64 "/%" PRIu64,
		    (uint64_t)count,
		    (uint64_t)scan->u.bmu_avail,
		    (uint64_t)radix);

	/*
	 * Break the free down into its components
	 */

	radix /= BLIST_META_RADIX;

	i = (freeBlk - blk) / radix;
	blk += i * radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < freeBlk + count) {
		blist_blkno_t v;

		v = blk + radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (blist_blkno_t)-1)
			panic("blst_meta_free: freeing unexpected range");

		if (next_skip == 1) {
			blst_leaf_free(&scan[i], freeBlk, v);
		} else {
			blst_meta_free(&scan[i], freeBlk, v, radix, next_skip - 1, blk);
		}
		if (scan->bm_bighint < scan[i].bm_bighint)
			scan->bm_bighint = scan[i].bm_bighint;
		count -= v;
		freeBlk += v;
		blk += radix;
		i += next_skip;
	}
}
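
/*
 * Index arithmetic sketch for the descent above (hypothetical
 * numbers: child radix 32, next_skip 1, node based at block 0).
 */
#if 0
uint64_t child = 100 / 32;	/* freeing block 100 selects child 3 */
uint64_t base  = child * 32;	/* that child covers blocks 96..127 */
uint64_t index = child * 1 + 1;	/* its blmeta_t lives at scan[4] */
#endif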

/*
 * BLIST_RADIX_COPY() - copy one radix tree to another
 *
 *	Locates free space in the source tree and frees it in the destination
 *	tree.  The space may not already be free in the destination.
 */

static void blst_copy(
	blmeta_t *scan,
	blist_blkno_t blk,
	blist_blkno_t radix,
	blist_blkno_t skip,
	blist_t dest,
	blist_blkno_t count
) {
	blist_blkno_t next_skip;
	blist_blkno_t i;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		blist_bitmap_t v = scan->u.bmu_bitmap;

		if (v == (blist_bitmap_t)-1) {
			blist_free(dest, blk, count);
		} else if (v != 0) {
			int i;

			for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) {
				if (v & ((blist_bitmap_t)1 << i))
					blist_free(dest, blk + i, 1);
			}
		}
		return;
	}

	/*
	 * Meta node
	 */

	if (scan->u.bmu_avail == 0) {
		/*
		 * Source all allocated, leave dest allocated
		 */
		return;
	}
	if (scan->u.bmu_avail == radix) {
		/*
		 * Source all free, free entire dest
		 */
		if (count < radix)
			blist_free(dest, blk, count);
		else
			blist_free(dest, blk, radix);
		return;
	}


	radix /= BLIST_META_RADIX;
	next_skip = (skip / BLIST_META_RADIX);

	for (i = 1; count && i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (blist_blkno_t)-1)
			break;

		if (count >= radix) {
			blst_copy(
			    &scan[i],
			    blk,
			    radix,
			    next_skip - 1,
			    dest,
			    radix
			);
			count -= radix;
		} else {
			if (count) {
				blst_copy(
				    &scan[i],
				    blk,
				    radix,
				    next_skip - 1,
				    dest,
				    count
				);
			}
			count = 0;
		}
		blk += radix;
	}
}

/*
 * BLST_LEAF_FILL() -	allocate specific blocks in leaf bitmap
 *
 *	This routine allocates all blocks in the specified range
 *	regardless of any existing allocations in that range.  Returns
 *	the number of blocks allocated by the call.
 */

static int
blst_leaf_fill(blmeta_t *scan, blist_blkno_t blk, int count)
{
	int n = blk & (BLIST_BMAP_RADIX - 1);
	int nblks;
	blist_bitmap_t mask, bitmap;

	mask = ((blist_bitmap_t)-1 << n) &
	    ((blist_bitmap_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	/* Count the number of blocks we're about to allocate */
	bitmap = scan->u.bmu_bitmap & mask;
	for (nblks = 0; bitmap != 0; nblks++)
		bitmap &= bitmap - 1;

	scan->u.bmu_bitmap &= ~mask;
	return nblks;
}
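
/*
 * The counting loop above is Kernighan's population count: each
 * 'bitmap &= bitmap - 1' clears the lowest set bit, so the loop
 * iterates once per block being newly allocated.  A stand-alone
 * sketch:
 */
#if 0
static int
popcount_sketch(uint32_t bitmap)
{
	int nblks;

	for (nblks = 0; bitmap != 0; nblks++)
		bitmap &= bitmap - 1;	/* clear the lowest set bit */
	return nblks;			/* e.g. 0x70 yields 3 */
}
#endif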

/*
 * BLIST_META_FILL() -	allocate specific blocks at a meta node
 *
 *	This routine allocates the specified range of blocks,
 *	regardless of any existing allocations in the range.  The
 *	range must be within the extent of this node.  Returns the
 *	number of blocks allocated by the call.
 */
static blist_blkno_t
blst_meta_fill(
	blmeta_t *scan,
	blist_blkno_t allocBlk,
	blist_blkno_t count,
	blist_blkno_t radix,
	blist_blkno_t skip,
	blist_blkno_t blk
) {
	blist_blkno_t i;
	blist_blkno_t next_skip = (skip / BLIST_META_RADIX);
	blist_blkno_t nblks = 0;

	if (count == radix || scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case
		 */
		nblks = scan->u.bmu_avail;
		scan->u.bmu_avail = 0;
		scan->bm_bighint = count;
		return nblks;
	}

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize sublevel
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (blist_blkno_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (blist_bitmap_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = radix;
				scan[i].u.bmu_avail = radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	if (count > radix)
		panic("blist_meta_fill: allocation too large");

	i = (allocBlk - blk) / radix;
	blk += i * radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < allocBlk + count) {
		blist_blkno_t v;

		v = blk + radix - allocBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (blist_blkno_t)-1)
			panic("blst_meta_fill: filling unexpected range");

		if (next_skip == 1) {
			nblks += blst_leaf_fill(&scan[i], allocBlk, v);
		} else {
			nblks += blst_meta_fill(&scan[i], allocBlk, v,
			    radix, next_skip - 1, blk);
		}
		count -= v;
		allocBlk += v;
		blk += radix;
		i += next_skip;
	}
	scan->u.bmu_avail -= nblks;
	return nblks;
}

/*
 * BLST_RADIX_INIT() - initialize radix tree
 *
 *	Initialize our meta structures and bitmaps and calculate the exact
 *	amount of space required to manage 'count' blocks - this space may
 *	be considerably less than the calculated radix due to the large
 *	RADIX values we use.
 */

static blist_blkno_t
blst_radix_init(blmeta_t *scan, blist_blkno_t radix, blist_blkno_t skip,
    blist_blkno_t count)
{
	blist_blkno_t i;
	blist_blkno_t next_skip;
	blist_blkno_t memindex = 0;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		if (scan) {
			scan->bm_bighint = 0;
			scan->u.bmu_bitmap = 0;
		}
		return(memindex);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */

	if (scan) {
		scan->bm_bighint = 0;
		scan->u.bmu_avail = 0;
	}

	radix /= BLIST_META_RADIX;
	next_skip = (skip / BLIST_META_RADIX);

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= radix) {
			/*
			 * Allocate the entire object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    radix
			);
			count -= radix;
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    count
			);
			count = 0;
		} else {
			/*
			 * Add terminator and break out
			 */
			if (scan)
				scan[i].bm_bighint = (blist_blkno_t)-1;
			break;
		}
	}
	if (memindex < i)
		memindex = i;
	return(memindex);
}

#ifdef BLIST_DEBUG

static void
blst_radix_print(blmeta_t *scan, blist_blkno_t blk, blist_blkno_t radix,
    blist_blkno_t skip, int tab)
{
	blist_blkno_t i;
	blist_blkno_t next_skip;
	int lastState = 0;

	if (radix == BLIST_BMAP_RADIX) {
		printf(
		    "%*.*s(%0*" PRIx64 ",%" PRIu64
		    "): bitmap %0*" PRIx64 " big=%" PRIu64 "\n",
		    tab, tab, "",
		    (int)(sizeof(blk) * 2),
		    (uint64_t)blk,
		    (uint64_t)radix,
		    (int)(sizeof(scan->u.bmu_bitmap) * 2),
		    (uint64_t)scan->u.bmu_bitmap,
		    (uint64_t)scan->bm_bighint
		);
		return;
	}

	if (scan->u.bmu_avail == 0) {
		printf(
		    "%*.*s(%0*" PRIx64 ",%" PRIu64 ") ALL ALLOCATED\n",
		    tab, tab, "",
		    (int)(sizeof(blk) * 2),
		    (uint64_t)blk,
		    (uint64_t)radix
		);
		return;
	}
	if (scan->u.bmu_avail == radix) {
		printf(
		    "%*.*s(%0*" PRIx64 ",%" PRIu64 ") ALL FREE\n",
		    tab, tab, "",
		    (int)(sizeof(blk) * 2),
		    (uint64_t)blk,
		    (uint64_t)radix
		);
		return;
	}

	printf(
	    "%*.*s(%0*" PRIx64 ",%" PRIu64 "): subtree (%" PRIu64 "/%"
	    PRIu64 ") big=%" PRIu64 " {\n",
	    tab, tab, "",
	    (int)(sizeof(blk) * 2),
	    (uint64_t)blk,
	    (uint64_t)radix,
	    (uint64_t)scan->u.bmu_avail,
	    (uint64_t)radix,
	    (uint64_t)scan->bm_bighint
	);

	radix /= BLIST_META_RADIX;
	next_skip = (skip / BLIST_META_RADIX);
	tab += 4;

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (blist_blkno_t)-1) {
			printf(
			    "%*.*s(%0*" PRIx64 ",%" PRIu64 "): Terminator\n",
			    tab, tab, "",
			    (int)(sizeof(blk) * 2),
			    (uint64_t)blk,
			    (uint64_t)radix
			);
			lastState = 0;
			break;
		}
		blst_radix_print(
		    &scan[i],
		    blk,
		    radix,
		    next_skip - 1,
		    tab
		);
		blk += radix;
	}
	tab -= 4;

	printf(
	    "%*.*s}\n",
	    tab, tab, ""
	);
}

#endif

#ifdef BLIST_DEBUG

int
main(int ac, char **av)
{
	blist_blkno_t size = 1024;
	int i;
	blist_t bl;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			size = strtol(ptr, NULL, 0);
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	bl = blist_create(size);
	blist_free(bl, 0, size);

	for (;;) {
		char buf[1024];
		uint64_t da = 0;
		uint64_t count = 0;

		printf("%" PRIu64 "/%" PRIu64 "/%" PRIu64 "> ",
		    (uint64_t)bl->bl_free,
		    (uint64_t)size,
		    (uint64_t)bl->bl_radix);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'r':
			if (sscanf(buf + 1, "%" SCNu64, &count) == 1) {
				blist_resize(&bl, count, 1);
			} else {
				printf("?\n");
			}
			/* FALLTHROUGH: print the resized tree */
		case 'p':
			blist_print(bl);
			break;
		case 'a':
			if (sscanf(buf + 1, "%" SCNu64, &count) == 1) {
				blist_blkno_t blk = blist_alloc(bl, count);
				printf(" R=%0*" PRIx64 "\n",
				    (int)(sizeof(blk) * 2),
				    (uint64_t)blk);
			} else {
				printf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%" SCNx64 " %" SCNu64,
			    &da, &count) == 2) {
				blist_free(bl, da, count);
			} else {
				printf("?\n");
			}
			break;
		case 'l':
			if (sscanf(buf + 1, "%" SCNx64 " %" SCNu64,
			    &da, &count) == 2) {
				printf(" n=%" PRIu64 "\n",
				    (uint64_t)blist_fill(bl, da, count));
			} else {
				printf("?\n");
			}
			break;
		case '?':
		case 'h':
			puts(
			    "p          -print\n"
			    "a %d       -allocate\n"
			    "f %x %d    -free\n"
			    "l %x %d    -fill\n"
			    "r %d       -resize\n"
			    "h/?        -help"
			);
			break;
		default:
			printf("?\n");
			break;
		}
	}
	return(0);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	va_end(va);
	exit(1);
}

#endif