/*	$NetBSD: subr_blist.c,v 1.8.2.1 2006/02/01 14:52:20 yamt Exp $	*/

/*-
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * BLIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting
 *
 *	This module implements a general bitmap allocator/deallocator.  The
 *	allocator eats around 2 bits per 'block'.  The module does not
 *	try to interpret the meaning of a 'block' other than to return
 *	BLIST_NONE on an allocation failure.
 *
 *	A radix tree is used to maintain the bitmap.  Two radix constants are
 *	involved:  One for the bitmaps contained in the leaf nodes (typically
 *	32), and one for the meta nodes (typically 16).  Both meta and leaf
 *	nodes have a hint field.  This field gives us a hint as to the largest
 *	free contiguous range of blocks under the node.  It may contain a
 *	value that is too high, but will never contain a value that is too
 *	low.  When the radix tree is searched, allocation failures in subtrees
 *	update the hint.
 *
 *	The radix tree also implements two collapsed states for meta nodes:
 *	the ALL-ALLOCATED state and the ALL-FREE state.  If a meta node is
 *	in either of these two states, all information contained underneath
 *	the node is considered stale.  These states are used to optimize
 *	allocation and freeing operations.
 *
 *	The hinting greatly increases code efficiency for allocations while
 *	the general radix structure optimizes both allocations and frees.  The
 *	radix tree should be able to operate well no matter how much
 *	fragmentation there is and no matter how large a bitmap is used.
 *
 *	Unlike the rlist code, the blist code wires all necessary memory at
 *	creation time.  Neither allocations nor frees require interaction with
 *	the memory subsystem.  In contrast, the rlist code may allocate memory
 *	on an rlist_free() call.  The non-blocking features of the blist code
 *	are used to great advantage in the swap code (vm/nswap_pager.c).  The
 *	rlist code uses a little less overall memory than the blist code (but
 *	due to swap interleaving not all that much less), but the blist code
 *	scales much, much better.
 *
 *	LAYOUT: The radix tree is laid out recursively using a
 *	linear array.  Each meta node is immediately followed (laid out
 *	sequentially in memory) by BLIST_META_RADIX lower level nodes.  This
 *	is a recursive structure but one that can be easily scanned through
 *	a very simple 'skip' calculation.  In order to support large radixes,
 *	portions of the tree may reside outside our memory allocation.  We
 *	handle this with an early-termination optimization (when bighint is
 *	set to -1) on the scan.  The memory allocation is only large enough
 *	to cover the number of blocks requested at creation time even if it
 *	must be encompassed in a larger root-node radix.
 *
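 *	For example (assuming the typical 32-bit leaf bitmap and the
 *	BLIST_META_RADIX of 16 defined below): a tree covering up to
 *	32*16 = 512 blocks has radix 512 and skip 16, with its 16 leaf
 *	bitmaps at scan[1]..scan[16].  A tree covering up to 8192 blocks
 *	has radix 8192 and skip 272; the root's children then sit at
 *	scan[1], scan[18], scan[35], ... (stride next_skip = 272/16 = 17),
 *	each child owning 17 slots: itself plus its 16 leaves.
 *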
 *	NOTE: the allocator cannot currently allocate more than
 *	BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 *	large' if you try.  This is an area that could use improvement.  The
 *	radix is large enough that this restriction does not affect the swap
 *	system, though.  Currently only the allocation code is affected by
 *	this algorithmic unfeature.  The freeing code can handle arbitrary
 *	ranges.
 *
 *	This code can be compiled stand-alone for debugging.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_blist.c,v 1.8.2.1 2006/02/01 14:52:20 yamt Exp $");
#if 0
__FBSDID("$FreeBSD: src/sys/kern/subr_blist.c,v 1.17 2004/06/04 04:03:25 alc Exp $");
#endif

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/blist.h>
#include <sys/malloc.h>

#else

#ifndef BLIST_NO_DEBUG
#define BLIST_DEBUG
#endif

#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <inttypes.h>

#define malloc(a,b,c)	calloc(a, 1)
#define free(a,b)	free(a)

#include "../sys/blist.h"

void panic(const char *ctl, ...);

#endif

/*
 * blmeta and blist_bitmap_t MUST be a power of 2 in size.
 */

typedef struct blmeta {
	union {
		blist_blkno_t	bmu_avail; /* space available under us	*/
		blist_bitmap_t	bmu_bitmap; /* bitmap if we are a leaf	*/
	} u;
	blist_blkno_t	bm_bighint;	/* biggest contiguous block hint*/
} blmeta_t;

struct blist {
	blist_blkno_t		bl_blocks;	/* area of coverage		*/
	blist_blkno_t		bl_radix;	/* coverage radix		*/
	blist_blkno_t		bl_skip;	/* starting skip		*/
	blist_blkno_t		bl_free;	/* number of free blocks	*/
	blmeta_t	*bl_root;	/* root of radix tree		*/
	blist_blkno_t		bl_rootblks;	/* blks allocated for tree */
};

#define BLIST_META_RADIX	16

/*
 * static support functions
 */

static blist_blkno_t blst_leaf_alloc(blmeta_t *scan, blist_blkno_t blk,
    int count);
static blist_blkno_t blst_meta_alloc(blmeta_t *scan, blist_blkno_t blk,
    blist_blkno_t count, blist_blkno_t radix, blist_blkno_t skip);
static void blst_leaf_free(blmeta_t *scan, blist_blkno_t relblk, int count);
static void blst_meta_free(blmeta_t *scan, blist_blkno_t freeBlk,
    blist_blkno_t count, blist_blkno_t radix, blist_blkno_t skip,
    blist_blkno_t blk);
static void blst_copy(blmeta_t *scan, blist_blkno_t blk, blist_blkno_t radix,
    blist_blkno_t skip, blist_t dest, blist_blkno_t count);
static int blst_leaf_fill(blmeta_t *scan, blist_blkno_t blk, int count);
static blist_blkno_t blst_meta_fill(blmeta_t *scan, blist_blkno_t allocBlk,
    blist_blkno_t count, blist_blkno_t radix, blist_blkno_t skip,
    blist_blkno_t blk);
static blist_blkno_t blst_radix_init(blmeta_t *scan, blist_blkno_t radix,
    blist_blkno_t skip, blist_blkno_t count);
#ifndef _KERNEL
static void blst_radix_print(blmeta_t *scan, blist_blkno_t blk,
    blist_blkno_t radix, blist_blkno_t skip, int tab);
#endif

#ifdef _KERNEL
static MALLOC_DEFINE(M_BLIST, "blist", "Bitmap allocator");
#endif

/*
 * blist_create() - create a blist capable of handling up to the specified
 *		    number of blocks
 *
 *	blocks must be greater than 0
 *
 *	The smallest blist consists of a single leaf node capable of
 *	managing BLIST_BMAP_RADIX blocks.
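 *
 *	Usage sketch (hypothetical caller; a freshly created blist starts
 *	out all-allocated, so callers normally free the managed range
 *	first, just as the debug main() at the bottom of this file does):
 *
 *		blist_t bl = blist_create(1024);
 *		blist_blkno_t blk;
 *
 *		blist_free(bl, 0, 1024);	-- release the whole range
 *		blk = blist_alloc(bl, 16);	-- 16 contiguous blocks
 *		if (blk != BLIST_NONE)
 *			blist_free(bl, blk, 16);
 *		blist_destroy(bl);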
 */

blist_t
blist_create(blist_blkno_t blocks)
{
	blist_t bl;
	blist_blkno_t radix;
	blist_blkno_t skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 *
	 * XXX check overflow
	 */
	radix = BLIST_BMAP_RADIX;

	while (radix < blocks) {
		radix *= BLIST_META_RADIX;
		skip = (skip + 1) * BLIST_META_RADIX;
	}
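	/*
	 * At this point radix is the smallest BLIST_BMAP_RADIX *
	 * BLIST_META_RADIX^n that covers 'blocks', and skip is the number
	 * of array slots taken up by the root's child subtrees.  For
	 * example, the default 1024-block tree built by the debug main()
	 * below ends up with radix 8192 and skip 272, assuming a 32-bit
	 * leaf bitmap.
	 */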

	bl = malloc(sizeof(struct blist), M_BLIST, M_WAITOK | M_ZERO);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 +
	    blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
	bl->bl_root = malloc(sizeof(blmeta_t) * bl->bl_rootblks, M_BLIST, M_WAITOK);

#if defined(BLIST_DEBUG)
	printf(
		"BLIST representing %" PRIu64 " blocks (%" PRIu64 " MB of swap)"
		", requiring %" PRIu64 "K of ram\n",
		(uint64_t)bl->bl_blocks,
		(uint64_t)bl->bl_blocks * 4 / 1024,
		((uint64_t)bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024
	);
	printf("BLIST raw radix tree contains %" PRIu64 " records\n",
	    (uint64_t)bl->bl_rootblks);
#endif
	blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks);

	return(bl);
}

void
blist_destroy(blist_t bl)
{
	free(bl->bl_root, M_BLIST);
	free(bl, M_BLIST);
}

/*
 * blist_alloc() - reserve space in the block bitmap.  Return the base
 *		     of a contiguous region or BLIST_NONE if space could
 *		     not be allocated.
 */

blist_blkno_t
blist_alloc(blist_t bl, blist_blkno_t count)
{
	blist_blkno_t blk = BLIST_NONE;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blk = blst_leaf_alloc(bl->bl_root, 0, count);
		else
			blk = blst_meta_alloc(bl->bl_root, 0, count, bl->bl_radix, bl->bl_skip);
		if (blk != BLIST_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

/*
 * blist_free() -	free up space in the block bitmap.  The range must
 *			have been allocated previously.  Panic if an
 *			inconsistency is found.
 */

void
blist_free(blist_t bl, blist_blkno_t blkno, blist_blkno_t count)
{
	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blst_leaf_free(bl->bl_root, blkno, count);
		else
			blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free += count;
	}
}

/*
 * blist_fill() -	mark a region in the block bitmap as off-limits
 *			to the allocator (i.e. allocate it), ignoring any
 *			existing allocations.  Return the number of blocks
 *			actually filled that were free before the call.
 */

blist_blkno_t
blist_fill(blist_t bl, blist_blkno_t blkno, blist_blkno_t count)
{
	blist_blkno_t filled;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			filled = blst_leaf_fill(bl->bl_root, blkno, count);
		else
			filled = blst_meta_fill(bl->bl_root, blkno, count,
			    bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free -= filled;
		return filled;
	} else
		return 0;
}

/*
 * blist_resize() -	resize an existing radix tree to handle the
 *			specified number of blocks.  This will reallocate
 *			the tree and transfer the previous bitmap to the new
 *			one.  When extending the tree you can specify whether
 *			the new blocks are to be left allocated or freed.
 */

void
blist_resize(blist_t *pbl, blist_blkno_t count, int freenew)
{
    blist_t newbl = blist_create(count);
    blist_t save = *pbl;

    *pbl = newbl;
    if (count > save->bl_blocks)
	    count = save->bl_blocks;
    blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count);

    /*
     * If resizing upwards, should we free the new space or not?
     */
    if (freenew && count < newbl->bl_blocks) {
	    blist_free(newbl, count, newbl->bl_blocks - count);
    }
    blist_destroy(save);
}

#ifdef BLIST_DEBUG

/*
 * blist_print()    - dump radix tree
 */

void
blist_print(blist_t bl)
{
	printf("BLIST {\n");
	blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
	printf("}\n");
}

#endif

/************************************************************************
 *			  ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 *
 */

/*
 * blst_leaf_alloc() -	allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the BLIST_BMAP_RADIX block allocation cases.  Other cases are
 *	somewhat slower.  The 1 block allocation case is log2 and extremely
 *	quick.
 */

static blist_blkno_t
blst_leaf_alloc(
	blmeta_t *scan,
	blist_blkno_t blk,
	int count
) {
	blist_bitmap_t orig = scan->u.bmu_bitmap;

	if (orig == 0) {
		/*
		 * Optimize bitmap all-allocated case.  Also, count = 1
		 * case assumes at least 1 bit is free in the bitmap, so
		 * we have to take care of this case here.
		 */
		scan->bm_bighint = 0;
		return(BLIST_NONE);
	}
	if (count == 1) {
		/*
		 * Optimized code to allocate one bit out of the bitmap
		 */
		blist_bitmap_t mask;
		int j = BLIST_BMAP_RADIX/2;
		int r = 0;

		mask = (blist_bitmap_t)-1 >> (BLIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
			    r += j;
			    orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
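		/*
		 * The loop above is a binary search for the lowest set
		 * bit of the original bitmap; r is now its index (e.g.
		 * orig == 0x10 leaves r == 4), so clear that bit and
		 * return the corresponding block.
		 */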
		scan->u.bmu_bitmap &= ~((blist_bitmap_t)1 << r);
		return(blk + r);
	}
	if (count <= BLIST_BMAP_RADIX) {
		/*
		 * non-optimized code to allocate N bits out of the bitmap.
		 * The more bits, the faster the code runs.  It will run
		 * the slowest allocating 2 bits, but since there aren't any
		 * memory ops in the core loop (or shouldn't be, anyway),
		 * you probably won't notice the difference.
		 */
		int j;
		int n = BLIST_BMAP_RADIX - count;
		blist_bitmap_t mask;

		mask = (blist_bitmap_t)-1 >> n;

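		/*
		 * mask covers the low 'count' bits; slide the window up
		 * one bit at a time and allocate at the first offset j
		 * where all 'count' bits are still free.
		 */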
		for (j = 0; j <= n; ++j) {
			if ((orig & mask) == mask) {
				scan->u.bmu_bitmap &= ~mask;
				return(blk + j);
			}
			mask = (mask << 1);
		}
	}
	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;
	return(BLIST_NONE);
}

/*
 * blst_meta_alloc() -	allocate at a meta node in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */

static blist_blkno_t
blst_meta_alloc(
	blmeta_t *scan,
	blist_blkno_t blk,
	blist_blkno_t count,
	blist_blkno_t radix,
	blist_blkno_t skip
) {
	blist_blkno_t i;
	blist_blkno_t next_skip = (skip / BLIST_META_RADIX);
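
	/*
	 * The BLIST_META_RADIX children of this node start at scan[1],
	 * scan[1 + next_skip], scan[1 + 2 * next_skip], ...; each child
	 * subtree owns next_skip array slots (the child itself plus the
	 * next_skip - 1 slots handed down to it as its skip).
	 */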

	if (scan->u.bmu_avail == 0)  {
		/*
		 * ALL-ALLOCATED special case
		 */
		scan->bm_bighint = count;
		return(BLIST_NONE);
	}

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize the uninitialized
		 * sublevel.
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (blist_blkno_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (blist_bitmap_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = radix;
				scan[i].u.bmu_avail = radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (blist_blkno_t)-1) {
			/*
			 * Terminator
			 */
			break;
		} else if (count <= scan[i].bm_bighint) {
			/*
			 * count fits in object
			 */
			blist_blkno_t r;
			if (next_skip == 1) {
				r = blst_leaf_alloc(&scan[i], blk, count);
			} else {
				r = blst_meta_alloc(&scan[i], blk, count, radix, next_skip - 1);
			}
			if (r != BLIST_NONE) {
				scan->u.bmu_avail -= count;
				if (scan->bm_bighint > scan->u.bmu_avail)
					scan->bm_bighint = scan->u.bmu_avail;
				return(r);
			}
		} else if (count > radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */
			panic("blist_meta_alloc: allocation too large");
		}
		blk += radix;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (scan->bm_bighint >= count)
		scan->bm_bighint = count - 1;
	return(BLIST_NONE);
}

/*
 * BLST_LEAF_FREE() -	free allocated block from leaf bitmap
 *
 */

static void
blst_leaf_free(
	blmeta_t *scan,
	blist_blkno_t blk,
	int count
) {
	/*
	 * free some data in this bitmap
	 *
	 * e.g.
	 *	0000111111111110000
	 *          \_________/\__/
	 *		v        n
	 */
	int n = blk & (BLIST_BMAP_RADIX - 1);
	blist_bitmap_t mask;

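	/*
	 * Build a mask with bits [n, n + count) set: exactly the blocks
	 * being freed within this leaf.
	 */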
	mask = ((blist_bitmap_t)-1 << n) &
	    ((blist_bitmap_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	if (scan->u.bmu_bitmap & mask)
		panic("blst_radix_free: freeing free block");
	scan->u.bmu_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = BLIST_BMAP_RADIX;
}

/*
 * BLST_META_FREE() - free allocated blocks from radix tree meta info
 *
 *	This support routine frees a range of blocks from the bitmap.
 *	The range must be entirely enclosed by this radix node.  If a
 *	meta node, we break the range down recursively to free blocks
 *	in subnodes (which means that this code can free an arbitrary
 *	range whereas the allocation code cannot allocate an arbitrary
 *	range).
 */

static void
blst_meta_free(
	blmeta_t *scan,
	blist_blkno_t freeBlk,
	blist_blkno_t count,
	blist_blkno_t radix,
	blist_blkno_t skip,
	blist_blkno_t blk
) {
	blist_blkno_t i;
	blist_blkno_t next_skip = (skip / BLIST_META_RADIX);

#if 0
	printf("FREE (%" PRIx64 ",%" PRIu64
	    ") FROM (%" PRIx64 ",%" PRIu64 ")\n",
	    (uint64_t)freeBlk, (uint64_t)count,
	    (uint64_t)blk, (uint64_t)radix
	);
#endif

	if (scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case, with possible
		 * shortcut to ALL-FREE special case.
		 */
		scan->u.bmu_avail = count;
		scan->bm_bighint = count;

		if (count != radix)  {
			for (i = 1; i <= skip; i += next_skip) {
				if (scan[i].bm_bighint == (blist_blkno_t)-1)
					break;
				scan[i].bm_bighint = 0;
				if (next_skip == 1) {
					scan[i].u.bmu_bitmap = 0;
				} else {
					scan[i].u.bmu_avail = 0;
				}
			}
			/* fall through */
		}
	} else {
		scan->u.bmu_avail += count;
		/* scan->bm_bighint = radix; */
	}

	/*
	 * ALL-FREE special case.
	 */

	if (scan->u.bmu_avail == radix)
		return;
	if (scan->u.bmu_avail > radix)
		panic("blst_meta_free: freeing already free blocks (%"
		    PRIu64 ") %" PRIu64 "/%" PRIu64,
		    (uint64_t)count,
		    (uint64_t)scan->u.bmu_avail,
		    (uint64_t)radix);

	/*
	 * Break the free down into its components
	 */

	radix /= BLIST_META_RADIX;

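	/*
	 * Locate the first child whose radix-sized range contains
	 * freeBlk: child k = (freeBlk - blk) / radix lives at array
	 * index k * next_skip + 1.
	 */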
	i = (freeBlk - blk) / radix;
	blk += i * radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < freeBlk + count) {
		blist_blkno_t v;

		v = blk + radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (blist_blkno_t)-1)
			panic("blst_meta_free: freeing unexpected range");

		if (next_skip == 1) {
			blst_leaf_free(&scan[i], freeBlk, v);
		} else {
			blst_meta_free(&scan[i], freeBlk, v, radix, next_skip - 1, blk);
		}
		if (scan->bm_bighint < scan[i].bm_bighint)
		    scan->bm_bighint = scan[i].bm_bighint;
		count -= v;
		freeBlk += v;
		blk += radix;
		i += next_skip;
	}
}

/*
 * BLST_COPY() - copy one radix tree to another
 *
 *	Locates free space in the source tree and frees it in the destination
 *	tree.  The space must not already be free in the destination.
 */

static void blst_copy(
	blmeta_t *scan,
	blist_blkno_t blk,
	blist_blkno_t radix,
	blist_blkno_t skip,
	blist_t dest,
	blist_blkno_t count
) {
	blist_blkno_t next_skip;
	blist_blkno_t i;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		blist_bitmap_t v = scan->u.bmu_bitmap;

		if (v == (blist_bitmap_t)-1) {
			blist_free(dest, blk, count);
		} else if (v != 0) {
			int j;

			for (j = 0; j < BLIST_BMAP_RADIX && j < count; ++j) {
				if (v & ((blist_bitmap_t)1 << j))
					blist_free(dest, blk + j, 1);
			}
		}
		return;
	}

	/*
	 * Meta node
	 */

	if (scan->u.bmu_avail == 0) {
		/*
		 * Source all allocated, leave dest allocated
		 */
		return;
	}
	if (scan->u.bmu_avail == radix) {
		/*
		 * Source all free, free entire dest
		 */
		if (count < radix)
			blist_free(dest, blk, count);
		else
			blist_free(dest, blk, radix);
		return;
	}


	radix /= BLIST_META_RADIX;
	next_skip = (skip / BLIST_META_RADIX);

	for (i = 1; count && i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (blist_blkno_t)-1)
			break;

		if (count >= radix) {
			blst_copy(
			    &scan[i],
			    blk,
			    radix,
			    next_skip - 1,
			    dest,
			    radix
			);
			count -= radix;
		} else {
			if (count) {
				blst_copy(
				    &scan[i],
				    blk,
				    radix,
				    next_skip - 1,
				    dest,
				    count
				);
			}
			count = 0;
		}
		blk += radix;
	}
}


/*
 * BLST_LEAF_FILL() -	allocate specific blocks in leaf bitmap
 *
 *	This routine allocates all blocks in the specified range
 *	regardless of any existing allocations in that range.  Returns
 *	the number of blocks allocated by the call.
 */

static int
blst_leaf_fill(blmeta_t *scan, blist_blkno_t blk, int count)
{
	int n = blk & (BLIST_BMAP_RADIX - 1);
	int nblks;
	blist_bitmap_t mask, bitmap;

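	/* Same mask as in blst_leaf_free(): bits [n, n + count) of this leaf. */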
	mask = ((blist_bitmap_t)-1 << n) &
	    ((blist_bitmap_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	/* Count the number of blocks we're about to allocate */
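	/* (bitmap &= bitmap - 1 clears the lowest set bit each pass) */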
	bitmap = scan->u.bmu_bitmap & mask;
	for (nblks = 0; bitmap != 0; nblks++)
		bitmap &= bitmap - 1;

	scan->u.bmu_bitmap &= ~mask;
	return nblks;
}

/*
 * BLST_META_FILL() -	allocate specific blocks at a meta node
 *
 *	This routine allocates the specified range of blocks,
 *	regardless of any existing allocations in the range.  The
 *	range must be within the extent of this node.  Returns the
 *	number of blocks allocated by the call.
 */
static blist_blkno_t
blst_meta_fill(
	blmeta_t *scan,
	blist_blkno_t allocBlk,
	blist_blkno_t count,
	blist_blkno_t radix,
	blist_blkno_t skip,
	blist_blkno_t blk
) {
	blist_blkno_t i;
	blist_blkno_t next_skip = (skip / BLIST_META_RADIX);
	blist_blkno_t nblks = 0;

	if (count == radix || scan->u.bmu_avail == 0)  {
		/*
		 * ALL-ALLOCATED special case
		 */
		nblks = scan->u.bmu_avail;
		scan->u.bmu_avail = 0;
		scan->bm_bighint = count;
		return nblks;
	}

	if (count > radix)
		panic("blist_meta_fill: allocation too large");

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize sublevel
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (blist_blkno_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (blist_bitmap_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = radix;
				scan[i].u.bmu_avail = radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

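	/*
	 * Descend to the first child whose range contains allocBlk
	 * (same indexing as in blst_meta_free() above).
	 */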
	i = (allocBlk - blk) / radix;
	blk += i * radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < allocBlk + count) {
		blist_blkno_t v;

		v = blk + radix - allocBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (blist_blkno_t)-1)
			panic("blst_meta_fill: filling unexpected range");

		if (next_skip == 1) {
			nblks += blst_leaf_fill(&scan[i], allocBlk, v);
		} else {
			nblks += blst_meta_fill(&scan[i], allocBlk, v,
			    radix, next_skip - 1, blk);
		}
		count -= v;
		allocBlk += v;
		blk += radix;
		i += next_skip;
	}
	scan->u.bmu_avail -= nblks;
	return nblks;
}

/*
 * BLST_RADIX_INIT() - initialize radix tree
 *
 *	Initialize our meta structures and bitmaps and calculate the exact
 *	amount of space required to manage 'count' blocks - this space may
 *	be considerably less than the calculated radix due to the large
 *	RADIX values we use.
 */

static blist_blkno_t
blst_radix_init(blmeta_t *scan, blist_blkno_t radix, blist_blkno_t skip,
    blist_blkno_t count)
{
	blist_blkno_t i;
	blist_blkno_t next_skip;
	blist_blkno_t memindex = 0;
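
	/*
	 * memindex tracks how far into the linear node array this
	 * subtree reaches; blist_create() allocates memindex + 1
	 * blmeta_t entries, which covers every slot the tree touches,
	 * including terminator slots.
	 */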

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		if (scan) {
			scan->bm_bighint = 0;
			scan->u.bmu_bitmap = 0;
		}
		return(memindex);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */

	if (scan) {
		scan->bm_bighint = 0;
		scan->u.bmu_avail = 0;
	}

	radix /= BLIST_META_RADIX;
	next_skip = (skip / BLIST_META_RADIX);

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= radix) {
			/*
			 * Allocate the entire object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    radix
			);
			count -= radix;
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    count
			);
			count = 0;
		} else {
			/*
			 * Add terminator and break out
			 */
			if (scan)
				scan[i].bm_bighint = (blist_blkno_t)-1;
			break;
		}
	}
	if (memindex < i)
		memindex = i;
	return(memindex);
}

#ifdef BLIST_DEBUG

static void
blst_radix_print(blmeta_t *scan, blist_blkno_t blk, blist_blkno_t radix,
    blist_blkno_t skip, int tab)
{
	blist_blkno_t i;
	blist_blkno_t next_skip;
	int lastState = 0;

	if (radix == BLIST_BMAP_RADIX) {
		printf(
		    "%*.*s(%0*" PRIx64 ",%" PRIu64
		    "): bitmap %0*" PRIx64 " big=%" PRIu64 "\n",
		    tab, tab, "",
		    (int)(sizeof(blk) * 2),
		    (uint64_t)blk,
		    (uint64_t)radix,
		    (int)(sizeof(scan->u.bmu_bitmap) * 2),
		    (uint64_t)scan->u.bmu_bitmap,
		    (uint64_t)scan->bm_bighint
		);
		return;
	}

	if (scan->u.bmu_avail == 0) {
		printf(
		    "%*.*s(%0*" PRIx64 ",%" PRIu64") ALL ALLOCATED\n",
		    tab, tab, "",
		    (int)(sizeof(blk) * 2),
		    (uint64_t)blk,
		    (uint64_t)radix
		);
		return;
	}
	if (scan->u.bmu_avail == radix) {
		printf(
		    "%*.*s(%0*" PRIx64 ",%" PRIu64 ") ALL FREE\n",
		    tab, tab, "",
		    (int)(sizeof(blk) * 2),
		    (uint64_t)blk,
		    (uint64_t)radix
		);
		return;
	}

	printf(
	    "%*.*s(%0*" PRIx64 ",%" PRIu64 "): subtree (%" PRIu64 "/%"
	    PRIu64 ") big=%" PRIu64 " {\n",
	    tab, tab, "",
	    (int)(sizeof(blk) * 2),
	    (uint64_t)blk,
	    (uint64_t)radix,
	    (uint64_t)scan->u.bmu_avail,
	    (uint64_t)radix,
	    (uint64_t)scan->bm_bighint
	);

	radix /= BLIST_META_RADIX;
	next_skip = (skip / BLIST_META_RADIX);
	tab += 4;

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (blist_blkno_t)-1) {
			printf(
			    "%*.*s(%0*" PRIx64 ",%" PRIu64 "): Terminator\n",
			    tab, tab, "",
			    (int)(sizeof(blk) * 2),
			    (uint64_t)blk,
			    (uint64_t)radix
			);
			lastState = 0;
			break;
		}
		blst_radix_print(
		    &scan[i],
		    blk,
		    radix,
		    next_skip - 1,
		    tab
		);
		blk += radix;
	}
	tab -= 4;

	printf(
	    "%*.*s}\n",
	    tab, tab, ""
	);
}

#endif

#ifdef BLIST_DEBUG

int
main(int ac, char **av)
{
	blist_blkno_t size = 1024;
	int i;
	blist_t bl;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			size = strtol(ptr, NULL, 0);
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	bl = blist_create(size);
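	/* a freshly created blist is fully allocated; free the whole range */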
	blist_free(bl, 0, size);

	for (;;) {
		char buf[1024];
		uint64_t da = 0;
		uint64_t count = 0;

		printf("%" PRIu64 "/%" PRIu64 "/%" PRIu64 "> ",
		    (uint64_t)bl->bl_free,
		    (uint64_t)size,
		    (uint64_t)bl->bl_radix);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'r':
			if (sscanf(buf + 1, "%" SCNu64, &count) == 1) {
				blist_resize(&bl, count, 1);
			} else {
				printf("?\n");
			}
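			/* FALLTHROUGH - control drops into 'p' and prints the tree */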
		case 'p':
			blist_print(bl);
			break;
		case 'a':
			if (sscanf(buf + 1, "%" SCNu64, &count) == 1) {
				blist_blkno_t blk = blist_alloc(bl, count);
				printf("    R=%0*" PRIx64 "\n",
				    (int)(sizeof(blk) * 2),
				    (uint64_t)blk);
			} else {
				printf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%" SCNx64 " %" SCNu64,
			    &da, &count) == 2) {
				blist_free(bl, da, count);
			} else {
				printf("?\n");
			}
			break;
		case 'l':
			if (sscanf(buf + 1, "%" SCNx64 " %" SCNu64,
			    &da, &count) == 2) {
				printf("    n=%" PRIu64 "\n",
				    (uint64_t)blist_fill(bl, da, count));
			} else {
				printf("?\n");
			}
			break;
		case '?':
		case 'h':
			puts(
			    "p          -print\n"
			    "a %d       -allocate\n"
			    "f %x %d    -free\n"
			    "l %x %d    -fill\n"
			    "r %d       -resize\n"
			    "h/?        -help"
			);
			break;
		default:
			printf("?\n");
			break;
		}
	}
	return(0);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	va_end(va);
	exit(1);
}

#endif
