/*	$NetBSD: subr_blist.c,v 1.4 2005/04/06 11:36:37 yamt Exp $	*/

/*-
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * BLIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting
 *
 *	This module implements a general bitmap allocator/deallocator.  The
 *	allocator eats around 2 bits per 'block'.  The module does not
 *	try to interpret the meaning of a 'block' other than to return
 *	BLIST_NONE on an allocation failure.
 *
 *	A radix tree is used to maintain the bitmap.  Two radix constants
 *	are involved:  one for the bitmaps contained in the leaf nodes
 *	(typically 64, the width of the uint64_t bitmap words used here),
 *	and one for the meta nodes (typically 16).  Both meta and leaf
 *	nodes have a hint field.  This field gives us a hint as to the largest
 *	free contiguous range of blocks under the node.  It may contain a
 *	value that is too high, but will never contain a value that is too
 *	low.  When the radix tree is searched, allocation failures in subtrees
 *	update the hint.
 *
 *	The radix tree also implements two collapsed states for meta nodes:
 *	the ALL-ALLOCATED state and the ALL-FREE state.  If a meta node is
 *	in either of these two states, all information contained underneath
 *	the node is considered stale.  These states are used to optimize
 *	allocation and freeing operations.
 *
 * 	The hinting greatly increases code efficiency for allocations while
 *	the general radix structure optimizes both allocations and frees.  The
 *	radix tree should be able to operate well no matter how much
 *	fragmentation there is and no matter how large a bitmap is used.
 *
 *	Unlike the rlist code, the blist code wires all necessary memory at
 *	creation time.  Neither allocations nor frees require interaction with
 *	the memory subsystem.  In contrast, the rlist code may allocate memory
 *	on an rlist_free() call.  The non-blocking features of the blist code
 *	are used to great advantage in the swap code (vm/nswap_pager.c).  The
 *	rlist code uses a little less overall memory than the blist code (but
 *	due to swap interleaving not all that much less), but the blist code
 *	scales much, much better.
 *
 *	LAYOUT: The radix tree is laid out recursively using a
 *	linear array.  Each meta node is immediately followed (laid out
 *	sequentially in memory) by BLIST_META_RADIX lower-level nodes.  This
 *	is a recursive structure but one that can be easily scanned through
 *	a very simple 'skip' calculation (a worked sketch appears below,
 *	after the structure definitions).  In order to support large radixes,
 *	portions of the tree may reside outside our memory allocation.  We
 *	handle this with an early-termination optimization (when bighint is
 *	set to -1) on the scan.  The memory allocation is only large enough
 *	to cover the number of blocks requested at creation time even if it
 *	must be encompassed in a larger root-node radix.
 *
 *	NOTE: the allocator cannot currently allocate more than
 *	BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 *	large' if you try.  This is an area that could use improvement.  The
 *	radix is large enough that this restriction does not affect the swap
 *	system, though.  Currently only the allocation code is affected by
 *	this algorithmic unfeature.  The freeing code can handle arbitrary
 *	ranges.
 *
 *	This code can be compiled stand-alone for debugging.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_blist.c,v 1.4 2005/04/06 11:36:37 yamt Exp $");
#if 0
__FBSDID("$FreeBSD: src/sys/kern/subr_blist.c,v 1.17 2004/06/04 04:03:25 alc Exp $");
#endif

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/blist.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#else

#ifndef BLIST_NO_DEBUG
#define BLIST_DEBUG
#endif

#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <inttypes.h>

#define malloc(a,b,c)	calloc(a, 1)
#define free(a,b)	free(a)

#include "../sys/blist.h"

void panic(const char *ctl, ...);

#endif

/*
 * blmeta and the bitmap word type (uint64_t) MUST be a power of 2 in size.
 */

typedef struct blmeta {
	union {
	    uint64_t	bmu_avail;	/* space available under us	*/
	    uint64_t	bmu_bitmap;	/* bitmap if we are a leaf	*/
	} u;
	uint64_t	bm_bighint;	/* biggest contiguous block hint*/
} blmeta_t;

struct blist {
	uint64_t	bl_blocks;	/* area of coverage		*/
	uint64_t	bl_radix;	/* coverage radix		*/
	uint64_t	bl_skip;	/* starting skip		*/
	uint64_t	bl_free;	/* number of free blocks	*/
	blmeta_t	*bl_root;	/* root of radix tree		*/
	uint64_t	bl_rootblks;	/* blks allocated for tree	*/
};

#define BLIST_META_RADIX	16

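/*
 * Worked sketch (illustrative only, not part of the allocator): how the
 * radix/skip pair relates to the linear-array layout described in the
 * header comment.  With BLIST_BMAP_RADIX at 64 (the uint64_t bitmap
 * width) and BLIST_META_RADIX at 16, a request for 1000 blocks computes
 * radix = 64 * 16 = 1024 and skip = (0 + 1) * 16 = 16: the root meta
 * node is followed by 16 array slots, and a child subtree is entered by
 * stepping next_skip = skip / BLIST_META_RADIX entries.  This helper
 * simply replays the loop from blist_create(); its name is hypothetical.
 */
#ifdef BLIST_DEBUG
static void
blst_debug_radix_skip(uint64_t blocks)
{
	uint64_t radix = BLIST_BMAP_RADIX;	/* a single leaf bitmap */
	int skip = 0;				/* no meta levels yet */

	while (radix < blocks) {
		radix *= BLIST_META_RADIX;	/* add one meta level */
		skip = (skip + 1) * BLIST_META_RADIX;
	}
	printf("blocks=%" PRIu64 " -> radix=%" PRIu64 ", skip=%d\n",
	    blocks, radix, skip);
}
#endif
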
/*
 * static support functions
 */

static uint64_t blst_leaf_alloc(blmeta_t *scan, uint64_t blk, int count);
static uint64_t blst_meta_alloc(blmeta_t *scan, uint64_t blk,
				uint64_t count, uint64_t radix, int skip);
static void blst_leaf_free(blmeta_t *scan, uint64_t relblk, int count);
static void blst_meta_free(blmeta_t *scan, uint64_t freeBlk, uint64_t count,
					uint64_t radix, int skip, uint64_t blk);
static void blst_copy(blmeta_t *scan, uint64_t blk, uint64_t radix,
				uint64_t skip, blist_t dest, uint64_t count);
static int blst_leaf_fill(blmeta_t *scan, uint64_t blk, int count);
static int blst_meta_fill(blmeta_t *scan, uint64_t allocBlk, uint64_t count,
				uint64_t radix, int skip, uint64_t blk);
static uint64_t	blst_radix_init(blmeta_t *scan, uint64_t radix,
						int skip, uint64_t count);
#ifndef _KERNEL
static void	blst_radix_print(blmeta_t *scan, uint64_t blk,
					uint64_t radix, int skip, int tab);
#endif

#ifdef _KERNEL
static MALLOC_DEFINE(M_BLIST, "blist", "Bitmap allocator");
#endif

/*
 * blist_create() - create a blist capable of handling up to the specified
 *		    number of blocks
 *
 *	blocks must be greater than 0
 *
 *	The smallest blist consists of a single leaf node capable of
 *	managing BLIST_BMAP_RADIX blocks.
 */

blist_t
blist_create(uint64_t blocks)
{
	blist_t bl;
	uint64_t radix;	/* 64-bit so a huge block count cannot overflow it */
	int skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 */
	radix = BLIST_BMAP_RADIX;

	while (radix < blocks) {
		radix *= BLIST_META_RADIX;
		skip = (skip + 1) * BLIST_META_RADIX;
	}

	bl = malloc(sizeof(struct blist), M_BLIST, M_WAITOK | M_ZERO);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 +
	    blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
	bl->bl_root = malloc(sizeof(blmeta_t) * bl->bl_rootblks, M_BLIST, M_WAITOK);

#if defined(BLIST_DEBUG)
	printf(
		"BLIST representing %" PRIu64 " blocks (%" PRIu64 " MB of swap)"
		", requiring %" PRIu64 "K of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024
	);
	printf("BLIST raw radix tree contains %" PRIu64 " records\n",
	    bl->bl_rootblks);
#endif
	blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks);

	return(bl);
}
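
/*
 * Minimal usage sketch (illustrative only): create a map, carve space
 * out of it, release it, and tear the map down.  A freshly created
 * blist starts with every block allocated, so the caller frees the
 * range it wants to make available, as main() below also does.  The
 * helper name is hypothetical.
 */
#ifdef BLIST_DEBUG
static void
blst_debug_usage(void)
{
	blist_t bl = blist_create(1024);	/* manage 1024 blocks */
	uint64_t blk;

	blist_free(bl, 0, 1024);	/* make all 1024 blocks available */
	blk = blist_alloc(bl, 16);	/* reserve 16 contiguous blocks */
	if (blk != BLIST_NONE)		/* BLIST_NONE signals failure */
		blist_free(bl, blk, 16);
	blist_destroy(bl);
}
#endif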

void
blist_destroy(blist_t bl)
{
	free(bl->bl_root, M_BLIST);
	free(bl, M_BLIST);
}

/*
 * blist_alloc() - reserve space in the block bitmap.  Return the base
 *		     of a contiguous region or BLIST_NONE if space could
 *		     not be allocated.
 */

uint64_t
blist_alloc(blist_t bl, uint64_t count)
{
	uint64_t blk = BLIST_NONE;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blk = blst_leaf_alloc(bl->bl_root, 0, count);
		else
			blk = blst_meta_alloc(bl->bl_root, 0, count, bl->bl_radix, bl->bl_skip);
		if (blk != BLIST_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

/*
 * blist_free() -	free up space in the block bitmap.  The range
 *			must have been previously allocated.  Panic if an
 *			inconsistency is found.
 */

void
blist_free(blist_t bl, uint64_t blkno, uint64_t count)
{
	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blst_leaf_free(bl->bl_root, blkno, count);
		else
			blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free += count;
	}
}

/*
 * blist_fill() -	mark a region in the block bitmap as off-limits
 *			to the allocator (i.e. allocate it), ignoring any
 *			existing allocations.  Return the number of blocks
 *			actually filled that were free before the call.
 */

int
blist_fill(blist_t bl, uint64_t blkno, uint64_t count)
{
	int filled;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			filled = blst_leaf_fill(bl->bl_root, blkno, count);
		else
			filled = blst_meta_fill(bl->bl_root, blkno, count,
			    bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free -= filled;
		return filled;
	} else
		return 0;
}

/*
 * blist_resize() -	resize an existing radix tree to handle the
 *			specified number of blocks.  This will reallocate
 *			the tree and transfer the previous bitmap to the new
 *			one.  When extending the tree you can specify whether
 *			the new blocks are to be left allocated or freed.
 */

void
blist_resize(blist_t *pbl, uint64_t count, int freenew)
{
    blist_t newbl = blist_create(count);
    blist_t save = *pbl;

    *pbl = newbl;
    if (count > save->bl_blocks)
	    count = save->bl_blocks;
    blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count);

    /*
     * If resizing upwards, should we free the new space or not?
     */
    if (freenew && count < newbl->bl_blocks) {
	    blist_free(newbl, count, newbl->bl_blocks - count);
    }
    blist_destroy(save);
}

#ifdef BLIST_DEBUG

/*
 * blist_print()    - dump radix tree
 */

void
blist_print(blist_t bl)
{
	printf("BLIST {\n");
	blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
	printf("}\n");
}

#endif

/************************************************************************
 *			  ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 *
 */

/*
 * blist_leaf_alloc() -	allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the BLIST_BMAP_RADIX block allocation cases.  Other cases are
 *	somewhat slower.  The 1 block allocation case is log2 and extremely
 *	quick.
 */

static uint64_t
blst_leaf_alloc(
	blmeta_t *scan,
	uint64_t blk,
	int count
) {
	uint64_t orig = scan->u.bmu_bitmap;

	if (orig == 0) {
		/*
		 * Optimize bitmap all-allocated case.  Also, count = 1
		 * case assumes at least 1 bit is free in the bitmap, so
		 * we have to take care of this case here.
		 */
		scan->bm_bighint = 0;
		return(BLIST_NONE);
	}
	if (count == 1) {
		/*
		 * Optimized code to allocate one bit out of the bitmap
		 */
		uint64_t mask;
		int j = BLIST_BMAP_RADIX/2;
		int r = 0;

		mask = (uint64_t)-1 >> (BLIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
			    r += j;
			    orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
		scan->u.bmu_bitmap &= ~((uint64_t)1 << r);
		return(blk + r);
	}
	if (count <= BLIST_BMAP_RADIX) {
		/*
		 * non-optimized code to allocate N bits out of the bitmap.
		 * The more bits, the faster the code runs.  It will run
		 * the slowest allocating 2 bits, but since there aren't any
		 * memory ops in the core loop (or shouldn't be, anyway),
		 * you probably won't notice the difference.
		 */
		int j;
		int n = BLIST_BMAP_RADIX - count;
		uint64_t mask;

		mask = (uint64_t)-1 >> n;

		for (j = 0; j <= n; ++j) {
			if ((orig & mask) == mask) {
				scan->u.bmu_bitmap &= ~mask;
				return(blk + j);
			}
			mask = (mask << 1);
		}
	}
	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;
	return(BLIST_NONE);
}
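
/*
 * Illustrative sketch of the count == 1 search above (not used by the
 * allocator): binary-search a 64-bit word for its lowest set (i.e.
 * free) bit by halving the window.  Whenever the low half of the
 * current window is empty, shift it away and record how far we moved.
 * The helper name is hypothetical.
 */
#ifdef BLIST_DEBUG
static int
blst_debug_lowest_set_bit(uint64_t map)
{
	uint64_t mask = (uint64_t)-1 >> (BLIST_BMAP_RADIX/2);
	int j = BLIST_BMAP_RADIX/2;
	int r = 0;

	if (map == 0)
		return(-1);	/* no free bit at all */
	while (j) {
		if ((map & mask) == 0) {
			r += j;		/* low j bits are empty, skip them */
			map >>= j;
		}
		j >>= 1;
		mask >>= j;	/* shrink the window to the new low half */
	}
	return(r);		/* e.g. 0x8 yields 3 */
}
#endif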

/*
 * blist_meta_alloc() -	allocate at a meta in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */

static uint64_t
blst_meta_alloc(
	blmeta_t *scan,
	uint64_t blk,
	uint64_t count,
	uint64_t radix,
	int skip
) {
	int i;
	int next_skip = ((u_int)skip / BLIST_META_RADIX);

	if (scan->u.bmu_avail == 0)  {
		/*
		 * ALL-ALLOCATED special case
		 */
		scan->bm_bighint = count;
		return(BLIST_NONE);
	}

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize the uninitialized
		 * sublevel.
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (uint64_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (uint64_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = radix;
				scan[i].u.bmu_avail = radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (uint64_t)-1) {
			/*
			 * Terminator
			 */
			break;
		} else if (count <= scan[i].bm_bighint) {
			/*
			 * count fits in object
			 */
			uint64_t r;
			if (next_skip == 1) {
				r = blst_leaf_alloc(&scan[i], blk, count);
			} else {
				r = blst_meta_alloc(&scan[i], blk, count, radix, next_skip - 1);
			}
			if (r != BLIST_NONE) {
				scan->u.bmu_avail -= count;
				if (scan->bm_bighint > scan->u.bmu_avail)
					scan->bm_bighint = scan->u.bmu_avail;
				return(r);
			}
		} else if (count > radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */
			panic("blist_meta_alloc: allocation too large");
		}
		blk += radix;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (scan->bm_bighint >= count)
		scan->bm_bighint = count - 1;
	return(BLIST_NONE);
}

/*
 * BLST_LEAF_FREE() -	free allocated block from leaf bitmap
 *
 */

static void
blst_leaf_free(
	blmeta_t *scan,
	uint64_t blk,
	int count
) {
	/*
	 * free some data in this bitmap
	 *
	 * e.g.
	 *	0000111111111110000
	 *          \_________/\__/
	 *		v        n
	 */
	int n = blk & (BLIST_BMAP_RADIX - 1);
	uint64_t mask;

	mask = ((uint64_t)-1 << n) &
	    ((uint64_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	if (scan->u.bmu_bitmap & mask)
		panic("blst_radix_free: freeing free block");
	scan->u.bmu_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = BLIST_BMAP_RADIX;
}
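
/*
 * Illustrative sketch of the mask construction above (not used by the
 * allocator): bits [n, n + count) are selected by intersecting a mask
 * whose low n bits are clear with a mask whose top
 * BLIST_BMAP_RADIX - count - n bits are clear.  On an 8-bit word,
 * n = 2 and count = 3 would yield 00011100.  The helper name is
 * hypothetical.
 */
#ifdef BLIST_DEBUG
static uint64_t
blst_debug_range_mask(int n, int count)
{
	return(((uint64_t)-1 << n) &	/* clear the n bits below the range */
	    ((uint64_t)-1 >> (BLIST_BMAP_RADIX - count - n)));
}
#endif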

/*
 * BLST_META_FREE() - free allocated blocks from radix tree meta info
 *
 *	This support routine frees a range of blocks from the bitmap.
 *	The range must be entirely enclosed by this radix node.  If a
 *	meta node, we break the range down recursively to free blocks
 *	in subnodes (which means that this code can free an arbitrary
 *	range whereas the allocation code cannot allocate an arbitrary
 *	range).
 */

static void
blst_meta_free(
	blmeta_t *scan,
	uint64_t freeBlk,
	uint64_t count,
	uint64_t radix,
	int skip,
	uint64_t blk
) {
	int i;
	int next_skip = ((u_int)skip / BLIST_META_RADIX);

#if 0
	printf("FREE (%" PRIx64 ",%" PRIu64
	    ") FROM (%" PRIx64 ",%" PRIu64 ")\n",
	    freeBlk, count,
	    blk, radix
	);
#endif

	if (scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case, with possible
		 * shortcut to ALL-FREE special case.
		 */
		scan->u.bmu_avail = count;
		scan->bm_bighint = count;

		if (count != radix)  {
			for (i = 1; i <= skip; i += next_skip) {
				if (scan[i].bm_bighint == (uint64_t)-1)
					break;
				scan[i].bm_bighint = 0;
				if (next_skip == 1) {
					scan[i].u.bmu_bitmap = 0;
				} else {
					scan[i].u.bmu_avail = 0;
				}
			}
			/* fall through */
		}
	} else {
		scan->u.bmu_avail += count;
		/* scan->bm_bighint = radix; */
	}

	/*
	 * ALL-FREE special case.
	 */

	if (scan->u.bmu_avail == radix)
		return;
	if (scan->u.bmu_avail > radix)
		panic("blst_meta_free: freeing already free blocks (%"
		    PRIu64 ") %" PRIu64 "/%" PRIu64,
		    count, scan->u.bmu_avail, radix);

	/*
	 * Break the free down into its components
	 */

	radix /= BLIST_META_RADIX;

	i = (freeBlk - blk) / radix;
	blk += i * radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < freeBlk + count) {
		uint64_t v;

		v = blk + radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (uint64_t)-1)
			panic("blst_meta_free: freeing unexpected range");

		if (next_skip == 1) {
			blst_leaf_free(&scan[i], freeBlk, v);
		} else {
			blst_meta_free(&scan[i], freeBlk, v, radix, next_skip - 1, blk);
		}
		if (scan->bm_bighint < scan[i].bm_bighint)
		    scan->bm_bighint = scan[i].bm_bighint;
		count -= v;
		freeBlk += v;
		blk += radix;
		i += next_skip;
	}
}
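
/*
 * Worked sketch of the child-selection arithmetic above (illustrative
 * only): once radix has been divided down to the per-child radix, the
 * child covering freeBlk under a node whose range starts at blk is
 * (freeBlk - blk) / radix, and its array slot is that index times
 * next_skip, plus 1 because slot 0 is the meta node itself.  The
 * helper name is hypothetical.
 */
#ifdef BLIST_DEBUG
static void
blst_debug_child_slot(uint64_t freeBlk, uint64_t blk, uint64_t radix,
	int next_skip)
{
	int i = (int)((freeBlk - blk) / radix);	/* which child subtree */

	printf("child %d at slot %d covers blocks [%" PRIu64 ",%" PRIu64 ")\n",
	    i, i * next_skip + 1,
	    blk + i * radix, blk + (i + 1) * radix);
}
#endif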

/*
 * BLIST_RADIX_COPY() - copy one radix tree to another
 *
 *	Locates free space in the source tree and frees it in the destination
 *	tree.  The space may not already be free in the destination.
 */

static void blst_copy(
	blmeta_t *scan,
	uint64_t blk,
	uint64_t radix,
	uint64_t skip,
	blist_t dest,
	uint64_t count
) {
	int next_skip;
	int i;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		uint64_t v = scan->u.bmu_bitmap;

		if (v == (uint64_t)-1) {
			blist_free(dest, blk, count);
		} else if (v != 0) {
			int i;

			for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) {
				/*
				 * The shifted constant must be 64-bit, or
				 * bits above 31 would never be tested.
				 */
				if (v & ((uint64_t)1 << i))
					blist_free(dest, blk + i, 1);
			}
		}
		return;
	}

	/*
	 * Meta node
	 */

	if (scan->u.bmu_avail == 0) {
		/*
		 * Source all allocated, leave dest allocated
		 */
		return;
	}
	if (scan->u.bmu_avail == radix) {
		/*
		 * Source all free, free entire dest
		 */
		if (count < radix)
			blist_free(dest, blk, count);
		else
			blist_free(dest, blk, radix);
		return;
	}

	radix /= BLIST_META_RADIX;
	next_skip = ((u_int)skip / BLIST_META_RADIX);

	for (i = 1; count && i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (uint64_t)-1)
			break;

		if (count >= radix) {
			blst_copy(
			    &scan[i],
			    blk,
			    radix,
			    next_skip - 1,
			    dest,
			    radix
			);
			count -= radix;
		} else {
			if (count) {
				blst_copy(
				    &scan[i],
				    blk,
				    radix,
				    next_skip - 1,
				    dest,
				    count
				);
			}
			count = 0;
		}
		blk += radix;
	}
}

/*
 * BLST_LEAF_FILL() -	allocate specific blocks in leaf bitmap
 *
 *	This routine allocates all blocks in the specified range
 *	regardless of any existing allocations in that range.  Returns
 *	the number of blocks allocated by the call.
 */

static int
blst_leaf_fill(blmeta_t *scan, uint64_t blk, int count)
{
	int n = blk & (BLIST_BMAP_RADIX - 1);
	int nblks;
	uint64_t mask, bitmap;

	mask = ((uint64_t)-1 << n) &
	    ((uint64_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	/* Count the number of blocks we're about to allocate */
	bitmap = scan->u.bmu_bitmap & mask;
	for (nblks = 0; bitmap != 0; nblks++)
		bitmap &= bitmap - 1;

	scan->u.bmu_bitmap &= ~mask;
	return nblks;
}
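
/*
 * Illustrative sketch of the bit-counting loop above (not used by the
 * allocator): bitmap &= bitmap - 1 clears the lowest set bit, so the
 * loop body runs exactly once per set bit (Kernighan's population
 * count).  The helper name is hypothetical.
 */
#ifdef BLIST_DEBUG
static int
blst_debug_popcount(uint64_t bitmap)
{
	int nblks;

	for (nblks = 0; bitmap != 0; nblks++)
		bitmap &= bitmap - 1;	/* strip the lowest set bit */
	return(nblks);			/* e.g. 0xf0 yields 4 */
}
#endif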

/*
 * BLIST_META_FILL() -	allocate specific blocks at a meta node
 *
 *	This routine allocates the specified range of blocks,
 *	regardless of any existing allocations in the range.  The
 *	range must be within the extent of this node.  Returns the
 *	number of blocks allocated by the call.
 */
static int
blst_meta_fill(
	blmeta_t *scan,
	uint64_t allocBlk,
	uint64_t count,
	uint64_t radix,
	int skip,
	uint64_t blk
) {
	int i;
	int next_skip = ((u_int)skip / BLIST_META_RADIX);
	int nblks = 0;

	if (count == radix || scan->u.bmu_avail == 0)  {
		/*
		 * ALL-ALLOCATED special case
		 */
		nblks = scan->u.bmu_avail;
		scan->u.bmu_avail = 0;
		scan->bm_bighint = count;
		return nblks;
	}

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize sublevel
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (uint64_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (uint64_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = radix;
				scan[i].u.bmu_avail = radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	if (count > radix)
		panic("blist_meta_fill: allocation too large");

	i = (allocBlk - blk) / radix;
	blk += i * radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < allocBlk + count) {
		uint64_t v;

		v = blk + radix - allocBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (uint64_t)-1)
			panic("blst_meta_fill: filling unexpected range");

		if (next_skip == 1) {
			nblks += blst_leaf_fill(&scan[i], allocBlk, v);
		} else {
			nblks += blst_meta_fill(&scan[i], allocBlk, v,
			    radix, next_skip - 1, blk);
		}
		count -= v;
		allocBlk += v;
		blk += radix;
		i += next_skip;
	}
	scan->u.bmu_avail -= nblks;
	return nblks;
}

/*
 * BLST_RADIX_INIT() - initialize radix tree
 *
 *	Initialize our meta structures and bitmaps and calculate the exact
 *	amount of space required to manage 'count' blocks - this space may
 *	be considerably less than the calculated radix due to the large
 *	RADIX values we use.
 */

static uint64_t
blst_radix_init(blmeta_t *scan, uint64_t radix, int skip, uint64_t count)
{
	int i;
	int next_skip;
	uint64_t memindex = 0;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		if (scan) {
			scan->bm_bighint = 0;
			scan->u.bmu_bitmap = 0;
		}
		return(memindex);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */

	if (scan) {
		scan->bm_bighint = 0;
		scan->u.bmu_avail = 0;
	}

	radix /= BLIST_META_RADIX;
	next_skip = ((u_int)skip / BLIST_META_RADIX);

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= radix) {
			/*
			 * Allocate the entire object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    radix
			);
			count -= radix;
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    count
			);
			count = 0;
		} else {
			/*
			 * Add terminator and break out
			 */
			if (scan)
				scan[i].bm_bighint = (uint64_t)-1;
			break;
		}
	}
	if (memindex < i)
		memindex = i;
	return(memindex);
}
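
/*
 * Note (illustrative only): calling blst_radix_init() with a NULL scan,
 * exactly as above, is a pure size computation.  It walks the would-be
 * tree without touching memory and reports how many blmeta_t records
 * the tree needs; blist_create() adds 1 for the root to size bl_root.
 * The helper name is hypothetical.
 */
#ifdef BLIST_DEBUG
static uint64_t
blst_debug_tree_size(uint64_t radix, int skip, uint64_t count)
{
	/* 1 extra record for the root node, mirroring blist_create() */
	return(1 + blst_radix_init(NULL, radix, skip, count));
}
#endif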

#ifdef BLIST_DEBUG

static void
blst_radix_print(blmeta_t *scan, uint64_t blk, uint64_t radix, int skip, int tab)
{
	int i;
	int next_skip;

	if (radix == BLIST_BMAP_RADIX) {
		printf(
		    "%*.*s(%016" PRIx64 ",%" PRIu64
		    "): bitmap %016" PRIx64 " big=%" PRIu64 "\n",
		    tab, tab, "",
		    blk, radix,
		    scan->u.bmu_bitmap,
		    scan->bm_bighint
		);
		return;
	}

	if (scan->u.bmu_avail == 0) {
		printf(
		    "%*.*s(%016" PRIx64 ",%" PRIu64") ALL ALLOCATED\n",
		    tab, tab, "", blk, radix
		);
		return;
	}
	if (scan->u.bmu_avail == radix) {
		printf(
		    "%*.*s(%016" PRIx64 ",%" PRIu64 ") ALL FREE\n",
		    tab, tab, "", blk, radix
		);
		return;
	}

	printf(
	    "%*.*s(%016" PRIx64 ",%" PRIu64 "): subtree (%" PRIu64 "/%"
	    PRIu64 ") big=%" PRIu64 " {\n",
	    tab, tab, "",
	    blk, radix, scan->u.bmu_avail, radix, scan->bm_bighint
	);

	radix /= BLIST_META_RADIX;
	next_skip = ((u_int)skip / BLIST_META_RADIX);
	tab += 4;

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (uint64_t)-1) {
			printf(
			    "%*.*s(%016" PRIx64 ",%" PRIu64 "): Terminator\n",
			    tab, tab, "",
			    blk, radix
			);
			break;
		}
		blst_radix_print(
		    &scan[i],
		    blk,
		    radix,
		    next_skip - 1,
		    tab
		);
		blk += radix;
	}
	tab -= 4;

	printf(
	    "%*.*s}\n",
	    tab, tab, ""
	);
}

#endif

#ifdef BLIST_DEBUG

int
main(int ac, char **av)
{
	uint64_t size = 1024;
	int i;
	blist_t bl;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			size = strtoull(ptr, NULL, 0);
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	bl = blist_create(size);
	blist_free(bl, 0, size);

	for (;;) {
		char buf[1024];
		uint64_t da = 0;
		uint64_t count = 0;

		printf("%" PRIu64 "/%" PRIu64 "/%" PRIu64 "> ",
		    bl->bl_free, size, bl->bl_radix);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'r':
			if (sscanf(buf + 1, "%" SCNu64, &count) == 1) {
				blist_resize(&bl, count, 1);
			} else {
				printf("?\n");
			}
			/* FALLTHROUGH: print the tree after a resize */
		case 'p':
			blist_print(bl);
			break;
		case 'a':
			if (sscanf(buf + 1, "%" SCNu64, &count) == 1) {
				uint64_t blk = blist_alloc(bl, count);
				printf("    R=%016" PRIx64 "\n", blk);
			} else {
				printf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%" SCNx64 " %" SCNu64,
			    &da, &count) == 2) {
				blist_free(bl, da, count);
			} else {
				printf("?\n");
			}
			break;
		case 'l':
			if (sscanf(buf + 1, "%" SCNx64 " %" SCNu64,
			    &da, &count) == 2) {
				printf("    n=%d\n",
				    blist_fill(bl, da, count));
			} else {
				printf("?\n");
			}
			break;
		case '?':
		case 'h':
			puts(
			    "p          -print\n"
			    "a %d       -allocate\n"
			    "f %x %d    -free\n"
			    "l %x %d    -fill\n"
			    "r %d       -resize\n"
			    "h/?        -help"
			);
			break;
		default:
			printf("?\n");
			break;
		}
	}
	return(0);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	va_end(va);
	exit(1);
}

#endif