/* subr_blist.c revision 1.1 */
/*-
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * BLIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting
 *
 *	This module implements a general bitmap allocator/deallocator.  The
 *	allocator eats around 2 bits per 'block'.  The module does not
 *	try to interpret the meaning of a 'block' other than to return
 *	SWAPBLK_NONE on an allocation failure.
 *
 *	A radix tree is used to maintain the bitmap.  Two radix constants are
 *	involved:  One for the bitmaps contained in the leaf nodes (typically
 *	32), and one for the meta nodes (typically 16).  Both meta and leaf
 *	nodes have a hint field.  This field gives us a hint as to the largest
 *	free contiguous range of blocks under the node.  It may contain a
 *	value that is too high, but will never contain a value that is too
 *	low.  When the radix tree is searched, allocation failures in subtrees
 *	update the hint.
 *
 *	The radix tree also implements two collapsed states for meta nodes:
 *	the ALL-ALLOCATED state and the ALL-FREE state.  If a meta node is
 *	in either of these two states, all information contained underneath
 *	the node is considered stale.  These states are used to optimize
 *	allocation and freeing operations.
 *
 * 	The hinting greatly increases code efficiency for allocations while
 *	the general radix structure optimizes both allocations and frees.  The
 *	radix tree should be able to operate well no matter how much
 *	fragmentation there is and no matter how large a bitmap is used.
 *
 *	Unlike the rlist code, the blist code wires all necessary memory at
 *	creation time.  Neither allocations nor frees require interaction with
 *	the memory subsystem.  In contrast, the rlist code may allocate memory
 *	on an rlist_free() call.  The non-blocking features of the blist code
 *	are used to great advantage in the swap code (vm/nswap_pager.c).  The
 *	rlist code uses a little less overall memory than the blist code (but
 *	due to swap interleaving not all that much less), but the blist code
 *	scales much, much better.
 *
 *	LAYOUT: The radix tree is laid out recursively using a
 *	linear array.  Each meta node is immediately followed (laid out
 *	sequentially in memory) by BLIST_META_RADIX lower level nodes.  This
 *	is a recursive structure but one that can be easily scanned through
 *	a very simple 'skip' calculation.  In order to support large radixes,
 *	portions of the tree may reside outside our memory allocation.  We
 *	handle this with an early-termination optimization (when bighint is
 *	set to -1) on the scan.  The memory allocation is only large enough
 *	to cover the number of blocks requested at creation time even if it
 *	must be encompassed by a larger root-node radix.
 *
 *	NOTE: the allocator cannot currently allocate more than
 *	BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 *	large' if you try.  This is an area that could use improvement.  The
 *	radix is large enough that this restriction does not affect the swap
 *	system, though.  Currently only the allocation code is affected by
 *	this algorithmic unfeature.  The freeing code can handle arbitrary
 *	ranges.
 *
 *	This code can be compiled stand-alone for debugging.
 */

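/*
 * Illustrative sketch (not part of the original source): how the 'skip'
 * layout described above works for a small tree, assuming
 * BLIST_BMAP_RADIX = 32 and BLIST_META_RADIX = 16 (the typical values
 * mentioned in the comment).  With radix = 8192 the linear array is:
 *
 *	scan[0]		root meta node (covers 8192 blocks, skip = 272)
 *	scan[1]		1st child meta node (covers 512 blocks)
 *	scan[2..17]	  16 leaf bitmaps under scan[1]
 *	scan[18]	2nd child meta node
 *	scan[19..34]	  16 leaf bitmaps under scan[18]
 *	...
 *
 * i.e. the root steps to its children with next_skip = 272 / 16 = 17,
 * visiting indices 1, 18, 35, ...  Subtrees that lie beyond the block
 * count requested at creation time are cut off by a terminator node
 * whose bm_bighint is -1.
 */
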
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/kern/subr_blist.c,v 1.17 2004/06/04 04:03:25 alc Exp $");

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/blist.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mutex.h>

#else

#ifndef BLIST_NO_DEBUG
#define BLIST_DEBUG
#endif

#define SWAPBLK_NONE ((daddr_t)-1)

#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>

#define malloc(a,b,c)	calloc(a, 1)
#define free(a,b)	free(a)

typedef unsigned int u_daddr_t;

#include <sys/blist.h>

void panic(const char *ctl, ...);

#endif

/*
 * static support functions
 */

static daddr_t blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count);
static daddr_t blst_meta_alloc(blmeta_t *scan, daddr_t blk,
				daddr_t count, daddr_t radix, int skip);
static void blst_leaf_free(blmeta_t *scan, daddr_t relblk, int count);
static void blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count,
					daddr_t radix, int skip, daddr_t blk);
static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix,
				daddr_t skip, blist_t dest, daddr_t count);
static int blst_leaf_fill(blmeta_t *scan, daddr_t blk, int count);
static int blst_meta_fill(blmeta_t *scan, daddr_t allocBlk, daddr_t count,
				daddr_t radix, int skip, daddr_t blk);
static daddr_t	blst_radix_init(blmeta_t *scan, daddr_t radix,
						int skip, daddr_t count);
#ifndef _KERNEL
static void	blst_radix_print(blmeta_t *scan, daddr_t blk,
					daddr_t radix, int skip, int tab);
#endif

#ifdef _KERNEL
static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space");
#endif

/*
 * blist_create() - create a blist capable of handling up to the specified
 *		    number of blocks
 *
 *	blocks must be greater than 0
 *
 *	The smallest blist consists of a single leaf node capable of
 *	managing BLIST_BMAP_RADIX blocks.
 */

blist_t
blist_create(daddr_t blocks)
{
	blist_t bl;
	int radix;
	int skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 */
	radix = BLIST_BMAP_RADIX;

	while (radix < blocks) {
		radix *= BLIST_META_RADIX;
		skip = (skip + 1) * BLIST_META_RADIX;
	}
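
	/*
	 * Illustrative trace (not part of the original source), assuming
	 * BLIST_BMAP_RADIX = 32 and BLIST_META_RADIX = 16: for
	 * blocks = 1000000 the loop above runs four times, giving
	 * radix 32 -> 512 -> 8192 -> 131072 -> 2097152 and
	 * skip 0 -> 16 -> 272 -> 4368 -> 69904, i.e. four meta levels
	 * above the 32-block leaves, with the root covering 2097152
	 * blocks.
	 */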

	bl = malloc(sizeof(struct blist), M_SWAP, M_WAITOK | M_ZERO);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 +
	    blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
	bl->bl_root = malloc(sizeof(blmeta_t) * bl->bl_rootblks, M_SWAP, M_WAITOK);

#if defined(BLIST_DEBUG)
	printf(
		"BLIST representing %lld blocks (%lld MB of swap)"
		", requiring %lldK of ram\n",
		(long long)bl->bl_blocks,
		(long long)bl->bl_blocks * 4 / 1024,
		(long long)(bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024
	);
	printf("BLIST raw radix tree contains %lld records\n",
	    (long long)bl->bl_rootblks);
#endif
	blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks);

	return(bl);
}

void
blist_destroy(blist_t bl)
{
	free(bl->bl_root, M_SWAP);
	free(bl, M_SWAP);
}

/*
 * blist_alloc() - reserve space in the block bitmap.  Return the base
 *		     of a contiguous region or SWAPBLK_NONE if space could
 *		     not be allocated.
 */

daddr_t
blist_alloc(blist_t bl, daddr_t count)
{
	daddr_t blk = SWAPBLK_NONE;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blk = blst_leaf_alloc(bl->bl_root, 0, count);
		else
			blk = blst_meta_alloc(bl->bl_root, 0, count, bl->bl_radix, bl->bl_skip);
		if (blk != SWAPBLK_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

/*
 * blist_free() -	free up space in the block bitmap.  Panic if an
 *			inconsistency is found.
 */

void
blist_free(blist_t bl, daddr_t blkno, daddr_t count)
{
	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blst_leaf_free(bl->bl_root, blkno, count);
		else
			blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free += count;
	}
}

/*
 * blist_fill() -	mark a region in the block bitmap as off-limits
 *			to the allocator (i.e. allocate it), ignoring any
 *			existing allocations.  Return the number of blocks
 *			actually filled that were free before the call.
 */

int
blist_fill(blist_t bl, daddr_t blkno, daddr_t count)
{
	int filled;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			filled = blst_leaf_fill(bl->bl_root, blkno, count);
		else
			filled = blst_meta_fill(bl->bl_root, blkno, count,
			    bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free -= filled;
		return filled;
	} else
		return 0;
}

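/*
 * Illustrative usage sketch (not part of the original source and not
 * compiled): how the public interface above fits together in a
 * stand-alone build.  The function name blist_example and all sizes
 * and block numbers are made up for this sketch.
 */
#if 0
static void
blist_example(void)
{
	blist_t bl;
	daddr_t blk;
	int n;

	bl = blist_create(1024);	/* manage blocks 0..1023 */
	blist_free(bl, 0, 1024);	/* a new blist starts all-allocated,
					 * so free the range we want to use */

	blk = blist_alloc(bl, 16);	/* contiguous run of 16 blocks */
	if (blk != SWAPBLK_NONE)
		blist_free(bl, blk, 16);	/* give it back */

	n = blist_fill(bl, 100, 50);	/* force blocks 100..149 allocated;
					 * n is how many were free before */
	blist_destroy(bl);
}
#endif
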
/*
 * blist_resize() -	resize an existing radix tree to handle the
 *			specified number of blocks.  This will reallocate
 *			the tree and transfer the previous bitmap to the new
 *			one.  When extending the tree you can specify whether
 *			the new blocks are to be left allocated or freed.
 */

void
blist_resize(blist_t *pbl, daddr_t count, int freenew)
{
    blist_t newbl = blist_create(count);
    blist_t save = *pbl;

    *pbl = newbl;
    if (count > save->bl_blocks)
	    count = save->bl_blocks;
    blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count);

    /*
     * If resizing upwards, should we free the new space or not?
     */
    if (freenew && count < newbl->bl_blocks) {
	    blist_free(newbl, count, newbl->bl_blocks - count);
    }
    blist_destroy(save);
}

#ifdef BLIST_DEBUG

/*
 * blist_print()    - dump radix tree
 */

void
blist_print(blist_t bl)
{
	printf("BLIST {\n");
	blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
	printf("}\n");
}

#endif

/************************************************************************
 *			  ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 *
 */

/*
 * blst_leaf_alloc() -	allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the BLIST_BMAP_RADIX block allocation cases.  Other cases are
 *	somewhat slower.  The 1 block allocation case is log2 and extremely
 *	quick.
 */

static daddr_t
blst_leaf_alloc(
	blmeta_t *scan,
	daddr_t blk,
	int count
) {
	u_daddr_t orig = scan->u.bmu_bitmap;

	if (orig == 0) {
		/*
		 * Optimize bitmap all-allocated case.  Also, count = 1
		 * case assumes at least 1 bit is free in the bitmap, so
		 * we have to take care of this case here.
		 */
		scan->bm_bighint = 0;
		return(SWAPBLK_NONE);
	}
	if (count == 1) {
		/*
		 * Optimized code to allocate one bit out of the bitmap
		 */
		u_daddr_t mask;
		int j = BLIST_BMAP_RADIX/2;
		int r = 0;

		mask = (u_daddr_t)-1 >> (BLIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
			    r += j;
			    orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
		scan->u.bmu_bitmap &= ~((u_daddr_t)1 << r);
		return(blk + r);
	}
	if (count <= BLIST_BMAP_RADIX) {
		/*
		 * non-optimized code to allocate N bits out of the bitmap.
		 * The more bits, the faster the code runs.  It will run
		 * the slowest allocating 2 bits, but since there aren't any
		 * memory ops in the core loop (or shouldn't be, anyway),
		 * you probably won't notice the difference.
		 */
		int j;
		int n = BLIST_BMAP_RADIX - count;
		u_daddr_t mask;

		mask = (u_daddr_t)-1 >> n;

		for (j = 0; j <= n; ++j) {
			if ((orig & mask) == mask) {
				scan->u.bmu_bitmap &= ~mask;
				return(blk + j);
			}
			mask = (mask << 1);
		}
	}
	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;
	return(SWAPBLK_NONE);
}

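/*
 * Illustrative sketch (not part of the original source and not compiled):
 * the count == 1 path above is a binary search for the lowest set bit.
 * The helper below mirrors that loop on a plain 32-bit word; the name
 * lowest_set_bit32 is made up for this sketch.  For bits = 0x00300000 it
 * inspects the low 16, 8, 4, 2 and 1 bits in turn and returns 20.
 */
#if 0
static int
lowest_set_bit32(unsigned int bits)
{
	unsigned int mask = 0xffffffffu >> 16;	/* low half of the word */
	int j = 16;				/* half the word width	*/
	int r = 0;

	/* bits must be non-zero, exactly as blst_leaf_alloc() assumes */
	while (j) {
		if ((bits & mask) == 0) {	/* nothing in the low j bits */
			r += j;			/* ...so the answer is higher */
			bits >>= j;
		}
		j >>= 1;
		mask >>= j;			/* narrow to the next low half */
	}
	return r;
}
#endif
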
/*
 * blst_meta_alloc() -	allocate at a meta in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */

static daddr_t
blst_meta_alloc(
	blmeta_t *scan,
	daddr_t blk,
	daddr_t count,
	daddr_t radix,
	int skip
) {
	int i;
	int next_skip = ((u_int)skip / BLIST_META_RADIX);

	if (scan->u.bmu_avail == 0)  {
		/*
		 * ALL-ALLOCATED special case
		 */
		scan->bm_bighint = count;
		return(SWAPBLK_NONE);
	}

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize the uninitialized
		 * sublevel.
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (daddr_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (u_daddr_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = radix;
				scan[i].u.bmu_avail = radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	for (i = 1; i <= skip; i += next_skip) {
		if (count <= scan[i].bm_bighint) {
			/*
			 * count fits in object
			 */
			daddr_t r;
			if (next_skip == 1) {
				r = blst_leaf_alloc(&scan[i], blk, count);
			} else {
				r = blst_meta_alloc(&scan[i], blk, count, radix, next_skip - 1);
			}
			if (r != SWAPBLK_NONE) {
				scan->u.bmu_avail -= count;
				if (scan->bm_bighint > scan->u.bmu_avail)
					scan->bm_bighint = scan->u.bmu_avail;
				return(r);
			}
		} else if (scan[i].bm_bighint == (daddr_t)-1) {
			/*
			 * Terminator
			 */
			break;
		} else if (count > radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */
			panic("blist_meta_alloc: allocation too large");
		}
		blk += radix;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (scan->bm_bighint >= count)
		scan->bm_bighint = count - 1;
	return(SWAPBLK_NONE);
}

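/*
 * Illustrative note (not part of the original source): the hint update
 * above is what makes repeated failures cheap.  For example, if a
 * request for 8 blocks fails in a subtree, that subtree's bm_bighint
 * drops to 7, so later requests for 8 or more blocks skip the subtree
 * without descending into it; a subsequent free raises the hint again
 * in blst_leaf_free() / blst_meta_free().
 */
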
/*
 * BLST_LEAF_FREE() -	free allocated block from leaf bitmap
 *
 */

static void
blst_leaf_free(
	blmeta_t *scan,
	daddr_t blk,
	int count
) {
	/*
	 * free some data in this bitmap
	 *
	 * e.g.
	 *	0000111111111110000
	 *          \_________/\__/
	 *		v        n
	 */
	int n = blk & (BLIST_BMAP_RADIX - 1);
	u_daddr_t mask;

	mask = ((u_daddr_t)-1 << n) &
	    ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	if (scan->u.bmu_bitmap & mask)
		panic("blst_radix_free: freeing free block");
	scan->u.bmu_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = BLIST_BMAP_RADIX;
}

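/*
 * Illustrative example (not part of the original source), assuming
 * BLIST_BMAP_RADIX = 32: freeing count = 3 blocks at offset n = 4
 * within the leaf builds
 *
 *	((u_daddr_t)-1 << 4)        = 0xfffffff0
 *	((u_daddr_t)-1 >> (32-3-4)) = 0x0000007f
 *	mask                        = 0x00000070	(bits 4, 5 and 6)
 *
 * i.e. exactly the three bits being freed; the panic above fires if any
 * of them is already set (already free).
 */
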
/*
 * BLST_META_FREE() - free allocated blocks from radix tree meta info
 *
 *	This support routine frees a range of blocks from the bitmap.
 *	The range must be entirely enclosed by this radix node.  If a
 *	meta node, we break the range down recursively to free blocks
 *	in subnodes (which means that this code can free an arbitrary
 *	range whereas the allocation code cannot allocate an arbitrary
 *	range).
 */

static void
blst_meta_free(
	blmeta_t *scan,
	daddr_t freeBlk,
	daddr_t count,
	daddr_t radix,
	int skip,
	daddr_t blk
) {
	int i;
	int next_skip = ((u_int)skip / BLIST_META_RADIX);

#if 0
	printf("FREE (%llx,%lld) FROM (%llx,%lld)\n",
	    (long long)freeBlk, (long long)count,
	    (long long)blk, (long long)radix
	);
#endif

	if (scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case, with possible
		 * shortcut to ALL-FREE special case.
		 */
		scan->u.bmu_avail = count;
		scan->bm_bighint = count;

		if (count != radix)  {
			for (i = 1; i <= skip; i += next_skip) {
				if (scan[i].bm_bighint == (daddr_t)-1)
					break;
				scan[i].bm_bighint = 0;
				if (next_skip == 1) {
					scan[i].u.bmu_bitmap = 0;
				} else {
					scan[i].u.bmu_avail = 0;
				}
			}
			/* fall through */
		}
	} else {
		scan->u.bmu_avail += count;
		/* scan->bm_bighint = radix; */
	}

	/*
	 * ALL-FREE special case.
	 */

	if (scan->u.bmu_avail == radix)
		return;
	if (scan->u.bmu_avail > radix)
		panic("blst_meta_free: freeing already free blocks (%lld) %lld/%lld",
		    (long long)count, (long long)scan->u.bmu_avail,
		    (long long)radix);

	/*
	 * Break the free down into its components
	 */

	radix /= BLIST_META_RADIX;

	i = (freeBlk - blk) / radix;
	blk += i * radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < freeBlk + count) {
		daddr_t v;

		v = blk + radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (daddr_t)-1)
			panic("blst_meta_free: freeing unexpected range");

		if (next_skip == 1) {
			blst_leaf_free(&scan[i], freeBlk, v);
		} else {
			blst_meta_free(&scan[i], freeBlk, v, radix, next_skip - 1, blk);
		}
		if (scan->bm_bighint < scan[i].bm_bighint)
		    scan->bm_bighint = scan[i].bm_bighint;
		count -= v;
		freeBlk += v;
		blk += radix;
		i += next_skip;
	}
}

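/*
 * Illustrative example (not part of the original source), assuming
 * BLIST_BMAP_RADIX = 32 and BLIST_META_RADIX = 16: freeing 40 blocks at
 * freeBlk = 100 under a root with radix = 512 (skip = 16, next_skip = 1,
 * child radix 32) starts at child index i = 100 / 32 = 3, i.e. scan[4]
 * with blk = 96.  The first pass frees v = 96 + 32 - 100 = 28 blocks
 * from that leaf and the loop continues with the remaining 12 blocks
 * in scan[5].
 */
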
/*
 * BLST_COPY() - copy one radix tree to another
 *
 *	Locates free space in the source tree and frees it in the destination
 *	tree.  The space must not already be free in the destination.
 */

static void blst_copy(
	blmeta_t *scan,
	daddr_t blk,
	daddr_t radix,
	daddr_t skip,
	blist_t dest,
	daddr_t count
) {
	int next_skip;
	int i;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		u_daddr_t v = scan->u.bmu_bitmap;

		if (v == (u_daddr_t)-1) {
			blist_free(dest, blk, count);
		} else if (v != 0) {
			int i;

			for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) {
				if (v & (1 << i))
					blist_free(dest, blk + i, 1);
			}
		}
		return;
	}

	/*
	 * Meta node
	 */

	if (scan->u.bmu_avail == 0) {
		/*
		 * Source all allocated, leave dest allocated
		 */
		return;
	}
	if (scan->u.bmu_avail == radix) {
		/*
		 * Source all free, free entire dest
		 */
		if (count < radix)
			blist_free(dest, blk, count);
		else
			blist_free(dest, blk, radix);
		return;
	}


	radix /= BLIST_META_RADIX;
	next_skip = ((u_int)skip / BLIST_META_RADIX);

	for (i = 1; count && i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (daddr_t)-1)
			break;

		if (count >= radix) {
			blst_copy(
			    &scan[i],
			    blk,
			    radix,
			    next_skip - 1,
			    dest,
			    radix
			);
			count -= radix;
		} else {
			if (count) {
				blst_copy(
				    &scan[i],
				    blk,
				    radix,
				    next_skip - 1,
				    dest,
				    count
				);
			}
			count = 0;
		}
		blk += radix;
	}
}

/*
 * BLST_LEAF_FILL() -	allocate specific blocks in leaf bitmap
 *
 *	This routine allocates all blocks in the specified range
 *	regardless of any existing allocations in that range.  Returns
 *	the number of blocks allocated by the call.
 */

static int
blst_leaf_fill(blmeta_t *scan, daddr_t blk, int count)
{
	int n = blk & (BLIST_BMAP_RADIX - 1);
	int nblks;
	u_daddr_t mask, bitmap;

	mask = ((u_daddr_t)-1 << n) &
	    ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	/* Count the number of blocks we're about to allocate */
	bitmap = scan->u.bmu_bitmap & mask;
	for (nblks = 0; bitmap != 0; nblks++)
		bitmap &= bitmap - 1;

	scan->u.bmu_bitmap &= ~mask;
	return nblks;
}

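/*
 * Illustrative example (not part of the original source): the counting
 * loop above is the classic 'clear the lowest set bit' population count.
 * For bitmap = 0x0000000b (binary 1011) it iterates three times,
 * 0x0b -> 0x0a -> 0x08 -> 0x00, so nblks = 3 free blocks are converted
 * to allocated when the mask is cleared.
 */
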
/*
 * BLST_META_FILL() -	allocate specific blocks at a meta node
 *
 *	This routine allocates the specified range of blocks,
 *	regardless of any existing allocations in the range.  The
 *	range must be within the extent of this node.  Returns the
 *	number of blocks allocated by the call.
 */
static int
blst_meta_fill(
	blmeta_t *scan,
	daddr_t allocBlk,
	daddr_t count,
	daddr_t radix,
	int skip,
	daddr_t blk
) {
	int i;
	int next_skip = ((u_int)skip / BLIST_META_RADIX);
	int nblks = 0;

	if (count == radix || scan->u.bmu_avail == 0)  {
		/*
		 * ALL-ALLOCATED special case
		 */
		nblks = scan->u.bmu_avail;
		scan->u.bmu_avail = 0;
		scan->bm_bighint = count;
		return nblks;
	}

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize sublevel
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (daddr_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (u_daddr_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = radix;
				scan[i].u.bmu_avail = radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	if (count > radix)
		panic("blist_meta_fill: allocation too large");

	i = (allocBlk - blk) / radix;
	blk += i * radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < allocBlk + count) {
		daddr_t v;

		v = blk + radix - allocBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (daddr_t)-1)
			panic("blst_meta_fill: filling unexpected range");

		if (next_skip == 1) {
			nblks += blst_leaf_fill(&scan[i], allocBlk, v);
		} else {
			nblks += blst_meta_fill(&scan[i], allocBlk, v,
			    radix, next_skip - 1, blk);
		}
		count -= v;
		allocBlk += v;
		blk += radix;
		i += next_skip;
	}
	scan->u.bmu_avail -= nblks;
	return nblks;
}

/*
 * BLST_RADIX_INIT() - initialize radix tree
 *
 *	Initialize our meta structures and bitmaps and calculate the exact
 *	amount of space required to manage 'count' blocks - this space may
 *	be considerably less than the calculated radix due to the large
 *	RADIX values we use.
 */

static daddr_t
blst_radix_init(blmeta_t *scan, daddr_t radix, int skip, daddr_t count)
{
	int i;
	int next_skip;
	daddr_t memindex = 0;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		if (scan) {
			scan->bm_bighint = 0;
			scan->u.bmu_bitmap = 0;
		}
		return(memindex);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */

	if (scan) {
		scan->bm_bighint = 0;
		scan->u.bmu_avail = 0;
	}

	radix /= BLIST_META_RADIX;
	next_skip = ((u_int)skip / BLIST_META_RADIX);

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= radix) {
			/*
			 * Allocate the entire object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    radix
			);
			count -= radix;
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    count
			);
			count = 0;
		} else {
			/*
			 * Add terminator and break out
			 */
			if (scan)
				scan[i].bm_bighint = (daddr_t)-1;
			break;
		}
	}
	if (memindex < i)
		memindex = i;
	return(memindex);
}

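/*
 * Illustrative example (not part of the original source), assuming
 * BLIST_BMAP_RADIX = 32 and BLIST_META_RADIX = 16: for blocks = 100,
 * blist_create() picks radix = 512 and skip = 16.  blst_radix_init()
 * then walks children scan[1]..scan[4] (four leaves cover 100 blocks),
 * writes a terminator into scan[5] and returns 5, so bl_rootblks =
 * 1 + 5 = 6 blmeta_t entries are allocated instead of the 17 a full
 * radix-512 tree would need.
 */
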
#ifdef BLIST_DEBUG

static void
blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int skip, int tab)
{
	int i;
	int next_skip;
	int lastState = 0;

	if (radix == BLIST_BMAP_RADIX) {
		printf(
		    "%*.*s(%08llx,%lld): bitmap %08llx big=%lld\n",
		    tab, tab, "",
		    (long long)blk, (long long)radix,
		    (long long)scan->u.bmu_bitmap,
		    (long long)scan->bm_bighint
		);
		return;
	}

	if (scan->u.bmu_avail == 0) {
		printf(
		    "%*.*s(%08llx,%lld) ALL ALLOCATED\n",
		    tab, tab, "",
		    (long long)blk,
		    (long long)radix
		);
		return;
	}
	if (scan->u.bmu_avail == radix) {
		printf(
		    "%*.*s(%08llx,%lld) ALL FREE\n",
		    tab, tab, "",
		    (long long)blk,
		    (long long)radix
		);
		return;
	}

	printf(
	    "%*.*s(%08llx,%lld): subtree (%lld/%lld) big=%lld {\n",
	    tab, tab, "",
	    (long long)blk, (long long)radix,
	    (long long)scan->u.bmu_avail,
	    (long long)radix,
	    (long long)scan->bm_bighint
	);

	radix /= BLIST_META_RADIX;
	next_skip = ((u_int)skip / BLIST_META_RADIX);
	tab += 4;

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (daddr_t)-1) {
			printf(
			    "%*.*s(%08llx,%lld): Terminator\n",
			    tab, tab, "",
			    (long long)blk, (long long)radix
			);
			lastState = 0;
			break;
		}
		blst_radix_print(
		    &scan[i],
		    blk,
		    radix,
		    next_skip - 1,
		    tab
		);
		blk += radix;
	}
	tab -= 4;

	printf(
	    "%*.*s}\n",
	    tab, tab, ""
	);
}

#endif

#ifdef BLIST_DEBUG

int
main(int ac, char **av)
{
	int size = 1024;
	int i;
	blist_t bl;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			size = strtol(ptr, NULL, 0);
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	bl = blist_create(size);
	blist_free(bl, 0, size);

	for (;;) {
		char buf[1024];
		daddr_t da = 0;
		daddr_t count = 0;

		printf("%lld/%lld/%lld> ", (long long)bl->bl_free,
		    (long long)size, (long long)bl->bl_radix);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'r':
			if (sscanf(buf + 1, "%lld", &count) == 1) {
				blist_resize(&bl, count, 1);
			} else {
				printf("?\n");
			}
			/* FALLTHROUGH */
		case 'p':
			blist_print(bl);
			break;
		case 'a':
			if (sscanf(buf + 1, "%lld", &count) == 1) {
				daddr_t blk = blist_alloc(bl, count);
				printf("    R=%08llx\n", (long long)blk);
			} else {
				printf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%llx %lld",
			    (long long *)&da, (long long *)&count) == 2) {
				blist_free(bl, da, count);
			} else {
				printf("?\n");
			}
			break;
		case 'l':
			if (sscanf(buf + 1, "%llx %lld",
			    (long long *)&da, (long long *)&count) == 2) {
				printf("    n=%d\n",
				    blist_fill(bl, da, count));
			} else {
				printf("?\n");
			}
			break;
		case '?':
		case 'h':
			puts(
			    "p          -print\n"
			    "a %d       -allocate\n"
			    "f %x %d    -free\n"
			    "l %x %d    -fill\n"
			    "r %d       -resize\n"
			    "h/?        -help"
			);
			break;
		default:
			printf("?\n");
			break;
		}
	}
	return(0);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	va_end(va);
	exit(1);
}

#endif