/*	$NetBSD: subr_extent.c,v 1.29 1999/10/11 22:57:17 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#define	KMEM_IS_RUNNING		(kmem_map != NULL)
#elif defined(_EXTENT_TESTING)
/*
 * User-land definitions, so that this code can be built in a
 * testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#define	malloc(s, t, flags)		malloc(s)
#define	free(p, t)			free(p)
#define	tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	wakeup(chan)			((void)0)
#define	pool_get(pool, flags)		malloc((pool)->pr_size, 0, 0)
#define	pool_put(pool, rp)		free((rp), 0)
/* panic() is called with printf-style arguments; a one-arg macro breaks. */
#define	panic				printf
#define	splhigh()			(1)
#define	splx(s)				((void)(s))

#define	KMEM_IS_RUNNING			(1)
#endif

static	pool_handle_t expool_create __P((void));
static	void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *, struct extent_region *));
static	struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static	void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

static pool_handle_t expool;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
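
/*
 * Worked example (illustration only; the numbers are hypothetical):
 * with _align = 0x10 and _skew = 0, EXTENT_ALIGN(0x1005, 0x10, 0)
 * rounds up to 0x1010, and an already-aligned 0x1000 stays 0x1000.
 * A skew shifts the alignment origin: EXTENT_ALIGN(0x1005, 0x10, 3)
 * = ((0x1002 + 0xf) & ~0xf) + 3 = 0x1013, i.e. 3 past a 0x10 boundary.
 * Note that -(_align) == ~((_align) - 1) in two's complement, which
 * is why the mask trick works only for power-of-two alignments.
 */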

/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc().)
 */

static pool_handle_t
expool_create()
{
#if defined(_KERNEL)
	expool = pool_create(sizeof(struct extent_region), 0, 0,
			     0, "extent", 0, 0, 0, 0);
#else
	expool = (pool_handle_t)malloc(sizeof(*expool), 0, 0);
	expool->pr_size = sizeof(struct extent_region);
#endif
	return (expool);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	const char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);
	int s;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		s = splhigh();
		if (expool == NULL)
			expool_create();
		splx(s);
		if (expool == NULL)
			return (NULL);

		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
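
/*
 * Usage sketch (illustrative only; the names below are hypothetical):
 * a driver might manage a small I/O-port range with a fixed extent
 * whose storage is static, so no malloc() is needed at attach time.
 * EXTENT_FIXED_STORAGE_SIZE() from <sys/extent.h> sizes the storage
 * for a given number of pre-allocated region descriptors.
 *
 *	static long ioport_storage[
 *	    EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
 *	struct extent *ioport_ex;
 *
 *	ioport_ex = extent_create("ioport", 0x0, 0x3ff, M_DEVBUF,
 *	    (caddr_t)ioport_storage, sizeof(ioport_storage),
 *	    EX_NOWAIT | EX_NOCOALESCE);
 */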

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
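
/*
 * Illustration (hypothetical addresses): inserting [0x20, 0x2f] when
 * the list holds [0x00, 0x1f] and [0x30, 0x3f] first appends to the
 * left neighbor (er_end 0x1f -> 0x2f), then notices the right
 * neighbor now abuts and merges it too, leaving the single region
 * [0x00, 0x3f] and freeing two descriptors.  With EXF_NOCOALESCE the
 * list instead keeps all three regions, which is what callers want
 * when each allocation must later be freed with exactly its original
 * start and size.
 */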

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		 ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				simple_unlock(&ex->ex_slock);
				error = tsleep(ex,
				    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0);
				if (error) {
					/*
					 * Don't leak the region descriptor
					 * on an interrupted sleep.
					 */
					simple_lock(&ex->ex_slock);
					extent_free_region_descriptor(ex,
					    myrp);
					simple_unlock(&ex->ex_slock);
					return (error);
				}
				goto alloc_start;
			}
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}
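
/*
 * Example (hypothetical): claim the 8 ports of a PC serial UART at
 * 0x3f8 in the "ioport" extent sketched above.  EAGAIN means some
 * part of [0x3f8, 0x3ff] is already allocated.
 *
 *	error = extent_alloc_region(ioport_ex, 0x3f8, 8, EX_NOWAIT);
 *	if (error)
 *		return (error);
 */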

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
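
/*
 * Worked example (hypothetical values, 32-bit u_long assumed):
 * LE_OV(0xfffffffc, 0x8, 0xffffffff) is false, because 0xfffffffc +
 * 0x8 wraps to 0x4 and fails the (x + y) >= x test; a naive
 * (x + y) <= z comparison would wrongly succeed after the wrap.
 */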

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, skew, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
  printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		 "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
      "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		 ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	     rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;
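
				/*
				 * Worked example (hypothetical numbers):
				 * newstart = 0x5000, boundary = 0x1000 and
				 * ex_start = 0 give dontcross = 0x5fff.  A
				 * 0x200-byte candidate ends at 0x51ff and
				 * passes; one starting at 0x5f00 would end
				 * at 0x60ff, cross the line, and be pushed
				 * up to 0x6000 below.
				 */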

#if 0
				printf("newstart=%x newend=%x ex_start=%x ex_end=%x boundary=%x dontcross=%x\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						continue;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (dontcross > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%x newend=%x ex_start=%x ex_end=%x boundary=%x dontcross=%x\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (dontcross > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		simple_unlock(&ex->ex_slock);
		error = tsleep(ex,
		    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
		if (error) {
			/*
			 * Don't leak the region descriptor on an
			 * interrupted sleep.
			 */
			simple_lock(&ex->ex_slock);
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (error);
		}
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}
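
/*
 * Usage sketch (hypothetical values): carve a 0x100-byte chunk out of
 * an extent, aligned to 0x10, no skew, and not crossing a 0x1000-byte
 * boundary; the chosen start comes back in "addr".
 *
 *	u_long addr;
 *	error = extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
 *	    0x100, 0x10, 0, 0x1000, EX_NOWAIT, &addr);
 *
 * Callers normally use the extent_alloc() and extent_alloc_subregion()
 * interfaces documented in extent(9) for the common cases.
 */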

int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}
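
/*
 * Example (hypothetical values): with [0x00, 0xff] allocated in a
 * coalescing extent, extent_free(ex, 0x40, 0x40, EX_NOWAIT) hits
 * case 4 and splits the region into [0x00, 0x3f] and [0x80, 0xff],
 * consuming the spare descriptor "nrp".  Freeing either remaining
 * piece in full would then be case 1.
 */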

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;
	int s;

	/*
	 * If the kernel memory allocator is not yet running, we can't
	 * use it (obviously).
	 */
	if (KMEM_IS_RUNNING == 0)
		flags &= ~EX_MALLOCOK;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			simple_unlock(&ex->ex_slock);
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
	}

 alloc:
	s = splhigh();
	if (expool == NULL && !expool_create()) {
		splx(s);
		return (NULL);
	}

	rp = pool_get(expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splhigh();
				pool_put(expool, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splhigh();
	pool_put(expool, rp);
	splx(s);
}

void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}