/*	$NetBSD: subr_extent.c,v 1.30 2000/05/24 02:22:36 jhawk Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#define	KMEM_IS_RUNNING		(kmem_map != NULL)
#elif defined(_EXTENT_TESTING)
/*
 * user-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define	\
malloc(s, t, flags)		malloc(s)
#define	\
free(p, t)			free(p)
#define	\
tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	\
wakeup(chan)			((void)0)
#define	\
pool_get(pool, flags)		malloc(pool->pr_size,0,0)
#define	\
pool_put(pool, rp)		free(rp,0)
#define	\
panic				printf
#define	\
splhigh()			(1)
#define	\
splx(s)				((void)(s))

#define	KMEM_IS_RUNNING			(1)
#endif

static	pool_handle_t expool_create __P((void));
static	void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *, struct extent_region *));
static	struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static	void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

static pool_handle_t expool;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
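
/*
 * For example:
 *
 *	EXTENT_ALIGN(0x1003, 0x100, 0)    == 0x1100
 *	EXTENT_ALIGN(0x1003, 0x100, 0x20) == 0x1020
 *
 * With a non-zero skew, "aligned" addresses are those congruent to
 * the skew modulo the alignment, not multiples of the alignment.
 */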

/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc()).
 */

static pool_handle_t expool_create()
{
#if defined(_KERNEL)
	expool = pool_create(sizeof(struct extent_region), 0, 0,
			     0, "extent", 0, 0, 0, 0);
#else
	expool = (pool_handle_t)malloc(sizeof(*expool),0,0);
	expool->pr_size = sizeof(struct extent_region);
#endif
	return (expool);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	const char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);
	int s;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		s = splhigh();
		if (expool == NULL)
			expool_create();
		splx(s);
		if (expool == NULL)
			return (NULL);

		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
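
/*
 * An illustrative usage sketch (not from the original code; the
 * extent_alloc() and extent_free() interfaces are declared in
 * <sys/extent.h>):
 *
 *	u_long result;
 *	struct extent *ex;
 *
 *	ex = extent_create("demo", 0x1000, 0x1fff, M_DEVBUF,
 *	    NULL, 0, EX_NOWAIT);
 *	if (ex == NULL)
 *		return (ENOMEM);
 *	if (extent_alloc(ex, 0x100, 0x10, 0, EX_NOWAIT, &result) == 0) {
 *		... use the range [result, result + 0xff] ...
 *		extent_free(ex, result, 0x100, EX_NOWAIT);
 *	}
 *	extent_destroy(ex);
 */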

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
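/*
 * The possible outcomes, sketched (illustrative):
 *
 *	|- after -|- new -|			coalesce left; "rp" freed
 *	          |- new -|- next -|		coalesce right; "rp" freed
 *	|- after -|- new -|- next -|		gap filled; "rp" and the old
 *						"next" descriptor both freed
 *	|- after -| |- new -| |- next -|	no coalescing; "rp" inserted
 */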
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		 ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				simple_unlock(&ex->ex_slock);
				error = tsleep(ex,
				    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0);
				if (error) {
					/*
					 * Free the region descriptor,
					 * which would otherwise leak.
					 */
					simple_lock(&ex->ex_slock);
					extent_free_region_descriptor(ex,
					    myrp);
					simple_unlock(&ex->ex_slock);
					return (error);
				}
				goto alloc_start;
			}
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
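
/*
 * For example, with 32-bit u_longs:
 *
 *	LE_OV(0x1000, 0x100, 0x2000)         == 1
 *	LE_OV(0xfffffff0, 0x100, 0xffffffff) == 0
 *
 * The second case fails because (x + y) wraps around to 0xf0,
 * which is less than x.
 */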

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, skew, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
  printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		 "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
      "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		 ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	     rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;
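
				/*
				 * For example (assuming EX_BOUNDZERO
				 * or ex_start == 0): with boundary
				 * 0x1000 and newstart 0x17f0,
				 * dontcross is 0x1fff, so the
				 * candidate must end at or below
				 * 0x1fff.  A candidate starting
				 * exactly at 0x1000 also gets
				 * dontcross 0x1fff, which is how a
				 * region may begin on a boundary.
				 */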

#if 0
				printf("newstart=%x newend=%x ex_start=%x ex_end=%x boundary=%x dontcross=%x\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						continue;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (dontcross > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%x newend=%x ex_start=%x ex_end=%x boundary=%x dontcross=%x\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (dontcross > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		simple_unlock(&ex->ex_slock);
		error = tsleep(ex,
		    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
		if (error) {
			/*
			 * Free the region descriptor, which would
			 * otherwise leak.
			 */
			simple_lock(&ex->ex_slock);
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (error);
		}
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}
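
/*
 * Note: in <sys/extent.h>, extent_alloc() and extent_alloc_subregion()
 * are convenience wrappers that call extent_alloc_subregion1() with a
 * zero skew (and, in extent_alloc()'s case, with the whole extent as
 * the subregion).
 */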

int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
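
	/*
	 * Case 4, illustrated: freeing [start, end] out of the middle
	 * of an existing region splits it in two:
	 *
	 *	before:	rp  = [er_start ............... er_end]
	 *	after:	rp  = [er_start, start - 1]
	 *		nrp = [end + 1, er_end]
	 */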
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;
	int s;

	/*
	 * If the kernel memory allocator is not yet running, we can't
	 * use it (obviously).
	 */
	if (KMEM_IS_RUNNING == 0)
		flags &= ~EX_MALLOCOK;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			simple_unlock(&ex->ex_slock);
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
	}

 alloc:
	s = splhigh();
	if (expool == NULL && !expool_create()) {
		splx(s);
		return (NULL);
	}

	rp = pool_get(expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splhigh();
				pool_put(expool, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splhigh();
	pool_put(expool, rp);
	splx(s);
}

void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}