subr_extent.c revision 1.64
      1 /*	$NetBSD: subr_extent.c,v 1.64 2007/03/13 13:25:57 dogcow Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe and Matthias Drochner.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * General purpose extent manager.
     41  */
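         /*
          * Illustrative usage sketch (an assumed caller, not part of this
          * file): create an extent covering a resource range, allocate a
          * region from it, and release the region again.  The name "demo"
          * and the numbers are invented for the example.
          *
          *	struct extent *ex;
          *	u_long addr;
          *
          *	ex = extent_create("demo", 0x1000, 0xffff, M_DEVBUF,
          *	    NULL, 0, EX_NOWAIT);
          *	if (ex == NULL)
          *		return;
          *	if (extent_alloc(ex, 0x100, 0x10, 0, EX_NOWAIT, &addr) == 0)
          *		extent_free(ex, addr, 0x100, EX_NOWAIT);
          *	extent_destroy(ex);
          */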
     42 
     43 #include <sys/cdefs.h>
     44 __KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.64 2007/03/13 13:25:57 dogcow Exp $");
     45 
     46 #ifdef _KERNEL
     47 #include "opt_lockdebug.h"
     48 
     49 #include <sys/param.h>
     50 #include <sys/extent.h>
     51 #include <sys/malloc.h>
     52 #include <sys/pool.h>
     53 #include <sys/time.h>
     54 #include <sys/systm.h>
     55 #include <sys/proc.h>
     56 #include <sys/lock.h>
     57 
     58 #include <uvm/uvm_extern.h>
     59 
     60 #define	KMEM_IS_RUNNING		(kmem_map != NULL)
     61 #elif defined(_EXTENT_TESTING)
     62 /*
      63  * user-land definitions, so this code can fit into a testing harness.
     64  */
     65 #include <sys/param.h>
     66 #include <sys/pool.h>
     67 #include <sys/extent.h>
     68 #include <errno.h>
     69 #include <stdlib.h>
     70 #include <stdio.h>
     71 #include <string.h>
     72 
     73 /*
     74  * Use multi-line #defines to avoid screwing up the kernel tags file;
     75  * without this, ctags produces a tags file where panic() shows up
     76  * in subr_extent.c rather than subr_prf.c.
     77  */
     78 #define	\
     79 malloc(s, t, flags)		malloc(s)
     80 #define	\
     81 free(p, t)			free(p)
     82 #define	\
     83 tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
     84 #define	\
     85 ltsleep(chan,pri,str,timo,lck)	(EWOULDBLOCK)
     86 #define	\
     87 wakeup(chan)			((void)0)
     88 #define	\
     89 pool_get(pool, flags)		malloc((pool)->pr_size,0,0)
     90 #define	\
     91 pool_put(pool, rp)		free(rp,0)
     92 #define	\
     93 panic(a)			printf(a)
     94 #define	\
     95 splhigh()			(1)
     96 #define	\
     97 splx(s)				((void)(s))
     98 
     99 #undef simple_lock_init
    100 #undef simple_lock
    101 #undef simple_unlock
    102 
    103 #define	\
    104 simple_lock_init(l)		((void)(l))
    105 #define	\
    106 simple_lock(l)			((void)(l))
    107 #define	\
    108 simple_unlock(l)		((void)(l))
    109 #define	KMEM_IS_RUNNING			(1)
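         /*
          * With the shims above, this file is expected to compile in
          * user-land along these lines (an assumed invocation, not part
          * of the original source):
          *
          *	cc -D_EXTENT_TESTING -I${SYSDIR} -c subr_extent.c
          */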
    110 #endif
    111 
    112 static struct pool expool;
    113 static struct simplelock expool_init_slock = SIMPLELOCK_INITIALIZER;
    114 static int expool_initialized;
    115 
    116 /*
    117  * Macro to align to an arbitrary power-of-two boundary.
    118  */
    119 #define EXTENT_ALIGN(_start, _align, _skew)		\
    120 	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
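         /*
          * Worked example (illustrative addition): for a power-of-two
          * _align, (-(_align)) is the same mask as ~((_align) - 1), so
          *
          *	EXTENT_ALIGN(0x1003, 0x10, 0) = (0x1003 + 0xf) & ~0xf
          *				      = 0x1010
          *	EXTENT_ALIGN(0x1003, 0x10, 4) = ((0xfff + 0xf) & ~0xf) + 4
          *				      = 0x1004
          *
          * i.e. the smallest value >= _start that lies _skew bytes past
          * a multiple of _align.
          */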
    121 
    122 /*
    123  * Create the extent_region pool.
    124  * (This is deferred until one of our callers thinks we can malloc()).
    125  */
    126 
    127 static inline void
    128 expool_init(void)
    129 {
    130 
    131 	simple_lock(&expool_init_slock);
    132 	if (expool_initialized) {
    133 		simple_unlock(&expool_init_slock);
    134 		return;
    135 	}
    136 
    137 #if defined(_KERNEL)
    138 	pool_init(&expool, sizeof(struct extent_region), 0, 0, 0,
    139 	    "extent", NULL, IPL_VM);
    140 #else
    141 	expool.pr_size = sizeof(struct extent_region);
    142 #endif
    143 
    144 	expool_initialized = 1;
    145 	simple_unlock(&expool_init_slock);
    146 }
    147 
    148 /*
    149  * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
    150  * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
    151  */
    152 static struct extent_region *
    153 extent_alloc_region_descriptor(struct extent *ex, int flags)
    154 {
    155 	struct extent_region *rp;
    156 	int exflags;
    157 	int s;
    158 
    159 	/*
    160 	 * If the kernel memory allocator is not yet running, we can't
    161 	 * use it (obviously).
    162 	 */
    163 	if (KMEM_IS_RUNNING == 0)
    164 		flags &= ~EX_MALLOCOK;
    165 
    166 	/*
    167 	 * XXX Make a static, create-time flags word, so we don't
    168 	 * XXX have to lock to read it!
    169 	 */
    170 	simple_lock(&ex->ex_slock);
    171 	exflags = ex->ex_flags;
    172 	simple_unlock(&ex->ex_slock);
    173 
    174 	if (exflags & EXF_FIXED) {
    175 		struct extent_fixed *fex = (struct extent_fixed *)ex;
    176 
    177 		for (;;) {
    178 			simple_lock(&ex->ex_slock);
    179 			if ((rp = LIST_FIRST(&fex->fex_freelist)) != NULL) {
    180 				/*
    181 				 * Don't muck with flags after pulling it off
    182 				 * the freelist; it may have been dynamically
    183 				 * allocated, and kindly given to us.  We
    184 				 * need to remember that information.
    185 				 */
    186 				LIST_REMOVE(rp, er_link);
    187 				simple_unlock(&ex->ex_slock);
    188 				return (rp);
    189 			}
    190 			if (flags & EX_MALLOCOK) {
    191 				simple_unlock(&ex->ex_slock);
    192 				goto alloc;
    193 			}
    194 			if ((flags & EX_WAITOK) == 0) {
    195 				simple_unlock(&ex->ex_slock);
    196 				return (NULL);
    197 			}
    198 			ex->ex_flags |= EXF_FLWANTED;
    199 			if (ltsleep(&fex->fex_freelist,
     200 			    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
    201 			    "extnt", 0, &ex->ex_slock))
    202 				return (NULL);
    203 		}
    204 	}
    205 
    206  alloc:
    207 	s = splhigh();
    208 	if (expool_initialized == 0)
    209 		expool_init();
    210 	rp = pool_get(&expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
    211 	splx(s);
    212 
    213 	if (rp != NULL)
    214 		rp->er_flags = ER_ALLOC;
    215 
    216 	return (rp);
    217 }
    218 
    219 /*
    220  * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
    221  * is safe as we do not block here.
    222  */
    223 static void
    224 extent_free_region_descriptor(struct extent *ex, struct extent_region *rp)
    225 {
    226 	int s;
    227 
    228 	if (ex->ex_flags & EXF_FIXED) {
    229 		struct extent_fixed *fex = (struct extent_fixed *)ex;
    230 
    231 		/*
    232 		 * If someone's waiting for a region descriptor,
    233 		 * be nice and give them this one, rather than
    234 		 * just free'ing it back to the system.
    235 		 */
    236 		if (rp->er_flags & ER_ALLOC) {
    237 			if (ex->ex_flags & EXF_FLWANTED) {
    238 				/* Clear all but ER_ALLOC flag. */
    239 				rp->er_flags = ER_ALLOC;
    240 				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
    241 				    er_link);
    242 				goto wake_em_up;
    243 			} else {
    244 				s = splhigh();
    245 				pool_put(&expool, rp);
    246 				splx(s);
    247 			}
    248 		} else {
    249 			/* Clear all flags. */
    250 			rp->er_flags = 0;
    251 			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
    252 		}
    253 
    254 		if (ex->ex_flags & EXF_FLWANTED) {
    255  wake_em_up:
    256 			ex->ex_flags &= ~EXF_FLWANTED;
    257 			wakeup(&fex->fex_freelist);
    258 		}
    259 		return;
    260 	}
    261 
    262 	/*
    263 	 * We know it's dynamically allocated if we get here.
    264 	 */
    265 	s = splhigh();
    266 	pool_put(&expool, rp);
    267 	splx(s);
    268 }
    269 
    270 /*
    271  * Allocate and initialize an extent map.
    272  */
    273 struct extent *
    274 extent_create(const char *name, u_long start, u_long end,
    275     struct malloc_type *mtype, void *storage, size_t storagesize, int flags)
    276 {
    277 	struct extent *ex;
    278 	char *cp = storage;
    279 	size_t sz = storagesize;
    280 	struct extent_region *rp;
    281 	int fixed_extent = (storage != NULL);
    282 	int s;
    283 
    284 #ifdef DIAGNOSTIC
    285 	/* Check arguments. */
    286 	if (name == NULL)
    287 		panic("extent_create: name == NULL");
    288 	if (end < start) {
    289 		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
    290 		    name, start, end);
    291 		panic("extent_create: end < start");
    292 	}
    293 	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
    294 		panic("extent_create: fixed extent, bad storagesize 0x%lx",
    295 		    (u_long)storagesize);
    296 	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
    297 		panic("extent_create: storage provided for non-fixed");
    298 #endif
    299 
    300 	/* Allocate extent descriptor. */
    301 	if (fixed_extent) {
    302 		struct extent_fixed *fex;
    303 
    304 		memset(storage, 0, storagesize);
    305 
    306 		/*
    307 		 * Align all descriptors on "long" boundaries.
    308 		 */
    309 		fex = (struct extent_fixed *)cp;
    310 		ex = (struct extent *)fex;
    311 		cp += ALIGN(sizeof(struct extent_fixed));
    312 		sz -= ALIGN(sizeof(struct extent_fixed));
    313 		fex->fex_storage = storage;
    314 		fex->fex_storagesize = storagesize;
    315 
    316 		/*
    317 		 * In a fixed extent, we have to pre-allocate region
    318 		 * descriptors and place them in the extent's freelist.
    319 		 */
    320 		LIST_INIT(&fex->fex_freelist);
    321 		while (sz >= ALIGN(sizeof(struct extent_region))) {
    322 			rp = (struct extent_region *)cp;
    323 			cp += ALIGN(sizeof(struct extent_region));
    324 			sz -= ALIGN(sizeof(struct extent_region));
    325 			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
    326 		}
    327 	} else {
    328 		s = splhigh();
    329 		if (expool_initialized == 0)
    330 			expool_init();
    331 		splx(s);
    332 
    333 		ex = (struct extent *)malloc(sizeof(struct extent),
    334 		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
    335 		if (ex == NULL)
    336 			return (NULL);
    337 	}
    338 
    339 	/* Fill in the extent descriptor and return it to the caller. */
    340 	simple_lock_init(&ex->ex_slock);
    341 	LIST_INIT(&ex->ex_regions);
    342 	ex->ex_name = name;
    343 	ex->ex_start = start;
    344 	ex->ex_end = end;
    345 	ex->ex_mtype = mtype;
    346 	ex->ex_flags = 0;
    347 	if (fixed_extent)
    348 		ex->ex_flags |= EXF_FIXED;
    349 	if (flags & EX_NOCOALESCE)
    350 		ex->ex_flags |= EXF_NOCOALESCE;
    351 	return (ex);
    352 }
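         /*
          * A minimal fixed-extent sketch (an assumed caller, not part of
          * this file): caller-supplied static storage lets the extent be
          * used before malloc() is running, e.g. while bootstrapping bus
          * space.  EXTENT_FIXED_STORAGE_SIZE() comes from <sys/extent.h>;
          * the "8" pre-allocates eight region descriptors.
          *
          *	static char exstore[EXTENT_FIXED_STORAGE_SIZE(8)];
          *	struct extent *ioex;
          *
          *	ioex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
          *	    exstore, sizeof(exstore), EX_NOWAIT | EX_NOCOALESCE);
          */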
    353 
    354 /*
    355  * Destroy an extent map.
    356  * Since we're freeing the data, there can't be any references
    357  * so we don't need any locking.
    358  */
    359 void
    360 extent_destroy(struct extent *ex)
    361 {
    362 	struct extent_region *rp, *orp;
    363 
    364 #ifdef DIAGNOSTIC
    365 	/* Check arguments. */
    366 	if (ex == NULL)
    367 		panic("extent_destroy: NULL extent");
    368 #endif
    369 
    370 	/* Free all region descriptors in extent. */
    371 	for (rp = LIST_FIRST(&ex->ex_regions); rp != NULL; ) {
    372 		orp = rp;
    373 		rp = LIST_NEXT(rp, er_link);
    374 		LIST_REMOVE(orp, er_link);
    375 		extent_free_region_descriptor(ex, orp);
    376 	}
    377 
    378 	/* If we're not a fixed extent, free the extent descriptor itself. */
    379 	if ((ex->ex_flags & EXF_FIXED) == 0)
    380 		free(ex, ex->ex_mtype);
    381 }
    382 
    383 /*
    384  * Insert a region descriptor into the sorted region list after the
    385  * entry "after" or at the head of the list (if "after" is NULL).
    386  * The region descriptor we insert is passed in "rp".  We must
    387  * allocate the region descriptor before calling this function!
    388  * If we don't need the region descriptor, it will be freed here.
    389  */
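         /*
          * Coalescing example (illustrative addition): given existing
          * regions [0x00-0x09] and [0x14-0x1d], inserting [0x0a-0x13]
          * after the first region both appends to it and meets the
          * second, so the list collapses to the single region
          * [0x00-0x1d]; the second region's descriptor and the
          * caller-supplied "rp" are both freed.
          */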
    390 static void
    391 extent_insert_and_optimize(struct extent *ex, u_long start, u_long size,
    392     int flags, struct extent_region *after, struct extent_region *rp)
    393 {
    394 	struct extent_region *nextr;
    395 	int appended = 0;
    396 
    397 	if (after == NULL) {
    398 		/*
    399 		 * We're the first in the region list.  If there's
    400 		 * a region after us, attempt to coalesce to save
    401 		 * descriptor overhead.
    402 		 */
    403 		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
    404 		    (LIST_FIRST(&ex->ex_regions) != NULL) &&
    405 		    ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) {
    406 			/*
    407 			 * We can coalesce.  Prepend us to the first region.
    408 			 */
    409 			LIST_FIRST(&ex->ex_regions)->er_start = start;
    410 			extent_free_region_descriptor(ex, rp);
    411 			return;
    412 		}
    413 
    414 		/*
    415 		 * Can't coalesce.  Fill in the region descriptor
     416 		 * and insert us at the head of the region list.
    417 		 */
    418 		rp->er_start = start;
    419 		rp->er_end = start + (size - 1);
    420 		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
    421 		return;
    422 	}
    423 
    424 	/*
    425 	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
    426 	 */
    427 	if (ex->ex_flags & EXF_NOCOALESCE)
    428 		goto cant_coalesce;
    429 
    430 	/*
    431 	 * Attempt to coalesce with the region before us.
    432 	 */
    433 	if ((after->er_end + 1) == start) {
    434 		/*
    435 		 * We can coalesce.  Append ourselves and make
    436 		 * note of it.
    437 		 */
    438 		after->er_end = start + (size - 1);
    439 		appended = 1;
    440 	}
    441 
    442 	/*
    443 	 * Attempt to coalesce with the region after us.
    444 	 */
    445 	if ((LIST_NEXT(after, er_link) != NULL) &&
    446 	    ((start + size) == LIST_NEXT(after, er_link)->er_start)) {
    447 		/*
    448 		 * We can coalesce.  Note that if we appended ourselves
    449 		 * to the previous region, we exactly fit the gap, and
    450 		 * can free the "next" region descriptor.
    451 		 */
    452 		if (appended) {
    453 			/*
    454 			 * Yup, we can free it up.
    455 			 */
    456 			after->er_end = LIST_NEXT(after, er_link)->er_end;
    457 			nextr = LIST_NEXT(after, er_link);
    458 			LIST_REMOVE(nextr, er_link);
    459 			extent_free_region_descriptor(ex, nextr);
    460 		} else {
    461 			/*
    462 			 * Nope, just prepend us to the next region.
    463 			 */
    464 			LIST_NEXT(after, er_link)->er_start = start;
    465 		}
    466 
    467 		extent_free_region_descriptor(ex, rp);
    468 		return;
    469 	}
    470 
    471 	/*
    472 	 * We weren't able to coalesce with the next region, but
    473 	 * we don't need to allocate a region descriptor if we
    474 	 * appended ourselves to the previous region.
    475 	 */
    476 	if (appended) {
    477 		extent_free_region_descriptor(ex, rp);
    478 		return;
    479 	}
    480 
    481  cant_coalesce:
    482 
    483 	/*
    484 	 * Fill in the region descriptor and insert ourselves
    485 	 * into the region list.
    486 	 */
    487 	rp->er_start = start;
    488 	rp->er_end = start + (size - 1);
    489 	LIST_INSERT_AFTER(after, rp, er_link);
    490 }
    491 
    492 /*
    493  * Allocate a specific region in an extent map.
    494  */
    495 int
    496 extent_alloc_region(struct extent *ex, u_long start, u_long size, int flags)
    497 {
    498 	struct extent_region *rp, *last, *myrp;
    499 	u_long end = start + (size - 1);
    500 	int error;
    501 
    502 #ifdef DIAGNOSTIC
    503 	/* Check arguments. */
    504 	if (ex == NULL)
    505 		panic("extent_alloc_region: NULL extent");
    506 	if (size < 1) {
    507 		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
    508 		    ex->ex_name, size);
    509 		panic("extent_alloc_region: bad size");
    510 	}
    511 	if (end < start) {
    512 		printf(
    513 		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
    514 		 ex->ex_name, start, size);
    515 		panic("extent_alloc_region: overflow");
    516 	}
    517 #endif
    518 #ifdef LOCKDEBUG
    519 	if (flags & EX_WAITSPACE)
    520 		ASSERT_SLEEPABLE(NULL, "extent_alloc_region(EX_WAITSPACE)");
    521 #endif
    522 
    523 	/*
    524 	 * Make sure the requested region lies within the
    525 	 * extent.
    526 	 *
    527 	 * We don't lock to check the range, because those values
    528 	 * are never modified, and if another thread deletes the
    529 	 * extent, we're screwed anyway.
    530 	 */
    531 	if ((start < ex->ex_start) || (end > ex->ex_end)) {
    532 #ifdef DIAGNOSTIC
    533 		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
    534 		    ex->ex_name, ex->ex_start, ex->ex_end);
    535 		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
    536 		    start, end);
    537 		panic("extent_alloc_region: region lies outside extent");
    538 #else
    539 		return (EINVAL);
    540 #endif
    541 	}
    542 
    543 	/*
    544 	 * Allocate the region descriptor.  It will be freed later
    545 	 * if we can coalesce with another region.  Don't lock before
    546 	 * here!  This could block.
    547 	 */
    548 	myrp = extent_alloc_region_descriptor(ex, flags);
    549 	if (myrp == NULL) {
    550 #ifdef DIAGNOSTIC
    551 		printf(
    552 		    "extent_alloc_region: can't allocate region descriptor\n");
    553 #endif
    554 		return (ENOMEM);
    555 	}
    556 
    557  alloc_start:
    558 	simple_lock(&ex->ex_slock);
    559 
    560 	/*
    561 	 * Attempt to place ourselves in the desired area of the
    562 	 * extent.  We save ourselves some work by keeping the list sorted.
    563 	 * In other words, if the start of the current region is greater
    564 	 * than the end of our region, we don't have to search any further.
    565 	 */
    566 
    567 	/*
    568 	 * Keep a pointer to the last region we looked at so
    569 	 * that we don't have to traverse the list again when
    570 	 * we insert ourselves.  If "last" is NULL when we
    571 	 * finally insert ourselves, we go at the head of the
    572 	 * list.  See extent_insert_and_optimize() for details.
    573 	 */
    574 	last = NULL;
    575 
    576 	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
    577 		if (rp->er_start > end) {
    578 			/*
    579 			 * We lie before this region and don't
    580 			 * conflict.
    581 			 */
    582 			break;
    583 		}
    584 
    585 		/*
    586 		 * The current region begins before we end.
    587 		 * Check for a conflict.
    588 		 */
    589 		if (rp->er_end >= start) {
    590 			/*
    591 			 * We conflict.  If we can (and want to) wait,
    592 			 * do so.
    593 			 */
    594 			if (flags & EX_WAITSPACE) {
    595 				ex->ex_flags |= EXF_WANTED;
    596 				error = ltsleep(ex,
    597 				    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
    598 				    "extnt", 0, &ex->ex_slock);
    599 				if (error == 0)
    600 					goto alloc_start;
    601 			} else {
    602 				simple_unlock(&ex->ex_slock);
    603 				error = EAGAIN;
    604 			}
    605 			extent_free_region_descriptor(ex, myrp);
    606 			return error;
    607 		}
    608 		/*
    609 		 * We don't conflict, but this region lies before
    610 		 * us.  Keep a pointer to this region, and keep
    611 		 * trying.
    612 		 */
    613 		last = rp;
    614 	}
    615 
    616 	/*
    617 	 * We don't conflict with any regions.  "last" points
    618 	 * to the region we fall after, or is NULL if we belong
    619 	 * at the beginning of the region list.  Insert ourselves.
    620 	 */
    621 	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
    622 	simple_unlock(&ex->ex_slock);
    623 	return (0);
    624 }
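         /*
          * Usage sketch (an assumed caller, not part of this file):
          * claim a fixed, well-known window, e.g. legacy serial
          * registers.
          *
          *	if (extent_alloc_region(ioex, 0x3f8, 8, EX_NOWAIT) != 0)
          *		printf("com0: i/o range busy\n");
          */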
    625 
    626 /*
    627  * Macro to check (x + y) <= z.  This check is designed to fail
    628  * if an overflow occurs.
    629  */
    630 #define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
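         /*
          * Worked example (illustrative addition, 32-bit u_long):
          * LE_OV(0xfffffff0, 0x20, 0xffffffff) is false because
          * 0xfffffff0 + 0x20 wraps to 0x10; the first clause catches the
          * wrap before the bogus small sum can pass the "<= z" test.
          */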
    631 
    632 /*
    633  * Allocate a region in an extent map subregion.
    634  *
    635  * If EX_FAST is specified, we return the first fit in the map.
    636  * Otherwise, we try to minimize fragmentation by finding the
    637  * smallest gap that will hold the request.
    638  *
    639  * The allocated region is aligned to "alignment", which must be
    640  * a power of 2.
    641  */
    642 int
    643 extent_alloc_subregion1(struct extent *ex, u_long substart, u_long subend,
    644     u_long size, u_long alignment, u_long skew, u_long boundary,
    645     int flags, u_long *result)
    646 {
    647 	struct extent_region *rp, *myrp, *last, *bestlast;
    648 	u_long newstart, newend, exend, beststart, bestovh, ovh;
    649 	u_long dontcross;
    650 	int error;
    651 
    652 #ifdef DIAGNOSTIC
    653 	/*
    654 	 * Check arguments.
    655 	 *
    656 	 * We don't lock to check these, because these values
    657 	 * are never modified, and if another thread deletes the
    658 	 * extent, we're screwed anyway.
    659 	 */
    660 	if (ex == NULL)
    661 		panic("extent_alloc_subregion: NULL extent");
    662 	if (result == NULL)
    663 		panic("extent_alloc_subregion: NULL result pointer");
    664 	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
    665 	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
    666   printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
    667 		    ex->ex_name, ex->ex_start, ex->ex_end);
    668 		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
    669 		    substart, subend);
    670 		panic("extent_alloc_subregion: bad subregion");
    671 	}
    672 	if ((size < 1) || ((size - 1) > (subend - substart))) {
    673 		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
    674 		    ex->ex_name, size);
    675 		panic("extent_alloc_subregion: bad size");
    676 	}
    677 	if (alignment == 0)
    678 		panic("extent_alloc_subregion: bad alignment");
    679 	if (boundary && (boundary < size)) {
    680 		printf(
    681 		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
    682 		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
    683 		panic("extent_alloc_subregion: bad boundary");
    684 	}
    685 #endif
    686 #ifdef LOCKDEBUG
    687 	if (flags & EX_WAITSPACE)
    688 		ASSERT_SLEEPABLE(NULL, "extent_alloc_subregion1(EX_WAITSPACE)");
    689 #endif
    690 
    691 	/*
    692 	 * Allocate the region descriptor.  It will be freed later
    693 	 * if we can coalesce with another region.  Don't lock before
    694 	 * here!  This could block.
    695 	 */
    696 	myrp = extent_alloc_region_descriptor(ex, flags);
    697 	if (myrp == NULL) {
    698 #ifdef DIAGNOSTIC
    699 		printf(
    700 		 "extent_alloc_subregion: can't allocate region descriptor\n");
    701 #endif
    702 		return (ENOMEM);
    703 	}
    704 
    705  alloc_start:
    706 	simple_lock(&ex->ex_slock);
    707 
    708 	/*
    709 	 * Keep a pointer to the last region we looked at so
    710 	 * that we don't have to traverse the list again when
    711 	 * we insert ourselves.  If "last" is NULL when we
    712 	 * finally insert ourselves, we go at the head of the
     713 	 * list.  See extent_insert_and_optimize() for details.
    714 	 */
    715 	last = NULL;
    716 
    717 	/*
    718 	 * Keep track of size and location of the smallest
    719 	 * chunk we fit in.
    720 	 *
    721 	 * Since the extent can be as large as the numeric range
    722 	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
    723 	 * best overhead value can be the maximum unsigned integer.
    724 	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
    725 	 * into the region list immediately on an exact match (which
    726 	 * is the only case where "bestovh" would be set to 0).
    727 	 */
    728 	bestovh = 0;
    729 	beststart = 0;
    730 	bestlast = NULL;
    731 
    732 	/*
    733 	 * Keep track of end of free region.  This is either the end of extent
    734 	 * or the start of a region past the subend.
    735 	 */
    736 	exend = ex->ex_end;
    737 
    738 	/*
    739 	 * For N allocated regions, we must make (N + 1)
    740 	 * checks for unallocated space.  The first chunk we
    741 	 * check is the area from the beginning of the subregion
    742 	 * to the first allocated region after that point.
    743 	 */
    744 	newstart = EXTENT_ALIGN(substart, alignment, skew);
    745 	if (newstart < ex->ex_start) {
    746 #ifdef DIAGNOSTIC
    747 		printf(
    748       "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
    749 		 ex->ex_name, ex->ex_start, ex->ex_end, alignment);
    750 		simple_unlock(&ex->ex_slock);
    751 		panic("extent_alloc_subregion: overflow after alignment");
    752 #else
    753 		extent_free_region_descriptor(ex, myrp);
    754 		simple_unlock(&ex->ex_slock);
    755 		return (EINVAL);
    756 #endif
    757 	}
    758 
    759 	/*
    760 	 * Find the first allocated region that begins on or after
    761 	 * the subregion start, advancing the "last" pointer along
    762 	 * the way.
    763 	 */
    764 	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
    765 		if (rp->er_start >= newstart)
    766 			break;
    767 		last = rp;
    768 	}
    769 
    770 	/*
    771 	 * Relocate the start of our candidate region to the end of
    772 	 * the last allocated region (if there was one overlapping
    773 	 * our subrange).
    774 	 */
    775 	if (last != NULL && last->er_end >= newstart)
    776 		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);
    777 
    778 	for (; rp != NULL; rp = LIST_NEXT(rp, er_link)) {
    779 		/*
     780 		 * If the region starts past the subend, bail out and see
    781 		 * if we fit against the subend.
    782 		 */
    783 		if (rp->er_start > subend) {
    784 			exend = rp->er_start;
    785 			break;
    786 		}
    787 
    788 		/*
    789 		 * Check the chunk before "rp".  Note that our
    790 		 * comparison is safe from overflow conditions.
    791 		 */
    792 		if (LE_OV(newstart, size, rp->er_start)) {
    793 			/*
    794 			 * Do a boundary check, if necessary.  Note
    795 			 * that a region may *begin* on the boundary,
    796 			 * but it must end before the boundary.
    797 			 */
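         			/*
         			 * Numeric example (illustrative addition):
         			 * with boundary = 0x1000, ex_start = 0,
         			 * newstart = 0xf00 and size = 0x200,
         			 * dontcross computes to 0xfff; the candidate
         			 * [0xf00-0x10ff] would cross it, so newstart
         			 * is advanced to 0x1000 below.
         			 */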
    798 			if (boundary) {
    799 				newend = newstart + (size - 1);
    800 
    801 				/*
    802 				 * Calculate the next boundary after the start
    803 				 * of this region.
    804 				 */
    805 				dontcross = EXTENT_ALIGN(newstart+1, boundary,
    806 				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
    807 				    - 1;
    808 
    809 #if 0
    810 				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
    811 				    newstart, newend, ex->ex_start, ex->ex_end,
    812 				    boundary, dontcross);
    813 #endif
    814 
    815 				/* Check for overflow */
    816 				if (dontcross < ex->ex_start)
    817 					dontcross = ex->ex_end;
    818 				else if (newend > dontcross) {
    819 					/*
    820 					 * Candidate region crosses boundary.
    821 					 * Throw away the leading part and see
    822 					 * if we still fit.
    823 					 */
    824 					newstart = dontcross + 1;
    825 					newend = newstart + (size - 1);
    826 					dontcross += boundary;
    827 					if (!LE_OV(newstart, size, rp->er_start))
    828 						goto skip;
    829 				}
    830 
    831 				/*
    832 				 * If we run past the end of
    833 				 * the extent or the boundary
    834 				 * overflows, then the request
    835 				 * can't fit.
    836 				 */
    837 				if (newstart + size - 1 > ex->ex_end ||
    838 				    dontcross < newstart)
    839 					goto fail;
    840 			}
    841 
    842 			/*
    843 			 * We would fit into this space.  Calculate
    844 			 * the overhead (wasted space).  If we exactly
    845 			 * fit, or we're taking the first fit, insert
    846 			 * ourselves into the region list.
    847 			 */
    848 			ovh = rp->er_start - newstart - size;
    849 			if ((flags & EX_FAST) || (ovh == 0))
    850 				goto found;
    851 
    852 			/*
    853 			 * Don't exactly fit, but check to see
    854 			 * if we're better than any current choice.
    855 			 */
    856 			if ((bestovh == 0) || (ovh < bestovh)) {
    857 				bestovh = ovh;
    858 				beststart = newstart;
    859 				bestlast = last;
    860 			}
    861 		}
    862 
    863 skip:
    864 		/*
    865 		 * Skip past the current region and check again.
    866 		 */
    867 		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
    868 		if (newstart < rp->er_end) {
    869 			/*
    870 			 * Overflow condition.  Don't error out, since
    871 			 * we might have a chunk of space that we can
    872 			 * use.
    873 			 */
    874 			goto fail;
    875 		}
    876 
    877 		last = rp;
    878 	}
    879 
    880 	/*
    881 	 * The final check is from the current starting point to the
    882 	 * end of the subregion.  If there were no allocated regions,
    883 	 * "newstart" is set to the beginning of the subregion, or
    884 	 * just past the end of the last allocated region, adjusted
    885 	 * for alignment in either case.
    886 	 */
    887 	if (LE_OV(newstart, (size - 1), subend)) {
    888 		/*
    889 		 * Do a boundary check, if necessary.  Note
    890 		 * that a region may *begin* on the boundary,
    891 		 * but it must end before the boundary.
    892 		 */
    893 		if (boundary) {
    894 			newend = newstart + (size - 1);
    895 
    896 			/*
    897 			 * Calculate the next boundary after the start
    898 			 * of this region.
    899 			 */
    900 			dontcross = EXTENT_ALIGN(newstart+1, boundary,
    901 			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
    902 			    - 1;
    903 
    904 #if 0
    905 			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
    906 			    newstart, newend, ex->ex_start, ex->ex_end,
    907 			    boundary, dontcross);
    908 #endif
    909 
    910 			/* Check for overflow */
    911 			if (dontcross < ex->ex_start)
    912 				dontcross = ex->ex_end;
    913 			else if (newend > dontcross) {
    914 				/*
    915 				 * Candidate region crosses boundary.
    916 				 * Throw away the leading part and see
    917 				 * if we still fit.
    918 				 */
    919 				newstart = dontcross + 1;
    920 				newend = newstart + (size - 1);
    921 				dontcross += boundary;
    922 				if (!LE_OV(newstart, (size - 1), subend))
    923 					goto fail;
    924 			}
    925 
    926 			/*
    927 			 * If we run past the end of
    928 			 * the extent or the boundary
    929 			 * overflows, then the request
    930 			 * can't fit.
    931 			 */
    932 			if (newstart + size - 1 > ex->ex_end ||
    933 			    dontcross < newstart)
    934 				goto fail;
    935 		}
    936 
    937 		/*
    938 		 * We would fit into this space.  Calculate
    939 		 * the overhead (wasted space).  If we exactly
    940 		 * fit, or we're taking the first fit, insert
    941 		 * ourselves into the region list.
    942 		 */
    943 		ovh = exend - newstart - (size - 1);
    944 		if ((flags & EX_FAST) || (ovh == 0))
    945 			goto found;
    946 
    947 		/*
    948 		 * Don't exactly fit, but check to see
    949 		 * if we're better than any current choice.
    950 		 */
    951 		if ((bestovh == 0) || (ovh < bestovh)) {
    952 			bestovh = ovh;
    953 			beststart = newstart;
    954 			bestlast = last;
    955 		}
    956 	}
    957 
    958  fail:
    959 	/*
     960 	 * One of the following two conditions has
    961 	 * occurred:
    962 	 *
    963 	 *	There is no chunk large enough to hold the request.
    964 	 *
    965 	 *	If EX_FAST was not specified, there is not an
    966 	 *	exact match for the request.
    967 	 *
    968 	 * Note that if we reach this point and EX_FAST is
    969 	 * set, then we know there is no space in the extent for
    970 	 * the request.
    971 	 */
    972 	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
    973 		/*
    974 		 * We have a match that's "good enough".
    975 		 */
    976 		newstart = beststart;
    977 		last = bestlast;
    978 		goto found;
    979 	}
    980 
    981 	/*
    982 	 * No space currently available.  Wait for it to free up,
    983 	 * if possible.
    984 	 */
    985 	if (flags & EX_WAITSPACE) {
    986 		ex->ex_flags |= EXF_WANTED;
    987 		error = ltsleep(ex,
    988 		    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
    989 		    "extnt", 0, &ex->ex_slock);
    990 		if (error == 0)
    991 			goto alloc_start;
    992 	} else {
    993 		simple_unlock(&ex->ex_slock);
    994 		error = EAGAIN;
    995 	}
    996 
    997 	extent_free_region_descriptor(ex, myrp);
    998 	return error;
    999 
   1000  found:
   1001 	/*
   1002 	 * Insert ourselves into the region list.
   1003 	 */
   1004 	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
   1005 	simple_unlock(&ex->ex_slock);
   1006 	*result = newstart;
   1007 	return (0);
   1008 }
   1009 
   1010 int
   1011 extent_alloc_subregion(struct extent *ex, u_long start, u_long end, u_long size,
   1012     u_long alignment, u_long boundary, int flags, u_long *result)
   1013 {
   1014 
   1015 	return (extent_alloc_subregion1(ex, start, end, size, alignment,
   1016 					0, boundary, flags, result));
   1017 }
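         /*
          * Usage sketch (an assumed caller, not part of this file): take
          * a 0x1000-byte chunk out of a sub-range, 0x1000-aligned and
          * not crossing a 0x10000 boundary.
          *
          *	u_long addr;
          *	int error;
          *
          *	error = extent_alloc_subregion(dmaex, 0x100000, 0x1fffff,
          *	    0x1000, 0x1000, 0x10000, EX_NOWAIT, &addr);
          */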
   1018 
   1019 int
   1020 extent_alloc(struct extent *ex, u_long size, u_long alignment, u_long boundary,
   1021     int flags, u_long *result)
   1022 {
   1023 
   1024 	return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
   1025 					size, alignment, 0, boundary,
   1026 					flags, result));
   1027 }
   1028 
   1029 int
   1030 extent_alloc1(struct extent *ex, u_long size, u_long alignment, u_long skew,
   1031     u_long boundary, int flags, u_long *result)
   1032 {
   1033 
   1034 	return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
   1035 					size, alignment, skew, boundary,
   1036 					flags, result));
   1037 }
   1038 
   1039 int
   1040 extent_free(struct extent *ex, u_long start, u_long size, int flags)
   1041 {
   1042 	struct extent_region *rp, *nrp = NULL;
   1043 	u_long end = start + (size - 1);
   1044 	int coalesce;
   1045 
   1046 #ifdef DIAGNOSTIC
   1047 	/*
   1048 	 * Check arguments.
   1049 	 *
   1050 	 * We don't lock to check these, because these values
   1051 	 * are never modified, and if another thread deletes the
   1052 	 * extent, we're screwed anyway.
   1053 	 */
   1054 	if (ex == NULL)
   1055 		panic("extent_free: NULL extent");
   1056 	if ((start < ex->ex_start) || (end > ex->ex_end)) {
   1057 		extent_print(ex);
   1058 		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
   1059 		    ex->ex_name, start, size);
   1060 		panic("extent_free: extent `%s', region not within extent",
   1061 		    ex->ex_name);
   1062 	}
   1063 	/* Check for an overflow. */
   1064 	if (end < start) {
   1065 		extent_print(ex);
   1066 		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
   1067 		    ex->ex_name, start, size);
   1068 		panic("extent_free: overflow");
   1069 	}
   1070 #endif
   1071 
   1072 	/*
   1073 	 * If we're allowing coalescing, we must allocate a region
   1074 	 * descriptor now, since it might block.
   1075 	 *
   1076 	 * XXX Make a static, create-time flags word, so we don't
   1077 	 * XXX have to lock to read it!
   1078 	 */
   1079 	simple_lock(&ex->ex_slock);
   1080 	coalesce = (ex->ex_flags & EXF_NOCOALESCE) == 0;
   1081 	simple_unlock(&ex->ex_slock);
   1082 
   1083 	if (coalesce) {
   1084 		/* Allocate a region descriptor. */
   1085 		nrp = extent_alloc_region_descriptor(ex, flags);
   1086 		if (nrp == NULL)
   1087 			return (ENOMEM);
   1088 	}
   1089 
   1090 	simple_lock(&ex->ex_slock);
   1091 
   1092 	/*
   1093 	 * Find region and deallocate.  Several possibilities:
   1094 	 *
   1095 	 *	1. (start == er_start) && (end == er_end):
   1096 	 *	   Free descriptor.
   1097 	 *
   1098 	 *	2. (start == er_start) && (end < er_end):
   1099 	 *	   Adjust er_start.
   1100 	 *
   1101 	 *	3. (start > er_start) && (end == er_end):
   1102 	 *	   Adjust er_end.
   1103 	 *
   1104 	 *	4. (start > er_start) && (end < er_end):
   1105 	 *	   Fragment region.  Requires descriptor alloc.
   1106 	 *
   1107 	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
   1108 	 * is not set.
   1109 	 */
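         	/*
         	 * Case 4 example (illustrative addition): freeing [0x5-0x6]
         	 * out of an allocated region [0x0-0x9] leaves [0x0-0x4] in
         	 * the existing descriptor and puts [0x7-0x9] into the
         	 * pre-allocated "nrp".
         	 */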
   1110 	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
   1111 		/*
   1112 		 * Save ourselves some comparisons; does the current
    1113 		 * region end before the chunk to be freed begins?  If so,
   1114 		 * then we haven't found the appropriate region descriptor.
   1115 		 */
   1116 		if (rp->er_end < start)
   1117 			continue;
   1118 
   1119 		/*
   1120 		 * Save ourselves some traversal; does the current
   1121 		 * region begin after the chunk to be freed ends?  If so,
   1122 		 * then we've already passed any possible region descriptors
   1123 		 * that might have contained the chunk to be freed.
   1124 		 */
   1125 		if (rp->er_start > end)
   1126 			break;
   1127 
   1128 		/* Case 1. */
   1129 		if ((start == rp->er_start) && (end == rp->er_end)) {
   1130 			LIST_REMOVE(rp, er_link);
   1131 			extent_free_region_descriptor(ex, rp);
   1132 			goto done;
   1133 		}
   1134 
   1135 		/*
   1136 		 * The following cases all require that EXF_NOCOALESCE
   1137 		 * is not set.
   1138 		 */
   1139 		if (!coalesce)
   1140 			continue;
   1141 
   1142 		/* Case 2. */
   1143 		if ((start == rp->er_start) && (end < rp->er_end)) {
   1144 			rp->er_start = (end + 1);
   1145 			goto done;
   1146 		}
   1147 
   1148 		/* Case 3. */
   1149 		if ((start > rp->er_start) && (end == rp->er_end)) {
   1150 			rp->er_end = (start - 1);
   1151 			goto done;
   1152 		}
   1153 
   1154 		/* Case 4. */
   1155 		if ((start > rp->er_start) && (end < rp->er_end)) {
   1156 			/* Fill in new descriptor. */
   1157 			nrp->er_start = end + 1;
   1158 			nrp->er_end = rp->er_end;
   1159 
   1160 			/* Adjust current descriptor. */
   1161 			rp->er_end = start - 1;
   1162 
   1163 			/* Insert new descriptor after current. */
   1164 			LIST_INSERT_AFTER(rp, nrp, er_link);
   1165 
   1166 			/* We used the new descriptor, so don't free it below */
   1167 			nrp = NULL;
   1168 			goto done;
   1169 		}
   1170 	}
   1171 
   1172 	/* Region not found, or request otherwise invalid. */
   1173 	simple_unlock(&ex->ex_slock);
   1174 	extent_print(ex);
   1175 	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
   1176 	panic("extent_free: region not found");
   1177 
   1178  done:
   1179 	if (nrp != NULL)
   1180 		extent_free_region_descriptor(ex, nrp);
   1181 	if (ex->ex_flags & EXF_WANTED) {
   1182 		ex->ex_flags &= ~EXF_WANTED;
   1183 		wakeup(ex);
   1184 	}
   1185 	simple_unlock(&ex->ex_slock);
   1186 	return (0);
   1187 }
   1188 
   1189 void
   1190 extent_print(struct extent *ex)
   1191 {
   1192 	struct extent_region *rp;
   1193 
   1194 	if (ex == NULL)
   1195 		panic("extent_print: NULL extent");
   1196 
   1197 	simple_lock(&ex->ex_slock);
   1198 
   1199 	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
   1200 	    ex->ex_start, ex->ex_end, ex->ex_flags);
   1201 
   1202 	LIST_FOREACH(rp, &ex->ex_regions, er_link)
   1203 		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);
   1204 
   1205 	simple_unlock(&ex->ex_slock);
   1206 }
   1207