/*	$NetBSD: subr_extent.c,v 1.62.2.2 2007/03/13 17:50:57 ad Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.62.2.2 2007/03/13 17:50:57 ad Exp $");

#ifdef _KERNEL
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>

#include <uvm/uvm_extern.h>

#define	KMEM_IS_RUNNING		(kmem_map != NULL)
#elif defined(_EXTENT_TESTING)
/*
 * User-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define	\
malloc(s, t, flags)		malloc(s)
#define	\
free(p, t)			free(p)
#define	\
tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	\
ltsleep(chan, pri, str, timo, lck)	(EWOULDBLOCK)
#define	\
wakeup(chan)			((void)0)
#define	\
pool_get(pool, flags)		malloc((pool)->pr_size, 0, 0)
#define	\
pool_put(pool, rp)		free(rp, 0)
#define	\
panic(a)			printf(a)
#define	\
splhigh()			(1)
#define	\
splx(s)				((void)(s))

#define	\
simple_lock_init(l)		((void)(l))
#define	\
simple_lock(l)			((void)(l))
#define	\
simple_unlock(l)		((void)(l))
#define	KMEM_IS_RUNNING			(1)
#endif

static struct pool expool;
static struct simplelock expool_init_slock = SIMPLELOCK_INITIALIZER;
static int expool_initialized;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
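
/*
 * Worked example (illustrative): EXTENT_ALIGN(0x1001, 0x100, 0)
 * yields 0x1100, the next 0x100-aligned address at or above 0x1001.
 * With a skew, EXTENT_ALIGN(0x1001, 0x100, 0x10) yields 0x1010,
 * the first address >= 0x1001 that lies 0x10 bytes past a 0x100
 * boundary.  Note that "-(_align)" relies on two's-complement
 * arithmetic to form the mask ~((_align) - 1), which is why the
 * alignment must be a power of two.
 */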

/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc()).
 */

static inline void
expool_init(void)
{

	simple_lock(&expool_init_slock);
	if (expool_initialized) {
		simple_unlock(&expool_init_slock);
		return;
	}

#if defined(_KERNEL)
	pool_init(&expool, sizeof(struct extent_region), 0, 0, 0,
	    "extent", NULL, IPL_VM);
#else
	expool.pr_size = sizeof(struct extent_region);
#endif

	expool_initialized = 1;
	simple_unlock(&expool_init_slock);
}

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(struct extent *ex, int flags)
{
	struct extent_region *rp;
	int exflags;

	/*
	 * If the kernel memory allocator is not yet running, we can't
	 * use it (obviously).
	 */
	if (KMEM_IS_RUNNING == 0)
		flags &= ~EX_MALLOCOK;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = LIST_FIRST(&fex->fex_freelist)) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			if (ltsleep(&fex->fex_freelist,
			    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0, &ex->ex_slock))
				return (NULL);
		}
	}

 alloc:
	if (expool_initialized == 0)
		expool_init();
	rp = pool_get(&expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(struct extent *ex, struct extent_region *rp)
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splhigh();
				pool_put(&expool, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splhigh();
	pool_put(&expool, rp);
	splx(s);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(const char *name, u_long start, u_long end,
    struct malloc_type *mtype, void *storage, size_t storagesize, int flags)
{
	struct extent *ex;
	char *cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		if (expool_initialized == 0)
			expool_init();

		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
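
/*
 * Usage sketch (illustrative only; the extent names and ranges are
 * hypothetical, see extent(9) for the authoritative interface):
 *
 *	// Dynamically allocated extent covering I/O ports 0-0xffff.
 *	struct extent *ioport_ex = extent_create("ioport", 0, 0xffff,
 *	    M_DEVBUF, NULL, 0, EX_NOWAIT);
 *
 *	// Fixed extent backed by static storage, usable before
 *	// malloc() is up; EXTENT_FIXED_STORAGE_SIZE() sizes the
 *	// storage for a given number of pre-allocated regions.
 *	static long iomem_storage[
 *	    EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
 *	struct extent *iomem_ex = extent_create("iomem", 0, 0xfffff,
 *	    M_DEVBUF, (void *)iomem_storage, sizeof(iomem_storage),
 *	    EX_NOWAIT | EX_NOCOALESCE);
 */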

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(struct extent *ex)
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = LIST_FIRST(&ex->ex_regions); rp != NULL; ) {
		orp = rp;
		rp = LIST_NEXT(rp, er_link);
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(struct extent *ex, u_long start, u_long size,
    int flags, struct extent_region *after, struct extent_region *rp)
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (LIST_FIRST(&ex->ex_regions) != NULL) &&
		    ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			LIST_FIRST(&ex->ex_regions)->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}
		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((LIST_NEXT(after, er_link) != NULL) &&
	    ((start + size) == LIST_NEXT(after, er_link)->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = LIST_NEXT(after, er_link)->er_end;
			nextr = LIST_NEXT(after, er_link);
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			LIST_NEXT(after, er_link)->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
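
/*
 * Coalescing example (illustrative): with regions [0x000-0x0ff] and
 * [0x200-0x2ff] already in the list, inserting [0x100-0x1ff] first
 * appends to the region before it and then absorbs the one after it,
 * leaving a single region [0x000-0x2ff]; both the passed-in
 * descriptor and the absorbed region's descriptor are freed.
 */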

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		 ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif
#ifdef LOCKDEBUG
	if (flags & EX_WAITSPACE)
		ASSERT_SLEEPABLE(NULL, "extent_alloc_region(EX_WAITSPACE)");
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				error = ltsleep(ex,
				    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0, &ex->ex_slock);
				if (error == 0)
					goto alloc_start;
			} else {
				simple_unlock(&ex->ex_slock);
				error = EAGAIN;
			}
			extent_free_region_descriptor(ex, myrp);
			return error;
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}
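
/*
 * Usage sketch (illustrative; "ioport_ex" is the hypothetical extent
 * created above): reserve the 0x30 ports of the VGA range
 * 0x3b0-0x3df, failing with EAGAIN if any of them are already taken.
 *
 *	error = extent_alloc_region(ioport_ex, 0x3b0, 0x30, EX_NOWAIT);
 */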

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
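
/*
 * Worked example (illustrative, 32-bit u_long): LE_OV(0xfffffff0,
 * 0x20, 0xffffffff) evaluates to false because the sum wraps to
 * 0x10, which is less than x; a plain "x + y <= z" would wrap the
 * same way and incorrectly succeed.
 */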

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(struct extent *ex, u_long substart, u_long subend,
    u_long size, u_long alignment, u_long skew, u_long boundary,
    int flags, u_long *result)
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, exend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
  printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif
#ifdef LOCKDEBUG
	if (flags & EX_WAITSPACE)
		ASSERT_SLEEPABLE(NULL, "extent_alloc_subregion1(EX_WAITSPACE)");
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		 "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * Keep track of end of free region.  This is either the end of extent
	 * or the start of a region past the subend.
	 */
	exend = ex->ex_end;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
      "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		 ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = LIST_NEXT(rp, er_link)) {
		/*
		 * If the region starts past the subend, bail out and
		 * see if we fit against the subend.
		 */
		if (rp->er_start > subend) {
			exend = rp->er_start;
			break;
		}

		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						goto skip;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

skip:
		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = exend - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		error = ltsleep(ex,
		    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
		    "extnt", 0, &ex->ex_slock);
		if (error == 0)
			goto alloc_start;
	} else {
		simple_unlock(&ex->ex_slock);
		error = EAGAIN;
	}

	extent_free_region_descriptor(ex, myrp);
	return error;

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}

int
extent_alloc_subregion(struct extent *ex, u_long start, u_long end, u_long size,
    u_long alignment, u_long boundary, int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, start, end, size, alignment,
					0, boundary, flags, result));
}

int
extent_alloc(struct extent *ex, u_long size, u_long alignment, u_long boundary,
    int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
					size, alignment, 0, boundary,
					flags, result));
}
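
/*
 * Usage sketch (illustrative; "iomem_ex" is a hypothetical extent):
 * find a free 0x1000-byte chunk anywhere in the extent, aligned to
 * 0x1000 and not crossing a 0x10000 boundary, taking the first fit:
 *
 *	u_long addr;
 *	error = extent_alloc(iomem_ex, 0x1000, 0x1000, 0x10000,
 *	    EX_FAST | EX_NOWAIT, &addr);
 */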

int
extent_alloc1(struct extent *ex, u_long size, u_long alignment, u_long skew,
    u_long boundary, int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
					size, alignment, skew, boundary,
					flags, result));
}

int
extent_free(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int coalesce;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	coalesce = (ex->ex_flags & EXF_NOCOALESCE) == 0;
	simple_unlock(&ex->ex_slock);

	if (coalesce) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (!coalesce)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}
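
/*
 * Usage sketch (illustrative; "iomem_ex" and "addr" as in the
 * allocation example above): freeing the middle of a previously
 * allocated region is case 4 above and splits the region in two,
 * which is why a coalescing extent_free() may itself need a fresh
 * descriptor and can return ENOMEM under EX_NOWAIT.
 *
 *	error = extent_free(iomem_ex, addr + 0x100, 0x100, EX_NOWAIT);
 */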

void
extent_print(struct extent *ex)
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	LIST_FOREACH(rp, &ex->ex_regions, er_link)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}