/*	$NetBSD: subr_extent.c,v 1.54 2005/12/24 19:12:23 perry Exp $	*/
2
3 /*-
4 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe and Matthias Drochner.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * General purpose extent manager.
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.54 2005/12/24 19:12:23 perry Exp $");
45
46 #ifdef _KERNEL
47 #include "opt_lockdebug.h"
48
49 #include <sys/param.h>
50 #include <sys/extent.h>
51 #include <sys/malloc.h>
52 #include <sys/pool.h>
53 #include <sys/time.h>
54 #include <sys/systm.h>
55 #include <sys/proc.h>
56 #include <sys/lock.h>
57
58 #include <uvm/uvm_extern.h>
59
60 #define KMEM_IS_RUNNING (kmem_map != NULL)
61 #elif defined(_EXTENT_TESTING)
62 /*
 * User-land definitions, so this code can fit into a testing harness.
64 */
65 #include <sys/param.h>
66 #include <sys/pool.h>
67 #include <sys/extent.h>
68 #include <errno.h>
69 #include <stdlib.h>
70 #include <stdio.h>
71 #include <string.h>
72
73 /*
74 * Use multi-line #defines to avoid screwing up the kernel tags file;
75 * without this, ctags produces a tags file where panic() shows up
76 * in subr_extent.c rather than subr_prf.c.
77 */
78 #define \
79 malloc(s, t, flags) malloc(s)
80 #define \
81 free(p, t) free(p)
82 #define \
83 tsleep(chan, pri, str, timo) (EWOULDBLOCK)
84 #define \
85 ltsleep(chan,pri,str,timo,lck) (EWOULDBLOCK)
86 #define \
87 wakeup(chan) ((void)0)
88 #define \
89 pool_get(pool, flags) malloc((pool)->pr_size,0,0)
90 #define \
91 pool_put(pool, rp) free(rp,0)
92 #define \
93 panic(a) printf(a)
94 #define \
95 splhigh() (1)
96 #define \
97 splx(s) ((void)(s))
98
99 #define \
100 simple_lock_init(l) ((void)(l))
101 #define \
102 simple_lock(l) ((void)(l))
103 #define \
104 simple_unlock(l) ((void)(l))
105 #define KMEM_IS_RUNNING (1)
106 #endif
107
108 static struct pool expool;
109 static struct simplelock expool_init_slock = SIMPLELOCK_INITIALIZER;
110 static int expool_initialized;
111
112 /*
113 * Macro to align to an arbitrary power-of-two boundary.
114 */
115 #define EXTENT_ALIGN(_start, _align, _skew) \
116 (((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
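
/*
 * Illustrative note: for a power-of-two alignment and zero skew,
 * EXTENT_ALIGN rounds a start value up to the next multiple of the
 * alignment, e.g.
 *
 *	EXTENT_ALIGN(0x1001, 0x100, 0) == 0x1100
 *	EXTENT_ALIGN(0x1000, 0x100, 0) == 0x1000
 *
 * A non-zero skew offsets the alignment boundaries, so that
 *
 *	EXTENT_ALIGN(0x1001, 0x100, 0x10) == 0x1010
 */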
117
118 /*
119 * Create the extent_region pool.
120 * (This is deferred until one of our callers thinks we can malloc()).
121 */
122
123 static inline void
124 expool_init(void)
125 {
126
127 simple_lock(&expool_init_slock);
128 if (expool_initialized) {
129 simple_unlock(&expool_init_slock);
130 return;
131 }
132
133 #if defined(_KERNEL)
134 pool_init(&expool, sizeof(struct extent_region), 0, 0, 0,
135 "extent", NULL);
136 #else
137 expool.pr_size = sizeof(struct extent_region);
138 #endif
139
140 expool_initialized = 1;
141 simple_unlock(&expool_init_slock);
142 }
143
144 /*
145 * Allocate an extent region descriptor. EXTENT MUST NOT BE LOCKED,
146 * AS THIS FUNCTION MAY BLOCK! We will handle any locking we may need.
147 */
148 static struct extent_region *
149 extent_alloc_region_descriptor(struct extent *ex, int flags)
150 {
151 struct extent_region *rp;
152 int exflags;
153 int s;
154
155 /*
156 * If the kernel memory allocator is not yet running, we can't
157 * use it (obviously).
158 */
159 if (KMEM_IS_RUNNING == 0)
160 flags &= ~EX_MALLOCOK;
161
162 /*
163 * XXX Make a static, create-time flags word, so we don't
164 * XXX have to lock to read it!
165 */
166 simple_lock(&ex->ex_slock);
167 exflags = ex->ex_flags;
168 simple_unlock(&ex->ex_slock);
169
170 if (exflags & EXF_FIXED) {
171 struct extent_fixed *fex = (struct extent_fixed *)ex;
172
173 for (;;) {
174 simple_lock(&ex->ex_slock);
175 if ((rp = LIST_FIRST(&fex->fex_freelist)) != NULL) {
176 /*
177 * Don't muck with flags after pulling it off
178 * the freelist; it may have been dynamically
179 * allocated, and kindly given to us. We
180 * need to remember that information.
181 */
182 LIST_REMOVE(rp, er_link);
183 simple_unlock(&ex->ex_slock);
184 return (rp);
185 }
186 if (flags & EX_MALLOCOK) {
187 simple_unlock(&ex->ex_slock);
188 goto alloc;
189 }
190 if ((flags & EX_WAITOK) == 0) {
191 simple_unlock(&ex->ex_slock);
192 return (NULL);
193 }
194 ex->ex_flags |= EXF_FLWANTED;
195 if (ltsleep(&fex->fex_freelist,
			    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
197 "extnt", 0, &ex->ex_slock))
198 return (NULL);
199 }
200 }
201
202 alloc:
203 s = splhigh();
204 if (expool_initialized == 0)
205 expool_init();
206 rp = pool_get(&expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
207 splx(s);
208
209 if (rp != NULL)
210 rp->er_flags = ER_ALLOC;
211
212 return (rp);
213 }
214
215 /*
216 * Free an extent region descriptor. EXTENT _MUST_ BE LOCKED! This
217 * is safe as we do not block here.
218 */
219 static void
220 extent_free_region_descriptor(struct extent *ex, struct extent_region *rp)
221 {
222 int s;
223
224 if (ex->ex_flags & EXF_FIXED) {
225 struct extent_fixed *fex = (struct extent_fixed *)ex;
226
227 /*
228 * If someone's waiting for a region descriptor,
229 * be nice and give them this one, rather than
		 * just freeing it back to the system.
231 */
232 if (rp->er_flags & ER_ALLOC) {
233 if (ex->ex_flags & EXF_FLWANTED) {
234 /* Clear all but ER_ALLOC flag. */
235 rp->er_flags = ER_ALLOC;
236 LIST_INSERT_HEAD(&fex->fex_freelist, rp,
237 er_link);
238 goto wake_em_up;
239 } else {
240 s = splhigh();
241 pool_put(&expool, rp);
242 splx(s);
243 }
244 } else {
245 /* Clear all flags. */
246 rp->er_flags = 0;
247 LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
248 }
249
250 if (ex->ex_flags & EXF_FLWANTED) {
251 wake_em_up:
252 ex->ex_flags &= ~EXF_FLWANTED;
253 wakeup(&fex->fex_freelist);
254 }
255 return;
256 }
257
258 /*
259 * We know it's dynamically allocated if we get here.
260 */
261 s = splhigh();
262 pool_put(&expool, rp);
263 splx(s);
264 }
265
266 /*
267 * Allocate and initialize an extent map.
268 */
269 struct extent *
270 extent_create(const char *name, u_long start, u_long end,
271 struct malloc_type *mtype, caddr_t storage, size_t storagesize, int flags)
272 {
273 struct extent *ex;
274 caddr_t cp = storage;
275 size_t sz = storagesize;
276 struct extent_region *rp;
277 int fixed_extent = (storage != NULL);
278 int s;
279
280 #ifdef DIAGNOSTIC
281 /* Check arguments. */
282 if (name == NULL)
283 panic("extent_create: name == NULL");
284 if (end < start) {
285 printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
286 name, start, end);
287 panic("extent_create: end < start");
288 }
289 if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
290 panic("extent_create: fixed extent, bad storagesize 0x%lx",
291 (u_long)storagesize);
292 if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
293 panic("extent_create: storage provided for non-fixed");
294 #endif
295
296 /* Allocate extent descriptor. */
297 if (fixed_extent) {
298 struct extent_fixed *fex;
299
300 memset(storage, 0, storagesize);
301
302 /*
303 * Align all descriptors on "long" boundaries.
304 */
305 fex = (struct extent_fixed *)cp;
306 ex = (struct extent *)fex;
307 cp += ALIGN(sizeof(struct extent_fixed));
308 sz -= ALIGN(sizeof(struct extent_fixed));
309 fex->fex_storage = storage;
310 fex->fex_storagesize = storagesize;
311
312 /*
313 * In a fixed extent, we have to pre-allocate region
314 * descriptors and place them in the extent's freelist.
315 */
316 LIST_INIT(&fex->fex_freelist);
317 while (sz >= ALIGN(sizeof(struct extent_region))) {
318 rp = (struct extent_region *)cp;
319 cp += ALIGN(sizeof(struct extent_region));
320 sz -= ALIGN(sizeof(struct extent_region));
321 LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
322 }
323 } else {
324 s = splhigh();
325 if (expool_initialized == 0)
326 expool_init();
327 splx(s);
328
329 ex = (struct extent *)malloc(sizeof(struct extent),
330 mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
331 if (ex == NULL)
332 return (NULL);
333 }
334
335 /* Fill in the extent descriptor and return it to the caller. */
336 simple_lock_init(&ex->ex_slock);
337 LIST_INIT(&ex->ex_regions);
338 ex->ex_name = name;
339 ex->ex_start = start;
340 ex->ex_end = end;
341 ex->ex_mtype = mtype;
342 ex->ex_flags = 0;
343 if (fixed_extent)
344 ex->ex_flags |= EXF_FIXED;
345 if (flags & EX_NOCOALESCE)
346 ex->ex_flags |= EXF_NOCOALESCE;
347 return (ex);
348 }
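
/*
 * Example usage (an illustrative sketch; the names, address range and
 * region count are hypothetical, and EXTENT_FIXED_STORAGE_SIZE() is the
 * storage-sizing helper from <sys/extent.h>):
 *
 *	struct extent *foo_ex, *foo_fixed_ex;
 *	static long foo_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
 *
 *	foo_ex = extent_create("foo", 0x0000, 0xffff, M_DEVBUF,
 *	    NULL, 0, EX_NOWAIT);
 *
 * and, for a fixed extent whose region descriptors live in the
 * caller-supplied storage (usable before the kernel memory allocator
 * is running):
 *
 *	foo_fixed_ex = extent_create("foofixed", 0x0000, 0xffff, M_DEVBUF,
 *	    (caddr_t)foo_storage, sizeof(foo_storage), EX_NOWAIT);
 */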
349
350 /*
351 * Destroy an extent map.
352 * Since we're freeing the data, there can't be any references
353 * so we don't need any locking.
354 */
355 void
356 extent_destroy(struct extent *ex)
357 {
358 struct extent_region *rp, *orp;
359
360 #ifdef DIAGNOSTIC
361 /* Check arguments. */
362 if (ex == NULL)
363 panic("extent_destroy: NULL extent");
364 #endif
365
366 /* Free all region descriptors in extent. */
367 for (rp = LIST_FIRST(&ex->ex_regions); rp != NULL; ) {
368 orp = rp;
369 rp = LIST_NEXT(rp, er_link);
370 LIST_REMOVE(orp, er_link);
371 extent_free_region_descriptor(ex, orp);
372 }
373
374 /* If we're not a fixed extent, free the extent descriptor itself. */
375 if ((ex->ex_flags & EXF_FIXED) == 0)
376 free(ex, ex->ex_mtype);
377 }
378
379 /*
380 * Insert a region descriptor into the sorted region list after the
381 * entry "after" or at the head of the list (if "after" is NULL).
382 * The region descriptor we insert is passed in "rp". We must
383 * allocate the region descriptor before calling this function!
384 * If we don't need the region descriptor, it will be freed here.
385 */
386 static void
387 extent_insert_and_optimize(struct extent *ex, u_long start, u_long size,
388 int flags, struct extent_region *after, struct extent_region *rp)
389 {
390 struct extent_region *nextr;
391 int appended = 0;
392
393 if (after == NULL) {
394 /*
395 * We're the first in the region list. If there's
396 * a region after us, attempt to coalesce to save
397 * descriptor overhead.
398 */
399 if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
400 (LIST_FIRST(&ex->ex_regions) != NULL) &&
401 ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) {
402 /*
403 * We can coalesce. Prepend us to the first region.
404 */
405 LIST_FIRST(&ex->ex_regions)->er_start = start;
406 extent_free_region_descriptor(ex, rp);
407 return;
408 }
409
410 /*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
413 */
414 rp->er_start = start;
415 rp->er_end = start + (size - 1);
416 LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
417 return;
418 }
419
420 /*
421 * If EXF_NOCOALESCE is set, coalescing is disallowed.
422 */
423 if (ex->ex_flags & EXF_NOCOALESCE)
424 goto cant_coalesce;
425
426 /*
427 * Attempt to coalesce with the region before us.
428 */
429 if ((after->er_end + 1) == start) {
430 /*
431 * We can coalesce. Append ourselves and make
432 * note of it.
433 */
434 after->er_end = start + (size - 1);
435 appended = 1;
436 }
437
438 /*
439 * Attempt to coalesce with the region after us.
440 */
441 if ((LIST_NEXT(after, er_link) != NULL) &&
442 ((start + size) == LIST_NEXT(after, er_link)->er_start)) {
443 /*
444 * We can coalesce. Note that if we appended ourselves
445 * to the previous region, we exactly fit the gap, and
446 * can free the "next" region descriptor.
447 */
448 if (appended) {
449 /*
450 * Yup, we can free it up.
451 */
452 after->er_end = LIST_NEXT(after, er_link)->er_end;
453 nextr = LIST_NEXT(after, er_link);
454 LIST_REMOVE(nextr, er_link);
455 extent_free_region_descriptor(ex, nextr);
456 } else {
457 /*
458 * Nope, just prepend us to the next region.
459 */
460 LIST_NEXT(after, er_link)->er_start = start;
461 }
462
463 extent_free_region_descriptor(ex, rp);
464 return;
465 }
466
467 /*
468 * We weren't able to coalesce with the next region, but
469 * we don't need to allocate a region descriptor if we
470 * appended ourselves to the previous region.
471 */
472 if (appended) {
473 extent_free_region_descriptor(ex, rp);
474 return;
475 }
476
477 cant_coalesce:
478
479 /*
480 * Fill in the region descriptor and insert ourselves
481 * into the region list.
482 */
483 rp->er_start = start;
484 rp->er_end = start + (size - 1);
485 LIST_INSERT_AFTER(after, rp, er_link);
486 }
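
/*
 * Illustrative example of the coalescing above: if the region list
 * holds [0x1000 - 0x1fff] and [0x3000 - 0x3fff], inserting a region
 * starting at 0x2000 with size 0x1000 appends it to the first
 * descriptor, which then also absorbs the second, leaving the single
 * descriptor [0x1000 - 0x3fff]; the two descriptors that are no longer
 * needed are released with extent_free_region_descriptor().
 */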
487
488 /*
489 * Allocate a specific region in an extent map.
490 */
491 int
492 extent_alloc_region(struct extent *ex, u_long start, u_long size, int flags)
493 {
494 struct extent_region *rp, *last, *myrp;
495 u_long end = start + (size - 1);
496 int error;
497
498 #ifdef DIAGNOSTIC
499 /* Check arguments. */
500 if (ex == NULL)
501 panic("extent_alloc_region: NULL extent");
502 if (size < 1) {
503 printf("extent_alloc_region: extent `%s', size 0x%lx\n",
504 ex->ex_name, size);
505 panic("extent_alloc_region: bad size");
506 }
507 if (end < start) {
508 printf(
509 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
510 ex->ex_name, start, size);
511 panic("extent_alloc_region: overflow");
512 }
513 #endif
514 #ifdef LOCKDEBUG
515 if (flags & EX_WAITSPACE)
516 simple_lock_only_held(NULL,
517 "extent_alloc_region(EX_WAITSPACE)");
518 #endif
519
520 /*
521 * Make sure the requested region lies within the
522 * extent.
523 *
524 * We don't lock to check the range, because those values
525 * are never modified, and if another thread deletes the
526 * extent, we're screwed anyway.
527 */
528 if ((start < ex->ex_start) || (end > ex->ex_end)) {
529 #ifdef DIAGNOSTIC
530 printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
531 ex->ex_name, ex->ex_start, ex->ex_end);
532 printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
533 start, end);
534 panic("extent_alloc_region: region lies outside extent");
535 #else
536 return (EINVAL);
537 #endif
538 }
539
540 /*
541 * Allocate the region descriptor. It will be freed later
542 * if we can coalesce with another region. Don't lock before
543 * here! This could block.
544 */
545 myrp = extent_alloc_region_descriptor(ex, flags);
546 if (myrp == NULL) {
547 #ifdef DIAGNOSTIC
548 printf(
549 "extent_alloc_region: can't allocate region descriptor\n");
550 #endif
551 return (ENOMEM);
552 }
553
554 alloc_start:
555 simple_lock(&ex->ex_slock);
556
557 /*
558 * Attempt to place ourselves in the desired area of the
559 * extent. We save ourselves some work by keeping the list sorted.
560 * In other words, if the start of the current region is greater
561 * than the end of our region, we don't have to search any further.
562 */
563
564 /*
565 * Keep a pointer to the last region we looked at so
566 * that we don't have to traverse the list again when
567 * we insert ourselves. If "last" is NULL when we
568 * finally insert ourselves, we go at the head of the
569 * list. See extent_insert_and_optimize() for details.
570 */
571 last = NULL;
572
573 LIST_FOREACH(rp, &ex->ex_regions, er_link) {
574 if (rp->er_start > end) {
575 /*
576 * We lie before this region and don't
577 * conflict.
578 */
579 break;
580 }
581
582 /*
583 * The current region begins before we end.
584 * Check for a conflict.
585 */
586 if (rp->er_end >= start) {
587 /*
588 * We conflict. If we can (and want to) wait,
589 * do so.
590 */
591 if (flags & EX_WAITSPACE) {
592 ex->ex_flags |= EXF_WANTED;
593 error = ltsleep(ex,
594 PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
595 "extnt", 0, &ex->ex_slock);
596 if (error)
597 return (error);
598 goto alloc_start;
599 }
600 extent_free_region_descriptor(ex, myrp);
601 simple_unlock(&ex->ex_slock);
602 return (EAGAIN);
603 }
604 /*
605 * We don't conflict, but this region lies before
606 * us. Keep a pointer to this region, and keep
607 * trying.
608 */
609 last = rp;
610 }
611
612 /*
613 * We don't conflict with any regions. "last" points
614 * to the region we fall after, or is NULL if we belong
615 * at the beginning of the region list. Insert ourselves.
616 */
617 extent_insert_and_optimize(ex, start, size, flags, last, myrp);
618 simple_unlock(&ex->ex_slock);
619 return (0);
620 }
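
/*
 * Example usage (an illustrative sketch; "foo_ex" and the range are
 * hypothetical): reserve the specific range 0x4000 - 0x40ff from an
 * existing extent without sleeping:
 *
 *	error = extent_alloc_region(foo_ex, 0x4000, 0x100, EX_NOWAIT);
 *	if (error)
 *		return (error);
 */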
621
622 /*
623 * Macro to check (x + y) <= z. This check is designed to fail
624 * if an overflow occurs.
625 */
626 #define LE_OV(x, y, z) ((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
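
/*
 * Illustrative note, assuming a 32-bit u_long: LE_OV(0xfffffff0, 0x20,
 * 0xffffffff) is false, because (x) + (y) wraps around and the
 * ((x) + (y)) >= (x) clause catches the overflow, whereas a plain
 * (x + y) <= (z) comparison would wrongly succeed after the wrap.
 */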
627
628 /*
629 * Allocate a region in an extent map subregion.
630 *
631 * If EX_FAST is specified, we return the first fit in the map.
632 * Otherwise, we try to minimize fragmentation by finding the
633 * smallest gap that will hold the request.
634 *
635 * The allocated region is aligned to "alignment", which must be
636 * a power of 2.
637 */
638 int
639 extent_alloc_subregion1(struct extent *ex, u_long substart, u_long subend,
640 u_long size, u_long alignment, u_long skew, u_long boundary,
641 int flags, u_long *result)
642 {
643 struct extent_region *rp, *myrp, *last, *bestlast;
644 u_long newstart, newend, exend, beststart, bestovh, ovh;
645 u_long dontcross;
646 int error;
647
648 #ifdef DIAGNOSTIC
649 /*
650 * Check arguments.
651 *
652 * We don't lock to check these, because these values
653 * are never modified, and if another thread deletes the
654 * extent, we're screwed anyway.
655 */
656 if (ex == NULL)
657 panic("extent_alloc_subregion: NULL extent");
658 if (result == NULL)
659 panic("extent_alloc_subregion: NULL result pointer");
660 if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
661 (subend > ex->ex_end) || (subend < ex->ex_start)) {
662 printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
663 ex->ex_name, ex->ex_start, ex->ex_end);
664 printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
665 substart, subend);
666 panic("extent_alloc_subregion: bad subregion");
667 }
668 if ((size < 1) || ((size - 1) > (subend - substart))) {
669 printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
670 ex->ex_name, size);
671 panic("extent_alloc_subregion: bad size");
672 }
673 if (alignment == 0)
674 panic("extent_alloc_subregion: bad alignment");
675 if (boundary && (boundary < size)) {
676 printf(
677 "extent_alloc_subregion: extent `%s', size 0x%lx, "
678 "boundary 0x%lx\n", ex->ex_name, size, boundary);
679 panic("extent_alloc_subregion: bad boundary");
680 }
681 #endif
682 #ifdef LOCKDEBUG
683 if (flags & EX_WAITSPACE)
684 simple_lock_only_held(NULL,
685 "extent_alloc_subregion1(EX_WAITSPACE)");
686 #endif
687
688 /*
689 * Allocate the region descriptor. It will be freed later
690 * if we can coalesce with another region. Don't lock before
691 * here! This could block.
692 */
693 myrp = extent_alloc_region_descriptor(ex, flags);
694 if (myrp == NULL) {
695 #ifdef DIAGNOSTIC
696 printf(
697 "extent_alloc_subregion: can't allocate region descriptor\n");
698 #endif
699 return (ENOMEM);
700 }
701
702 alloc_start:
703 simple_lock(&ex->ex_slock);
704
705 /*
706 * Keep a pointer to the last region we looked at so
707 * that we don't have to traverse the list again when
708 * we insert ourselves. If "last" is NULL when we
709 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
711 */
712 last = NULL;
713
714 /*
715 * Keep track of size and location of the smallest
716 * chunk we fit in.
717 *
718 * Since the extent can be as large as the numeric range
719 * of the CPU (0 - 0xffffffff for 32-bit systems), the
720 * best overhead value can be the maximum unsigned integer.
721 * Thus, we initialize "bestovh" to 0, since we insert ourselves
722 * into the region list immediately on an exact match (which
723 * is the only case where "bestovh" would be set to 0).
724 */
725 bestovh = 0;
726 beststart = 0;
727 bestlast = NULL;
728
729 /*
	 * Keep track of the end of the free region.  This is either the
	 * end of the extent or the start of a region past the subend.
732 */
733 exend = ex->ex_end;
734
735 /*
736 * For N allocated regions, we must make (N + 1)
737 * checks for unallocated space. The first chunk we
738 * check is the area from the beginning of the subregion
739 * to the first allocated region after that point.
740 */
741 newstart = EXTENT_ALIGN(substart, alignment, skew);
742 if (newstart < ex->ex_start) {
743 #ifdef DIAGNOSTIC
744 printf(
745 "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
746 ex->ex_name, ex->ex_start, ex->ex_end, alignment);
747 simple_unlock(&ex->ex_slock);
748 panic("extent_alloc_subregion: overflow after alignment");
749 #else
750 extent_free_region_descriptor(ex, myrp);
751 simple_unlock(&ex->ex_slock);
752 return (EINVAL);
753 #endif
754 }
755
756 /*
757 * Find the first allocated region that begins on or after
758 * the subregion start, advancing the "last" pointer along
759 * the way.
760 */
761 LIST_FOREACH(rp, &ex->ex_regions, er_link) {
762 if (rp->er_start >= newstart)
763 break;
764 last = rp;
765 }
766
767 /*
768 * Relocate the start of our candidate region to the end of
769 * the last allocated region (if there was one overlapping
770 * our subrange).
771 */
772 if (last != NULL && last->er_end >= newstart)
773 newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);
774
775 for (; rp != NULL; rp = LIST_NEXT(rp, er_link)) {
776 /*
		 * If the region starts past the subend, bail out and see
778 * if we fit against the subend.
779 */
780 if (rp->er_start > subend) {
781 exend = rp->er_start;
782 break;
783 }
784
785 /*
786 * Check the chunk before "rp". Note that our
787 * comparison is safe from overflow conditions.
788 */
789 if (LE_OV(newstart, size, rp->er_start)) {
790 /*
791 * Do a boundary check, if necessary. Note
792 * that a region may *begin* on the boundary,
793 * but it must end before the boundary.
794 */
795 if (boundary) {
796 newend = newstart + (size - 1);
797
798 /*
799 * Calculate the next boundary after the start
800 * of this region.
801 */
802 dontcross = EXTENT_ALIGN(newstart+1, boundary,
803 (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
804 - 1;
805
806 #if 0
807 printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
808 newstart, newend, ex->ex_start, ex->ex_end,
809 boundary, dontcross);
810 #endif
811
812 /* Check for overflow */
813 if (dontcross < ex->ex_start)
814 dontcross = ex->ex_end;
815 else if (newend > dontcross) {
816 /*
817 * Candidate region crosses boundary.
818 * Throw away the leading part and see
819 * if we still fit.
820 */
821 newstart = dontcross + 1;
822 newend = newstart + (size - 1);
823 dontcross += boundary;
824 if (!LE_OV(newstart, size, rp->er_start))
825 goto skip;
826 }
827
828 /*
829 * If we run past the end of
830 * the extent or the boundary
831 * overflows, then the request
832 * can't fit.
833 */
834 if (newstart + size - 1 > ex->ex_end ||
835 dontcross < newstart)
836 goto fail;
837 }
838
839 /*
840 * We would fit into this space. Calculate
841 * the overhead (wasted space). If we exactly
842 * fit, or we're taking the first fit, insert
843 * ourselves into the region list.
844 */
845 ovh = rp->er_start - newstart - size;
846 if ((flags & EX_FAST) || (ovh == 0))
847 goto found;
848
849 /*
			 * We don't exactly fit, but check to see
851 * if we're better than any current choice.
852 */
853 if ((bestovh == 0) || (ovh < bestovh)) {
854 bestovh = ovh;
855 beststart = newstart;
856 bestlast = last;
857 }
858 }
859
860 skip:
861 /*
862 * Skip past the current region and check again.
863 */
864 newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
865 if (newstart < rp->er_end) {
866 /*
867 * Overflow condition. Don't error out, since
868 * we might have a chunk of space that we can
869 * use.
870 */
871 goto fail;
872 }
873
874 last = rp;
875 }
876
877 /*
878 * The final check is from the current starting point to the
	 * end of the subregion.  "newstart" is set to the beginning of
	 * the subregion if there were no allocated regions, or to just
	 * past the end of the last allocated region, adjusted for
	 * alignment in either case.
883 */
884 if (LE_OV(newstart, (size - 1), subend)) {
885 /*
886 * Do a boundary check, if necessary. Note
887 * that a region may *begin* on the boundary,
888 * but it must end before the boundary.
889 */
890 if (boundary) {
891 newend = newstart + (size - 1);
892
893 /*
894 * Calculate the next boundary after the start
895 * of this region.
896 */
897 dontcross = EXTENT_ALIGN(newstart+1, boundary,
898 (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
899 - 1;
900
901 #if 0
902 printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
903 newstart, newend, ex->ex_start, ex->ex_end,
904 boundary, dontcross);
905 #endif
906
907 /* Check for overflow */
908 if (dontcross < ex->ex_start)
909 dontcross = ex->ex_end;
910 else if (newend > dontcross) {
911 /*
912 * Candidate region crosses boundary.
913 * Throw away the leading part and see
914 * if we still fit.
915 */
916 newstart = dontcross + 1;
917 newend = newstart + (size - 1);
918 dontcross += boundary;
919 if (!LE_OV(newstart, (size - 1), subend))
920 goto fail;
921 }
922
923 /*
924 * If we run past the end of
925 * the extent or the boundary
926 * overflows, then the request
927 * can't fit.
928 */
929 if (newstart + size - 1 > ex->ex_end ||
930 dontcross < newstart)
931 goto fail;
932 }
933
934 /*
935 * We would fit into this space. Calculate
936 * the overhead (wasted space). If we exactly
937 * fit, or we're taking the first fit, insert
938 * ourselves into the region list.
939 */
940 ovh = exend - newstart - (size - 1);
941 if ((flags & EX_FAST) || (ovh == 0))
942 goto found;
943
944 /*
		 * We don't exactly fit, but check to see
946 * if we're better than any current choice.
947 */
948 if ((bestovh == 0) || (ovh < bestovh)) {
949 bestovh = ovh;
950 beststart = newstart;
951 bestlast = last;
952 }
953 }
954
955 fail:
956 /*
	 * One of the following two conditions has
958 * occurred:
959 *
960 * There is no chunk large enough to hold the request.
961 *
962 * If EX_FAST was not specified, there is not an
963 * exact match for the request.
964 *
965 * Note that if we reach this point and EX_FAST is
966 * set, then we know there is no space in the extent for
967 * the request.
968 */
969 if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
970 /*
971 * We have a match that's "good enough".
972 */
973 newstart = beststart;
974 last = bestlast;
975 goto found;
976 }
977
978 /*
979 * No space currently available. Wait for it to free up,
980 * if possible.
981 */
982 if (flags & EX_WAITSPACE) {
983 ex->ex_flags |= EXF_WANTED;
984 error = ltsleep(ex,
985 PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
986 "extnt", 0, &ex->ex_slock);
987 if (error)
988 return (error);
989 goto alloc_start;
990 }
991
992 extent_free_region_descriptor(ex, myrp);
993 simple_unlock(&ex->ex_slock);
994 return (EAGAIN);
995
996 found:
997 /*
998 * Insert ourselves into the region list.
999 */
1000 extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
1001 simple_unlock(&ex->ex_slock);
1002 *result = newstart;
1003 return (0);
1004 }
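
/*
 * Example usage (an illustrative sketch; "foo_ex" and the numeric
 * parameters are hypothetical): allocate 0x100 units anywhere in
 * 0x0000 - 0x7fff, aligned to 0x10, with no skew and no boundary
 * restriction, sleeping until space becomes available:
 *
 *	u_long result;
 *	int error;
 *
 *	error = extent_alloc_subregion1(foo_ex, 0x0000, 0x7fff, 0x100,
 *	    0x10, 0, 0, EX_WAITSPACE, &result);
 *	if (error == 0)
 *		printf("allocated 0x100 at 0x%lx\n", result);
 */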
1005
1006 int
1007 extent_free(struct extent *ex, u_long start, u_long size, int flags)
1008 {
1009 struct extent_region *rp, *nrp = NULL;
1010 u_long end = start + (size - 1);
1011 int exflags;
1012
1013 #ifdef DIAGNOSTIC
1014 /*
1015 * Check arguments.
1016 *
1017 * We don't lock to check these, because these values
1018 * are never modified, and if another thread deletes the
1019 * extent, we're screwed anyway.
1020 */
1021 if (ex == NULL)
1022 panic("extent_free: NULL extent");
1023 if ((start < ex->ex_start) || (end > ex->ex_end)) {
1024 extent_print(ex);
1025 printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
1026 ex->ex_name, start, size);
1027 panic("extent_free: extent `%s', region not within extent",
1028 ex->ex_name);
1029 }
1030 /* Check for an overflow. */
1031 if (end < start) {
1032 extent_print(ex);
1033 printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
1034 ex->ex_name, start, size);
1035 panic("extent_free: overflow");
1036 }
1037 #endif
1038
1039 /*
1040 * If we're allowing coalescing, we must allocate a region
1041 * descriptor now, since it might block.
1042 *
1043 * XXX Make a static, create-time flags word, so we don't
1044 * XXX have to lock to read it!
1045 */
1046 simple_lock(&ex->ex_slock);
1047 exflags = ex->ex_flags;
1048 simple_unlock(&ex->ex_slock);
1049
1050 if ((exflags & EXF_NOCOALESCE) == 0) {
1051 /* Allocate a region descriptor. */
1052 nrp = extent_alloc_region_descriptor(ex, flags);
1053 if (nrp == NULL)
1054 return (ENOMEM);
1055 }
1056
1057 simple_lock(&ex->ex_slock);
1058
1059 /*
1060 * Find region and deallocate. Several possibilities:
1061 *
1062 * 1. (start == er_start) && (end == er_end):
1063 * Free descriptor.
1064 *
1065 * 2. (start == er_start) && (end < er_end):
1066 * Adjust er_start.
1067 *
1068 * 3. (start > er_start) && (end == er_end):
1069 * Adjust er_end.
1070 *
1071 * 4. (start > er_start) && (end < er_end):
1072 * Fragment region. Requires descriptor alloc.
1073 *
1074 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
1075 * is not set.
1076 */
1077 LIST_FOREACH(rp, &ex->ex_regions, er_link) {
1078 /*
1079 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
1081 * then we haven't found the appropriate region descriptor.
1082 */
1083 if (rp->er_end < start)
1084 continue;
1085
1086 /*
1087 * Save ourselves some traversal; does the current
1088 * region begin after the chunk to be freed ends? If so,
1089 * then we've already passed any possible region descriptors
1090 * that might have contained the chunk to be freed.
1091 */
1092 if (rp->er_start > end)
1093 break;
1094
1095 /* Case 1. */
1096 if ((start == rp->er_start) && (end == rp->er_end)) {
1097 LIST_REMOVE(rp, er_link);
1098 extent_free_region_descriptor(ex, rp);
1099 goto done;
1100 }
1101
1102 /*
1103 * The following cases all require that EXF_NOCOALESCE
1104 * is not set.
1105 */
1106 if (ex->ex_flags & EXF_NOCOALESCE)
1107 continue;
1108
1109 /* Case 2. */
1110 if ((start == rp->er_start) && (end < rp->er_end)) {
1111 rp->er_start = (end + 1);
1112 goto done;
1113 }
1114
1115 /* Case 3. */
1116 if ((start > rp->er_start) && (end == rp->er_end)) {
1117 rp->er_end = (start - 1);
1118 goto done;
1119 }
1120
1121 /* Case 4. */
1122 if ((start > rp->er_start) && (end < rp->er_end)) {
1123 /* Fill in new descriptor. */
1124 nrp->er_start = end + 1;
1125 nrp->er_end = rp->er_end;
1126
1127 /* Adjust current descriptor. */
1128 rp->er_end = start - 1;
1129
1130 /* Insert new descriptor after current. */
1131 LIST_INSERT_AFTER(rp, nrp, er_link);
1132
1133 /* We used the new descriptor, so don't free it below */
1134 nrp = NULL;
1135 goto done;
1136 }
1137 }
1138
1139 /* Region not found, or request otherwise invalid. */
1140 simple_unlock(&ex->ex_slock);
1141 extent_print(ex);
1142 printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
1143 panic("extent_free: region not found");
1144
1145 done:
1146 if (nrp != NULL)
1147 extent_free_region_descriptor(ex, nrp);
1148 if (ex->ex_flags & EXF_WANTED) {
1149 ex->ex_flags &= ~EXF_WANTED;
1150 wakeup(ex);
1151 }
1152 simple_unlock(&ex->ex_slock);
1153 return (0);
1154 }
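
/*
 * Example usage (an illustrative sketch; "foo_ex" and the values follow
 * the hypothetical extent_alloc_subregion1() sketch above): return the
 * allocated region to the extent:
 *
 *	error = extent_free(foo_ex, result, 0x100, EX_NOWAIT);
 */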
1155
1156 void
1157 extent_print(struct extent *ex)
1158 {
1159 struct extent_region *rp;
1160
1161 if (ex == NULL)
1162 panic("extent_print: NULL extent");
1163
1164 simple_lock(&ex->ex_slock);
1165
1166 printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
1167 ex->ex_start, ex->ex_end, ex->ex_flags);
1168
1169 LIST_FOREACH(rp, &ex->ex_regions, er_link)
1170 printf(" 0x%lx - 0x%lx\n", rp->er_start, rp->er_end);
1171
1172 simple_unlock(&ex->ex_slock);
1173 }
1174