/*	$NetBSD: subr_extent.c,v 1.77 2015/07/28 19:38:48 christos Exp $	*/

/*-
 * Copyright (c) 1996, 1998, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_extent.c,v 1.77 2015/07/28 19:38:48 christos Exp $");

#ifdef _KERNEL
#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#elif defined(_EXTENT_TESTING)

#include <stdbool.h>	// XXX: no sys/stdbool.h
/*
 * User-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>

#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define	\
kmem_alloc(s, flags)		malloc(s)
#define	\
kmem_free(p, s)			free(p)
#define	\
cv_wait_sig(cv, lock)		(EWOULDBLOCK)
#define	\
pool_get(pool, flags)		kmem_alloc((pool)->pr_size,0)
#define	\
pool_put(pool, rp)		kmem_free(rp,0)
#define	\
panic(a)			printf(a)
#define	mutex_init(a, b, c)
#define	mutex_destroy(a)
#define	mutex_enter(l)
#define	mutex_exit(l)
#define	cv_wait(cv, lock)
#define	cv_broadcast(cv)
#define	cv_init(a, b)
#define	cv_destroy(a)
#define	KMEM_IS_RUNNING			(1)
#define	IPL_VM				(0)
#define	MUTEX_DEFAULT			(0)
#endif

static struct pool expool;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
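
/*
 * Illustrative examples (not part of the original source): with a
 * power-of-two alignment and no skew, EXTENT_ALIGN rounds up to the
 * next aligned value:
 *
 *	EXTENT_ALIGN(0x1234, 0x10, 0)   == 0x1240
 *	EXTENT_ALIGN(0x1230, 0x10, 0)   == 0x1230	(already aligned)
 *
 * A non-zero skew shifts the alignment grid, so results are congruent
 * to the skew modulo the alignment:
 *
 *	EXTENT_ALIGN(0x1235, 0x10, 0x4) == 0x1244
 */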

/*
 * Create the extent_region pool.
 */
void
extent_init(void)
{

#if defined(_KERNEL)
	pool_init(&expool, sizeof(struct extent_region), 0, 0, 0,
	    "extent", NULL, IPL_VM);
#else
	expool.pr_size = sizeof(struct extent_region);
#endif
}

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED.
 * We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(struct extent *ex, int flags)
{
	struct extent_region *rp;
	int exflags, error;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	mutex_enter(&ex->ex_lock);
	exflags = ex->ex_flags;
	mutex_exit(&ex->ex_lock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		mutex_enter(&ex->ex_lock);
		for (;;) {
			if ((rp = LIST_FIRST(&fex->fex_freelist)) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				mutex_exit(&ex->ex_lock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				mutex_exit(&ex->ex_lock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				mutex_exit(&ex->ex_lock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			if ((flags & EX_CATCH) != 0)
				error = cv_wait_sig(&ex->ex_cv, &ex->ex_lock);
			else {
				cv_wait(&ex->ex_cv, &ex->ex_lock);
				error = 0;
			}
			if (error != 0) {
				mutex_exit(&ex->ex_lock);
				return (NULL);
			}
		}
	}

 alloc:
	rp = pool_get(&expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!
 */
static void
extent_free_region_descriptor(struct extent *ex, struct extent_region *rp)
{

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else
				pool_put(&expool, rp);
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

 wake_em_up:
		ex->ex_flags &= ~EXF_FLWANTED;
		cv_broadcast(&ex->ex_cv);
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	pool_put(&expool, rp);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(const char *name, u_long start, u_long end,
    void *storage, size_t storagesize, int flags)
{
	struct extent *ex;
	char *cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);

#ifndef _KERNEL
	extent_init();
#endif

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		ex = kmem_alloc(sizeof(*ex),
		    (flags & EX_WAITOK) ? KM_SLEEP : KM_NOSLEEP);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	mutex_init(&ex->ex_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&ex->ex_cv, "extent");
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
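
/*
 * Illustrative usage, not part of the original source.  A dynamically
 * allocated extent needs no storage; a fixed extent supplies its own,
 * typically sized with EXTENT_FIXED_STORAGE_SIZE() from <sys/extent.h>.
 * Names and ranges below are hypothetical:
 *
 *	struct extent *ex;
 *	static char exstorage[EXTENT_FIXED_STORAGE_SIZE(8)];
 *
 *	ex = extent_create("ioport", 0x0, 0xffff, NULL, 0, EX_NOWAIT);
 *	...
 *	ex = extent_create("fixed", 0x0, 0xff, exstorage,
 *	    sizeof(exstorage), EX_NOWAIT);
 *
 * When the map is no longer needed, release it with extent_destroy(ex).
 */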

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(struct extent *ex)
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = LIST_FIRST(&ex->ex_regions); rp != NULL; ) {
		orp = rp;
		rp = LIST_NEXT(rp, er_link);
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	cv_destroy(&ex->ex_cv);
	mutex_destroy(&ex->ex_lock);

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		kmem_free(ex, sizeof(*ex));
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(struct extent *ex, u_long start, u_long size,
    int flags, struct extent_region *after, struct extent_region *rp)
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (LIST_FIRST(&ex->ex_regions) != NULL) &&
		    ((start + size) == LIST_FIRST(&ex->ex_regions)->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			LIST_FIRST(&ex->ex_regions)->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((LIST_NEXT(after, er_link) != NULL) &&
	    ((start + size) == LIST_NEXT(after, er_link)->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = LIST_NEXT(after, er_link)->er_end;
			nextr = LIST_NEXT(after, er_link);
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			LIST_NEXT(after, er_link)->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
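
/*
 * Illustrative coalescing outcomes (hypothetical regions, not from the
 * original source).  With an existing region 0x100 - 0x1ff and
 * coalescing enabled:
 *
 *	insert 0x0c0 - 0x0ff -> single region 0x0c0 - 0x1ff (prepend)
 *	insert 0x200 - 0x23f -> single region 0x100 - 0x23f (append)
 *
 * With regions 0x100 - 0x1ff and 0x300 - 0x3ff, inserting
 * 0x200 - 0x2ff exactly fills the gap: the two existing regions are
 * bridged into one region 0x100 - 0x3ff, and both the new descriptor
 * and the "next" descriptor are freed.
 */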

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif
#ifdef LOCKDEBUG
	if (flags & EX_WAITSPACE) {
		ASSERT_SLEEPABLE();
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

	mutex_enter(&ex->ex_lock);
 alloc_start:

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				if ((flags & EX_CATCH) != 0)
					error = cv_wait_sig(&ex->ex_cv,
					    &ex->ex_lock);
				else {
					cv_wait(&ex->ex_cv, &ex->ex_lock);
					error = 0;
				}
				if (error == 0)
					goto alloc_start;
				mutex_exit(&ex->ex_lock);
			} else {
				mutex_exit(&ex->ex_lock);
				error = EAGAIN;
			}
			extent_free_region_descriptor(ex, myrp);
			return error;
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	mutex_exit(&ex->ex_lock);
	return (0);
}
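
/*
 * Illustrative call (hypothetical values, not from the original
 * source): reserve the specific range 0x400 - 0x4ff of "ex", failing
 * with EAGAIN instead of sleeping if any part is already allocated:
 *
 *	error = extent_alloc_region(ex, 0x400, 0x100, EX_NOWAIT);
 */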

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
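
/*
 * Illustrative example (assuming a 32-bit u_long): a naive
 * "x + y <= z" test would wrap around and succeed bogusly, but LE_OV
 * catches the wrap because the sum then compares less than "x":
 *
 *	LE_OV(0xfffffff0, 0x20, 0xffffffff) == false	(sum wraps to 0x10)
 *	LE_OV(0x1000, 0x20, 0x2000)         == true
 */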

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(struct extent *ex, u_long substart, u_long subend,
    u_long size, u_long alignment, u_long skew, u_long boundary,
    int flags, u_long *result)
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, exend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf("extent_alloc_subregion: extent `%s', "
		    "ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: "
		    "substart 0x%lx, subend 0x%lx\n", substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif
#ifdef LOCKDEBUG
	if (flags & EX_WAITSPACE) {
		ASSERT_SLEEPABLE();
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		 "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	mutex_enter(&ex->ex_lock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * Keep track of end of free region.  This is either the end of extent
	 * or the start of a region past the subend.
	 */
	exend = ex->ex_end;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), "
		    "alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		mutex_exit(&ex->ex_lock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		mutex_exit(&ex->ex_lock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = LIST_NEXT(rp, er_link)) {
		/*
		 * If the region starts past the subend, bail out and see
		 * if we fit against the subend.
		 */
		if (rp->er_start > subend) {
			exend = rp->er_start;
			break;
		}

		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						goto skip;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

 skip:
		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = exend - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has occurred:
	 *
	 * There is no chunk large enough to hold the request.
	 *
	 * If EX_FAST was not specified, there is not an
	 * exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		if ((flags & EX_CATCH) != 0) {
			error = cv_wait_sig(&ex->ex_cv, &ex->ex_lock);
		} else {
			cv_wait(&ex->ex_cv, &ex->ex_lock);
			error = 0;
		}
		if (error == 0)
			goto alloc_start;
		mutex_exit(&ex->ex_lock);
	} else {
		mutex_exit(&ex->ex_lock);
		error = EAGAIN;
	}

	extent_free_region_descriptor(ex, myrp);
	return error;

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	mutex_exit(&ex->ex_lock);
	*result = newstart;
	return (0);
}

int
extent_alloc_subregion(struct extent *ex, u_long start, u_long end, u_long size,
    u_long alignment, u_long boundary, int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, start, end, size, alignment,
	    0, boundary, flags, result));
}
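
/*
 * Illustrative call (hypothetical values): allocate 0x80 bytes
 * anywhere within the sub-range 0x1000 - 0x1fff of "ex", aligned to
 * 0x10, with no boundary constraint; the chosen start is returned
 * through "result":
 *
 *	u_long result;
 *
 *	error = extent_alloc_subregion(ex, 0x1000, 0x1fff, 0x80,
 *	    0x10, 0, EX_NOWAIT, &result);
 */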

int
extent_alloc(struct extent *ex, u_long size, u_long alignment, u_long boundary,
    int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
	    size, alignment, 0, boundary,
	    flags, result));
}

int
extent_alloc1(struct extent *ex, u_long size, u_long alignment, u_long skew,
    u_long boundary, int flags, u_long *result)
{

	return (extent_alloc_subregion1(ex, ex->ex_start, ex->ex_end,
	    size, alignment, skew, boundary,
	    flags, result));
}
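
/*
 * Illustrative call (hypothetical values): allocate any 0x1000-byte
 * region of "ex", aligned to 0x1000 and not crossing a 0x10000
 * boundary:
 *
 *	error = extent_alloc(ex, 0x1000, 0x1000, 0x10000, EX_NOWAIT,
 *	    &result);
 */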

int
extent_free(struct extent *ex, u_long start, u_long size, int flags)
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int coalesce;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	mutex_enter(&ex->ex_lock);
	coalesce = (ex->ex_flags & EXF_NOCOALESCE) == 0;
	mutex_exit(&ex->ex_lock);

	if (coalesce) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	mutex_enter(&ex->ex_lock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	LIST_FOREACH(rp, &ex->ex_regions, er_link) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (!coalesce)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	mutex_exit(&ex->ex_lock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	cv_broadcast(&ex->ex_cv);
	mutex_exit(&ex->ex_lock);
	return (0);
}
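
/*
 * Illustrative call (hypothetical values): release the 0x80-byte
 * region allocated above.  Freeing only part of an allocated region
 * is also legal when coalescing is enabled (cases 2 - 4 above):
 *
 *	error = extent_free(ex, result, 0x80, EX_NOWAIT);
 */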

void
extent_print(struct extent *ex)
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	mutex_enter(&ex->ex_lock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	LIST_FOREACH(rp, &ex->ex_regions, er_link)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	mutex_exit(&ex->ex_lock);
}