/*	$NetBSD: subr_extent.c,v 1.3 1996/07/25 20:41:48 thorpej Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>

static int extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *));
static struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);

	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		bzero(storage, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += EXTENT_ALIGN(sizeof(struct extent_fixed), sizeof(long));
		sz -= EXTENT_ALIGN(sizeof(struct extent_fixed), sizeof(long));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= sizeof(struct extent_region)) {
			rp = (struct extent_region *)cp;
			cp += EXTENT_ALIGN(sizeof(struct extent_region),
			    sizeof(long));
			sz -= EXTENT_ALIGN(sizeof(struct extent_region),
			    sizeof(long));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOBLOB)
		ex->ex_flags |= EXF_NOBLOB;
	return (ex);
}
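
/*
 * Usage sketch (illustrative only; the map name, range, and malloc
 * type below are hypothetical, not defined by this file): a driver
 * managing a 16-bit I/O port space might create a malloc-backed map
 * with
 *
 *	struct extent *ioport_ex;
 *
 *	ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
 *	    NULL, 0, 0);
 *	if (ioport_ex == NULL)
 *		panic("can't create ioport extent");
 *
 * Passing pre-allocated storage and its size instead of NULL/0 yields
 * a fixed extent whose region descriptors come from that storage,
 * which allows the map to be used before malloc() is available.
 */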

/*
 * Destroy an extent map.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;
	int mtype;

	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");

	mtype = ex->ex_mtype;

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 */
static int
extent_insert_and_optimize(ex, start, size, flags, after)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after;
{
	struct extent_region *rp;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOBLOB) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			return (0);
		}

		/*
		 * Can't coalesce.  Allocate a region descriptor, fill it
		 * in, and insert us at the head of the region list.
		 */
		rp = extent_alloc_region_descriptor(ex, flags);
		if (rp == NULL) {
#if defined(DEBUG) || defined(DIAGNOSTIC)
			printf("extent `%s': can't alloc region descriptor.\n",
			    ex->ex_name);
#endif
			return (ENOMEM);
		}
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return (0);
	}

	/*
	 * If EXF_NOBLOB is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOBLOB)
		goto allocate_region_descriptor;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.  Grab a pointer to
			 * the region we're absorbing first; LIST_REMOVE()
			 * updates after->er_link.le_next, so freeing
			 * through it would free the wrong descriptor.
			 */
			rp = after->er_link.le_next;
			after->er_end = rp->er_end;
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}
		return (0);
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended)
		return (0);

allocate_region_descriptor:

	/*
	 * Allocate a region descriptor and insert ourselves
	 * into the region list.
	 */
	rp = extent_alloc_region_descriptor(ex, flags);
	if (rp == NULL) {
#if defined(DEBUG) || defined(DIAGNOSTIC)
		printf("extent `%s': can't allocate region descriptor.\n",
		    ex->ex_name);
#endif
		return (ENOMEM);
	}
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
	return (0);
}
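
/*
 * To illustrate the coalescing above (the numbers are a made-up
 * example, not from this file): given the sorted regions
 *
 *	[0x00 - 0x0f]  [0x20 - 0x2f]
 *
 * inserting the range 0x10 - 0x1f first appends to the left region
 * (its er_end becomes 0x1f) and then absorbs the right one, leaving
 * the single region [0x00 - 0x2f] and freeing the right-hand
 * descriptor.  With EXF_NOBLOB set, the same insert instead consumes
 * a third descriptor: [0x00 - 0x0f] [0x10 - 0x1f] [0x20 - 0x2f].
 */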

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last;
	u_long end = start + (size - 1);
	int error;

	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
	/*
	 * Make sure the requested region lies within the
	 * extent.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
	}

alloc_start:
	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can wait, do so.
			 */
			if (flags & EX_WAITOK) {
				ex->ex_flags |= EXF_WANTED;
				error = tsleep(ex,
				    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0);
				if (error)
					return (error);
				goto alloc_start;
			}
			return (EAGAIN);
		}

		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	return (extent_insert_and_optimize(ex, start, size, flags, last));
}
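
/*
 * Example (hypothetical values, continuing the "ioport" sketch
 * above): to claim the fixed range 0x80 - 0x8f, returning EAGAIN
 * rather than sleeping if any part of it is already allocated:
 *
 *	if (extent_alloc_region(ioport_ex, 0x80, 0x10, 0))
 *		printf("ports 0x80 - 0x8f are busy\n");
 *
 * Adding EX_WAITOK to the flags would instead tsleep() until a
 * conflicting region is freed, then retry.
 */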

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define	LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
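
/*
 * For example, on a machine with a 32-bit u_long,
 * LE_OV(0xfffffff0, 0x20, 0xffffffff) is false: the sum wraps to
 * 0x10, and the first clause catches the wrap even though the
 * wrapped value by itself would satisfy the range test.
 */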

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion(ex, substart, subend, size, alignment, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross, odontcross;
	int error;

	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart >= ex->ex_end) ||
	    (subend > ex->ex_end) || (subend <= ex->ex_start)) {
		printf(
	"extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf(
		 "extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || (size > ((subend - substart) + 1))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
	"extent_alloc_subregion: extent `%s', size 0x%lx, boundary 0x%lx\n",
		    ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}

alloc_start:
	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Initialize the "don't cross" boundary, a.k.a. a line
	 * that a region should not cross.  If the boundary lies
	 * before the region starts, we add the "boundary" argument
	 * until we get a meaningful comparison.
	 */
	dontcross = 0;
	if (boundary) {
		dontcross = ex->ex_start + boundary;
		while (dontcross < substart)
			dontcross += boundary;
	}

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region.
	 */
	newstart = EXTENT_ALIGN(substart, alignment);
	if (newstart < ex->ex_start) {
		printf(
     "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		panic("extent_alloc_subregion: overflow after alignment");
	}

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Adjust boundary for a meaningful
				 * comparison.
				 */
				while (dontcross <= newstart) {
					odontcross = dontcross;
					dontcross += boundary;

					/*
					 * If we run past the end of
					 * the extent or the boundary
					 * overflows, then the request
					 * can't fit.
					 */
					if ((dontcross > ex->ex_end) ||
					    (dontcross < odontcross))
						goto fail;
				}

				/* Do the boundary check. */
				if (newend >= dontcross) {
					/*
					 * Candidate region crosses
					 * boundary.  Try again.
					 */
					continue;
				}
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 * There is no chunk large enough to hold the request.
	 *
	 * If EX_FAST was not specified, there is not an
	 * exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITOK) {
		ex->ex_flags |= EXF_WANTED;
		error = tsleep(ex,
		    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
		if (error)
			return (error);
		goto alloc_start;
	}

	return (EAGAIN);

found:
	/*
	 * Insert ourselves into the region list.
	 */
	error = extent_insert_and_optimize(ex, newstart, size, flags, last);
	if (error == 0)
		*result = newstart;
	return (error);
}
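
/*
 * Example (hypothetical device constraints, continuing the "ioport"
 * sketch above): to find 0x100 ports anywhere in 0xc000 - 0xcfff,
 * aligned to 0x10 and not crossing a 0x400-byte boundary, taking
 * the first fit:
 *
 *	u_long base;
 *	int error;
 *
 *	error = extent_alloc_subregion(ioport_ex, 0xc000, 0xcfff,
 *	    0x100, 0x10, 0x400, EX_FAST, &base);
 *	if (error == 0)
 *		printf("allocated ports at 0x%lx\n", base);
 *
 * Omitting EX_FAST would instead search all gaps for the smallest
 * one that satisfies the same constraints.
 */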

int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp;
	u_long end = start + (size - 1);

	/* Check arguments. */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 * 1. (start == er_start) && (end == er_end):
	 *    Free descriptor.
	 *
	 * 2. (start == er_start) && (end < er_end):
	 *    Adjust er_start.
	 *
	 * 3. (start > er_start) && (end == er_end):
	 *    Adjust er_end.
	 *
	 * 4. (start > er_start) && (end < er_end):
	 *    Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOBLOB flag
	 * is not set.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOBLOB
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOBLOB)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			struct extent_region *nrp;

			/* Allocate a region descriptor. */
			nrp = extent_alloc_region_descriptor(ex, flags);
			if (nrp == NULL)
				return (ENOMEM);

			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

done:
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	return (0);
}
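
/*
 * Example (continuing the hypothetical sketch above): releasing the
 * range obtained from extent_alloc_subregion() makes it available
 * again and wakes any waiters:
 *
 *	extent_free(ioport_ex, base, 0x100, 0);
 *
 * Note that freeing the middle of a region (case 4 above) is the one
 * path that can fail with ENOMEM, since it must allocate a fresh
 * region descriptor for the split.
 */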

static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		while (fex->fex_freelist.lh_first == NULL) {
			if (flags & EX_MALLOCOK)
				goto alloc;

			if ((flags & EX_WAITOK) == 0)
				return (NULL);
			ex->ex_flags |= EXF_FLWANTED;
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
		/* Atomic. */
		s = splhigh();
		rp = fex->fex_freelist.lh_first;
		LIST_REMOVE(rp, er_link);
		splx(s);

		rp->er_flags = 0;

		return (rp);
	}

alloc:
	rp = (struct extent_region *)
	    malloc(sizeof(struct extent_region), ex->ex_mtype,
	    (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
	if (rp == NULL)
		return (NULL);

	rp->er_flags = ER_ALLOC;

	return (rp);
}

static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{

	if ((rp->er_flags & ER_ALLOC) == 0) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		if (ex->ex_flags & EXF_FLWANTED) {
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
	} else
		free(rp, ex->ex_mtype);
}

void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);
}