/*	$NetBSD: subr_extent.c,v 1.13 1998/07/15 12:38:29 pk Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#else
/*
 * User-land definitions, so this code can be exercised in a
 * testing harness.
 */
#include <sys/param.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>

#define	malloc(s, t, flags)		malloc(s)
#define	free(p, t)			free(p)
#define	tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	wakeup(chan)			((void)0)
#endif
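
/*
 * A minimal user-land harness sketch (illustrative only; not part of
 * this file's build, and the name and range below are made up):
 *
 *	int
 *	main()
 *	{
 *		struct extent *ex;
 *
 *		ex = extent_create("test", 0x1000, 0x1fff, 0,
 *		    NULL, 0, EX_NOWAIT);
 *		if (extent_alloc_region(ex, 0x1000, 0x100, EX_NOWAIT) != 0)
 *			printf("allocation failed\n");
 *		extent_print(ex);
 *		extent_destroy(ex);
 *		return (0);
 *	}
 */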

static	void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *, struct extent_region *));
static	struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static	void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define	EXTENT_ALIGN(_start, _align)			\
	(((_start) + ((_align) - 1)) & (-(_align)))
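
/*
 * For example (values illustrative): EXTENT_ALIGN(0x1234, 0x100)
 * computes (0x1234 + 0xff) & ~0xff == 0x1300, the next 0x100-aligned
 * address at or above the start; an already-aligned start is
 * unchanged, e.g. EXTENT_ALIGN(0x1300, 0x100) == 0x1300.
 */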

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	const char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		bzero(storage, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
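
/*
 * Usage sketch (names, ranges, and the region count are illustrative;
 * EXTENT_FIXED_STORAGE_SIZE() is assumed to be the storage-sizing
 * macro from <sys/extent.h>):
 *
 *	Dynamic extent; descriptors come from malloc():
 *
 *		ex = extent_create("iomem", 0xa0000, 0xfffff, M_DEVBUF,
 *		    NULL, 0, EX_NOWAIT);
 *
 *	Fixed extent; the caller supplies storage for the extent header
 *	and its region descriptors, so no allocation is ever needed:
 *
 *		static char storage[EXTENT_FIXED_STORAGE_SIZE(8)];
 *
 *		ex = extent_create("irq", 0, 15, M_DEVBUF,
 *		    (caddr_t)storage, sizeof(storage), EX_NOWAIT);
 */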

/*
 * Destroy an extent map.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	simple_lock(&ex->ex_slock);

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
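
/*
 * Coalescing example (numbers illustrative): with existing regions
 * 0x1000 - 0x1fff and 0x3000 - 0x3fff, inserting 0x2000 - 0x2fff both
 * appends to the first region and exactly fills the gap to the second,
 * so the list collapses to the single region 0x1000 - 0x3fff and both
 * spare descriptors are freed.
 */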

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		    "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				simple_unlock(&ex->ex_slock);
				error = tsleep(ex,
				    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0);
				if (error)
					return (error);
				goto alloc_start;
			}
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}
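
/*
 * Usage sketch (addresses illustrative, assuming the extent covers
 * them): reserve a known-fixed range, failing immediately if any part
 * of it is already allocated:
 *
 *	error = extent_alloc_region(ex, 0x3f8, 8, EX_NOWAIT);
 *	if (error)
 *		...range busy (EAGAIN) or no descriptor (ENOMEM)...
 */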

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define	LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
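
/*
 * For example, with a 32-bit u_long, LE_OV(0xfffffff0, 0x20, 0xffffffff)
 * is false: (x + y) wraps around to 0x10, which is less than x, so the
 * first clause catches the overflow that a plain "(x + y) <= z"
 * comparison would miss.
 */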

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion(ex, substart, subend, size, alignment, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross, odontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf(
		    "extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf(
		    "extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Initialize the "don't cross" boundary, a.k.a a line
	 * that a region should not cross.  If the boundary lies
	 * before the region starts, we add the "boundary" argument
	 * until we get a meaningful comparison.
	 *
	 * Start the boundary lines at 0 if the caller requests it.
	 */
	dontcross = 0;
	if (boundary) {
		dontcross =
		    ((flags & EX_BOUNDZERO) ? 0 : ex->ex_start) + boundary;
		while (dontcross < substart)
			dontcross += boundary;
	}
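
	/*
	 * E.g. (illustrative): with ex_start 0x1000, boundary 0x1000 and
	 * substart 0x3800, "dontcross" starts at 0x2000 and is advanced
	 * to 0x4000, the first boundary line at or beyond substart.
	 */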

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Adjust boundary for a meaningful
				 * comparison.
				 */
				while (dontcross <= newstart) {
					odontcross = dontcross;
					dontcross += boundary;

					/*
					 * If we run past the end of
					 * the extent or the boundary
					 * overflows, then the request
					 * can't fit.
					 */
					if ((dontcross > ex->ex_end) ||
					    (dontcross < odontcross))
						goto fail;
				}

				/* Do the boundary check. */
				if (newend >= dontcross) {
					/*
					 * Candidate region crosses
					 * boundary.  Try again.
					 */
					continue;
				}
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		simple_unlock(&ex->ex_slock);
		error = tsleep(ex,
		    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
		if (error)
			return (error);
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}
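
/*
 * Usage sketch (parameters illustrative): carve a 0x100-byte,
 * 0x10-aligned chunk out of the 0x4000 - 0x7fff subrange, letting the
 * allocator choose the placement:
 *
 *	u_long addr;
 *
 *	error = extent_alloc_subregion(ex, 0x4000, 0x7fff, 0x100,
 *	    0x10, 0, EX_NOWAIT, &addr);
 *	if (error == 0)
 *		...addr holds the start of the allocated region...
 */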

int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);
	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
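	/*
	 * E.g. case 4 (illustrative): freeing 0x1400 - 0x14ff out of the
	 * region 0x1000 - 0x1fff shrinks the existing descriptor to
	 * 0x1000 - 0x13ff and records 0x1500 - 0x1fff in the newly
	 * allocated descriptor "nrp".
	 */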
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}
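
/*
 * Usage sketch (values illustrative), releasing the range taken in the
 * extent_alloc_region() example above:
 *
 *	error = extent_free(ex, 0x3f8, 8, EX_NOWAIT);
 *	if (error)
 *		...no descriptor was available to split a region...
 */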

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			simple_unlock(&ex->ex_slock);
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
	}

 alloc:
	rp = (struct extent_region *)
	    malloc(sizeof(struct extent_region), ex->ex_mtype,
	    (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				free(rp, ex->ex_mtype);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	free(rp, ex->ex_mtype);
}

void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}