/*	$NetBSD: subr_extent.c,v 1.34 2000/06/26 14:21:14 mrg Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>

#include <vm/vm.h>

#include <uvm/uvm_extern.h>

#define KMEM_IS_RUNNING		(kmem_map != NULL)
#elif defined(_EXTENT_TESTING)
/*
 * User-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define \
malloc(s, t, flags)		malloc(s)
#define \
free(p, t)			free(p)
#define \
tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define \
wakeup(chan)			((void)0)
#define \
pool_get(pool, flags)		malloc(pool->pr_size, 0, 0)
#define \
pool_put(pool, rp)		free(rp, 0)
#define \
panic(a)			printf(a)
#define \
splhigh()			(1)
#define \
splx(s)				((void)(s))

#define \
simple_lock_init(l)		((void)(l))
#define \
simple_lock(l)			((void)(l))
#define \
simple_unlock(l)		((void)(l))
#define KMEM_IS_RUNNING		(1)
#endif

static pool_handle_t expool_create __P((void));
static void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *, struct extent_region *));
static struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

static pool_handle_t expool;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
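
/*
 * A worked example (hypothetical values): with _align = 0x1000 and
 * _skew = 0, EXTENT_ALIGN(0x12345, 0x1000, 0) evaluates to
 * (0x12345 + 0xfff) & ~0xfff == 0x13000, the next 4KB boundary.
 * A non-zero skew shifts every boundary by that offset: with
 * _skew = 0x400 the acceptable addresses are 0x400, 0x1400, 0x2400,
 * ..., so EXTENT_ALIGN(0x12345, 0x1000, 0x400) == 0x12400.  Note that
 * for a power-of-two _align, (-(_align)) is the same bit mask as
 * ~((_align) - 1).
 */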

/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc()).
 */

static pool_handle_t
expool_create()
{
#if defined(_KERNEL)
	expool = pool_create(sizeof(struct extent_region), 0, 0,
	    0, "extent", 0, 0, 0, 0);
#else
	expool = (pool_handle_t)malloc(sizeof(*expool), 0, 0);
	expool->pr_size = sizeof(struct extent_region);
#endif
	return (expool);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	const char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);
	int s;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		s = splhigh();
		if (expool == NULL)
			expool_create();
		splx(s);
		if (expool == NULL)
			return (NULL);

		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
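
/*
 * A usage sketch (hypothetical names and ranges).  A driver managing a
 * 64KB I/O port space might create a dynamic extent:
 *
 *	struct extent *ioport_ex;
 *
 *	ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
 *	    NULL, 0, EX_NOWAIT);
 *
 * A fixed extent instead supplies its own storage, typically sized
 * with EXTENT_FIXED_STORAGE_SIZE() from <sys/extent.h> for the number
 * of region descriptors to pre-allocate:
 *
 *	static long ex_storage[
 *	    EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
 *
 *	ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
 *	    (caddr_t)ex_storage, sizeof(ex_storage),
 *	    EX_NOWAIT | EX_NOCOALESCE);
 */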

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
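
/*
 * A worked coalescing example (hypothetical values): given existing
 * regions [0x1000, 0x13ff] and [0x1800, 0x1bff], inserting
 * [0x1400, 0x17ff] with "after" pointing at the first region both
 * appends to it and exactly fills the gap to the second, so the two
 * descriptors merge into a single region [0x1000, 0x1bff] and both
 * spare descriptors (the caller's "rp" and the absorbed successor)
 * are handed back to extent_free_region_descriptor().
 */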

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		    "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				simple_unlock(&ex->ex_slock);
				error = tsleep(ex,
				    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0);
				if (error) {
					/*
					 * Don't leak the region descriptor
					 * if the sleep is interrupted.
					 */
					simple_lock(&ex->ex_slock);
					extent_free_region_descriptor(ex,
					    myrp);
					simple_unlock(&ex->ex_slock);
					return (error);
				}
				goto alloc_start;
			}
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}
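
/*
 * A usage sketch (hypothetical values, assuming the "ioport_ex"
 * extent from the extent_create() example above): reserve the
 * sixteen ports starting at 0x3f8, failing rather than sleeping if
 * any of them are already allocated:
 *
 *	error = extent_alloc_region(ioport_ex, 0x3f8, 0x10, EX_NOWAIT);
 *	if (error)
 *		return (error);
 */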

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
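
/*
 * A worked example (hypothetical values, 32-bit u_long): for
 * x = 0xfffffff0 and y = 0x20, (x + y) wraps around to 0x10, so a
 * naive "x + y <= z" would succeed for almost any z.  The first term
 * catches this: 0x10 >= 0xfffffff0 is false, and the macro correctly
 * reports that the sum does not fit below z.
 */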

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, skew, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf(
		    "extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf(
		    "extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart + 1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						continue;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (dontcross > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  "newstart" is set to the beginning
	 * of the subregion (if there were no allocated regions) or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart + 1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (dontcross > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has occurred:
	 *
	 * There is no chunk large enough to hold the request.
	 *
	 * If EX_FAST was not specified, there is not an
	 * exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		simple_unlock(&ex->ex_slock);
		error = tsleep(ex,
		    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
		if (error) {
			/*
			 * Don't leak the region descriptor if the
			 * sleep is interrupted.
			 */
			simple_lock(&ex->ex_slock);
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (error);
		}
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}
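
/*
 * A usage sketch (hypothetical values, "ioport_ex" as above):
 * allocate 0x100 bytes anywhere in [0x1000, 0x7fff], aligned to a
 * 0x10 byte boundary, not crossing any 0x1000 byte boundary, and
 * taking the first fit rather than the best fit:
 *
 *	u_long result;
 *
 *	error = extent_alloc_subregion1(ioport_ex, 0x1000, 0x7fff,
 *	    0x100, 0x10, 0, 0x1000, EX_NOWAIT | EX_FAST, &result);
 *	if (error == 0)
 *		printf("allocated at 0x%lx\n", result);
 */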

int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 * 1. (start == er_start) && (end == er_end):
	 *    Free descriptor.
	 *
	 * 2. (start == er_start) && (end < er_end):
	 *    Adjust er_start.
	 *
	 * 3. (start > er_start) && (end == er_end):
	 *    Adjust er_end.
	 *
	 * 4. (start > er_start) && (end < er_end):
	 *    Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}
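
/*
 * A worked example of case 4 (hypothetical values, "ioport_ex" as
 * above): if a region covers [0x1000, 0x1fff] and the caller frees
 * the middle chunk [0x1400, 0x17ff], the existing descriptor is
 * trimmed to [0x1000, 0x13ff] and the pre-allocated "nrp" takes over
 * [0x1800, 0x1fff]:
 *
 *	error = extent_free(ioport_ex, 0x1400, 0x400, EX_NOWAIT);
 */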

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;
	int s;

	/*
	 * If the kernel memory allocator is not yet running, we can't
	 * use it (obviously).
	 */
	if (KMEM_IS_RUNNING == 0)
		flags &= ~EX_MALLOCOK;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			simple_unlock(&ex->ex_slock);
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
	}

 alloc:
	s = splhigh();
	if (expool == NULL && !expool_create()) {
		splx(s);
		return (NULL);
	}

	rp = pool_get(expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splhigh();
				pool_put(expool, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splhigh();
	pool_put(expool, rp);
	splx(s);
}

void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}