/*	$NetBSD: subr_extent.c,v 1.41 2001/05/09 23:38:20 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>

#include <uvm/uvm_extern.h>

#define	KMEM_IS_RUNNING		(kmem_map != NULL)
#elif defined(_EXTENT_TESTING)
/*
 * user-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define	\
malloc(s, t, flags)		malloc(s)
#define	\
free(p, t)			free(p)
#define	\
tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	\
ltsleep(chan,pri,str,timo,lck)	(EWOULDBLOCK)
#define	\
wakeup(chan)			((void)0)
#define	\
pool_get(pool, flags)		malloc((pool)->pr_size,0,0)
#define	\
pool_put(pool, rp)		free(rp,0)
#define	\
panic(a)			printf(a)
#define	\
splhigh()			(1)
#define	\
splx(s)				((void)(s))

#define	\
simple_lock_init(l)		((void)(l))
#define	\
simple_lock(l)			((void)(l))
#define	\
simple_unlock(l)		((void)(l))
#define	KMEM_IS_RUNNING		(1)
#endif

static	void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *, struct extent_region *));
static	struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static	void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

static struct pool expool;
static struct simplelock expool_init_slock = SIMPLELOCK_INITIALIZER;
static int expool_initialized;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
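/*
 * Illustrative example (not part of the original source): with
 * _start = 0x1234, _align = 0x100 and _skew = 0, the macro computes
 * ((0x1234 + 0xff) & ~0xff) = 0x1300, i.e. the next 0x100-aligned
 * address at or above 0x1234.  A non-zero skew shifts the alignment
 * grid: EXTENT_ALIGN(0x1234, 0x100, 0x10) = 0x1310, an address that
 * lies 0x10 past a 0x100 boundary.
 */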

/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc()).
 */

static __inline void
expool_init(void)
{

	simple_lock(&expool_init_slock);
	if (expool_initialized) {
		simple_unlock(&expool_init_slock);
		return;
	}

#if defined(_KERNEL)
	pool_init(&expool, sizeof(struct extent_region), 0, 0, 0,
	    "extent", 0, 0, 0, 0);
#else
	expool.pr_size = sizeof(struct extent_region);
#endif

	expool_initialized = 1;
	simple_unlock(&expool_init_slock);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	const char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);
	int s;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		s = splhigh();
		if (expool_initialized == 0)
			expool_init();
		splx(s);

		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
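
/*
 * Illustrative usage sketch (not part of the original source; the names
 * below are examples only).  A fixed extent supplies its own storage for
 * the extent header and the region descriptors, so it can be used before
 * malloc() is available:
 *
 *	long ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
 *	struct extent *ioport_ex;
 *
 *	ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
 *	    (caddr_t)ex_storage, sizeof(ex_storage), EX_NOWAIT);
 *
 * EXTENT_FIXED_STORAGE_SIZE() and the EX_* flags are declared in
 * <sys/extent.h>.
 */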

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
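
/*
 * Illustrative example (not part of the original source): on an extent
 * that allows coalescing, inserting the region [0x100, 0x1ff] directly
 * after an existing region [0x0, 0xff] simply extends that region's
 * er_end to 0x1ff and frees the passed-in descriptor.  If a region
 * [0x200, 0x2ff] also exists, the new region exactly fills the gap, the
 * two neighbours are merged into [0x0, 0x2ff], and both spare
 * descriptors are freed.
 */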

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		 ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				error = ltsleep(ex,
				    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0, &ex->ex_slock);
				if (error)
					return (error);
				goto alloc_start;
			}
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
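
/*
 * Illustrative example (not part of the original source): on a 32-bit
 * u_long, LE_OV(0xfffffff0, 0x20, 0xffffffff) is false because
 * 0xfffffff0 + 0x20 wraps to 0x10, which fails the ">= (x)" test;
 * without that first term the wrapped sum would incorrectly pass the
 * "<= (z)" comparison.
 */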

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, skew, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;
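
				/*
				 * Illustrative example (not part of the
				 * original source): with newstart = 0x10f0,
				 * boundary = 0x1000 and a boundary grid
				 * anchored at 0, this yields dontcross =
				 * EXTENT_ALIGN(0x10f1, 0x1000, 0) - 1 =
				 * 0x1fff, the last address the candidate
				 * may occupy.  A candidate starting exactly
				 * at 0x2000 gets dontcross = 0x2fff, which
				 * is why a region may begin on a boundary
				 * but must end before the next one.
				 */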

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						continue;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 * There is no chunk large enough to hold the request.
	 *
	 * If EX_FAST was not specified, there is not an
	 * exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		error = ltsleep(ex,
		    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
		    "extnt", 0, &ex->ex_slock);
		if (error)
			return (error);
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}

int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}
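
/*
 * Illustrative example (not part of the original source): freeing
 * [0x40, 0x7f] from an allocated region [0x00, 0xff] is case 4 above;
 * the existing descriptor is trimmed to [0x00, 0x3f] and the
 * pre-allocated descriptor becomes [0x80, 0xff].  Freeing [0x00, 0xff]
 * in full would be case 1, and the descriptor would simply be removed
 * from the region list.
 */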

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;
	int s;

	/*
	 * If the kernel memory allocator is not yet running, we can't
	 * use it (obviously).
	 */
	if (KMEM_IS_RUNNING == 0)
		flags &= ~EX_MALLOCOK;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			if (ltsleep(&fex->fex_freelist,
			    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0, &ex->ex_slock))
				return (NULL);
		}
	}

 alloc:
	s = splhigh();
	if (expool_initialized == 0)
		expool_init();
	rp = pool_get(&expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splhigh();
				pool_put(&expool, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splhigh();
	pool_put(&expool, rp);
	splx(s);
}

void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}