/*	$NetBSD: subr_extent.c,v 1.36 2000/07/07 14:10:48 mrg Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>

#include <uvm/uvm_extern.h>

#define	KMEM_IS_RUNNING		(kmem_map != NULL)
#elif defined(_EXTENT_TESTING)
/*
 * user-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define	\
malloc(s, t, flags)		malloc(s)
#define	\
free(p, t)			free(p)
#define	\
tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	\
wakeup(chan)			((void)0)
#define	\
pool_get(pool, flags)		malloc(pool->pr_size, 0, 0)
#define	\
pool_put(pool, rp)		free(rp, 0)
#define	\
panic(a)			printf(a)
#define	\
splhigh()			(1)
#define	\
splx(s)				((void)(s))

#define	\
simple_lock_init(l)		((void)(l))
#define	\
simple_lock(l)			((void)(l))
#define	\
simple_unlock(l)		((void)(l))
#define	KMEM_IS_RUNNING		(1)
#endif

static	pool_handle_t expool_create __P((void));
static	void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *, struct extent_region *));
static	struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static	void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

static pool_handle_t expool;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define	EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
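
/*
 * Worked example (illustrative, not from the original source):
 * EXTENT_ALIGN(0x233, 0x10, 0) rounds 0x233 up to 0x240.  A non-zero
 * skew shifts the alignment boundaries by that amount, so
 * EXTENT_ALIGN(0x233, 0x10, 3) yields 0x233 itself, since 0x233 falls
 * on a 0x10 boundary offset by 3 (0x230 + 3).
 */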

/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc()).
 */

static pool_handle_t
expool_create()
{
#if defined(_KERNEL)
	expool = pool_create(sizeof(struct extent_region), 0, 0,
	    0, "extent", 0, 0, 0, 0);
#else
	expool = (pool_handle_t)malloc(sizeof(*expool), 0, 0);
	expool->pr_size = sizeof(struct extent_region);
#endif
	return (expool);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	const char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);
	int s;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		s = splhigh();
		if (expool == NULL)
			expool_create();
		splx(s);
		if (expool == NULL)
			return (NULL);

		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
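
/*
 * Illustrative use of extent_create() (a sketch, not part of the
 * original file; the malloc type and flag choices are assumptions):
 *
 *	struct extent *ex;
 *
 *	ex = extent_create("example", 0x1000, 0x1fff, M_DEVBUF,
 *	    NULL, 0, EX_NOWAIT);
 *	if (ex == NULL)
 *		panic("example: can't create extent");
 */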

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
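/*
 * Coalescing illustration (not from the original source): given
 * existing regions [0x00-0x0f] and [0x20-0x2f], inserting [0x10-0x1f]
 * after the first region exactly fills the gap; the first descriptor
 * grows to cover 0x00-0x2f, and both "rp" and the second region's
 * descriptor are freed.
 */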
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor,
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				simple_unlock(&ex->ex_slock);
				error = tsleep(ex,
				    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0);
				if (error)
					return (error);
				goto alloc_start;
			}
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}
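
/*
 * Illustrative use (a sketch, continuing the extent_create() example
 * above): reserve the specific range 0x1200-0x12ff.
 *
 *	if (extent_alloc_region(ex, 0x1200, 0x100, EX_NOWAIT) != 0)
 *		panic("example: range busy or out of descriptors");
 */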

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define	LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
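
/*
 * Worked example (illustrative): on a 32-bit system, LE_OV(0xfffffff0,
 * 0x20, 0xffffffff) is false: (x + y) wraps to 0x10, which is less than
 * x, so the first clause catches the overflow instead of falsely
 * concluding that the sum fits below z.
 */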

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, skew, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		 "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;
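
				/*
				 * Illustrative numbers: with ex_start = 0,
				 * boundary = 0x1000 and newstart = 0x800,
				 * "dontcross" becomes 0xfff; a candidate
				 * region may begin at 0x800 but must end
				 * at or before 0xfff.
				 */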

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						continue;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		simple_unlock(&ex->ex_slock);
		error = tsleep(ex,
		    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0);
		if (error)
			return (error);
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}
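
/*
 * Illustrative use (a sketch, continuing the example above): carve a
 * 0x100-byte chunk out of the subrange 0x1000-0x17ff, aligned to 0x10,
 * with no skew and no boundary restriction, taking the best fit.
 *
 *	u_long where;
 *
 *	if (extent_alloc_subregion1(ex, 0x1000, 0x17ff, 0x100,
 *	    0x10, 0, 0, EX_NOWAIT, &where) == 0)
 *		printf("example: chunk at 0x%lx\n", where);
 */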

int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}
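
/*
 * Illustrative use (a sketch, continuing the example above): freeing
 * the middle of an allocated region exercises case 4.  If 0x1200-0x12ff
 * is allocated, freeing 0x1240-0x127f splits it into 0x1200-0x123f and
 * 0x1280-0x12ff, consuming one new region descriptor.
 *
 *	(void) extent_free(ex, 0x1240, 0x40, EX_NOWAIT);
 */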

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;
	int s;

	/*
	 * If the kernel memory allocator is not yet running, we can't
	 * use it (obviously).
	 */
	if (KMEM_IS_RUNNING == 0)
		flags &= ~EX_MALLOCOK;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			simple_unlock(&ex->ex_slock);
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
	}

 alloc:
	s = splhigh();
	if (expool == NULL && !expool_create()) {
		splx(s);
		return (NULL);
	}

	rp = pool_get(expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splhigh();
				pool_put(expool, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splhigh();
	pool_put(expool, rp);
	splx(s);
}
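
/*
 * Sample output (illustrative; the format comes from the printf calls
 * below, the numbers are invented):
 *
 *	extent `example' (0x1000 - 0x1fff), flags = 0x0
 *	     0x1200 - 0x12ff
 */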
void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}