/*	$NetBSD: subr_extent.c,v 1.40 2001/04/27 00:06:11 marcus Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>

#include <uvm/uvm_extern.h>

#define	KMEM_IS_RUNNING		(kmem_map != NULL)
#elif defined(_EXTENT_TESTING)
/*
 * user-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define	\
malloc(s, t, flags)		malloc(s)
#define	\
free(p, t)			free(p)
#define	\
tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	\
ltsleep(chan,pri,str,timo,lck)	(EWOULDBLOCK)
#define	\
wakeup(chan)			((void)0)
#define	\
pool_get(pool, flags)		malloc(pool->pr_size,0,0)
#define	\
pool_put(pool, rp)		free(rp,0)
#define	\
panic(a)			printf(a)
#define	\
splhigh()			(1)
#define	\
splx(s)				((void)(s))

#define	\
simple_lock_init(l)		((void)(l))
#define	\
simple_lock(l)			((void)(l))
#define	\
simple_unlock(l)		((void)(l))
#define	KMEM_IS_RUNNING		(1)
#endif

static	struct pool *expool_create __P((void));
static	void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *, struct extent_region *));
static	struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static	void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

static struct pool *expool;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define EXTENT_ALIGN(_start, _align, _skew)	\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
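
/*
 * Illustrative example (hypothetical values): with an alignment of 0x10
 * and no skew, EXTENT_ALIGN(0x1003, 0x10, 0) yields 0x1010; with a skew
 * of 0x4, EXTENT_ALIGN(0x1003, 0x10, 0x4) yields 0x1004, i.e. the
 * smallest address >= 0x1003 that lies 0x4 beyond a 0x10 boundary.
 */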

/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc()).
 */

static struct pool *
expool_create()
{
#if defined(_KERNEL)
	expool = pool_create(sizeof(struct extent_region), 0, 0,
	    0, "extent", 0, 0, 0, 0);
#else
	expool = (struct pool *)malloc(sizeof(*expool),0,0);
	expool->pr_size = sizeof(struct extent_region);
#endif
	return (expool);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	const char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);
	int s;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		s = splhigh();
		if (expool == NULL)
			expool_create();
		splx(s);
		if (expool == NULL)
			return (NULL);

		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
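
/*
 * Illustrative (hypothetical) use, e.g. from a bus front-end that wants
 * to manage a 64KB I/O port space with malloc()'ed descriptors:
 *
 *	struct extent *ioport_ex;
 *
 *	ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
 *	    NULL, 0, EX_NOCOALESCE);
 *
 * Passing non-NULL "storage" (and its size) instead creates a fixed
 * extent whose region descriptors all come from that static buffer.
 */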

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references,
 * so we don't need any locking.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
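
/*
 * Illustrative example (hypothetical addresses): with coalescing enabled,
 * inserting [0x2000, 0x2fff] between existing regions [0x1000, 0x1fff]
 * and [0x3000, 0x3fff] first appends to the former and then absorbs the
 * latter, leaving a single region [0x1000, 0x3fff] and freeing two
 * region descriptors.
 */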

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				error = ltsleep(ex,
				    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0, &ex->ex_slock);
				if (error)
					return (error);
				goto alloc_start;
			}
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}
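
/*
 * Illustrative (hypothetical) use: reserve a fixed, well-known range in
 * the "ioport_ex" extent sketched above, failing immediately with EAGAIN
 * if any part of it is already allocated (EX_NOWAIT meaning "don't sleep"):
 *
 *	error = extent_alloc_region(ioport_ex, 0x3b0, 0x30, EX_NOWAIT);
 */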

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
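
/*
 * For example (hypothetical 32-bit u_long values): LE_OV(0x10, 0x20, 0x40)
 * is true, while LE_OV(0xfffffff0, 0x20, 0xffffffff) is false because the
 * sum wraps around and the "(x + y) >= x" test catches the overflow.
 */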

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, skew, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		 "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
      "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						continue;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		error = ltsleep(ex,
		    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
		    "extnt", 0, &ex->ex_slock);
		if (error)
			return (error);
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}
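
/*
 * Illustrative (hypothetical) use: carve a 0x1000-byte chunk out of the
 * upper half of the "ioport_ex" extent sketched above, aligned to 0x100,
 * with no skew, never crossing a 0x1000 boundary, returning the chosen
 * start address in "addr":
 *
 *	u_long addr;
 *
 *	error = extent_alloc_subregion1(ioport_ex, 0x8000, 0xffff,
 *	    0x1000, 0x100, 0, 0x1000, EX_NOWAIT, &addr);
 */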

int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
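	/*
	 * Illustrative example of case 4 (hypothetical addresses): freeing
	 * [0x2000, 0x2fff] out of an allocated region [0x1000, 0x3fff]
	 * splits it into [0x1000, 0x1fff] and [0x3000, 0x3fff], consuming
	 * the spare descriptor "nrp" allocated above.
	 */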
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before the chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}
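
/*
 * Illustrative counterpart (hypothetical) to the extent_alloc_region()
 * example above, releasing the same range:
 *
 *	error = extent_free(ioport_ex, 0x3b0, 0x30, EX_NOWAIT);
 */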

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;
	int s;

	/*
	 * If the kernel memory allocator is not yet running, we can't
	 * use it (obviously).
	 */
	if (KMEM_IS_RUNNING == 0)
		flags &= ~EX_MALLOCOK;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			if (ltsleep(&fex->fex_freelist,
			    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0, &ex->ex_slock))
				return (NULL);
		}
	}

 alloc:
	s = splhigh();
	if (expool == NULL && !expool_create()) {
		splx(s);
		return (NULL);
	}

	rp = pool_get(expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splhigh();
				pool_put(expool, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splhigh();
	pool_put(expool, rp);
	splx(s);
}

void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf(" 0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}
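
/*
 * For reference, extent_print() output looks roughly like this
 * (hypothetical extent name and region values):
 *
 *	extent `ioport' (0x0 - 0xffff), flags = 0x0
 *	 0x3b0 - 0x3df
 *	 0x8000 - 0x8fff
 */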