/*	$NetBSD: subr_extent.c,v 1.37 2000/08/12 16:29:36 sommerfeld Exp $	*/

/*-
 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matthias Drochner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General purpose extent manager.
 */

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/extent.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>

#include <uvm/uvm_extern.h>

#define	KMEM_IS_RUNNING		(kmem_map != NULL)
#elif defined(_EXTENT_TESTING)
/*
 * user-land definitions, so it can fit into a testing harness.
 */
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/extent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/*
 * Use multi-line #defines to avoid screwing up the kernel tags file;
 * without this, ctags produces a tags file where panic() shows up
 * in subr_extent.c rather than subr_prf.c.
 */
#define	\
malloc(s, t, flags)		malloc(s)
#define	\
free(p, t)			free(p)
#define	\
tsleep(chan, pri, str, timo)	(EWOULDBLOCK)
#define	\
wakeup(chan)			((void)0)
#define	\
pool_get(pool, flags)		malloc(pool->pr_size, 0, 0)
#define	\
pool_put(pool, rp)		free(rp, 0)
#define	\
panic				printf
#define	\
splhigh()			(1)
#define	\
splx(s)				((void)(s))

#define	\
simple_lock_init(l)		((void)(l))
#define	\
simple_lock(l)			((void)(l))
#define	\
simple_unlock(l)		((void)(l))
#define	KMEM_IS_RUNNING		(1)
#endif

static	pool_handle_t expool_create __P((void));
static	void extent_insert_and_optimize __P((struct extent *, u_long, u_long,
	    int, struct extent_region *, struct extent_region *));
static	struct extent_region *extent_alloc_region_descriptor
	    __P((struct extent *, int));
static	void extent_free_region_descriptor __P((struct extent *,
	    struct extent_region *));

static pool_handle_t expool;

/*
 * Macro to align to an arbitrary power-of-two boundary.
 */
#define	EXTENT_ALIGN(_start, _align, _skew)		\
	(((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew))
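
/*
 * Illustrative sketch, not part of the original source: the macro
 * rounds "_start" up to the next "_align" boundary, offset by "_skew".
 * For example, EXTENT_ALIGN(0x1234, 0x100, 0) computes
 * ((0x1234 + 0xff) & ~0xff) == 0x1300, the first 0x100-aligned value
 * at or above 0x1234; EXTENT_ALIGN(0x1234, 0x100, 0x10) yields 0x1310,
 * i.e. 0x10 past a 0x100 boundary.  This works because, for a
 * power-of-two "_align", (-(_align)) == ~((_align) - 1) in two's
 * complement arithmetic.
 */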

/*
 * Create the extent_region pool.
 * (This is deferred until one of our callers thinks we can malloc()).
 */

static pool_handle_t
expool_create()
{
#if defined(_KERNEL)
	expool = pool_create(sizeof(struct extent_region), 0, 0,
	    0, "extent", 0, 0, 0, 0);
#else
	expool = (pool_handle_t)malloc(sizeof(*expool), 0, 0);
	expool->pr_size = sizeof(struct extent_region);
#endif
	return (expool);
}

/*
 * Allocate and initialize an extent map.
 */
struct extent *
extent_create(name, start, end, mtype, storage, storagesize, flags)
	const char *name;
	u_long start, end;
	int mtype;
	caddr_t storage;
	size_t storagesize;
	int flags;
{
	struct extent *ex;
	caddr_t cp = storage;
	size_t sz = storagesize;
	struct extent_region *rp;
	int fixed_extent = (storage != NULL);
	int s;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (name == NULL)
		panic("extent_create: name == NULL");
	if (end < start) {
		printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n",
		    name, start, end);
		panic("extent_create: end < start");
	}
	if (fixed_extent && (storagesize < sizeof(struct extent_fixed)))
		panic("extent_create: fixed extent, bad storagesize 0x%lx",
		    (u_long)storagesize);
	if (fixed_extent == 0 && (storagesize != 0 || storage != NULL))
		panic("extent_create: storage provided for non-fixed");
#endif

	/* Allocate extent descriptor. */
	if (fixed_extent) {
		struct extent_fixed *fex;

		memset(storage, 0, storagesize);

		/*
		 * Align all descriptors on "long" boundaries.
		 */
		fex = (struct extent_fixed *)cp;
		ex = (struct extent *)fex;
		cp += ALIGN(sizeof(struct extent_fixed));
		sz -= ALIGN(sizeof(struct extent_fixed));
		fex->fex_storage = storage;
		fex->fex_storagesize = storagesize;

		/*
		 * In a fixed extent, we have to pre-allocate region
		 * descriptors and place them in the extent's freelist.
		 */
		LIST_INIT(&fex->fex_freelist);
		while (sz >= ALIGN(sizeof(struct extent_region))) {
			rp = (struct extent_region *)cp;
			cp += ALIGN(sizeof(struct extent_region));
			sz -= ALIGN(sizeof(struct extent_region));
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}
	} else {
		s = splhigh();
		if (expool == NULL)
			expool_create();
		splx(s);
		if (expool == NULL)
			return (NULL);

		ex = (struct extent *)malloc(sizeof(struct extent),
		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (ex == NULL)
			return (NULL);
	}

	/* Fill in the extent descriptor and return it to the caller. */
	simple_lock_init(&ex->ex_slock);
	LIST_INIT(&ex->ex_regions);
	ex->ex_name = name;
	ex->ex_start = start;
	ex->ex_end = end;
	ex->ex_mtype = mtype;
	ex->ex_flags = 0;
	if (fixed_extent)
		ex->ex_flags |= EXF_FIXED;
	if (flags & EX_NOCOALESCE)
		ex->ex_flags |= EXF_NOCOALESCE;
	return (ex);
}
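
/*
 * Illustrative usage sketch, not part of the original source; the
 * names ("hypio", ioex, hypio_store) are hypothetical.  A dynamically
 * allocated extent managing a 64KB range:
 *
 *	struct extent *ioex;
 *
 *	ioex = extent_create("hypio", 0x0000, 0xffff, M_DEVBUF,
 *	    NULL, 0, EX_NOWAIT);
 *
 * A fixed extent instead supplies its own storage, so it can be used
 * before the kernel memory allocator is running:
 *
 *	static long hypio_store[EXTENT_FIXED_STORAGE_SIZE(8) /
 *	    sizeof(long)];
 *
 *	ioex = extent_create("hypio", 0x0000, 0xffff, M_DEVBUF,
 *	    (caddr_t)hypio_store, sizeof(hypio_store),
 *	    EX_NOWAIT | EX_NOCOALESCE);
 */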

/*
 * Destroy an extent map.
 * Since we're freeing the data, there can't be any references
 * so we don't need any locking.
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}
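
/*
 * Illustrative sketch, not part of the original source: with regions
 * [0x00-0x0f] and [0x20-0x2f] in the list, inserting start = 0x10,
 * size = 0x10 first appends to the left neighbor (making [0x00-0x1f]),
 * then sees that the right neighbor starts at exactly start + size,
 * so the two descriptors merge into a single [0x00-0x2f]; both the
 * caller-supplied descriptor and the right neighbor's descriptor are
 * freed.
 */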

/*
 * Allocate a specific region in an extent map.
 */
int
extent_alloc_region(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *last, *myrp;
	u_long end = start + (size - 1);
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_region: NULL extent");
	if (size < 1) {
		printf("extent_alloc_region: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_region: bad size");
	}
	if (end < start) {
		printf(
		    "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_alloc_region: overflow");
	}
#endif

	/*
	 * Make sure the requested region lies within the
	 * extent.
	 *
	 * We don't lock to check the range, because those values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if ((start < ex->ex_start) || (end > ex->ex_end)) {
#ifdef DIAGNOSTIC
		printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_region: start 0x%lx, end 0x%lx\n",
		    start, end);
		panic("extent_alloc_region: region lies outside extent");
#else
		return (EINVAL);
#endif
	}

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_region: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Attempt to place ourselves in the desired area of the
	 * extent.  We save ourselves some work by keeping the list sorted.
	 * In other words, if the start of the current region is greater
	 * than the end of our region, we don't have to search any further.
	 */

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start > end) {
			/*
			 * We lie before this region and don't
			 * conflict.
			 */
			break;
		}

		/*
		 * The current region begins before we end.
		 * Check for a conflict.
		 */
		if (rp->er_end >= start) {
			/*
			 * We conflict.  If we can (and want to) wait,
			 * do so.
			 */
			if (flags & EX_WAITSPACE) {
				ex->ex_flags |= EXF_WANTED;
				error = ltsleep(ex,
				    PNORELOCK | PRIBIO |
				    ((flags & EX_CATCH) ? PCATCH : 0),
				    "extnt", 0, &ex->ex_slock);
				if (error)
					return (error);
				goto alloc_start;
			}
			extent_free_region_descriptor(ex, myrp);
			simple_unlock(&ex->ex_slock);
			return (EAGAIN);
		}
		/*
		 * We don't conflict, but this region lies before
		 * us.  Keep a pointer to this region, and keep
		 * trying.
		 */
		last = rp;
	}

	/*
	 * We don't conflict with any regions.  "last" points
	 * to the region we fall after, or is NULL if we belong
	 * at the beginning of the region list.  Insert ourselves.
	 */
	extent_insert_and_optimize(ex, start, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	return (0);
}
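
/*
 * Illustrative usage sketch, not part of the original source, reusing
 * the hypothetical "ioex" extent from above: reserving a fixed,
 * well-known range such as 0x300 - 0x30f:
 *
 *	int error;
 *
 *	error = extent_alloc_region(ioex, 0x300, 0x10, EX_NOWAIT);
 *	if (error)
 *		... range already allocated, or no descriptor ...
 *
 * The matching release is extent_free(ioex, 0x300, 0x10, EX_NOWAIT).
 */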

/*
 * Macro to check (x + y) <= z.  This check is designed to fail
 * if an overflow occurs.
 */
#define	LE_OV(x, y, z)	((((x) + (y)) >= (x)) && (((x) + (y)) <= (z)))
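
/*
 * Illustrative sketch, not part of the original source: with 32-bit
 * u_longs, a naive "(x + y) <= z" with x = 0xfffffff0 and y = 0x20
 * wraps around to 0x10 and falsely succeeds for almost any z.  LE_OV
 * detects the wrap via ((x + y) >= x), so the whole check correctly
 * fails.
 */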

/*
 * Allocate a region in an extent map subregion.
 *
 * If EX_FAST is specified, we return the first fit in the map.
 * Otherwise, we try to minimize fragmentation by finding the
 * smallest gap that will hold the request.
 *
 * The allocated region is aligned to "alignment", which must be
 * a power of 2.
 */
int
extent_alloc_subregion1(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, skew, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.  Don't lock before
	 * here!  This could block.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	simple_lock(&ex->ex_slock);

	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart + 1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						continue;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart + 1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions has
	 * occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		error = ltsleep(ex,
		    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
		    "extnt", 0, &ex->ex_slock);
		if (error)
			return (error);
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}
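
/*
 * Illustrative usage sketch, not part of the original source, again
 * using the hypothetical "ioex": any 0x20-byte chunk, aligned to 0x10,
 * not crossing a 0x100 boundary, anywhere in the extent:
 *
 *	u_long where;
 *	int error;
 *
 *	error = extent_alloc_subregion1(ioex, ioex->ex_start,
 *	    ioex->ex_end, 0x20, 0x10, 0, 0x100, EX_NOWAIT, &where);
 *
 * On success, "where" holds the start of the allocated range.  The
 * extent_alloc() and extent_alloc_subregion() interfaces in
 * <sys/extent.h> are thin wrappers around this function.
 */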

int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}
	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below. */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}
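
/*
 * Illustrative sketch, not part of the original source: freeing the
 * middle of an allocated region exercises case 4 above.  If
 * [0x00-0x2f] is allocated, extent_free(ex, 0x10, 0x10, EX_NOWAIT)
 * trims the existing descriptor to [0x00-0x0f] and links the
 * pre-allocated descriptor in as [0x20-0x2f], leaving two regions.
 */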

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;
	int s;

	/*
	 * If the kernel memory allocator is not yet running, we can't
	 * use it (obviously).
	 */
	if (KMEM_IS_RUNNING == 0)
		flags &= ~EX_MALLOCOK;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			ex->ex_flags |= EXF_FLWANTED;
			if (ltsleep(&fex->fex_freelist,
			    PNORELOCK | PRIBIO |
			    ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0, &ex->ex_slock))
				return (NULL);
		}
	}

 alloc:
	s = splhigh();
	if (expool == NULL && !expool_create()) {
		splx(s);
		return (NULL);
	}

	rp = pool_get(expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splhigh();
				pool_put(expool, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splhigh();
	pool_put(expool, rp);
	splx(s);
}

void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}