/* $NetBSD: uvm_physseg.c,v 1.10 2019/09/20 11:09:43 maxv Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vm_page.h 7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Consolidated API from uvm_page.c and others.
 * Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
 * rbtree(3) backing implementation by:
 * Santhosh N. Raju <santhosh.raju@gmail.com>
 */

#ifdef _KERNEL_OPT
#include "opt_uvm.h"
#endif

#include <sys/param.h>
#include <sys/types.h>
#include <sys/extent.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_physseg.h>

/*
 * uvm_physseg: describes one segment of physical memory
 */
struct uvm_physseg {
	struct rb_node	rb_node;	/* tree information */
	paddr_t		start;		/* PF# of first page in segment */
	paddr_t		end;		/* (PF# of last page in segment) + 1 */
	paddr_t		avail_start;	/* PF# of first free page in segment */
	paddr_t		avail_end;	/* (PF# of last free page in segment) +1 */
	struct vm_page	*pgs;		/* vm_page structures (from start) */
	struct extent	*ext;		/* extent(9) structure to manage pgs[] */
	int		free_list;	/* which free list they belong on */
	u_int		start_hint;	/* start looking for free pages here */
					/* protected by uvm_fpageqlock */
#ifdef __HAVE_PMAP_PHYSSEG
	struct pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};
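
/*
 * A sketch of the implicit layout relationships, inferred from how the
 * fields are used below (they are not asserted anywhere in this file):
 *
 *	start <= avail_start <= avail_end <= end
 *	pgs[i] describes page frame (start + i), i.e.
 *	pgs[i].phys_addr == ctob(start + i)
 */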

/*
 * These functions are reserved for uvm(9) internal use and are not
 * exported in the header file uvm_physseg.h
 *
 * Thus they are redefined here.
 */
void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);

/* returns a pgs array */
struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);

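/*
 * A sketch of the typical call sequence for the internal helpers above,
 * as driven by uvm_physseg_plug() below once kmem(9) is up (the local
 * variable names here are illustrative only):
 *
 *	slab = kmem_zalloc(sizeof(struct vm_page) * n, KM_NOSLEEP);
 *	uvm_physseg_seg_chomp_slab(ps, slab, n);
 *	pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);
 *	uvm_physseg_init_seg(ps, pgs);
 */
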
#if defined(UVM_HOTPLUG) /* rbtree implementation */

#define	HANDLE_TO_PHYSSEG_NODE(h)	((struct uvm_physseg *)(h))
#define	PHYSSEG_NODE_TO_HANDLE(u)	((uvm_physseg_t)(u))

struct uvm_physseg_graph {
	struct rb_tree rb_tree;		/* Tree for entries */
	int nentries;			/* Number of entries */
};

static struct uvm_physseg_graph uvm_physseg_graph;

/*
 * Note on kmem(9) allocator usage:
 * We take the conservative approach that plug/unplug are allowed to
 * fail in high memory stress situations.
 *
 * We want to avoid re-entrant situations in which one plug/unplug
 * operation is waiting on a previous one to complete, since this
 * makes the design more complicated than necessary.
 *
 * We may review this and change its behaviour, once the use cases
 * become more obvious.
 */

/*
 * Special alloc()/free() functions for boot time support:
 * We assume that alloc() at boot time is only for new 'vm_physseg's.
 * This allows us to use a static array for memory allocation at boot
 * time. Thus we avoid using kmem(9) which is not ready at this point
 * in boot.
 *
 * After kmem(9) is ready, we use it. We currently discard any free()s
 * to this static array, since the size is small enough to be a
 * trivial waste on all architectures we run on.
 */

static size_t nseg = 0;
static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];

static void *
uvm_physseg_alloc(size_t sz)
{
	/*
	 * During boot time, we only support allocating vm_physseg
	 * entries from the static array.
	 * We need to assert for this.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to alloc size other than multiple"
			    " of struct uvm_physseg at boot\n", __func__);

		size_t n = sz / sizeof(struct uvm_physseg);
		nseg += n;

		KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);

		return &uvm_physseg[nseg - n];
	}

	return kmem_zalloc(sz, KM_NOSLEEP);
}

static void
uvm_physseg_free(void *p, size_t sz)
{
	/*
	 * This is a bit tricky. We do allow simulation of free()
	 * during boot (e.g. when MD code is "steal"ing memory,
	 * and the segment has been exhausted and thus needs to be
	 * free()-ed).
	 * free() also complicates things because we leak the
	 * free(). Therefore calling code can't assume that free()-ed
	 * memory is available for alloc() again, at boot time.
	 *
	 * Thus we can't explicitly disallow free()s during
	 * boot time. However, the same restriction for alloc()
	 * applies to free(). We only allow uvm_physseg related free()s
	 * via this function during boot time.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free size other than struct uvm_physseg"
			    " at boot\n", __func__);

	}

	/*
	 * Could have been in a single if(){} block - split for
	 * clarity
	 */

	if ((struct uvm_physseg *)p >= uvm_physseg &&
	    (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free() other than struct uvm_physseg"
			    " from static array\n", __func__);

		if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
			panic("%s: tried to free() the entire static array!", __func__);
		return; /* Nothing to free */
	}

	kmem_free(p, sz);
}

/* XXX: Multi page size */
bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int preload;
	size_t slabpages;
	struct uvm_physseg *ps, *current_ps = NULL;
	struct vm_page *slab = NULL, *pgs = NULL;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	if (ps != NULL) /* XXX: do we allow "update" plugs? */
		return false;
#endif

	/*
	 * do we have room?
	 */

	ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
	if (ps == NULL) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
		    VM_PHYSSEG_MAX, pfn, pfn + pages + 1);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return false;
	}

	/* span init */
	ps->start = pfn;
	ps->end = pfn + pages;

	/*
	 * XXX: Ugly hack because uvmexp.npages accounts for only
	 * those pages in the segment included below as well - this
	 * should be legacy and removed.
	 */

	ps->avail_start = ps->start;
	ps->avail_end = ps->end;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	preload = 1; /* We are going to assume it is a preload */

	RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
		/* If there are non NULL pages then we are not in a preload */
		if (current_ps->pgs != NULL) {
			preload = 0;
			/* Try to scavenge from earlier unplug()s. */
			pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);

			if (pgs != NULL) {
				break;
			}
		}
	}

	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		if (pgs == NULL) { /* Brand new */
			/* Iteratively try alloc down from uvmexp.npages */
			for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
				slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
				if (slab != NULL)
					break;
			}

			if (slab == NULL) {
				uvm_physseg_free(ps, sizeof(struct uvm_physseg));
				return false;
			}

			uvm_physseg_seg_chomp_slab(ps, slab, (size_t) slabpages);
			/* We allocate enough for this plug */
			pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);

			if (pgs == NULL) {
				printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
				return false;
			}
		} else {
			/* Reuse scavenged extent */
			ps->ext = current_ps->ext;
		}

		physmem += pages;
		uvmpdpol_reinit();
	} else { /* Boot time - see uvm_page.c:uvm_page_init() */
		pgs = NULL;
		ps->pgs = pgs;
	}

	/*
	 * now insert us in the proper place in uvm_physseg_graph.rb_tree
	 */

	current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
	if (current_ps != ps) {
		panic("uvm_page_physload: Duplicate address range detected!");
	}
	uvm_physseg_graph.nentries++;

	/*
	 * uvm_pagefree() requires the PHYS_TO_VM_PAGE(pgs[i]) on the
	 * newly allocated pgs[] to return the correct value. This is
	 * a bit of a chicken and egg problem, since it needs
	 * uvm_physseg_find() to succeed. For this, the node needs to
	 * be inserted *before* uvm_physseg_init_seg() happens.
	 *
	 * During boot, this happens anyway, since
	 * uvm_physseg_init_seg() is called later on and separately
	 * from uvm_page.c:uvm_page_init().
	 * In the case of hotplug we need to ensure this.
	 */

	if (__predict_true(!preload))
		uvm_physseg_init_seg(ps, pgs);

	if (psp != NULL)
		*psp = ps;

	return true;
}
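
/*
 * A minimal usage sketch (hypothetical caller, not from this file): plugging
 * in a newly discovered range of RAM after boot, where 'pa' and 'sz' are the
 * byte address and size reported by an MD hotplug driver:
 *
 *	uvm_physseg_t upm;
 *
 *	if (!uvm_physseg_plug(atop(pa), atop(sz), &upm))
 *		return ENOMEM;
 */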

static int
uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
{
	const struct uvm_physseg *enode1 = nnode1;
	const struct uvm_physseg *enode2 = nnode2;

	KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
	KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);

	if (enode1->start < enode2->start)
		return -1;
	if (enode1->start >= enode2->end)
		return 1;
	return 0;
}

static int
uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
{
	const struct uvm_physseg *enode = nnode;
	const paddr_t pa = *(const paddr_t *) pkey;

	if (enode->start <= pa && pa < enode->end)
		return 0;
	if (enode->start < pa)
		return -1;
	if (enode->end > pa)
		return 1;

	return 0;
}

static const rb_tree_ops_t uvm_physseg_tree_ops = {
	.rbto_compare_nodes = uvm_physseg_compare_nodes,
	.rbto_compare_key = uvm_physseg_compare_key,
	.rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
	.rbto_context = NULL
};

/*
 * uvm_physseg_init: init the physmem
 *
 * => physmem unit should not be in use at this point
 */

void
uvm_physseg_init(void)
{
	rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
	uvm_physseg_graph.nentries = 0;
}

uvm_physseg_t
uvm_physseg_get_next(uvm_physseg_t upm)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_RIGHT);
}

uvm_physseg_t
uvm_physseg_get_prev(uvm_physseg_t upm)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_LEFT);
}

uvm_physseg_t
uvm_physseg_get_last(void)
{
	return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
}

uvm_physseg_t
uvm_physseg_get_first(void)
{
	return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	struct uvm_physseg *ps =
	    (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));

	return ps->end - 1;
}
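
/*
 * A sketch of the iteration idiom used by callers of the accessors above
 * elsewhere in uvm(9):
 *
 *	uvm_physseg_t bank;
 *
 *	for (bank = uvm_physseg_get_first();
 *	     uvm_physseg_valid_p(bank);
 *	     bank = uvm_physseg_get_next(bank)) {
 *		... use uvm_physseg_get_start(bank), etc. ...
 *	}
 */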

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (seg->free_list != freelist) {
		return false;
	}

	/*
	 * During cold boot, what we're about to unplug hasn't been
	 * put on the uvm freelist, nor has uvmexp.npages been
	 * updated. (This happens in uvm_page.c:uvm_page_init())
	 *
	 * For hotplug, we assume here that the pages being unloaded
	 * here are completely out of sight of uvm (i.e. not on any uvm
	 * lists), and that uvmexp.npages has been suitably
	 * decremented before we're called.
	 *
	 * XXX: will avail_end == start if avail_start < avail_end?
	 */

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		return uvm_physseg_unplug(seg->avail_start, 1);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		return uvm_physseg_unplug(seg->avail_end - 1, 1);
	}

	return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);
	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);

	/* Always unplug from front */
	return uvm_physseg_unplug(seg->avail_start, 1);
}

/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
uvm_physseg_t
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{
	struct uvm_physseg *ps = NULL;

	ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);

	if (ps != NULL && offp != NULL)
		*offp = pframe - ps->start;

	return ps;
}
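
/*
 * A sketch of how a physical byte address 'pa' is typically resolved to its
 * vm_page through this lookup (similar in spirit to what
 * uvm_page.c:PHYS_TO_VM_PAGE() does):
 *
 *	psize_t off;
 *	uvm_physseg_t upm = uvm_physseg_find(atop(pa), &off);
 *	struct vm_page *pg =
 *	    uvm_physseg_valid_p(upm) ? uvm_physseg_get_pg(upm, off) : NULL;
 */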

#else /* UVM_HOTPLUG */

/*
 * physical memory config is stored in vm_physmem.
 */

#define	VM_PHYSMEM_PTR(i)	(&vm_physmem[i])
#if VM_PHYSSEG_MAX == 1
#define	VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
#else
#define	VM_PHYSMEM_PTR_SWAP(i, j) \
	do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
#endif

#define	HANDLE_TO_PHYSSEG_NODE(h)	(VM_PHYSMEM_PTR((int)h))
#define	PHYSSEG_NODE_TO_HANDLE(u)	((int)((vsize_t) (u - vm_physmem) / sizeof(struct uvm_physseg)))

static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
static int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
#define	vm_nphysmem	vm_nphysseg

void
uvm_physseg_init(void)
{
	/* XXX: Provisioning for rb_tree related init(s) */
	return;
}

int
uvm_physseg_get_next(uvm_physseg_t lcv)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv + 1);
}

int
uvm_physseg_get_prev(uvm_physseg_t lcv)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv - 1);
}

int
uvm_physseg_get_last(void)
{
	return (vm_nphysseg - 1);
}

int
uvm_physseg_get_first(void)
{
	return 0;
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	int lcv;
	paddr_t last = 0;
	struct uvm_physseg *ps;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps = VM_PHYSMEM_PTR(lcv);
		if (last < ps->end)
			last = ps->end;
	}

	return last;
}

static struct vm_page *
uvm_post_preload_check(void)
{
	int preload, lcv;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
		if (VM_PHYSMEM_PTR(lcv)->pgs)
			break;
	}
	preload = (lcv == vm_nphysmem);

	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		panic("Tried to add RAM after uvm_page_init");
	}

	return NULL;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	if (seg->free_list != freelist) {
		return false;
	}

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		seg->avail_start++;
		seg->start++;
		/* nothing left? nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		seg->avail_end--;
		seg->end--;
		/* nothing left? nuke it */
		if (seg->avail_end == seg->start) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);
	seg->avail_start++;
	/* truncate! */
	seg->start = seg->avail_start;

	/* nothing left? nuke it */
	if (seg->avail_start == seg->end) {
		if (vm_nphysmem == 1)
			panic("uvm_page_physget: out of memory!");
		vm_nphysmem--;
		for (x = psi ; x < vm_nphysmem ; x++)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x + 1);
	}
	return (true);
}

bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int lcv;
	struct vm_page *pgs;
	struct uvm_physseg *ps;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	if (uvm_physseg_valid_p(upm)) /* XXX: do we allow "update" plugs? */
		return false;
#endif

	paddr_t start = pfn;
	paddr_t end = pfn + pages;
	paddr_t avail_start = start;
	paddr_t avail_end = end;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	/*
	 * do we have room?
	 */

	if (vm_nphysmem == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		if (psp != NULL)
			*psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
		return false;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */
	pgs = uvm_post_preload_check();

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = VM_PHYSMEM_PTR(vm_nphysmem);
	lcv = vm_nphysmem;
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if (start < VM_PHYSMEM_PTR(lcv)->start)
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if ((end - start) >
			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->pgs = pgs;

	vm_nphysmem++;

	if (psp != NULL)
		*psp = lcv;

	return true;
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX == 1
static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
#else
static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
#endif

/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{

#if VM_PHYSSEG_MAX == 1
	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}

#if VM_PHYSSEG_MAX == 1
static inline int
vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{

	/* 'contig' case */
	if (pframe >= segs[0].start && pframe < segs[0].end) {
		if (offp)
			*offp = pframe - segs[0].start;
		return(0);
	}
	return(-1);
}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

static inline int
vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* binary search for it */
	int start, len, guess;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1). this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
		guess = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= segs[guess].start) {
			/* was try correct? */
			if (pframe < segs[guess].end) {
				if (offp)
					*offp = pframe - segs[guess].start;
				return guess;		/* got it */
			}
			start = guess + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);
}

#else

static inline int
vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* linear search for it */
	int lcv;

	for (lcv = 0; lcv < nsegs; lcv++) {
		if (pframe >= segs[lcv].start &&
		    pframe < segs[lcv].end) {
			if (offp)
				*offp = pframe - segs[lcv].start;
			return(lcv);		/* got it */
		}
	}
	return(-1);
}
#endif
#endif /* UVM_HOTPLUG */

bool
uvm_physseg_valid_p(uvm_physseg_t upm)
{
	struct uvm_physseg *ps;

	if (upm == UVM_PHYSSEG_TYPE_INVALID ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
		return false;

	/*
	 * This is the delicate init dance -
	 * needs to go with the dance.
	 */
	if (uvm.page_init_done != true)
		return true;

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	/* Extra checks needed only post uvm_page_init() */
	if (ps->pgs == NULL)
		return false;

	/* XXX: etc. */

	return true;
}

/*
 * Boot protocol dictates that these must be able to return partially
 * initialised segments.
 */
paddr_t
uvm_physseg_get_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->start;
}

paddr_t
uvm_physseg_get_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->end;
}

paddr_t
uvm_physseg_get_avail_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
}

#if defined(UVM_PHYSSEG_LEGACY)
void
uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_end;
	avail_end = uvm_physseg_get_avail_end(upm);
	KASSERT(uvm_physseg_valid_p(upm));
	KASSERT(avail_start < avail_end && avail_start >= ps->start);
#endif

	ps->avail_start = avail_start;
}

void
uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_start;
	avail_start = uvm_physseg_get_avail_start(upm);
	KASSERT(uvm_physseg_valid_p(upm));
	KASSERT(avail_end > avail_start && avail_end <= ps->end);
#endif

	ps->avail_end = avail_end;
}

#endif /* UVM_PHYSSEG_LEGACY */

paddr_t
uvm_physseg_get_avail_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
}

struct vm_page *
uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
}

#ifdef __HAVE_PMAP_PHYSSEG
struct pmap_physseg *
uvm_physseg_get_pmseg(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
}
#endif

int
uvm_physseg_get_free_list(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
}

u_int
uvm_physseg_get_start_hint(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
}

bool
uvm_physseg_set_start_hint(uvm_physseg_t upm, u_int start_hint)
{
	if (uvm_physseg_valid_p(upm) == false)
		return false;

	HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
	return true;
}

void
uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
{
	psize_t i;
	psize_t n;
	paddr_t paddr;
	struct uvm_physseg *seg;

	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);
	KASSERT(seg != NULL);
	KASSERT(seg->pgs == NULL);

	n = seg->end - seg->start;
	seg->pgs = pgs;

	/* init and free vm_pages (we've already zeroed them) */
	paddr = ctob(seg->start);
	for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
		seg->pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
		VM_MDPAGE_INIT(&seg->pgs[i]);
#endif
		if (atop(paddr) >= seg->avail_start &&
		    atop(paddr) < seg->avail_end) {
			uvmexp.npages++;
			mutex_enter(&uvm_pageqlock);
			/* add page to free pool */
			uvm_pagefree(&seg->pgs[i]);
			mutex_exit(&uvm_pageqlock);
		}
	}
}

void
uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
{
	struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);

	/* max number of pre-boot unplug()s allowed */
#define UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX

	static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];

	if (__predict_false(uvm.page_init_done == false)) {
		seg->ext = extent_create("Boot time slab", (u_long) pgs, (u_long) (pgs + n),
		    (void *)btslab_ex_storage, sizeof(btslab_ex_storage), 0);
	} else {
		seg->ext = extent_create("Hotplug slab", (u_long) pgs, (u_long) (pgs + n), NULL, 0, 0);
	}

	KASSERT(seg->ext != NULL);
}
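
/*
 * A note on the slab bookkeeping above (a sketch of intent, not asserted
 * anywhere): the extent(9) map created here covers the address range
 * [pgs, pgs + n), i.e. the vm_page array itself.
 * uvm_physseg_seg_alloc_from_slab() below carves out runs of vm_page
 * structures (sizeof(struct vm_page) * pages bytes at a time) with
 * extent_alloc(), and uvm_physseg_unplug() returns them with extent_free(),
 * which is what lets later plug()s scavenge vm_page structures left behind
 * by earlier unplug()s.
 */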

struct vm_page *
uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
{
	int err;
	struct uvm_physseg *seg;
	struct vm_page *pgs = NULL;

	KASSERT(pages > 0);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (__predict_false(seg->ext == NULL)) {
		/*
		 * This is a situation unique to boot time.
		 * It shouldn't happen at any point other than from
		 * the first uvm_page.c:uvm_page_init() call
		 * Since we're in a loop, we can get away with the
		 * below.
		 */
		KASSERT(uvm.page_init_done != true);

		uvm_physseg_t upmp = uvm_physseg_get_prev(upm);
		KASSERT(upmp != UVM_PHYSSEG_TYPE_INVALID);

		seg->ext = HANDLE_TO_PHYSSEG_NODE(upmp)->ext;

		KASSERT(seg->ext != NULL);
	}

	/* We allocate enough for this segment */
	err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0, EX_BOUNDZERO, (u_long *)&pgs);

	if (err != 0) {
#ifdef DEBUG
		printf("%s: extent_alloc failed with error: %d\n",
		    __func__, err);
#endif
	}

	return pgs;
}

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

uvm_physseg_t
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
	struct uvm_physseg *ps;
	uvm_physseg_t upm;

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: load attempted after uvm_page_init()\n", __func__);
	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");
	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);
	if (start >= end)
		panic("uvm_page_physload: start >= end");

	if (uvm_physseg_plug(start, end - start, &upm) == false) {
		panic("uvm_physseg_plug() failed at boot.");
		/* NOTREACHED */
		return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
	}

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	/* Legacy */
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->free_list = free_list; /* XXX: */

	return upm;
}
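
/*
 * A sketch of the typical boot-time caller (machine-dependent pmap/machdep
 * code; the symbol names below are illustrative, not from this file):
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 *
 * i.e. byte addresses are converted to page frame numbers with atop()
 * before being handed to uvm.
 */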

bool
uvm_physseg_unplug(paddr_t pfn, size_t pages)
{
	uvm_physseg_t upm;
	paddr_t off = 0, start __diagused, end;
	struct uvm_physseg *seg;

	upm = uvm_physseg_find(pfn, &off);

	if (!uvm_physseg_valid_p(upm)) {
		printf("%s: Tried to unplug from unknown offset\n", __func__);
		return false;
	}

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	start = uvm_physseg_get_start(upm);
	end = uvm_physseg_get_end(upm);

	if (end < (pfn + pages)) {
		printf("%s: Tried to unplug oversized span\n", __func__);
		return false;
	}

	KASSERT(pfn == start + off); /* sanity */

	if (__predict_true(uvm.page_init_done == true)) {
		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
		if (extent_free(seg->ext, (u_long)(seg->pgs + off), sizeof(struct vm_page) * pages, EX_MALLOCOK | EX_NOWAIT) != 0)
			return false;
	}

	if (off == 0 && (pfn + pages) == end) {
#if defined(UVM_HOTPLUG) /* rbtree implementation */
		int segcount = 0;
		struct uvm_physseg *current_ps;
		/* Complete segment */
		if (uvm_physseg_graph.nentries == 1)
			panic("%s: out of memory!", __func__);

		if (__predict_true(uvm.page_init_done == true)) {
			RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
				if (seg->ext == current_ps->ext)
					segcount++;
			}
			KASSERT(segcount > 0);

			if (segcount == 1) {
				extent_destroy(seg->ext);
			}

			/*
			 * We assume that the unplug will succeed from
			 * this point onwards
			 */
			uvmexp.npages -= (int) pages;
		}

		rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
		memset(seg, 0, sizeof(struct uvm_physseg));
		uvm_physseg_free(seg, sizeof(struct uvm_physseg));
		uvm_physseg_graph.nentries--;
#else /* UVM_HOTPLUG */
		int x;
		if (vm_nphysmem == 1)
			panic("uvm_page_physget: out of memory!");
		vm_nphysmem--;
		for (x = upm ; x < vm_nphysmem ; x++)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x + 1);
#endif /* UVM_HOTPLUG */
		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
		return true;
	}

	if (off > 0 &&
	    (pfn + pages) < end) {
#if defined(UVM_HOTPLUG) /* rbtree implementation */
		/* middle chunk - need a new segment */
		struct uvm_physseg *ps, *current_ps;
		ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
		if (ps == NULL) {
			printf("%s: Unable to allocate new fragment vm_physseg\n",
			    __func__);
			return false;
		}

		/* Remove middle chunk */
		if (__predict_true(uvm.page_init_done == true)) {
			KASSERT(seg->ext != NULL);
			ps->ext = seg->ext;

			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
			/*
			 * We assume that the unplug will succeed from
			 * this point onwards
			 */
			uvmexp.npages -= (int) pages;
		}

		ps->start = pfn + pages;
		ps->avail_start = ps->start; /* XXX: Legacy */

		ps->end = seg->end;
		ps->avail_end = ps->end; /* XXX: Legacy */

		seg->end = pfn;
		seg->avail_end = seg->end; /* XXX: Legacy */

		/*
		 * The new pgs array points to the beginning of the
		 * tail fragment.
		 */
		if (__predict_true(uvm.page_init_done == true))
			ps->pgs = seg->pgs + off + pages;

		current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
		if (current_ps != ps) {
			panic("uvm_page_physload: Duplicate address range detected!");
		}
		uvm_physseg_graph.nentries++;
#else /* UVM_HOTPLUG */
		panic("%s: can't unplug() from the middle of a segment without"
		    " UVM_HOTPLUG\n", __func__);
		/* NOTREACHED */
#endif /* UVM_HOTPLUG */
		return true;
	}

	if (off == 0 && (pfn + pages) < end) {
		/* Remove front chunk */
		if (__predict_true(uvm.page_init_done == true)) {
			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
			/*
			 * We assume that the unplug will succeed from
			 * this point onwards
			 */
			uvmexp.npages -= (int) pages;
		}

		/* Truncate */
		seg->start = pfn + pages;
		seg->avail_start = seg->start; /* XXX: Legacy */

		/*
		 * Move the pgs array start to the beginning of the
		 * tail end.
		 */
		if (__predict_true(uvm.page_init_done == true))
			seg->pgs += pages;

		return true;
	}

	if (off > 0 && (pfn + pages) == end) {
		/* back chunk */

		/* Truncate! */
		seg->end = pfn;
		seg->avail_end = seg->end; /* XXX: Legacy */

		uvmexp.npages -= (int) pages;

		return true;
	}

	printf("%s: Tried to unplug unknown range\n", __func__);

	return false;
}