/* $NetBSD: uvm_physseg.c,v 1.4 2016/12/25 03:39:26 christos Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Consolidated API from uvm_page.c and others.
 * Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
 * rbtree(3) backing implementation by:
 * Santhosh N. Raju <santhosh.raju@gmail.com>
 */

#ifdef _KERNEL_OPT
#include "opt_uvm.h"
#endif

#include <sys/param.h>
#include <sys/types.h>
#include <sys/extent.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_physseg.h>

/*
 * uvm_physseg: describes one segment of physical memory
 */
struct uvm_physseg {
        struct rb_node rb_node;         /* tree information */
        paddr_t start;                  /* PF# of first page in segment */
        paddr_t end;                    /* (PF# of last page in segment) + 1 */
        paddr_t avail_start;            /* PF# of first free page in segment */
        paddr_t avail_end;              /* (PF# of last free page in segment) + 1 */
        struct vm_page *pgs;            /* vm_page structures (from start) */
        struct extent *ext;             /* extent(9) structure to manage pgs[] */
        int free_list;                  /* which free list they belong on */
        u_int start_hint;               /* start looking for free pages here */
                                        /* protected by uvm_fpageqlock */
#ifdef __HAVE_PMAP_PHYSSEG
        struct pmap_physseg pmseg;      /* pmap specific (MD) data */
#endif
};

/*
 * These functions are reserved for uvm(9) internal use and are not
 * exported in the header file uvm_physseg.h
 *
 * Thus they are redefined here.
 */
void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);

/* returns a pgs array */
struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);

#if defined(UVM_HOTPLUG) /* rbtree implementation */

#define HANDLE_TO_PHYSSEG_NODE(h)       ((struct uvm_physseg *)(h))
#define PHYSSEG_NODE_TO_HANDLE(u)       ((uvm_physseg_t)(u))

struct uvm_physseg_graph {
        struct rb_tree rb_tree;         /* Tree for entries */
        int nentries;                   /* Number of entries */
};

static struct uvm_physseg_graph uvm_physseg_graph;

/*
 * Note on kmem(9) allocator usage:
 * We take the conservative approach that plug/unplug are allowed to
 * fail in high memory stress situations.
 *
 * We want to avoid re-entrant situations in which one plug/unplug
 * operation is waiting on a previous one to complete, since this
 * makes the design more complicated than necessary.
 *
 * We may review this and change its behaviour, once the use cases
 * become more obvious.
 */

/*
 * Special alloc()/free() functions for boot time support:
 * We assume that alloc() at boot time is only for new 'vm_physseg's
 * This allows us to use a static array for memory allocation at boot
 * time. Thus we avoid using kmem(9) which is not ready at this point
 * in boot.
 *
 * After kmem(9) is ready, we use it. We currently discard any free()s
 * to this static array, since the size is small enough to be a
 * trivial waste on all architectures we run on.
 */

static size_t nseg = 0;
static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];

static void *
uvm_physseg_alloc(size_t sz)
{
        /*
         * During boot time, we only support allocating vm_physseg
         * entries from the static array.
         * We need to assert for this.
         */

        if (__predict_false(uvm.page_init_done == false)) {
                if (sz % sizeof(struct uvm_physseg))
                        panic("%s: tried to alloc size other than multiple "
                            "of struct uvm_physseg at boot\n", __func__);

                size_t n = sz / sizeof(struct uvm_physseg);
                nseg += n;

                KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);

                return &uvm_physseg[nseg - n];
        }

        return kmem_zalloc(sz, KM_NOSLEEP);
}

static void
uvm_physseg_free(void *p, size_t sz)
{
        /*
         * This is a bit tricky. We do allow simulated free()s during
         * boot, for example when MD code "steal"s memory and the
         * segment it came from has been exhausted (and thus needs to
         * be free()-ed).
         * free() also complicates things because we leak the
         * free()-ed memory. Calling code therefore can't assume that
         * free()-ed memory is available for alloc() again at boot
         * time.
         *
         * Thus we can't explicitly disallow free()s during
         * boot time. However, the same restriction for alloc()
         * applies to free(). We only allow uvm_physseg related free()s
         * via this function during boot time.
         */

        if (__predict_false(uvm.page_init_done == false)) {
                if (sz % sizeof(struct uvm_physseg))
                        panic("%s: tried to free size other than struct uvm_physseg "
                            "at boot\n", __func__);

        }

        /*
         * Could have been in a single if(){} block - split for
         * clarity
         */

        if ((struct uvm_physseg *)p >= uvm_physseg &&
            (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
                if (sz % sizeof(struct uvm_physseg))
                        panic("%s: tried to free() other than struct uvm_physseg "
                            "from static array\n", __func__);

                if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
                        panic("%s: tried to free() the entire static array!", __func__);
                return; /* Nothing to free */
        }

        kmem_free(p, sz);
}

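/*
 * uvm_physseg_plug: plug the physical page span [pfn, pfn + pages) into
 * the VM system as a new segment.
 *
 * => if the VM system is already running, vm_page structures are
 *    allocated (scavenged from an earlier unplug()ed slab if possible)
 *    and the pages are handed to the allocator via uvm_physseg_init_seg().
 * => on success the new segment handle is returned via *psp, if psp is
 *    not NULL.
 */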
/* XXX: Multi page size */
bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
        int preload;
        size_t slabpages;
        struct uvm_physseg *ps, *current_ps = NULL;
        struct vm_page *slab = NULL, *pgs = NULL;

#ifdef DEBUG
        paddr_t off;
        uvm_physseg_t upm;
        upm = uvm_physseg_find(pfn, &off);

        ps = HANDLE_TO_PHYSSEG_NODE(upm);

        if (ps != NULL) /* XXX: do we allow "update" plugs? */
                return false;
#endif

        /*
         * do we have room?
         */

        ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
        if (ps == NULL) {
                printf("uvm_page_physload: unable to load physical memory "
                    "segment\n");
                printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
                    VM_PHYSSEG_MAX, pfn, pfn + pages + 1);
                printf("\tincrease VM_PHYSSEG_MAX\n");
                return false;
        }

        /* span init */
        ps->start = pfn;
        ps->end = pfn + pages;

        /*
         * XXX: Ugly hack because uvmexp.npages accounts for only
         * those pages in the segment included below as well - this
         * should be legacy and removed.
         */

        ps->avail_start = ps->start;
        ps->avail_end = ps->end;

        /*
         * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
         * called yet, so kmem is not available).
         */

        preload = 1; /* We are going to assume it is a preload */

        RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
                /* If there are non NULL pages then we are not in a preload */
                if (current_ps->pgs != NULL) {
                        preload = 0;
                        /* Try to scavenge from earlier unplug()s. */
                        pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);

                        if (pgs != NULL) {
                                break;
                        }
                }
        }

        /*
         * if VM is already running, attempt to kmem_alloc vm_page structures
         */

        if (!preload) {
                if (pgs == NULL) { /* Brand new */
                        /* Iteratively try alloc down from uvmexp.npages */
                        for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
                                slab = kmem_zalloc(sizeof *pgs * (unsigned long)slabpages, KM_NOSLEEP);
                                if (slab != NULL)
                                        break;
                        }

                        if (slab == NULL) {
                                uvm_physseg_free(ps, sizeof(struct uvm_physseg));
                                return false;
                        }

                        uvm_physseg_seg_chomp_slab(ps, slab, (size_t) slabpages);
                        /* We allocate enough for this plug */
                        pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);

                        if (pgs == NULL) {
                                printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
                                return false;
                        }
                } else {
                        /* Reuse scavenged extent */
                        ps->ext = current_ps->ext;
                }

                physmem += pages;
                uvmpdpol_reinit();
        } else { /* Boot time - see uvm_page.c:uvm_page_init() */
                pgs = NULL;
                ps->pgs = pgs;
        }

        /*
         * now insert us in the proper place in uvm_physseg_graph.rb_tree
         */

        current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
        if (current_ps != ps) {
                panic("uvm_page_physload: Duplicate address range detected!");
        }
        uvm_physseg_graph.nentries++;

        /*
         * uvm_pagefree() requires the PHYS_TO_VM_PAGE(pgs[i]) on the
         * newly allocated pgs[] to return the correct value. This is
         * a bit of a chicken and egg problem, since it needs
         * uvm_physseg_find() to succeed. For this, the node needs to
         * be inserted *before* uvm_physseg_init_seg() happens.
         *
         * During boot, this happens anyway, since
         * uvm_physseg_init_seg() is called later on and separately
         * from uvm_page.c:uvm_page_init().
         * In the case of hotplug we need to ensure this.
         */

        if (__predict_true(!preload))
                uvm_physseg_init_seg(ps, pgs);

        if (psp != NULL)
                *psp = ps;

        return true;
}

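/*
 * rb_tree(3) callbacks: uvm_physseg_compare_nodes() orders segments by
 * their start frame (the KASSERTs check that segments never overlap),
 * and uvm_physseg_compare_key() treats a paddr_t key as "equal" to the
 * segment that contains it, which is what lets uvm_physseg_find() use
 * rb_tree_find_node() directly.
 */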
static int
uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
{
        const struct uvm_physseg *enode1 = nnode1;
        const struct uvm_physseg *enode2 = nnode2;

        KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
        KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);

        if (enode1->start < enode2->start)
                return -1;
        if (enode1->start >= enode2->end)
                return 1;
        return 0;
}

static int
uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
{
        const struct uvm_physseg *enode = nnode;
        const paddr_t pa = *(const paddr_t *) pkey;

        if (enode->start <= pa && pa < enode->end)
                return 0;
        if (enode->start < pa)
                return -1;
        if (enode->end > pa)
                return 1;

        return 0;
}

static const rb_tree_ops_t uvm_physseg_tree_ops = {
        .rbto_compare_nodes = uvm_physseg_compare_nodes,
        .rbto_compare_key = uvm_physseg_compare_key,
        .rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
        .rbto_context = NULL
};

/*
 * uvm_physseg_init: init the physmem
 *
 * => physmem unit should not be in use at this point
 */

void
uvm_physseg_init(void)
{
        rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
        uvm_physseg_graph.nentries = 0;
}

uvm_physseg_t
uvm_physseg_get_next(uvm_physseg_t upm)
{
        /* next of invalid is invalid, not fatal */
        if (uvm_physseg_valid_p(upm) == false)
                return UVM_PHYSSEG_TYPE_INVALID;

        return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
            RB_DIR_RIGHT);
}

uvm_physseg_t
uvm_physseg_get_prev(uvm_physseg_t upm)
{
        /* prev of invalid is invalid, not fatal */
        if (uvm_physseg_valid_p(upm) == false)
                return UVM_PHYSSEG_TYPE_INVALID;

        return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
            RB_DIR_LEFT);
}

uvm_physseg_t
uvm_physseg_get_last(void)
{
        return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
}

uvm_physseg_t
uvm_physseg_get_first(void)
{
        return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
        struct uvm_physseg *ps =
            (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));

        return ps->end - 1;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
        struct uvm_physseg *seg;

        if (__predict_true(uvm.page_init_done == true))
                panic("%s: unload attempted after uvm_page_init()\n", __func__);

        seg = HANDLE_TO_PHYSSEG_NODE(upm);

        if (seg->free_list != freelist) {
                paddrp = NULL;
                return false;
        }

        /*
         * During cold boot, what we're about to unplug hasn't been
         * put on the uvm freelist, nor has uvmexp.npages been
         * updated. (This happens in uvm_page.c:uvm_page_init())
         *
         * For hotplug, we assume that the pages being unloaded here
         * are completely out of sight of uvm (i.e. not on any uvm
         * lists), and that uvmexp.npages has been suitably
         * decremented before we're called.
         *
         * XXX: will avail_end == start if avail_start < avail_end?
         */

        /* try from front */
        if (seg->avail_start == seg->start &&
            seg->avail_start < seg->avail_end) {
                *paddrp = ctob(seg->avail_start);
                return uvm_physseg_unplug(seg->avail_start, 1);
        }

        /* try from rear */
        if (seg->avail_end == seg->end &&
            seg->avail_start < seg->avail_end) {
                *paddrp = ctob(seg->avail_end - 1);
                return uvm_physseg_unplug(seg->avail_end - 1, 1);
        }

        return false;
}

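/*
 * uvm_page_physunload_force: as uvm_page_physunload(), but ignores the
 * freelist constraint and always steals the first available page of the
 * segment. Fails only if the segment has no available pages left.
 */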
bool
uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
        struct uvm_physseg *seg;

        seg = HANDLE_TO_PHYSSEG_NODE(upm);

        if (__predict_true(uvm.page_init_done == true))
                panic("%s: unload attempted after uvm_page_init()\n", __func__);
        /* any room in this bank? */
        if (seg->avail_start >= seg->avail_end) {
                paddrp = NULL;
                return false; /* nope */
        }

        *paddrp = ctob(seg->avail_start);

        /* Always unplug from front */
        return uvm_physseg_unplug(seg->avail_start, 1);
}

/*
 * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
 */
uvm_physseg_t
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{
        struct uvm_physseg *ps = NULL;

        ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);

        if (ps != NULL && offp != NULL)
                *offp = pframe - ps->start;

        return ps;
}

#if defined(PMAP_STEAL_MEMORY)
void
uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
{
        struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
        paddr_t avail_end;
        avail_end = uvm_physseg_get_avail_end(upm);
#endif
        KASSERT(avail_start < avail_end && avail_start >= ps->start);
        ps->avail_start = avail_start;
}

void
uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
{
        struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
        paddr_t avail_start;
        avail_start = uvm_physseg_get_avail_start(upm);
#endif

        KASSERT(avail_end > avail_start && avail_end <= ps->end);

        ps->avail_end = avail_end;
}

#endif /* PMAP_STEAL_MEMORY */
#else /* UVM_HOTPLUG */

/*
 * physical memory config is stored in vm_physmem.
 */

#define VM_PHYSMEM_PTR(i)       (&vm_physmem[i])
#if VM_PHYSSEG_MAX == 1
#define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
#else
#define VM_PHYSMEM_PTR_SWAP(i, j) \
        do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
#endif

#define HANDLE_TO_PHYSSEG_NODE(h)       (VM_PHYSMEM_PTR((int)h))
#define PHYSSEG_NODE_TO_HANDLE(u)       ((int)((vsize_t) (u - vm_physmem) / sizeof(struct uvm_physseg)))

static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX];   /* XXXCDC: uvm.physmem */
static int vm_nphysseg = 0;                             /* XXXCDC: uvm.nphysseg */
#define vm_nphysmem vm_nphysseg

void
uvm_physseg_init(void)
{
        /* XXX: Provisioning for rb_tree related init(s) */
        return;
}

int
uvm_physseg_get_next(uvm_physseg_t lcv)
{
        /* next of invalid is invalid, not fatal */
        if (uvm_physseg_valid_p(lcv) == false)
                return UVM_PHYSSEG_TYPE_INVALID;

        return (lcv + 1);
}

int
uvm_physseg_get_prev(uvm_physseg_t lcv)
{
        /* prev of invalid is invalid, not fatal */
        if (uvm_physseg_valid_p(lcv) == false)
                return UVM_PHYSSEG_TYPE_INVALID;

        return (lcv - 1);
}

int
uvm_physseg_get_last(void)
{
        return (vm_nphysseg - 1);
}

int
uvm_physseg_get_first(void)
{
        return 0;
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
        int lcv;
        paddr_t last = 0;
        struct uvm_physseg *ps;

        for (lcv = 0; lcv < vm_nphysseg; lcv++) {
                ps = VM_PHYSMEM_PTR(lcv);
                if (last < ps->end)
                        last = ps->end;
        }

        return last;
}

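/*
 * uvm_post_preload_check: without UVM_HOTPLUG, physical memory may only
 * be added before uvm_page_init(); panic if any segment already has a
 * pgs[] array attached (i.e. the VM system is already running).
 */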
static struct vm_page *
uvm_post_preload_check(void)
{
        int preload, lcv;

        /*
         * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
         * called yet, so kmem is not available).
         */

        for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
                if (VM_PHYSMEM_PTR(lcv)->pgs)
                        break;
        }
        preload = (lcv == vm_nphysmem);

        /*
         * if VM is already running, attempt to kmem_alloc vm_page structures
         */

        if (!preload) {
                panic("Tried to add RAM after uvm_page_init");
        }

        return NULL;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
        int x;
        struct uvm_physseg *seg;

        uvm_post_preload_check();

        seg = VM_PHYSMEM_PTR(psi);

        if (seg->free_list != freelist) {
                paddrp = NULL;
                return false;
        }

        /* try from front */
        if (seg->avail_start == seg->start &&
            seg->avail_start < seg->avail_end) {
                *paddrp = ctob(seg->avail_start);
                seg->avail_start++;
                seg->start++;
                /* nothing left? nuke it */
                if (seg->avail_start == seg->end) {
                        if (vm_nphysmem == 1)
                                panic("uvm_page_physget: out of memory!");
                        vm_nphysmem--;
                        for (x = psi ; x < vm_nphysmem ; x++)
                                /* structure copy */
                                VM_PHYSMEM_PTR_SWAP(x, x + 1);
                }
                return (true);
        }

        /* try from rear */
        if (seg->avail_end == seg->end &&
            seg->avail_start < seg->avail_end) {
                *paddrp = ctob(seg->avail_end - 1);
                seg->avail_end--;
                seg->end--;
                /* nothing left? nuke it */
                if (seg->avail_end == seg->start) {
                        if (vm_nphysmem == 1)
                                panic("uvm_page_physget: out of memory!");
                        vm_nphysmem--;
                        for (x = psi ; x < vm_nphysmem ; x++)
                                /* structure copy */
                                VM_PHYSMEM_PTR_SWAP(x, x + 1);
                }
                return (true);
        }

        return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
        int x;
        struct uvm_physseg *seg;

        uvm_post_preload_check();

        seg = VM_PHYSMEM_PTR(psi);

        /* any room in this bank? */
        if (seg->avail_start >= seg->avail_end) {
                paddrp = NULL;
                return false; /* nope */
        }

        *paddrp = ctob(seg->avail_start);
        seg->avail_start++;
        /* truncate! */
        seg->start = seg->avail_start;

        /* nothing left? nuke it */
        if (seg->avail_start == seg->end) {
                if (vm_nphysmem == 1)
                        panic("uvm_page_physget: out of memory!");
                vm_nphysmem--;
                for (x = psi ; x < vm_nphysmem ; x++)
                        /* structure copy */
                        VM_PHYSMEM_PTR_SWAP(x, x + 1);
        }
        return (true);
}

bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
        int lcv;
        struct vm_page *pgs;
        struct uvm_physseg *ps;

#ifdef DEBUG
        paddr_t off;
        uvm_physseg_t upm;
        upm = uvm_physseg_find(pfn, &off);

        if (uvm_physseg_valid_p(upm)) /* XXX: do we allow "update" plugs? */
                return false;
#endif

        paddr_t start = pfn;
        paddr_t end = pfn + pages;
        paddr_t avail_start = start;
        paddr_t avail_end = end;

        if (uvmexp.pagesize == 0)
                panic("uvm_page_physload: page size not set!");

        /*
         * do we have room?
         */

        if (vm_nphysmem == VM_PHYSSEG_MAX) {
                printf("uvm_page_physload: unable to load physical memory "
                    "segment\n");
                printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
                    VM_PHYSSEG_MAX, (long long)start, (long long)end);
                printf("\tincrease VM_PHYSSEG_MAX\n");
                if (psp != NULL)
                        *psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
                return false;
        }

        /*
         * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
         * called yet, so kmem is not available).
         */
        pgs = uvm_post_preload_check();

        /*
         * now insert us in the proper place in vm_physmem[]
         */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
        /* random: put it at the end (easy!) */
        ps = VM_PHYSMEM_PTR(vm_nphysmem);
        lcv = vm_nphysmem;
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
        {
                int x;
                /* sort by address for binary search */
                for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
                        if (start < VM_PHYSMEM_PTR(lcv)->start)
                                break;
                ps = VM_PHYSMEM_PTR(lcv);
                /* move back other entries, if necessary ... */
                for (x = vm_nphysmem ; x > lcv ; x--)
                        /* structure copy */
                        VM_PHYSMEM_PTR_SWAP(x, x - 1);
        }
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
        {
                int x;
                /* sort by largest segment first */
                for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
                        if ((end - start) >
                            (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
                                break;
                ps = VM_PHYSMEM_PTR(lcv);
                /* move back other entries, if necessary ... */
                for (x = vm_nphysmem ; x > lcv ; x--)
                        /* structure copy */
                        VM_PHYSMEM_PTR_SWAP(x, x - 1);
        }
#else
        panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

        ps->start = start;
        ps->end = end;
        ps->avail_start = avail_start;
        ps->avail_end = avail_end;

        ps->pgs = pgs;

        vm_nphysmem++;

        if (psp != NULL)
                *psp = lcv;

        return true;
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX == 1
static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
#else
static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
#endif

/*
 * uvm_physseg_find: find the vm_physseg structure that a PA belongs to
 */
int
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{

#if VM_PHYSSEG_MAX == 1
        return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
        return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
        return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}

#if VM_PHYSSEG_MAX == 1
static inline int
vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{

        /* 'contig' case */
        if (pframe >= segs[0].start && pframe < segs[0].end) {
                if (offp)
                        *offp = pframe - segs[0].start;
                return(0);
        }
        return(-1);
}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

static inline int
vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
        /* binary search for it */
        int start, len, guess;

        /*
         * if try is too large (thus target is less than try) we reduce
         * the length to trunc(len/2) [i.e. everything smaller than "try"]
         *
         * if the try is too small (thus target is greater than try) then
         * we set the new start to be (try + 1). this means we need to
         * reduce the length to (round(len/2) - 1).
         *
         * note "adjust" below which takes advantage of the fact that
         * (round(len/2) - 1) == trunc((len - 1) / 2)
         * for any value of len we may have
         */

        for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
                guess = start + (len / 2); /* try in the middle */

                /* start past our try? */
                if (pframe >= segs[guess].start) {
                        /* was try correct? */
                        if (pframe < segs[guess].end) {
                                if (offp)
                                        *offp = pframe - segs[guess].start;
                                return guess; /* got it */
                        }
                        start = guess + 1; /* next time, start here */
                        len--; /* "adjust" */
                } else {
                        /*
                         * pframe before try, just reduce length of
                         * region, done in "for" loop
                         */
                }
        }
        return(-1);
}

#else

static inline int
vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
        /* linear search for it */
        int lcv;

        for (lcv = 0; lcv < nsegs; lcv++) {
                if (pframe >= segs[lcv].start &&
                    pframe < segs[lcv].end) {
                        if (offp)
                                *offp = pframe - segs[lcv].start;
                        return(lcv); /* got it */
                }
        }
        return(-1);
}
#endif
#endif /* UVM_HOTPLUG */

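/*
 * uvm_physseg_valid_p: check whether a segment handle refers to a live,
 * usable segment. Before uvm_page_init() a segment without an attached
 * pgs[] array is still considered valid; afterwards it is not.
 */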
bool
uvm_physseg_valid_p(uvm_physseg_t upm)
{
        struct uvm_physseg *ps;

        if (upm == UVM_PHYSSEG_TYPE_INVALID ||
            upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
            upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
                return false;

        /*
         * This is part of the delicate boot time init dance -
         * before uvm_page_init() has attached the pgs[] array,
         * a segment is still considered valid.
         */
        if (uvm.page_init_done != true)
                return true;

        ps = HANDLE_TO_PHYSSEG_NODE(upm);

        /* Extra checks needed only post uvm_page_init() */
        if (ps->pgs == NULL)
                return false;

        /* XXX: etc. */

        return true;
}

/*
 * Boot protocol dictates that these must be able to return partially
 * initialised segments.
 */
paddr_t
uvm_physseg_get_start(uvm_physseg_t upm)
{
        if (uvm_physseg_valid_p(upm) == false)
                return (paddr_t) -1;

        return HANDLE_TO_PHYSSEG_NODE(upm)->start;
}

paddr_t
uvm_physseg_get_end(uvm_physseg_t upm)
{
        if (uvm_physseg_valid_p(upm) == false)
                return (paddr_t) -1;

        return HANDLE_TO_PHYSSEG_NODE(upm)->end;
}

paddr_t
uvm_physseg_get_avail_start(uvm_physseg_t upm)
{
        if (uvm_physseg_valid_p(upm) == false)
                return (paddr_t) -1;

        return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
}

#if defined(PMAP_STEAL_MEMORY)
void
uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
{
        KASSERT(uvm_physseg_valid_p(upm));
        HANDLE_TO_PHYSSEG_NODE(upm)->avail_start = avail_start;
}
#endif

paddr_t
uvm_physseg_get_avail_end(uvm_physseg_t upm)
{
        if (uvm_physseg_valid_p(upm) == false)
                return (paddr_t) -1;

        return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
}

struct vm_page *
uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
{
        KASSERT(uvm_physseg_valid_p(upm));
        return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
}

#ifdef __HAVE_PMAP_PHYSSEG
struct pmap_physseg *
uvm_physseg_get_pmseg(uvm_physseg_t upm)
{
        KASSERT(uvm_physseg_valid_p(upm));
        return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
}
#endif

int
uvm_physseg_get_free_list(uvm_physseg_t upm)
{
        KASSERT(uvm_physseg_valid_p(upm));
        return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
}

u_int
uvm_physseg_get_start_hint(uvm_physseg_t upm)
{
        KASSERT(uvm_physseg_valid_p(upm));
        return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
}

bool
uvm_physseg_set_start_hint(uvm_physseg_t upm, u_int start_hint)
{
        if (uvm_physseg_valid_p(upm) == false)
                return false;

        HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
        return true;
}

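/*
 * uvm_physseg_init_seg: attach the vm_page array to a segment and hand
 * every page inside [avail_start, avail_end) to the page allocator via
 * uvm_pagefree(), bumping uvmexp.npages as we go.
 */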
void
uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
{
        psize_t i;
        psize_t n;
        paddr_t paddr;
        struct uvm_physseg *seg;

        KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);

        seg = HANDLE_TO_PHYSSEG_NODE(upm);
        KASSERT(seg != NULL);
        KASSERT(seg->pgs == NULL);

        n = seg->end - seg->start;
        seg->pgs = pgs;

        /* init and free vm_pages (we've already zeroed them) */
        paddr = ctob(seg->start);
        for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
                seg->pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
                VM_MDPAGE_INIT(&seg->pgs[i]);
#endif
                if (atop(paddr) >= seg->avail_start &&
                    atop(paddr) < seg->avail_end) {
                        uvmexp.npages++;
                        mutex_enter(&uvm_pageqlock);
                        /* add page to free pool */
                        uvm_pagefree(&seg->pgs[i]);
                        mutex_exit(&uvm_pageqlock);
                }
        }
}

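/*
 * uvm_physseg_seg_chomp_slab: wrap a freshly allocated vm_page slab in
 * an extent(9) arena, so that later plug/unplug operations can carve
 * page ranges out of it. At boot, static extent storage is used because
 * kmem(9) is not available yet.
 */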
void
uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
{
        struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);

        /* max number of pre-boot unplug()s allowed */
#define UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX

        static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];

        if (__predict_false(uvm.page_init_done == false)) {
                seg->ext = extent_create("Boot time slab", (u_long) pgs, (u_long) (pgs + n),
                    (void *)btslab_ex_storage, sizeof(btslab_ex_storage), 0);
        } else {
                seg->ext = extent_create("Hotplug slab", (u_long) pgs, (u_long) (pgs + n), NULL, 0, 0);
        }

        KASSERT(seg->ext != NULL);
}

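/*
 * uvm_physseg_seg_alloc_from_slab: carve 'pages' worth of vm_page
 * structures out of the segment's extent(9) arena. Returns NULL if the
 * arena cannot satisfy the request. At boot the arena may be inherited
 * from the previous segment.
 */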
struct vm_page *
uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
{
        int err;
        struct uvm_physseg *seg;
        struct vm_page *pgs = NULL;

        seg = HANDLE_TO_PHYSSEG_NODE(upm);

        KASSERT(pages > 0);

        if (__predict_false(seg->ext == NULL)) {
                /*
                 * This is a situation unique to boot time.
                 * It shouldn't happen at any point other than from
                 * the first uvm_page.c:uvm_page_init() call.
                 * Since we're in a loop, we can get away with the
                 * below.
                 */
                KASSERT(uvm.page_init_done != true);

                seg->ext = HANDLE_TO_PHYSSEG_NODE(uvm_physseg_get_prev(upm))->ext;

                KASSERT(seg->ext != NULL);
        }

        /* We allocate enough for this segment */
        err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0, EX_BOUNDZERO, (u_long *)&pgs);

        if (err != 0) {
#ifdef DEBUG
                printf("%s: extent_alloc failed with error: %d \n",
                    __func__, err);
#endif
        }

        return pgs;
}

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

uvm_physseg_t
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
        struct uvm_physseg *ps;
        uvm_physseg_t upm;

        if (__predict_true(uvm.page_init_done == true))
                panic("%s: load attempted after uvm_page_init()\n", __func__);
        if (uvmexp.pagesize == 0)
                panic("uvm_page_physload: page size not set!");
        if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
                panic("uvm_page_physload: bad free list %d", free_list);
        if (start >= end)
                panic("uvm_page_physload: start >= end");

        if (uvm_physseg_plug(start, end - start, &upm) == false) {
                panic("uvm_physseg_plug() failed at boot.");
                /* NOTREACHED */
                return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
        }

        ps = HANDLE_TO_PHYSSEG_NODE(upm);

        /* Legacy */
        ps->avail_start = avail_start;
        ps->avail_end = avail_end;

        ps->free_list = free_list; /* XXX: */

        return upm;
}

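/*
 * uvm_physseg_unplug: remove the physical page span [pfn, pfn + pages)
 * from the segment that contains it.
 *
 * Four cases are handled: the whole segment, a front chunk, a back
 * chunk, and a middle chunk; the last splits the segment in two and is
 * only supported with UVM_HOTPLUG.
 */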
bool
uvm_physseg_unplug(paddr_t pfn, size_t pages)
{
        uvm_physseg_t upm;
        paddr_t off = 0, start, end;
        struct uvm_physseg *seg;

        upm = uvm_physseg_find(pfn, &off);

        if (!uvm_physseg_valid_p(upm)) {
                printf("%s: Tried to unplug from unknown offset\n", __func__);
                return false;
        }

        seg = HANDLE_TO_PHYSSEG_NODE(upm);

        start = uvm_physseg_get_start(upm);
        end = uvm_physseg_get_end(upm);

        if (end < (pfn + pages)) {
                printf("%s: Tried to unplug oversized span \n", __func__);
                return false;
        }

#ifndef DIAGNOSTIC
        (void) start;
#endif
        KASSERT(pfn == start + off); /* sanity */

        if (__predict_true(uvm.page_init_done == true)) {
                /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
                if (extent_free(seg->ext, (u_long)(seg->pgs + off), sizeof(struct vm_page) * pages, EX_MALLOCOK | EX_NOWAIT) != 0)
                        return false;
        }

        if (off == 0 && (pfn + pages) == end) {
#if defined(UVM_HOTPLUG) /* rbtree implementation */
                int segcount = 0;
                struct uvm_physseg *current_ps;
                /* Complete segment */
                if (uvm_physseg_graph.nentries == 1)
                        panic("%s: out of memory!", __func__);

                if (__predict_true(uvm.page_init_done == true)) {
                        RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
                                if (seg->ext == current_ps->ext)
                                        segcount++;
                        }
                        KASSERT(segcount > 0);

                        if (segcount == 1) {
                                extent_destroy(seg->ext);
                        }

                        /*
                         * We assume that the unplug will succeed from
                         * this point onwards
                         */
                        uvmexp.npages -= (int) pages;
                }

                rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
                memset(seg, 0, sizeof(struct uvm_physseg));
                uvm_physseg_free(seg, sizeof(struct uvm_physseg));
                uvm_physseg_graph.nentries--;
#else /* UVM_HOTPLUG */
                int x;
                if (vm_nphysmem == 1)
                        panic("uvm_page_physget: out of memory!");
                vm_nphysmem--;
                for (x = upm ; x < vm_nphysmem ; x++)
                        /* structure copy */
                        VM_PHYSMEM_PTR_SWAP(x, x + 1);
#endif /* UVM_HOTPLUG */
                /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
                return true;
        }

        if (off > 0 &&
            (pfn + pages) < end) {
#if defined(UVM_HOTPLUG) /* rbtree implementation */
                /* middle chunk - need a new segment */
                struct uvm_physseg *ps, *current_ps;
                ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
                if (ps == NULL) {
                        printf("%s: Unable to allocate new fragment vm_physseg\n",
                            __func__);
                        return false;
                }

                /* Remove middle chunk */
                if (__predict_true(uvm.page_init_done == true)) {
                        KASSERT(seg->ext != NULL);
                        ps->ext = seg->ext;

                        /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
                        /*
                         * We assume that the unplug will succeed from
                         * this point onwards
                         */
                        uvmexp.npages -= (int) pages;
                }

                ps->start = pfn + pages;
                ps->avail_start = ps->start; /* XXX: Legacy */

                ps->end = seg->end;
                ps->avail_end = ps->end; /* XXX: Legacy */

                seg->end = pfn;
                seg->avail_end = seg->end; /* XXX: Legacy */

                /*
                 * The new pgs array points to the beginning of the
                 * tail fragment.
                 */
                if (__predict_true(uvm.page_init_done == true))
                        ps->pgs = seg->pgs + off + pages;

                current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
                if (current_ps != ps) {
                        panic("uvm_page_physload: Duplicate address range detected!");
                }
                uvm_physseg_graph.nentries++;
#else /* UVM_HOTPLUG */
                panic("%s: can't unplug() from the middle of a segment without "
                    "UVM_HOTPLUG\n", __func__);
                /* NOTREACHED */
#endif /* UVM_HOTPLUG */
                return true;
        }

        if (off == 0 && (pfn + pages) < end) {
                /* Remove front chunk */
                if (__predict_true(uvm.page_init_done == true)) {
                        /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
                        /*
                         * We assume that the unplug will succeed from
                         * this point onwards
                         */
                        uvmexp.npages -= (int) pages;
                }

                /* Truncate */
                seg->start = pfn + pages;
                seg->avail_start = seg->start; /* XXX: Legacy */

                /*
                 * Move the pgs array start to the beginning of the
                 * tail end.
                 */
                if (__predict_true(uvm.page_init_done == true))
                        seg->pgs += pages;

                return true;
        }

        if (off > 0 && (pfn + pages) == end) {
                /* back chunk */

                /* Truncate! */
                seg->end = pfn;
                seg->avail_end = seg->end; /* XXX: Legacy */

                uvmexp.npages -= (int) pages;

                return true;
        }

        printf("%s: Tried to unplug unknown range \n", __func__);

        return false;
}