1 /* $NetBSD: uvm_physseg.c,v 1.9 2018/01/21 17:58:43 christos Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_page.h 7.3 (Berkeley) 4/21/91
37 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
38 *
39 *
40 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41 * All rights reserved.
42 *
43 * Permission to use, copy, modify and distribute this software and
44 * its documentation is hereby granted, provided that both the copyright
45 * notice and this permission notice appear in all copies of the
46 * software, derivative works or modified versions, and any portions
47 * thereof, and that both notices appear in supporting documentation.
48 *
49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52 *
53 * Carnegie Mellon requests users of this software to return to
54 *
55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
56 * School of Computer Science
57 * Carnegie Mellon University
58 * Pittsburgh PA 15213-3890
59 *
60 * any improvements or extensions that they make and grant Carnegie the
61 * rights to redistribute these changes.
62 */
63
64 /*
65 * Consolidated API from uvm_page.c and others.
66 * Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
67 * rbtree(3) backing implementation by:
68 * Santhosh N. Raju <santhosh.raju@gmail.com>
69 */
70
71 #ifdef _KERNEL_OPT
72 #include "opt_uvm.h"
73 #endif
74
75 #include <sys/param.h>
76 #include <sys/types.h>
77 #include <sys/extent.h>
78 #include <sys/kmem.h>
79
80 #include <uvm/uvm.h>
81 #include <uvm/uvm_page.h>
82 #include <uvm/uvm_param.h>
83 #include <uvm/uvm_pdpolicy.h>
84 #include <uvm/uvm_physseg.h>
85
86 /*
87 * uvm_physseg: describes one segment of physical memory
88 */
89 struct uvm_physseg {
90 struct rb_node rb_node; /* tree information */
91 paddr_t start; /* PF# of first page in segment */
92 paddr_t end; /* (PF# of last page in segment) + 1 */
93 paddr_t avail_start; /* PF# of first free page in segment */
94 paddr_t avail_end; /* (PF# of last free page in segment) +1 */
95 struct vm_page *pgs; /* vm_page structures (from start) */
96 struct extent *ext; /* extent(9) structure to manage pgs[] */
97 int free_list; /* which free list they belong on */
98 u_int start_hint; /* start looking for free pages here */
99 /* protected by uvm_fpageqlock */
100 #ifdef __HAVE_PMAP_PHYSSEG
101 struct pmap_physseg pmseg; /* pmap specific (MD) data */
102 #endif
103 };
104
105 /*
106 * These functions are reserved for uvm(9) internal use and are not
107 * exported in the header file uvm_physseg.h
108 *
109 * Thus they are re-declared here.
110 */
111 void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
112 void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);
113
114 /* returns a pgs array */
115 struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
116
117 #if defined(UVM_HOTPLUG) /* rbtree implementation */
118
119 #define HANDLE_TO_PHYSSEG_NODE(h) ((struct uvm_physseg *)(h))
120 #define PHYSSEG_NODE_TO_HANDLE(u) ((uvm_physseg_t)(u))
121
122 struct uvm_physseg_graph {
123 struct rb_tree rb_tree; /* Tree for entries */
124 int nentries; /* Number of entries */
125 };
126
127 static struct uvm_physseg_graph uvm_physseg_graph;
128
129 /*
130 * Note on kmem(9) allocator usage:
131 * We take the conservative approach that plug/unplug are allowed to
132 * fail in high memory stress situations.
133 *
134 * We want to avoid re-entrant situations in which one plug/unplug
135 * operation is waiting on a previous one to complete, since this
136 * makes the design more complicated than necessary.
137 *
138 * We may review this and change its behaviour, once the use cases
139 * become more obvious.
140 */
141
142 /*
143 * Special alloc()/free() functions for boot time support:
144 * We assume that alloc() at boot time is only for new 'vm_physseg's.
145 * This allows us to use a static array for memory allocation at boot
146 * time. Thus we avoid using kmem(9), which is not ready at this point
147 * in boot.
148 *
149 * After kmem(9) is ready, we use it. We currently discard any free()s
150 * to this static array, since the size is small enough to be a
151 * trivial waste on all architectures we run on.
152 */
153
154 static size_t nseg = 0;
155 static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];
156
157 static void *
158 uvm_physseg_alloc(size_t sz)
159 {
160 /*
161 * During boot time, we only support allocating vm_physseg
162 * entries from the static array.
163 * We need to assert for this.
164 */
165
166 if (__predict_false(uvm.page_init_done == false)) {
167 if (sz % sizeof(struct uvm_physseg))
168 panic("%s: tried to alloc size other than multiple"
169 " of struct uvm_physseg at boot\n", __func__);
170
171 size_t n = sz / sizeof(struct uvm_physseg);
172 nseg += n;
173
174 KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);
175
176 return &uvm_physseg[nseg - n];
177 }
178
179 return kmem_zalloc(sz, KM_NOSLEEP);
180 }
181
182 static void
183 uvm_physseg_free(void *p, size_t sz)
184 {
185 /*
186 * This is a bit tricky. We do allow simulation of free()
187 * during boot (e.g. when MD code is "steal"ing memory, and
188 * the segment has been exhausted and thus needs to be
189 * free()-ed).
190 * free() also complicates things because we leak the
191 * free()-ed memory. Therefore calling code can't assume that
192 * free()-ed memory is available for alloc() again at boot time.
193 *
194 * Thus we can't explicitly disallow free()s during
195 * boot time. However, the same restriction for alloc()
196 * applies to free(). We only allow uvm_physseg related free()s
197 * via this function during boot time.
198 */
199
200 if (__predict_false(uvm.page_init_done == false)) {
201 if (sz % sizeof(struct uvm_physseg))
202 panic("%s: tried to free size other than struct uvm_physseg"
203 " at boot\n", __func__);
204
205 }
206
207 /*
208 * Could have been in a single if(){} block - split for
209 * clarity
210 */
211
212 if ((struct uvm_physseg *)p >= uvm_physseg &&
213 (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
214 if (sz % sizeof(struct uvm_physseg))
215 panic("%s: tried to free() other than struct uvm_physseg"
216 " from static array\n", __func__);
217
218 if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
219 panic("%s: tried to free() the entire static array!", __func__);
220 return; /* Nothing to free */
221 }
222
223 kmem_free(p, sz);
224 }
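/*
 * Illustrative sketch (not part of the build): how the boot time path
 * above hands out slots from the static uvm_physseg[] array, assuming
 * no earlier boot time allocations. The function name is an assumption
 * made for the example; this is internal behaviour, not an exported API.
 */
#if 0
static void
uvm_physseg_boot_alloc_example(void)
{
	struct uvm_physseg *a, *b;

	/* Consecutive slots are carved out of the static array. */
	a = uvm_physseg_alloc(sizeof(struct uvm_physseg));	/* &uvm_physseg[0] */
	b = uvm_physseg_alloc(2 * sizeof(struct uvm_physseg));	/* &uvm_physseg[1] */

	/* At boot, free() of a static slot is accepted but leaked. */
	uvm_physseg_free(b, 2 * sizeof(struct uvm_physseg));
	(void)a;
}
#endif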
225
226 /* XXX: Multi page size */
227 bool
228 uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
229 {
230 int preload;
231 size_t slabpages;
232 struct uvm_physseg *ps, *current_ps = NULL;
233 struct vm_page *slab = NULL, *pgs = NULL;
234
235 #ifdef DEBUG
236 paddr_t off;
237 uvm_physseg_t upm;
238 upm = uvm_physseg_find(pfn, &off);
239
240 ps = HANDLE_TO_PHYSSEG_NODE(upm);
241
242 if (ps != NULL) /* XXX: do we allow "update" plugs? */
243 return false;
244 #endif
245
246 /*
247 * do we have room?
248 */
249
250 ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
251 if (ps == NULL) {
252 printf("uvm_page_physload: unable to load physical memory "
253 "segment\n");
254 printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
255 VM_PHYSSEG_MAX, pfn, pfn + pages);
256 printf("\tincrease VM_PHYSSEG_MAX\n");
257 return false;
258 }
259
260 /* span init */
261 ps->start = pfn;
262 ps->end = pfn + pages;
263
264 /*
265 * XXX: Ugly hack - uvmexp.npages is accounted only for pages
266 * within the avail_start/avail_end range set below, so we cover
267 * the whole segment here. This is legacy and should be removed.
268 */
269
270 ps->avail_start = ps->start;
271 ps->avail_end = ps->end;
272
273 /*
274 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
275 * called yet, so kmem is not available).
276 */
277
278 preload = 1; /* We are going to assume it is a preload */
279
280 RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
281 /* If there are non NULL pages then we are not in a preload */
282 if (current_ps->pgs != NULL) {
283 preload = 0;
284 /* Try to scavenge from earlier unplug()s. */
285 pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);
286
287 if (pgs != NULL) {
288 break;
289 }
290 }
291 }
292
293
294 /*
295 * if VM is already running, attempt to kmem_alloc vm_page structures
296 */
297
298 if (!preload) {
299 if (pgs == NULL) { /* Brand new */
300 /* Iteratively try alloc down from uvmexp.npages */
301 for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
302 slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
303 if (slab != NULL)
304 break;
305 }
306
307 if (slab == NULL) {
308 uvm_physseg_free(ps, sizeof(struct uvm_physseg));
309 return false;
310 }
311
312 uvm_physseg_seg_chomp_slab(ps, slab, (size_t) slabpages);
313 /* We allocate enough for this plug */
314 pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);
315
316 if (pgs == NULL) {
317 printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
318 return false;
319 }
320 } else {
321 /* Reuse scavenged extent */
322 ps->ext = current_ps->ext;
323 }
324
325 physmem += pages;
326 uvmpdpol_reinit();
327 } else { /* Boot time - see uvm_page.c:uvm_page_init() */
328 pgs = NULL;
329 ps->pgs = pgs;
330 }
331
332 /*
333 * now insert us in the proper place in uvm_physseg_graph.rb_tree
334 */
335
336 current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
337 if (current_ps != ps) {
338 panic("uvm_page_physload: Duplicate address range detected!");
339 }
340 uvm_physseg_graph.nentries++;
341
342 /*
343 * uvm_pagefree() requires that PHYS_TO_VM_PAGE() on the
344 * newly allocated pgs[] returns the correct value. This is
345 * a bit of a chicken and egg problem, since it needs
346 * uvm_physseg_find() to succeed. For this, the node needs to
347 * be inserted *before* uvm_physseg_init_seg() happens.
348 *
349 * During boot, this happens anyway, since
350 * uvm_physseg_init_seg() is called later on and separately
351 * from uvm_page.c:uvm_page_init().
352 * In the case of hotplug we need to ensure this.
353 */
354
355 if (__predict_true(!preload))
356 uvm_physseg_init_seg(ps, pgs);
357
358 if (psp != NULL)
359 *psp = ps;
360
361 return true;
362 }
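/*
 * Illustrative sketch (not part of the build): how a hypothetical
 * memory hotplug driver would hand a newly discovered range of page
 * frames to uvm. The function name and parameters are assumptions
 * made for the example.
 */
#if 0
static bool
uvm_hotplug_attach_example(paddr_t first_pfn, size_t npages)
{
	uvm_physseg_t upm;

	/* May fail under memory pressure; see the kmem(9) note above. */
	if (!uvm_physseg_plug(first_pfn, npages, &upm))
		return false;

	/* The returned handle works with the accessors further below. */
	return uvm_physseg_valid_p(upm);
}
#endif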
363
364 static int
365 uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
366 {
367 const struct uvm_physseg *enode1 = nnode1;
368 const struct uvm_physseg *enode2 = nnode2;
369
370 KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
371 KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);
372
373 if (enode1->start < enode2->start)
374 return -1;
375 if (enode1->start >= enode2->end)
376 return 1;
377 return 0;
378 }
379
380 static int
381 uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
382 {
383 const struct uvm_physseg *enode = nnode;
384 const paddr_t pa = *(const paddr_t *) pkey;
385
386 if(enode->start <= pa && pa < enode->end)
387 return 0;
388 if (enode->start < pa)
389 return -1;
390 if (enode->end > pa)
391 return 1;
392
393 return 0;
394 }
395
396 static const rb_tree_ops_t uvm_physseg_tree_ops = {
397 .rbto_compare_nodes = uvm_physseg_compare_nodes,
398 .rbto_compare_key = uvm_physseg_compare_key,
399 .rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
400 .rbto_context = NULL
401 };
402
403 /*
404 * uvm_physseg_init: init the physmem
405 *
406 * => physmem unit should not be in use at this point
407 */
408
409 void
410 uvm_physseg_init(void)
411 {
412 rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
413 uvm_physseg_graph.nentries = 0;
414 }
415
416 uvm_physseg_t
417 uvm_physseg_get_next(uvm_physseg_t upm)
418 {
419 /* next of invalid is invalid, not fatal */
420 if (uvm_physseg_valid_p(upm) == false)
421 return UVM_PHYSSEG_TYPE_INVALID;
422
423 return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
424 RB_DIR_RIGHT);
425 }
426
427 uvm_physseg_t
428 uvm_physseg_get_prev(uvm_physseg_t upm)
429 {
430 /* prev of invalid is invalid, not fatal */
431 if (uvm_physseg_valid_p(upm) == false)
432 return UVM_PHYSSEG_TYPE_INVALID;
433
434 return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
435 RB_DIR_LEFT);
436 }
437
438 uvm_physseg_t
439 uvm_physseg_get_last(void)
440 {
441 return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
442 }
443
444 uvm_physseg_t
445 uvm_physseg_get_first(void)
446 {
447 return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
448 }
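/*
 * Illustrative sketch (not part of the build): the usual way to walk
 * every segment with the handle based iterators above, in the style
 * used elsewhere in uvm(9). The function name is an assumption made
 * for the example.
 */
#if 0
static void
uvm_physseg_iterate_example(void)
{
	uvm_physseg_t upm;

	for (upm = uvm_physseg_get_first();
	    uvm_physseg_valid_p(upm);
	    upm = uvm_physseg_get_next(upm)) {
		printf("segment: 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
		    uvm_physseg_get_start(upm), uvm_physseg_get_end(upm));
	}
}
#endif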
449
450 paddr_t
451 uvm_physseg_get_highest_frame(void)
452 {
453 struct uvm_physseg *ps =
454 (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
455
456 return ps->end - 1;
457 }
458
459 /*
460 * uvm_page_physunload: unload physical memory and return it to
461 * caller.
462 */
463 bool
464 uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
465 {
466 struct uvm_physseg *seg;
467
468 if (__predict_true(uvm.page_init_done == true))
469 panic("%s: unload attempted after uvm_page_init()\n", __func__);
470
471 seg = HANDLE_TO_PHYSSEG_NODE(upm);
472
473 if (seg->free_list != freelist) {
474 paddrp = NULL;
475 return false;
476 }
477
478 /*
479 * During cold boot, what we're about to unplug hasn't been
480 * put on the uvm freelist, nor has uvmexp.npages been
481 * updated. (This happens in uvm_page.c:uvm_page_init())
482 *
483 * For hotplug, we assume here that the pages being unloaded
484 * here are completely out of sight of uvm (ie; not on any uvm
485 * lists), and that uvmexp.npages has been suitably
486 * decremented before we're called.
487 *
488 * XXX: will avail_end == start if avail_start < avail_end?
489 */
490
491 /* try from front */
492 if (seg->avail_start == seg->start &&
493 seg->avail_start < seg->avail_end) {
494 *paddrp = ctob(seg->avail_start);
495 return uvm_physseg_unplug(seg->avail_start, 1);
496 }
497
498 /* try from rear */
499 if (seg->avail_end == seg->end &&
500 seg->avail_start < seg->avail_end) {
501 *paddrp = ctob(seg->avail_end - 1);
502 return uvm_physseg_unplug(seg->avail_end - 1, 1);
503 }
504
505 return false;
506 }
507
508 bool
509 uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
510 {
511 struct uvm_physseg *seg;
512
513 seg = HANDLE_TO_PHYSSEG_NODE(upm);
514
515 if (__predict_true(uvm.page_init_done == true))
516 panic("%s: unload attempted after uvm_page_init()\n", __func__);
517 /* any room in this bank? */
518 if (seg->avail_start >= seg->avail_end) {
519 paddrp = NULL;
520 return false; /* nope */
521 }
522
523 *paddrp = ctob(seg->avail_start);
524
525 /* Always unplug from front */
526 return uvm_physseg_unplug(seg->avail_start, 1);
527 }
528
529
530 /*
531 * vm_physseg_find: find vm_physseg structure that belongs to a PA
532 */
533 uvm_physseg_t
534 uvm_physseg_find(paddr_t pframe, psize_t *offp)
535 {
536 struct uvm_physseg * ps = NULL;
537
538 ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);
539
540 if (ps != NULL && offp != NULL)
541 *offp = pframe - ps->start;
542
543 return ps;
544 }
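/*
 * Illustrative sketch (not part of the build): translating a physical
 * address into its vm_page via uvm_physseg_find() plus the returned
 * offset - essentially the lookup behind PHYS_TO_VM_PAGE(). The
 * function name is an assumption made for the example.
 */
#if 0
static struct vm_page *
uvm_physseg_find_example(paddr_t pa)
{
	psize_t off;
	uvm_physseg_t upm;

	upm = uvm_physseg_find(atop(pa), &off);
	if (!uvm_physseg_valid_p(upm))
		return NULL;
	return uvm_physseg_get_pg(upm, off);
}
#endif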
545
546 #else /* UVM_HOTPLUG */
547
548 /*
549 * physical memory config is stored in vm_physmem.
550 */
551
552 #define VM_PHYSMEM_PTR(i) (&vm_physmem[i])
553 #if VM_PHYSSEG_MAX == 1
554 #define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
555 #else
556 #define VM_PHYSMEM_PTR_SWAP(i, j) \
557 do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
558 #endif
559
560 #define HANDLE_TO_PHYSSEG_NODE(h) (VM_PHYSMEM_PTR((int)h))
561 #define PHYSSEG_NODE_TO_HANDLE(u) ((int)((vsize_t) (u - vm_physmem) / sizeof(struct uvm_physseg)))
562
563 static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
564 static int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
565 #define vm_nphysmem vm_nphysseg
566
567 void
568 uvm_physseg_init(void)
569 {
570 /* XXX: Provisioning for rb_tree related init(s) */
571 return;
572 }
573
574 int
575 uvm_physseg_get_next(uvm_physseg_t lcv)
576 {
577 /* next of invalid is invalid, not fatal */
578 if (uvm_physseg_valid_p(lcv) == false)
579 return UVM_PHYSSEG_TYPE_INVALID;
580
581 return (lcv + 1);
582 }
583
584 int
585 uvm_physseg_get_prev(uvm_physseg_t lcv)
586 {
587 /* prev of invalid is invalid, not fatal */
588 if (uvm_physseg_valid_p(lcv) == false)
589 return UVM_PHYSSEG_TYPE_INVALID;
590
591 return (lcv - 1);
592 }
593
594 int
595 uvm_physseg_get_last(void)
596 {
597 return (vm_nphysseg - 1);
598 }
599
600 int
601 uvm_physseg_get_first(void)
602 {
603 return 0;
604 }
605
606 paddr_t
607 uvm_physseg_get_highest_frame(void)
608 {
609 int lcv;
610 paddr_t last = 0;
611 struct uvm_physseg *ps;
612
613 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
614 ps = VM_PHYSMEM_PTR(lcv);
615 if (last < ps->end)
616 last = ps->end;
617 }
618
619 return last;
620 }
621
622
623 static struct vm_page *
624 uvm_post_preload_check(void)
625 {
626 int preload, lcv;
627
628 /*
629 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
630 * called yet, so kmem is not available).
631 */
632
633 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
634 if (VM_PHYSMEM_PTR(lcv)->pgs)
635 break;
636 }
637 preload = (lcv == vm_nphysmem);
638
639 /*
640 * if VM is already running, attempt to kmem_alloc vm_page structures
641 */
642
643 if (!preload) {
644 panic("Tried to add RAM after uvm_page_init");
645 }
646
647 return NULL;
648 }
649
650 /*
651 * uvm_page_physunload: unload physical memory and return it to
652 * caller.
653 */
654 bool
655 uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
656 {
657 int x;
658 struct uvm_physseg *seg;
659
660 uvm_post_preload_check();
661
662 seg = VM_PHYSMEM_PTR(psi);
663
664 if (seg->free_list != freelist) {
665 paddrp = NULL;
666 return false;
667 }
668
669 /* try from front */
670 if (seg->avail_start == seg->start &&
671 seg->avail_start < seg->avail_end) {
672 *paddrp = ctob(seg->avail_start);
673 seg->avail_start++;
674 seg->start++;
675 /* nothing left? nuke it */
676 if (seg->avail_start == seg->end) {
677 if (vm_nphysmem == 1)
678 panic("uvm_page_physget: out of memory!");
679 vm_nphysmem--;
680 for (x = psi ; x < vm_nphysmem ; x++)
681 /* structure copy */
682 VM_PHYSMEM_PTR_SWAP(x, x + 1);
683 }
684 return (true);
685 }
686
687 /* try from rear */
688 if (seg->avail_end == seg->end &&
689 seg->avail_start < seg->avail_end) {
690 *paddrp = ctob(seg->avail_end - 1);
691 seg->avail_end--;
692 seg->end--;
693 /* nothing left? nuke it */
694 if (seg->avail_end == seg->start) {
695 if (vm_nphysmem == 1)
696 panic("uvm_page_physget: out of memory!");
697 vm_nphysmem--;
698 for (x = psi ; x < vm_nphysmem ; x++)
699 /* structure copy */
700 VM_PHYSMEM_PTR_SWAP(x, x + 1);
701 }
702 return (true);
703 }
704
705 return false;
706 }
707
708 bool
709 uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
710 {
711 int x;
712 struct uvm_physseg *seg;
713
714 uvm_post_preload_check();
715
716 seg = VM_PHYSMEM_PTR(psi);
717
718 /* any room in this bank? */
719 if (seg->avail_start >= seg->avail_end) {
720 paddrp = NULL;
721 return false; /* nope */
722 }
723
724 *paddrp = ctob(seg->avail_start);
725 seg->avail_start++;
726 /* truncate! */
727 seg->start = seg->avail_start;
728
729 /* nothing left? nuke it */
730 if (seg->avail_start == seg->end) {
731 if (vm_nphysmem == 1)
732 panic("uvm_page_physget: out of memory!");
733 vm_nphysmem--;
734 for (x = psi ; x < vm_nphysmem ; x++)
735 /* structure copy */
736 VM_PHYSMEM_PTR_SWAP(x, x + 1);
737 }
738 return (true);
739 }
740
741 bool
742 uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
743 {
744 int lcv;
745 struct vm_page *pgs;
746 struct uvm_physseg *ps;
747
748 #ifdef DEBUG
749 paddr_t off;
750 uvm_physseg_t upm;
751 upm = uvm_physseg_find(pfn, &off);
752
753 if (uvm_physseg_valid_p(upm)) /* XXX: do we allow "update" plugs? */
754 return false;
755 #endif
756
757 paddr_t start = pfn;
758 paddr_t end = pfn + pages;
759 paddr_t avail_start = start;
760 paddr_t avail_end = end;
761
762 if (uvmexp.pagesize == 0)
763 panic("uvm_page_physload: page size not set!");
764
765 /*
766 * do we have room?
767 */
768
769 if (vm_nphysmem == VM_PHYSSEG_MAX) {
770 printf("uvm_page_physload: unable to load physical memory "
771 "segment\n");
772 printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
773 VM_PHYSSEG_MAX, (long long)start, (long long)end);
774 printf("\tincrease VM_PHYSSEG_MAX\n");
775 if (psp != NULL)
776 *psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
777 return false;
778 }
779
780 /*
781 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
782 * called yet, so kmem is not available).
783 */
784 pgs = uvm_post_preload_check();
785
786 /*
787 * now insert us in the proper place in vm_physmem[]
788 */
789
790 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
791 /* random: put it at the end (easy!) */
792 ps = VM_PHYSMEM_PTR(vm_nphysmem);
793 lcv = vm_nphysmem;
794 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
795 {
796 int x;
797 /* sort by address for binary search */
798 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
799 if (start < VM_PHYSMEM_PTR(lcv)->start)
800 break;
801 ps = VM_PHYSMEM_PTR(lcv);
802 /* move back other entries, if necessary ... */
803 for (x = vm_nphysmem ; x > lcv ; x--)
804 /* structure copy */
805 VM_PHYSMEM_PTR_SWAP(x, x - 1);
806 }
807 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
808 {
809 int x;
810 /* sort by largest segment first */
811 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
812 if ((end - start) >
813 (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
814 break;
815 ps = VM_PHYSMEM_PTR(lcv);
816 /* move back other entries, if necessary ... */
817 for (x = vm_nphysmem ; x > lcv ; x--)
818 /* structure copy */
819 VM_PHYSMEM_PTR_SWAP(x, x - 1);
820 }
821 #else
822 panic("uvm_page_physload: unknown physseg strategy selected!");
823 #endif
824
825 ps->start = start;
826 ps->end = end;
827 ps->avail_start = avail_start;
828 ps->avail_end = avail_end;
829
830 ps->pgs = pgs;
831
832 vm_nphysmem++;
833
834 if (psp != NULL)
835 *psp = lcv;
836
837 return true;
838 }
839
840 /*
841 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
842 */
843
844 #if VM_PHYSSEG_MAX == 1
845 static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
846 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
847 static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
848 #else
849 static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
850 #endif
851
852 /*
853 * vm_physseg_find: find vm_physseg structure that belongs to a PA
854 */
855 int
856 uvm_physseg_find(paddr_t pframe, psize_t *offp)
857 {
858
859 #if VM_PHYSSEG_MAX == 1
860 return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
861 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
862 return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
863 #else
864 return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
865 #endif
866 }
867
868 #if VM_PHYSSEG_MAX == 1
869 static inline int
870 vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
871 {
872
873 /* 'contig' case */
874 if (pframe >= segs[0].start && pframe < segs[0].end) {
875 if (offp)
876 *offp = pframe - segs[0].start;
877 return(0);
878 }
879 return(-1);
880 }
881
882 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
883
884 static inline int
885 vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
886 {
887 /* binary search for it */
888 int start, len, guess;
889
890 /*
891 * if try is too large (thus target is less than try) we reduce
892 * the length to trunc(len/2) [i.e. everything smaller than "try"]
893 *
894 * if the try is too small (thus target is greater than try) then
895 * we set the new start to be (try + 1). this means we need to
896 * reduce the length to (round(len/2) - 1).
897 *
898 * note "adjust" below which takes advantage of the fact that
899 * (round(len/2) - 1) == trunc((len - 1) / 2)
900 * for any value of len we may have
901 */
902
903 for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
904 guess = start + (len / 2); /* try in the middle */
905
906 /* start past our try? */
907 if (pframe >= segs[guess].start) {
908 /* was try correct? */
909 if (pframe < segs[guess].end) {
910 if (offp)
911 *offp = pframe - segs[guess].start;
912 return guess; /* got it */
913 }
914 start = guess + 1; /* next time, start here */
915 len--; /* "adjust" */
916 } else {
917 /*
918 * pframe before try, just reduce length of
919 * region, done in "for" loop
920 */
921 }
922 }
923 return(-1);
924 }
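/*
 * Worked example (illustrative): with nsegs = 5 and pframe inside
 * segs[3], the loop above goes:
 *   start=0 len=5 guess=2 -> too small: start=3, len-- to 4, halved to 2
 *   start=3 len=2 guess=4 -> too large: len halved to 1
 *   start=3 len=1 guess=3 -> hit, return 3
 */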
925
926 #else
927
928 static inline int
929 vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
930 {
931 /* linear search for it */
932 int lcv;
933
934 for (lcv = 0; lcv < nsegs; lcv++) {
935 if (pframe >= segs[lcv].start &&
936 pframe < segs[lcv].end) {
937 if (offp)
938 *offp = pframe - segs[lcv].start;
939 return(lcv); /* got it */
940 }
941 }
942 return(-1);
943 }
944 #endif
945 #endif /* UVM_HOTPLUG */
946
947 bool
948 uvm_physseg_valid_p(uvm_physseg_t upm)
949 {
950 struct uvm_physseg *ps;
951
952 if (upm == UVM_PHYSSEG_TYPE_INVALID ||
953 upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
954 upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
955 return false;
956
957 /*
958 * This is the delicate init dance - during boot, segments must
959 * be treated as valid even before their pgs[] have been set up.
960 */
961 if (uvm.page_init_done != true)
962 return true;
963
964 ps = HANDLE_TO_PHYSSEG_NODE(upm);
965
966 /* Extra checks needed only post uvm_page_init() */
967 if (ps->pgs == NULL)
968 return false;
969
970 /* XXX: etc. */
971
972 return true;
973
974 }
975
976 /*
977 * Boot protocol dictates that these must be able to return partially
978 * initialised segments.
979 */
980 paddr_t
981 uvm_physseg_get_start(uvm_physseg_t upm)
982 {
983 if (uvm_physseg_valid_p(upm) == false)
984 return (paddr_t) -1;
985
986 return HANDLE_TO_PHYSSEG_NODE(upm)->start;
987 }
988
989 paddr_t
990 uvm_physseg_get_end(uvm_physseg_t upm)
991 {
992 if (uvm_physseg_valid_p(upm) == false)
993 return (paddr_t) -1;
994
995 return HANDLE_TO_PHYSSEG_NODE(upm)->end;
996 }
997
998 paddr_t
999 uvm_physseg_get_avail_start(uvm_physseg_t upm)
1000 {
1001 if (uvm_physseg_valid_p(upm) == false)
1002 return (paddr_t) -1;
1003
1004 return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
1005 }
1006
1007 #if defined(UVM_PHYSSEG_LEGACY)
1008 void
1009 uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
1010 {
1011 struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);
1012
1013 #if defined(DIAGNOSTIC)
1014 paddr_t avail_end;
1015 avail_end = uvm_physseg_get_avail_end(upm);
1016 KASSERT(uvm_physseg_valid_p(upm));
1017 KASSERT(avail_start < avail_end && avail_start >= ps->start);
1018 #endif
1019
1020 ps->avail_start = avail_start;
1021 }
1022 void uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
1023 {
1024 struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);
1025
1026 #if defined(DIAGNOSTIC)
1027 paddr_t avail_start;
1028 avail_start = uvm_physseg_get_avail_start(upm);
1029 KASSERT(uvm_physseg_valid_p(upm));
1030 KASSERT(avail_end > avail_start && avail_end <= ps->end);
1031 #endif
1032
1033 ps->avail_end = avail_end;
1034 }
1035
1036 #endif /* UVM_PHYSSEG_LEGACY */
1037
1038 paddr_t
1039 uvm_physseg_get_avail_end(uvm_physseg_t upm)
1040 {
1041 if (uvm_physseg_valid_p(upm) == false)
1042 return (paddr_t) -1;
1043
1044 return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
1045 }
1046
1047 struct vm_page *
1048 uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
1049 {
1050 KASSERT(uvm_physseg_valid_p(upm));
1051 return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
1052 }
1053
1054 #ifdef __HAVE_PMAP_PHYSSEG
1055 struct pmap_physseg *
1056 uvm_physseg_get_pmseg(uvm_physseg_t upm)
1057 {
1058 KASSERT(uvm_physseg_valid_p(upm));
1059 return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
1060 }
1061 #endif
1062
1063 int
1064 uvm_physseg_get_free_list(uvm_physseg_t upm)
1065 {
1066 KASSERT(uvm_physseg_valid_p(upm));
1067 return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
1068 }
1069
1070 u_int
1071 uvm_physseg_get_start_hint(uvm_physseg_t upm)
1072 {
1073 KASSERT(uvm_physseg_valid_p(upm));
1074 return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
1075 }
1076
1077 bool
1078 uvm_physseg_set_start_hint(uvm_physseg_t upm, u_int start_hint)
1079 {
1080 if (uvm_physseg_valid_p(upm) == false)
1081 return false;
1082
1083 HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
1084 return true;
1085 }
1086
1087 void
1088 uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
1089 {
1090 psize_t i;
1091 psize_t n;
1092 paddr_t paddr;
1093 struct uvm_physseg *seg;
1094
1095 KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);
1096
1097 seg = HANDLE_TO_PHYSSEG_NODE(upm);
1098 KASSERT(seg != NULL);
1099 KASSERT(seg->pgs == NULL);
1100
1101 n = seg->end - seg->start;
1102 seg->pgs = pgs;
1103
1104 /* init and free vm_pages (we've already zeroed them) */
1105 paddr = ctob(seg->start);
1106 for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
1107 seg->pgs[i].phys_addr = paddr;
1108 #ifdef __HAVE_VM_PAGE_MD
1109 VM_MDPAGE_INIT(&seg->pgs[i]);
1110 #endif
1111 if (atop(paddr) >= seg->avail_start &&
1112 atop(paddr) < seg->avail_end) {
1113 uvmexp.npages++;
1114 mutex_enter(&uvm_pageqlock);
1115 /* add page to free pool */
1116 uvm_pagefree(&seg->pgs[i]);
1117 mutex_exit(&uvm_pageqlock);
1118 }
1119 }
1120 }
1121
1122 void
1123 uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
1124 {
1125 struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);
1126
1127 /* max number of pre-boot unplug()s allowed */
1128 #define UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX
1129
1130 static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];
1131
1132 if (__predict_false(uvm.page_init_done == false)) {
1133 seg->ext = extent_create("Boot time slab", (u_long) pgs, (u_long) (pgs + n),
1134 (void *)btslab_ex_storage, sizeof(btslab_ex_storage), 0);
1135 } else {
1136 seg->ext = extent_create("Hotplug slab", (u_long) pgs, (u_long) (pgs + n), NULL, 0, 0);
1137 }
1138
1139 KASSERT(seg->ext != NULL);
1140
1141 }
1142
1143 struct vm_page *
1144 uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
1145 {
1146 int err;
1147 struct uvm_physseg *seg;
1148 struct vm_page *pgs = NULL;
1149
1150 KASSERT(pages > 0);
1151
1152 seg = HANDLE_TO_PHYSSEG_NODE(upm);
1153
1154 if (__predict_false(seg->ext == NULL)) {
1155 /*
1156 * This is a situation unique to boot time.
1157 * It shouldn't happen at any point other than from
1158 * the first uvm_page.c:uvm_page_init() call.
1159 * Since the caller loops over segments in order, we can get
1160 * away with borrowing the previous segment's extent below.
1161 */
1162 KASSERT(uvm.page_init_done != true);
1163
1164 uvm_physseg_t upmp = uvm_physseg_get_prev(upm);
1165 KASSERT(upmp != UVM_PHYSSEG_TYPE_INVALID);
1166
1167 seg->ext = HANDLE_TO_PHYSSEG_NODE(upmp)->ext;
1168
1169 KASSERT(seg->ext != NULL);
1170 }
1171
1172 /* We allocate enough for this segment */
1173 err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0, EX_BOUNDZERO, (u_long *)&pgs);
1174
1175 if (err != 0) {
1176 #ifdef DEBUG
1177 printf("%s: extent_alloc failed with error: %d \n",
1178 __func__, err);
1179 #endif
1180 }
1181
1182 return pgs;
1183 }
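/*
 * Illustrative sketch (not part of the build): how the two slab
 * helpers above pair up, mirroring what uvm_physseg_plug() does. The
 * function name and parameters are assumptions made for the example.
 */
#if 0
static bool
uvm_physseg_slab_example(uvm_physseg_t upm, struct vm_page *slab,
    size_t slabpages, size_t pages)
{
	struct vm_page *pgs;

	/* Hand the whole backing slab to the segment's extent(9) arena... */
	uvm_physseg_seg_chomp_slab(upm, slab, slabpages);

	/* ...then carve out just enough vm_page structures for this segment. */
	pgs = uvm_physseg_seg_alloc_from_slab(upm, pages);
	return pgs != NULL;
}
#endif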
1184
1185 /*
1186 * uvm_page_physload: load physical memory into VM system
1187 *
1188 * => all args are PFs
1189 * => all pages in start/end get vm_page structures
1190 * => areas marked by avail_start/avail_end get added to the free page pool
1191 * => we are limited to VM_PHYSSEG_MAX physical memory segments
1192 */
1193
1194 uvm_physseg_t
1195 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
1196 paddr_t avail_end, int free_list)
1197 {
1198 struct uvm_physseg *ps;
1199 uvm_physseg_t upm;
1200
1201 if (__predict_true(uvm.page_init_done == true))
1202 panic("%s: unload attempted after uvm_page_init()\n", __func__);
1203 if (uvmexp.pagesize == 0)
1204 panic("uvm_page_physload: page size not set!");
1205 if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
1206 panic("uvm_page_physload: bad free list %d", free_list);
1207 if (start >= end)
1208 panic("uvm_page_physload: start >= end");
1209
1210 if (uvm_physseg_plug(start, end - start, &upm) == false) {
1211 panic("uvm_physseg_plug() failed at boot.");
1212 /* NOTREACHED */
1213 return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
1214 }
1215
1216 ps = HANDLE_TO_PHYSSEG_NODE(upm);
1217
1218 /* Legacy */
1219 ps->avail_start = avail_start;
1220 ps->avail_end = avail_end;
1221
1222 ps->free_list = free_list; /* XXX: */
1223
1224
1225 return upm;
1226 }
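/*
 * Illustrative sketch (not part of the build): a typical boot time
 * call from MD code, with the whole span also made available to the
 * free page pool. The function name and parameters are assumptions
 * made for the example; all uvm_page_physload() arguments are page
 * frame numbers.
 */
#if 0
static void
uvm_page_physload_example(paddr_t seg_start, paddr_t seg_end)
{
	(void)uvm_page_physload(atop(seg_start), atop(seg_end),
	    atop(seg_start), atop(seg_end), VM_FREELIST_DEFAULT);
}
#endif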
1227
1228 bool
1229 uvm_physseg_unplug(paddr_t pfn, size_t pages)
1230 {
1231 uvm_physseg_t upm;
1232 paddr_t off = 0, start __diagused, end;
1233 struct uvm_physseg *seg;
1234
1235 upm = uvm_physseg_find(pfn, &off);
1236
1237 if (!uvm_physseg_valid_p(upm)) {
1238 printf("%s: Tried to unplug from unknown offset\n", __func__);
1239 return false;
1240 }
1241
1242 seg = HANDLE_TO_PHYSSEG_NODE(upm);
1243
1244 start = uvm_physseg_get_start(upm);
1245 end = uvm_physseg_get_end(upm);
1246
1247 if (end < (pfn + pages)) {
1248 printf("%s: Tried to unplug oversized span \n", __func__);
1249 return false;
1250 }
1251
1252 KASSERT(pfn == start + off); /* sanity */
1253
1254 if (__predict_true(uvm.page_init_done == true)) {
1255 /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1256 if (extent_free(seg->ext, (u_long)(seg->pgs + off), sizeof(struct vm_page) * pages, EX_MALLOCOK | EX_NOWAIT) != 0)
1257 return false;
1258 }
1259
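/*
 * Four cases follow, based on where [pfn, pfn + pages) sits in the
 * segment: the whole segment, a middle chunk (which splits the
 * segment in two), a front chunk, or a back chunk.
 */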
1260 if (off == 0 && (pfn + pages) == end) {
1261 #if defined(UVM_HOTPLUG) /* rbtree implementation */
1262 int segcount = 0;
1263 struct uvm_physseg *current_ps;
1264 /* Complete segment */
1265 if (uvm_physseg_graph.nentries == 1)
1266 panic("%s: out of memory!", __func__);
1267
1268 if (__predict_true(uvm.page_init_done == true)) {
1269 RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
1270 if (seg->ext == current_ps->ext)
1271 segcount++;
1272 }
1273 KASSERT(segcount > 0);
1274
1275 if (segcount == 1) {
1276 extent_destroy(seg->ext);
1277 }
1278
1279 /*
1280 * We assume that the unplug will succeed from
1281 * this point onwards
1282 */
1283 uvmexp.npages -= (int) pages;
1284 }
1285
1286 rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
1287 memset(seg, 0, sizeof(struct uvm_physseg));
1288 uvm_physseg_free(seg, sizeof(struct uvm_physseg));
1289 uvm_physseg_graph.nentries--;
1290 #else /* UVM_HOTPLUG */
1291 int x;
1292 if (vm_nphysmem == 1)
1293 panic("uvm_page_physget: out of memory!");
1294 vm_nphysmem--;
1295 for (x = upm ; x < vm_nphysmem ; x++)
1296 /* structure copy */
1297 VM_PHYSMEM_PTR_SWAP(x, x + 1);
1298 #endif /* UVM_HOTPLUG */
1299 /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1300 return true;
1301 }
1302
1303 if (off > 0 &&
1304 (pfn + pages) < end) {
1305 #if defined(UVM_HOTPLUG) /* rbtree implementation */
1306 /* middle chunk - need a new segment */
1307 struct uvm_physseg *ps, *current_ps;
1308 ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
1309 if (ps == NULL) {
1310 printf("%s: Unable to allocated new fragment vm_physseg \n",
1311 __func__);
1312 return false;
1313 }
1314
1315 /* Remove middle chunk */
1316 if (__predict_true(uvm.page_init_done == true)) {
1317 KASSERT(seg->ext != NULL);
1318 ps->ext = seg->ext;
1319
1320 /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1321 /*
1322 * We assume that the unplug will succeed from
1323 * this point onwards
1324 */
1325 uvmexp.npages -= (int) pages;
1326 }
1327
1328 ps->start = pfn + pages;
1329 ps->avail_start = ps->start; /* XXX: Legacy */
1330
1331 ps->end = seg->end;
1332 ps->avail_end = ps->end; /* XXX: Legacy */
1333
1334 seg->end = pfn;
1335 seg->avail_end = seg->end; /* XXX: Legacy */
1336
1337
1338 /*
1339 * The new pgs array points to the beginning of the
1340 * tail fragment.
1341 */
1342 if (__predict_true(uvm.page_init_done == true))
1343 ps->pgs = seg->pgs + off + pages;
1344
1345 current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
1346 if (current_ps != ps) {
1347 panic("uvm_page_physload: Duplicate address range detected!");
1348 }
1349 uvm_physseg_graph.nentries++;
1350 #else /* UVM_HOTPLUG */
1351 panic("%s: can't unplug() from the middle of a segment without"
1352 " UVM_HOTPLUG\n", __func__);
1353 /* NOTREACHED */
1354 #endif /* UVM_HOTPLUG */
1355 return true;
1356 }
1357
1358 if (off == 0 && (pfn + pages) < end) {
1359 /* Remove front chunk */
1360 if (__predict_true(uvm.page_init_done == true)) {
1361 /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1362 /*
1363 * We assume that the unplug will succeed from
1364 * this point onwards
1365 */
1366 uvmexp.npages -= (int) pages;
1367 }
1368
1369 /* Truncate */
1370 seg->start = pfn + pages;
1371 seg->avail_start = seg->start; /* XXX: Legacy */
1372
1373 /*
1374 * Move the pgs array start to the beginning of the
1375 * tail end.
1376 */
1377 if (__predict_true(uvm.page_init_done == true))
1378 seg->pgs += pages;
1379
1380 return true;
1381 }
1382
1383 if (off > 0 && (pfn + pages) == end) {
1384 /* back chunk */
1385
1386
1387 /* Truncate! */
1388 seg->end = pfn;
1389 seg->avail_end = seg->end; /* XXX: Legacy */
1390
1391 uvmexp.npages -= (int) pages;
1392
1393 return true;
1394 }
1395
1396 printf("%s: Tried to unplug unknown range \n", __func__);
1397
1398 return false;
1399 }
1400