1 /* $NetBSD: uvm_physseg.c,v 1.9.4.1 2020/04/08 14:09:05 martin Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_page.h 7.3 (Berkeley) 4/21/91
37 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
38 *
39 *
40 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41 * All rights reserved.
42 *
43 * Permission to use, copy, modify and distribute this software and
44 * its documentation is hereby granted, provided that both the copyright
45 * notice and this permission notice appear in all copies of the
46 * software, derivative works or modified versions, and any portions
47 * thereof, and that both notices appear in supporting documentation.
48 *
49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52 *
53 * Carnegie Mellon requests users of this software to return to
54 *
55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
56 * School of Computer Science
57 * Carnegie Mellon University
58 * Pittsburgh PA 15213-3890
59 *
60 * any improvements or extensions that they make and grant Carnegie the
61 * rights to redistribute these changes.
62 */
63
64 /*
65 * Consolidated API from uvm_page.c and others.
66 * Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
67 * rbtree(3) backing implementation by:
68 * Santhosh N. Raju <santhosh.raju@gmail.com>
69 */
70
71 #ifdef _KERNEL_OPT
72 #include "opt_uvm.h"
73 #endif
74
75 #include <sys/param.h>
76 #include <sys/types.h>
77 #include <sys/extent.h>
78 #include <sys/kmem.h>
79
80 #include <uvm/uvm.h>
81 #include <uvm/uvm_page.h>
82 #include <uvm/uvm_param.h>
83 #include <uvm/uvm_pdpolicy.h>
84 #include <uvm/uvm_physseg.h>
85
86 /*
87 * uvm_physseg: describes one segment of physical memory
88 */
89 struct uvm_physseg {
90 /* used during RB tree lookup for PHYS_TO_VM_PAGE(). */
91 struct rb_node rb_node; /* tree information */
92 paddr_t start; /* PF# of first page in segment */
93 paddr_t end; /* (PF# of last page in segment) + 1 */
94 struct vm_page *pgs; /* vm_page structures (from start) */
95
96 /* less performance sensitive fields. */
97 paddr_t avail_start; /* PF# of first free page in segment */
98 paddr_t avail_end; /* (PF# of last free page in segment) +1 */
99 struct extent *ext; /* extent(9) structure to manage pgs[] */
100 int free_list; /* which free list they belong on */
101 u_int start_hint; /* start looking for free pages here */
102 #ifdef __HAVE_PMAP_PHYSSEG
103 struct pmap_physseg pmseg; /* pmap specific (MD) data */
104 #endif
105 };
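
/*
 * A uvm_physseg_t handle is opaque to callers: with UVM_HOTPLUG it is
 * simply a pointer to one of these rbtree nodes, while in the legacy
 * array backend further below it is an index into vm_physmem[].
 */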
106
107 /*
108 * These functions are reserved for uvm(9) internal use and are not
109 * exported in the header file uvm_physseg.h
110 *
111 * Thus they are declared here as well.
112 */
113 void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
114 void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);
115
116 /* returns a pgs array */
117 struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
118
119 #if defined(UVM_HOTPLUG) /* rbtree implementation */
120
121 #define HANDLE_TO_PHYSSEG_NODE(h) ((struct uvm_physseg *)(h))
122 #define PHYSSEG_NODE_TO_HANDLE(u) ((uvm_physseg_t)(u))
123
124 struct uvm_physseg_graph {
125 struct rb_tree rb_tree; /* Tree for entries */
126 int nentries; /* Number of entries */
127 } __aligned(COHERENCY_UNIT);
128
129 static struct uvm_physseg_graph uvm_physseg_graph __read_mostly;
130
131 /*
132 * Note on kmem(9) allocator usage:
133 * We take the conservative approach that plug/unplug are allowed to
134 * fail in high memory stress situations.
135 *
136 * We want to avoid re-entrant situations in which one plug/unplug
137 * operation is waiting on a previous one to complete, since this
138 * makes the design more complicated than necessary.
139 *
140 * We may review this and change its behaviour, once the use cases
141 * become more obvious.
142 */
143
144 /*
145 * Special alloc()/free() functions for boot time support:
146 * We assume that alloc() at boot time is only for new 'vm_physseg's.
147 * This allows us to use a static array for memory allocation at boot
148 * time. Thus we avoid using kmem(9) which is not ready at this point
149 * in boot.
150 *
151 * After kmem(9) is ready, we use it. We currently discard any free()s
152 * to this static array, since the size is small enough to be a
153 * trivial waste on all architectures we run on.
154 */
155
156 static size_t nseg = 0;
157 static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];
158
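/*
 * At boot, uvm_physseg_alloc() simply carves entries out of the static
 * array above: a request of sizeof(struct uvm_physseg) * N advances
 * nseg by N and returns &uvm_physseg[nseg - N], i.e. the first of the
 * N newly reserved slots.
 */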
159 static void *
160 uvm_physseg_alloc(size_t sz)
161 {
162 /*
163 * During boot time, we only support allocating vm_physseg
164 * entries from the static array.
165 * We need to assert for this.
166 */
167
168 if (__predict_false(uvm.page_init_done == false)) {
169 if (sz % sizeof(struct uvm_physseg))
170 panic("%s: tried to alloc size other than multiple"
171 " of struct uvm_physseg at boot\n", __func__);
172
173 size_t n = sz / sizeof(struct uvm_physseg);
174 nseg += n;
175
176 KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);
177
178 return &uvm_physseg[nseg - n];
179 }
180
181 return kmem_zalloc(sz, KM_NOSLEEP);
182 }
183
184 static void
185 uvm_physseg_free(void *p, size_t sz)
186 {
187 /*
188 * This is a bit tricky. We do allow simulation of free()
189 * during boot (e.g. when MD code is "stealing" memory and the
190 * segment has been exhausted, and thus needs to be free()-ed).
191 *
192 * free() also complicates things because we leak the free()-ed
193 * memory. Therefore calling code can't assume that free()-ed
194 * memory is available for alloc() again at boot time.
195 *
196 * Thus we can't explicitly disallow free()s during
197 * boot time. However, the same restriction for alloc()
198 * applies to free(). We only allow uvm_physseg related free()s
199 * via this function during boot time.
200 */
201
202 if (__predict_false(uvm.page_init_done == false)) {
203 if (sz % sizeof(struct uvm_physseg))
204 panic("%s: tried to free size other than struct uvm_physseg"
205 " at boot\n", __func__);
206
207 }
208
209 /*
210 * Could have been in a single if(){} block - split for
211 * clarity
212 */
213
214 if ((struct uvm_physseg *)p >= uvm_physseg &&
215 (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
216 if (sz % sizeof(struct uvm_physseg))
217 panic("%s: tried to free() other than struct uvm_physseg"
218 " from static array\n", __func__);
219
220 if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
221 panic("%s: tried to free() the entire static array!", __func__);
222 return; /* Nothing to free */
223 }
224
225 kmem_free(p, sz);
226 }
227
228 /* XXX: Multi page size */
229 bool
230 uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
231 {
232 int preload;
233 size_t slabpages;
234 struct uvm_physseg *ps, *current_ps = NULL;
235 struct vm_page *slab = NULL, *pgs = NULL;
236
237 #ifdef DEBUG
238 paddr_t off;
239 uvm_physseg_t upm;
240 upm = uvm_physseg_find(pfn, &off);
241
242 ps = HANDLE_TO_PHYSSEG_NODE(upm);
243
244 if (ps != NULL) /* XXX: do we allow "update" plugs? */
245 return false;
246 #endif
247
248 /*
249 * do we have room?
250 */
251
252 ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
253 if (ps == NULL) {
254 printf("uvm_page_physload: unable to load physical memory "
255 "segment\n");
256 printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
257 VM_PHYSSEG_MAX, pfn, pfn + pages + 1);
258 printf("\tincrease VM_PHYSSEG_MAX\n");
259 return false;
260 }
261
262 /* span init */
263 ps->start = pfn;
264 ps->end = pfn + pages;
265
266 /*
267 * XXX: Ugly hack - uvmexp.npages only accounts for pages within
268 * the avail range, so mark the whole segment available below.
269 * This is legacy behaviour and should be removed.
270 */
271
272 ps->avail_start = ps->start;
273 ps->avail_end = ps->end;
274
275 /*
276 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
277 * called yet, so kmem is not available).
278 */
279
280 preload = 1; /* We are going to assume it is a preload */
281
282 RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
283 /* If there are non NULL pages then we are not in a preload */
284 if (current_ps->pgs != NULL) {
285 preload = 0;
286 /* Try to scavenge from earlier unplug()s. */
287 pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);
288
289 if (pgs != NULL) {
290 break;
291 }
292 }
293 }
294
295
296 /*
297 * if VM is already running, attempt to kmem_alloc vm_page structures
298 */
299
300 if (!preload) {
301 if (pgs == NULL) { /* Brand new */
302 /* Iteratively try alloc down from uvmexp.npages */
303 for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
304 slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
305 if (slab != NULL)
306 break;
307 }
308
309 if (slab == NULL) {
310 uvm_physseg_free(ps, sizeof(struct uvm_physseg));
311 return false;
312 }
313
314 uvm_physseg_seg_chomp_slab(ps, slab, (size_t) slabpages);
315 /* We allocate enough for this plug */
316 pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);
317
318 if (pgs == NULL) {
319 printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
320 return false;
321 }
322 } else {
323 /* Reuse scavenged extent */
324 ps->ext = current_ps->ext;
325 }
326
327 physmem += pages;
328 uvmpdpol_reinit();
329 } else { /* Boot time - see uvm_page.c:uvm_page_init() */
330 pgs = NULL;
331 ps->pgs = pgs;
332 }
333
334 /*
335 * now insert us in the proper place in uvm_physseg_graph.rb_tree
336 */
337
338 current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
339 if (current_ps != ps) {
340 panic("uvm_page_physload: Duplicate address range detected!");
341 }
342 uvm_physseg_graph.nentries++;
343
344 /*
345 * uvm_pagefree() requires the PHYS_TO_VM_PAGE(pgs[i]) on the
346 * newly allocated pgs[] to return the correct value. This is
347 * a bit of a chicken and egg problem, since it needs
348 * uvm_physseg_find() to succeed. For this, the node needs to
349 * be inserted *before* uvm_physseg_init_seg() happens.
350 *
351 * During boot, this happens anyway, since
352 * uvm_physseg_init_seg() is called later on and separately
353 * from uvm_page.c:uvm_page_init().
354 * In the case of hotplug we need to ensure this.
355 */
356
357 if (__predict_true(!preload))
358 uvm_physseg_init_seg(ps, pgs);
359
360 if (psp != NULL)
361 *psp = ps;
362
363 return true;
364 }
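
/*
 * Illustrative plug/unplug usage from hotplug-capable MD code (a
 * hedged sketch; "newmem_pa" and "newmem_npages" are placeholder
 * names, not symbols defined elsewhere):
 *
 *	uvm_physseg_t upm;
 *
 *	if (uvm_physseg_plug(atop(newmem_pa), newmem_npages, &upm) == false)
 *		return ENOMEM;		// allocation of seg or slab failed
 *	...
 *	if (uvm_physseg_unplug(atop(newmem_pa), newmem_npages) == false)
 *		...			// range busy or unknown; caller recovers
 */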
365
366 static int
367 uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
368 {
369 const struct uvm_physseg *enode1 = nnode1;
370 const struct uvm_physseg *enode2 = nnode2;
371
372 KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
373 KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);
374
375 if (enode1->start < enode2->start)
376 return -1;
377 if (enode1->start >= enode2->end)
378 return 1;
379 return 0;
380 }
381
382 static int
383 uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
384 {
385 const struct uvm_physseg *enode = nnode;
386 const paddr_t pa = *(const paddr_t *) pkey;
387
388 if (enode->start <= pa && pa < enode->end)
389 return 0;
390 if (enode->start < pa)
391 return -1;
392 if (enode->end > pa)
393 return 1;
394
395 return 0;
396 }
397
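/*
 * Note on the comparators above: nodes are ordered by their start
 * frame, segments are asserted not to overlap, and a key (a paddr_t
 * page frame) matches a node when it falls within [start, end).
 */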
398 static const rb_tree_ops_t uvm_physseg_tree_ops = {
399 .rbto_compare_nodes = uvm_physseg_compare_nodes,
400 .rbto_compare_key = uvm_physseg_compare_key,
401 .rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
402 .rbto_context = NULL
403 };
404
405 /*
406 * uvm_physseg_init: init the physmem
407 *
408 * => physmem unit should not be in use at this point
409 */
410
411 void
412 uvm_physseg_init(void)
413 {
414 rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
415 uvm_physseg_graph.nentries = 0;
416 }
417
418 uvm_physseg_t
419 uvm_physseg_get_next(uvm_physseg_t upm)
420 {
421 /* next of invalid is invalid, not fatal */
422 if (uvm_physseg_valid_p(upm) == false)
423 return UVM_PHYSSEG_TYPE_INVALID;
424
425 return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
426 RB_DIR_RIGHT);
427 }
428
429 uvm_physseg_t
430 uvm_physseg_get_prev(uvm_physseg_t upm)
431 {
432 /* prev of invalid is invalid, not fatal */
433 if (uvm_physseg_valid_p(upm) == false)
434 return UVM_PHYSSEG_TYPE_INVALID;
435
436 return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
437 RB_DIR_LEFT);
438 }
439
440 uvm_physseg_t
441 uvm_physseg_get_last(void)
442 {
443 return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
444 }
445
446 uvm_physseg_t
447 uvm_physseg_get_first(void)
448 {
449 return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
450 }
451
452 paddr_t
453 uvm_physseg_get_highest_frame(void)
454 {
455 struct uvm_physseg *ps =
456 (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
457
458 return ps->end - 1;
459 }
460
461 /*
462 * uvm_page_physunload: unload physical memory and return it to
463 * caller.
464 */
465 bool
466 uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
467 {
468 struct uvm_physseg *seg;
469
470 if (__predict_true(uvm.page_init_done == true))
471 panic("%s: unload attempted after uvm_page_init()\n", __func__);
472
473 seg = HANDLE_TO_PHYSSEG_NODE(upm);
474
475 if (seg->free_list != freelist) {
476 paddrp = NULL;
477 return false;
478 }
479
480 /*
481 * During cold boot, what we're about to unplug hasn't been
482 * put on the uvm freelist, nor has uvmexp.npages been
483 * updated. (This happens in uvm_page.c:uvm_page_init())
484 *
485 * For hotplug, we assume here that the pages being unloaded
486 * here are completely out of sight of uvm (ie; not on any uvm
487 * lists), and that uvmexp.npages has been suitably
488 * decremented before we're called.
489 *
490 * XXX: will avail_end == start if avail_start < avail_end?
491 */
492
493 /* try from front */
494 if (seg->avail_start == seg->start &&
495 seg->avail_start < seg->avail_end) {
496 *paddrp = ctob(seg->avail_start);
497 return uvm_physseg_unplug(seg->avail_start, 1);
498 }
499
500 /* try from rear */
501 if (seg->avail_end == seg->end &&
502 seg->avail_start < seg->avail_end) {
503 *paddrp = ctob(seg->avail_end - 1);
504 return uvm_physseg_unplug(seg->avail_end - 1, 1);
505 }
506
507 return false;
508 }
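
/*
 * Note: like its legacy counterpart further below, this returns at
 * most one page per call, taken from the front or the rear of the
 * segment's available range, and unplugs that single frame.
 */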
509
510 bool
511 uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
512 {
513 struct uvm_physseg *seg;
514
515 seg = HANDLE_TO_PHYSSEG_NODE(upm);
516
517 if (__predict_true(uvm.page_init_done == true))
518 panic("%s: unload attempted after uvm_page_init()\n", __func__);
519 /* any room in this bank? */
520 if (seg->avail_start >= seg->avail_end) {
521 paddrp = NULL;
522 return false; /* nope */
523 }
524
525 *paddrp = ctob(seg->avail_start);
526
527 /* Always unplug from front */
528 return uvm_physseg_unplug(seg->avail_start, 1);
529 }
530
531
532 /*
533 * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
534 */
535 uvm_physseg_t
536 uvm_physseg_find(paddr_t pframe, psize_t *offp)
537 {
538 struct uvm_physseg * ps = NULL;
539
540 ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);
541
542 if (ps != NULL && offp != NULL)
543 *offp = pframe - ps->start;
544
545 return ps;
546 }
547
548 #else /* UVM_HOTPLUG */
549
550 /*
551 * physical memory config is stored in vm_physmem.
552 */
553
554 #define VM_PHYSMEM_PTR(i) (&vm_physmem[i])
555 #if VM_PHYSSEG_MAX == 1
556 #define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
557 #else
558 #define VM_PHYSMEM_PTR_SWAP(i, j) \
559 do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
560 #endif
561
562 #define HANDLE_TO_PHYSSEG_NODE(h) (VM_PHYSMEM_PTR((int)h))
563 #define PHYSSEG_NODE_TO_HANDLE(u) ((int)(u - vm_physmem)) /* pointer subtraction already yields an element index */
564
565 static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
566 static int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
567 #define vm_nphysmem vm_nphysseg
568
569 void
570 uvm_physseg_init(void)
571 {
572 /* XXX: Provisioning for rb_tree related init(s) */
573 return;
574 }
575
576 int
577 uvm_physseg_get_next(uvm_physseg_t lcv)
578 {
579 /* next of invalid is invalid, not fatal */
580 if (uvm_physseg_valid_p(lcv) == false)
581 return UVM_PHYSSEG_TYPE_INVALID;
582
583 return (lcv + 1);
584 }
585
586 int
587 uvm_physseg_get_prev(uvm_physseg_t lcv)
588 {
589 /* prev of invalid is invalid, not fatal */
590 if (uvm_physseg_valid_p(lcv) == false)
591 return UVM_PHYSSEG_TYPE_INVALID;
592
593 return (lcv - 1);
594 }
595
596 int
597 uvm_physseg_get_last(void)
598 {
599 return (vm_nphysseg - 1);
600 }
601
602 int
603 uvm_physseg_get_first(void)
604 {
605 return 0;
606 }
607
608 paddr_t
609 uvm_physseg_get_highest_frame(void)
610 {
611 int lcv;
612 paddr_t last = 0;
613 struct uvm_physseg *ps;
614
615 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
616 ps = VM_PHYSMEM_PTR(lcv);
617 if (last < ps->end)
618 last = ps->end;
619 }
620
621 return last;
622 }
623
624
625 static struct vm_page *
626 uvm_post_preload_check(void)
627 {
628 int preload, lcv;
629
630 /*
631 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
632 * called yet, so kmem is not available).
633 */
634
635 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
636 if (VM_PHYSMEM_PTR(lcv)->pgs)
637 break;
638 }
639 preload = (lcv == vm_nphysmem);
640
641 /*
642 * if VM is already running, attempt to kmem_alloc vm_page structures
643 */
644
645 if (!preload) {
646 panic("Tried to add RAM after uvm_page_init");
647 }
648
649 return NULL;
650 }
651
652 /*
653 * uvm_page_physunload: unload physical memory and return it to
654 * caller.
655 */
656 bool
657 uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
658 {
659 int x;
660 struct uvm_physseg *seg;
661
662 uvm_post_preload_check();
663
664 seg = VM_PHYSMEM_PTR(psi);
665
666 if (seg->free_list != freelist) {
667 paddrp = NULL;
668 return false;
669 }
670
671 /* try from front */
672 if (seg->avail_start == seg->start &&
673 seg->avail_start < seg->avail_end) {
674 *paddrp = ctob(seg->avail_start);
675 seg->avail_start++;
676 seg->start++;
677 /* nothing left? nuke it */
678 if (seg->avail_start == seg->end) {
679 if (vm_nphysmem == 1)
680 panic("uvm_page_physget: out of memory!");
681 vm_nphysmem--;
682 for (x = psi ; x < vm_nphysmem ; x++)
683 /* structure copy */
684 VM_PHYSMEM_PTR_SWAP(x, x + 1);
685 }
686 return (true);
687 }
688
689 /* try from rear */
690 if (seg->avail_end == seg->end &&
691 seg->avail_start < seg->avail_end) {
692 *paddrp = ctob(seg->avail_end - 1);
693 seg->avail_end--;
694 seg->end--;
695 /* nothing left? nuke it */
696 if (seg->avail_end == seg->start) {
697 if (vm_nphysmem == 1)
698 panic("uvm_page_physget: out of memory!");
699 vm_nphysmem--;
700 for (x = psi ; x < vm_nphysmem ; x++)
701 /* structure copy */
702 VM_PHYSMEM_PTR_SWAP(x, x + 1);
703 }
704 return (true);
705 }
706
707 return false;
708 }
709
710 bool
711 uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
712 {
713 int x;
714 struct uvm_physseg *seg;
715
716 uvm_post_preload_check();
717
718 seg = VM_PHYSMEM_PTR(psi);
719
720 /* any room in this bank? */
721 if (seg->avail_start >= seg->avail_end) {
722 paddrp = NULL;
723 return false; /* nope */
724 }
725
726 *paddrp = ctob(seg->avail_start);
727 seg->avail_start++;
728 /* truncate! */
729 seg->start = seg->avail_start;
730
731 /* nothing left? nuke it */
732 if (seg->avail_start == seg->end) {
733 if (vm_nphysmem == 1)
734 panic("uvm_page_physget: out of memory!");
735 vm_nphysmem--;
736 for (x = psi ; x < vm_nphysmem ; x++)
737 /* structure copy */
738 VM_PHYSMEM_PTR_SWAP(x, x + 1);
739 }
740 return (true);
741 }
742
743 bool
744 uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
745 {
746 int lcv;
747 struct vm_page *pgs;
748 struct uvm_physseg *ps;
749
750 #ifdef DEBUG
751 paddr_t off;
752 uvm_physseg_t upm;
753 upm = uvm_physseg_find(pfn, &off);
754
755 if (uvm_physseg_valid_p(upm)) /* XXX: do we allow "update" plugs? */
756 return false;
757 #endif
758
759 paddr_t start = pfn;
760 paddr_t end = pfn + pages;
761 paddr_t avail_start = start;
762 paddr_t avail_end = end;
763
764 if (uvmexp.pagesize == 0)
765 panic("uvm_page_physload: page size not set!");
766
767 /*
768 * do we have room?
769 */
770
771 if (vm_nphysmem == VM_PHYSSEG_MAX) {
772 printf("uvm_page_physload: unable to load physical memory "
773 "segment\n");
774 printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
775 VM_PHYSSEG_MAX, (long long)start, (long long)end);
776 printf("\tincrease VM_PHYSSEG_MAX\n");
777 if (psp != NULL)
778 *psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
779 return false;
780 }
781
782 /*
783 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
784 * called yet, so kmem is not available).
785 */
786 pgs = uvm_post_preload_check();
787
788 /*
789 * now insert us in the proper place in vm_physmem[]
790 */
791
792 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
793 /* random: put it at the end (easy!) */
794 ps = VM_PHYSMEM_PTR(vm_nphysmem);
795 lcv = vm_nphysmem;
796 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
797 {
798 int x;
799 /* sort by address for binary search */
800 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
801 if (start < VM_PHYSMEM_PTR(lcv)->start)
802 break;
803 ps = VM_PHYSMEM_PTR(lcv);
804 /* move back other entries, if necessary ... */
805 for (x = vm_nphysmem ; x > lcv ; x--)
806 /* structure copy */
807 VM_PHYSMEM_PTR_SWAP(x, x - 1);
808 }
809 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
810 {
811 int x;
812 /* sort by largest segment first */
813 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
814 if ((end - start) >
815 (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
816 break;
817 ps = VM_PHYSMEM_PTR(lcv);
818 /* move back other entries, if necessary ... */
819 for (x = vm_nphysmem ; x > lcv ; x--)
820 /* structure copy */
821 VM_PHYSMEM_PTR_SWAP(x, x - 1);
822 }
823 #else
824 panic("uvm_page_physload: unknown physseg strategy selected!");
825 #endif
826
827 ps->start = start;
828 ps->end = end;
829 ps->avail_start = avail_start;
830 ps->avail_end = avail_end;
831
832 ps->pgs = pgs;
833
834 vm_nphysmem++;
835
836 if (psp != NULL)
837 *psp = lcv;
838
839 return true;
840 }
841
842 /*
843 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
844 */
845
846 #if VM_PHYSSEG_MAX == 1
847 static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
848 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
849 static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
850 #else
851 static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
852 #endif
853
854 /*
855 * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
856 */
857 int
858 uvm_physseg_find(paddr_t pframe, psize_t *offp)
859 {
860
861 #if VM_PHYSSEG_MAX == 1
862 return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
863 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
864 return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
865 #else
866 return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
867 #endif
868 }
869
870 #if VM_PHYSSEG_MAX == 1
871 static inline int
872 vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
873 {
874
875 /* 'contig' case */
876 if (pframe >= segs[0].start && pframe < segs[0].end) {
877 if (offp)
878 *offp = pframe - segs[0].start;
879 return(0);
880 }
881 return(-1);
882 }
883
884 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
885
886 static inline int
887 vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
888 {
889 /* binary search for it */
890 int start, len, guess;
891
892 /*
893 * if try is too large (thus target is less than try) we reduce
894 * the length to trunc(len/2) [i.e. everything smaller than "try"]
895 *
896 * if the try is too small (thus target is greater than try) then
897 * we set the new start to be (try + 1). this means we need to
898 * reduce the length to (round(len/2) - 1).
899 *
900 * note "adjust" below which takes advantage of the fact that
901 * (round(len/2) - 1) == trunc((len - 1) / 2)
902 * for any value of len we may have
903 */
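
/*
 * Worked example of the "adjust": with len == 7 and a try that is too
 * small, start becomes (guess + 1), and the len-- combined with the
 * loop's len / 2 gives trunc((7 - 1) / 2) == 3, which equals
 * round(7 / 2) - 1 as claimed above.
 */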
904
905 for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
906 guess = start + (len / 2); /* try in the middle */
907
908 /* start past our try? */
909 if (pframe >= segs[guess].start) {
910 /* was try correct? */
911 if (pframe < segs[guess].end) {
912 if (offp)
913 *offp = pframe - segs[guess].start;
914 return guess; /* got it */
915 }
916 start = guess + 1; /* next time, start here */
917 len--; /* "adjust" */
918 } else {
919 /*
920 * pframe before try, just reduce length of
921 * region, done in "for" loop
922 */
923 }
924 }
925 return(-1);
926 }
927
928 #else
929
930 static inline int
931 vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
932 {
933 /* linear search for it */
934 int lcv;
935
936 for (lcv = 0; lcv < nsegs; lcv++) {
937 if (pframe >= segs[lcv].start &&
938 pframe < segs[lcv].end) {
939 if (offp)
940 *offp = pframe - segs[lcv].start;
941 return(lcv); /* got it */
942 }
943 }
944 return(-1);
945 }
946 #endif
947 #endif /* UVM_HOTPLUG */
948
949 bool
950 uvm_physseg_valid_p(uvm_physseg_t upm)
951 {
952 struct uvm_physseg *ps;
953
954 if (upm == UVM_PHYSSEG_TYPE_INVALID ||
955 upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
956 upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
957 return false;
958
959 /*
960 * Part of the delicate boot-time init dance: before
961 * uvm_page_init() is done, skip the extra checks below.
962 */
963 if (uvm.page_init_done != true)
964 return true;
965
966 ps = HANDLE_TO_PHYSSEG_NODE(upm);
967
968 /* Extra checks needed only post uvm_page_init() */
969 if (ps->pgs == NULL)
970 return false;
971
972 /* XXX: etc. */
973
974 return true;
975
976 }
977
978 /*
979 * Boot protocol dictates that these must be able to return partially
980 * initialised segments.
981 */
982 paddr_t
983 uvm_physseg_get_start(uvm_physseg_t upm)
984 {
985 if (uvm_physseg_valid_p(upm) == false)
986 return (paddr_t) -1;
987
988 return HANDLE_TO_PHYSSEG_NODE(upm)->start;
989 }
990
991 paddr_t
992 uvm_physseg_get_end(uvm_physseg_t upm)
993 {
994 if (uvm_physseg_valid_p(upm) == false)
995 return (paddr_t) -1;
996
997 return HANDLE_TO_PHYSSEG_NODE(upm)->end;
998 }
999
1000 paddr_t
1001 uvm_physseg_get_avail_start(uvm_physseg_t upm)
1002 {
1003 if (uvm_physseg_valid_p(upm) == false)
1004 return (paddr_t) -1;
1005
1006 return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
1007 }
1008
1009 #if defined(UVM_PHYSSEG_LEGACY)
1010 void
1011 uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
1012 {
1013 struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);
1014
1015 #if defined(DIAGNOSTIC)
1016 paddr_t avail_end;
1017 avail_end = uvm_physseg_get_avail_end(upm);
1018 KASSERT(uvm_physseg_valid_p(upm));
1019 KASSERT(avail_start < avail_end && avail_start >= ps->start);
1020 #endif
1021
1022 ps->avail_start = avail_start;
1023 }
1024
1025 void
1026 uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
1027 {
1028 struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);
1029
1030 #if defined(DIAGNOSTIC)
1031 paddr_t avail_start;
1032 avail_start = uvm_physseg_get_avail_start(upm);
1033 KASSERT(uvm_physseg_valid_p(upm));
1034 KASSERT(avail_end > avail_start && avail_end <= ps->end);
1035 #endif
1036
1037 ps->avail_end = avail_end;
1038 }
1039
1040 #endif /* UVM_PHYSSEG_LEGACY */
1041
1042 paddr_t
1043 uvm_physseg_get_avail_end(uvm_physseg_t upm)
1044 {
1045 if (uvm_physseg_valid_p(upm) == false)
1046 return (paddr_t) -1;
1047
1048 return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
1049 }
1050
1051 struct vm_page *
1052 uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
1053 {
1054 KASSERT(uvm_physseg_valid_p(upm));
1055 return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
1056 }
1057
1058 #ifdef __HAVE_PMAP_PHYSSEG
1059 struct pmap_physseg *
1060 uvm_physseg_get_pmseg(uvm_physseg_t upm)
1061 {
1062 KASSERT(uvm_physseg_valid_p(upm));
1063 return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
1064 }
1065 #endif
1066
1067 int
1068 uvm_physseg_get_free_list(uvm_physseg_t upm)
1069 {
1070 KASSERT(uvm_physseg_valid_p(upm));
1071 return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
1072 }
1073
1074 u_int
1075 uvm_physseg_get_start_hint(uvm_physseg_t upm)
1076 {
1077 KASSERT(uvm_physseg_valid_p(upm));
1078 return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
1079 }
1080
1081 bool
1082 uvm_physseg_set_start_hint(uvm_physseg_t upm, u_int start_hint)
1083 {
1084 if (uvm_physseg_valid_p(upm) == false)
1085 return false;
1086
1087 HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
1088 return true;
1089 }
1090
1091 void
1092 uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
1093 {
1094 psize_t i;
1095 psize_t n;
1096 paddr_t paddr;
1097 struct uvm_physseg *seg;
1098 struct vm_page *pg;
1099
1100 KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);
1101
1102 seg = HANDLE_TO_PHYSSEG_NODE(upm);
1103 KASSERT(seg != NULL);
1104 KASSERT(seg->pgs == NULL);
1105
1106 n = seg->end - seg->start;
1107 seg->pgs = pgs;
1108
1109 /* init and free vm_pages (we've already zeroed them) */
1110 paddr = ctob(seg->start);
1111 for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
1112 pg = &seg->pgs[i];
1113 pg->phys_addr = paddr;
1114 #ifdef __HAVE_VM_PAGE_MD
1115 VM_MDPAGE_INIT(pg);
1116 #endif
1117 if (atop(paddr) >= seg->avail_start &&
1118 atop(paddr) < seg->avail_end) {
1119 uvmexp.npages++;
1120 /* add page to free pool */
1121 uvm_page_set_freelist(pg,
1122 uvm_page_lookup_freelist(pg));
1123 /* Disable LOCKDEBUG: too many and too early. */
1124 mutex_init(&pg->interlock, MUTEX_NODEBUG, IPL_NONE);
1125 uvm_pagefree(pg);
1126 }
1127 }
1128 }
1129
1130 void
1131 uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
1132 {
1133 struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);
1134
1135 /* max number of pre-boot unplug()s allowed */
1136 #define UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX
1137
1138 static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];
1139
1140 if (__predict_false(uvm.page_init_done == false)) {
1141 seg->ext = extent_create("Boot time slab", (u_long) pgs, (u_long) (pgs + n),
1142 (void *)btslab_ex_storage, sizeof(btslab_ex_storage), 0);
1143 } else {
1144 seg->ext = extent_create("Hotplug slab", (u_long) pgs, (u_long) (pgs + n), NULL, 0, 0);
1145 }
1146
1147 KASSERT(seg->ext != NULL);
1148
1149 }
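
/*
 * Note: the extent(9) arena created above spans the kernel virtual
 * addresses of the vm_page slab, so uvm_physseg_seg_alloc_from_slab()
 * below hands out sub-ranges of pgs[] by address.
 */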
1150
1151 struct vm_page *
1152 uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
1153 {
1154 int err;
1155 struct uvm_physseg *seg;
1156 struct vm_page *pgs = NULL;
1157
1158 KASSERT(pages > 0);
1159
1160 seg = HANDLE_TO_PHYSSEG_NODE(upm);
1161
1162 if (__predict_false(seg->ext == NULL)) {
1163 /*
1164 * This is a situation unique to boot time.
1165 * It shouldn't happen at any point other than from
1166 * the first uvm_page.c:uvm_page_init() call.
1167 * Since we're called in a loop over segments there, we can
1168 * get away with borrowing the previous segment's extent below.
1169 */
1170 KASSERT(uvm.page_init_done != true);
1171
1172 uvm_physseg_t upmp = uvm_physseg_get_prev(upm);
1173 KASSERT(upmp != UVM_PHYSSEG_TYPE_INVALID);
1174
1175 seg->ext = HANDLE_TO_PHYSSEG_NODE(upmp)->ext;
1176
1177 KASSERT(seg->ext != NULL);
1178 }
1179
1180 /* We allocate enough for this segment */
1181 err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0, EX_BOUNDZERO, (u_long *)&pgs);
1182
1183 if (err != 0) {
1184 #ifdef DEBUG
1185 printf("%s: extent_alloc failed with error: %d \n",
1186 __func__, err);
1187 #endif
1188 }
1189
1190 return pgs;
1191 }
1192
1193 /*
1194 * uvm_page_physload: load physical memory into VM system
1195 *
1196 * => all args are PFs
1197 * => all pages in start/end get vm_page structures
1198 * => areas marked by avail_start/avail_end get added to the free page pool
1199 * => we are limited to VM_PHYSSEG_MAX physical memory segments
1200 */
1201
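/*
 * Typical boot-time call from MD bootstrap code (an illustrative
 * sketch only; the seg_ and avail_ names are placeholders):
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 */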
1202 uvm_physseg_t
1203 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
1204 paddr_t avail_end, int free_list)
1205 {
1206 struct uvm_physseg *ps;
1207 uvm_physseg_t upm;
1208
1209 if (__predict_true(uvm.page_init_done == true))
1210 panic("%s: unload attempted after uvm_page_init()\n", __func__);
1211 if (uvmexp.pagesize == 0)
1212 panic("uvm_page_physload: page size not set!");
1213 if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
1214 panic("uvm_page_physload: bad free list %d", free_list);
1215 if (start >= end)
1216 panic("uvm_page_physload: start >= end");
1217
1218 if (uvm_physseg_plug(start, end - start, &upm) == false) {
1219 panic("uvm_physseg_plug() failed at boot.");
1220 /* NOTREACHED */
1221 return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
1222 }
1223
1224 ps = HANDLE_TO_PHYSSEG_NODE(upm);
1225
1226 /* Legacy */
1227 ps->avail_start = avail_start;
1228 ps->avail_end = avail_end;
1229
1230 ps->free_list = free_list; /* XXX: */
1231
1232
1233 return upm;
1234 }
1235
1236 bool
1237 uvm_physseg_unplug(paddr_t pfn, size_t pages)
1238 {
1239 uvm_physseg_t upm;
1240 paddr_t off = 0, start __diagused, end;
1241 struct uvm_physseg *seg;
1242
1243 upm = uvm_physseg_find(pfn, &off);
1244
1245 if (!uvm_physseg_valid_p(upm)) {
1246 printf("%s: Tried to unplug from unknown offset\n", __func__);
1247 return false;
1248 }
1249
1250 seg = HANDLE_TO_PHYSSEG_NODE(upm);
1251
1252 start = uvm_physseg_get_start(upm);
1253 end = uvm_physseg_get_end(upm);
1254
1255 if (end < (pfn + pages)) {
1256 printf("%s: Tried to unplug oversized span \n", __func__);
1257 return false;
1258 }
1259
1260 KASSERT(pfn == start + off); /* sanity */
1261
1262 if (__predict_true(uvm.page_init_done == true)) {
1263 /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1264 if (extent_free(seg->ext, (u_long)(seg->pgs + off), sizeof(struct vm_page) * pages, EX_MALLOCOK | EX_NOWAIT) != 0)
1265 return false;
1266 }
1267
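/*
 * Four cases follow: unplugging a whole segment, a middle chunk
 * (which splits the segment in two), a front chunk, or a back chunk
 * (the latter two simply truncate the segment).
 */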
1268 if (off == 0 && (pfn + pages) == end) {
1269 #if defined(UVM_HOTPLUG) /* rbtree implementation */
1270 int segcount = 0;
1271 struct uvm_physseg *current_ps;
1272 /* Complete segment */
1273 if (uvm_physseg_graph.nentries == 1)
1274 panic("%s: out of memory!", __func__);
1275
1276 if (__predict_true(uvm.page_init_done == true)) {
1277 RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
1278 if (seg->ext == current_ps->ext)
1279 segcount++;
1280 }
1281 KASSERT(segcount > 0);
1282
1283 if (segcount == 1) {
1284 extent_destroy(seg->ext);
1285 }
1286
1287 /*
1288 * We assume that the unplug will succeed from
1289 * this point onwards
1290 */
1291 uvmexp.npages -= (int) pages;
1292 }
1293
1294 rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
1295 memset(seg, 0, sizeof(struct uvm_physseg));
1296 uvm_physseg_free(seg, sizeof(struct uvm_physseg));
1297 uvm_physseg_graph.nentries--;
1298 #else /* UVM_HOTPLUG */
1299 int x;
1300 if (vm_nphysmem == 1)
1301 panic("uvm_page_physget: out of memory!");
1302 vm_nphysmem--;
1303 for (x = upm ; x < vm_nphysmem ; x++)
1304 /* structure copy */
1305 VM_PHYSMEM_PTR_SWAP(x, x + 1);
1306 #endif /* UVM_HOTPLUG */
1307 /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1308 return true;
1309 }
1310
1311 if (off > 0 &&
1312 (pfn + pages) < end) {
1313 #if defined(UVM_HOTPLUG) /* rbtree implementation */
1314 /* middle chunk - need a new segment */
1315 struct uvm_physseg *ps, *current_ps;
1316 ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
1317 if (ps == NULL) {
1318 printf("%s: Unable to allocated new fragment vm_physseg \n",
1319 __func__);
1320 return false;
1321 }
1322
1323 /* Remove middle chunk */
1324 if (__predict_true(uvm.page_init_done == true)) {
1325 KASSERT(seg->ext != NULL);
1326 ps->ext = seg->ext;
1327
1328 /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1329 /*
1330 * We assume that the unplug will succeed from
1331 * this point onwards
1332 */
1333 uvmexp.npages -= (int) pages;
1334 }
1335
1336 ps->start = pfn + pages;
1337 ps->avail_start = ps->start; /* XXX: Legacy */
1338
1339 ps->end = seg->end;
1340 ps->avail_end = ps->end; /* XXX: Legacy */
1341
1342 seg->end = pfn;
1343 seg->avail_end = seg->end; /* XXX: Legacy */
1344
1345
1346 /*
1347 * The new pgs array points to the beginning of the
1348 * tail fragment.
1349 */
1350 if (__predict_true(uvm.page_init_done == true))
1351 ps->pgs = seg->pgs + off + pages;
1352
1353 current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
1354 if (current_ps != ps) {
1355 panic("uvm_page_physload: Duplicate address range detected!");
1356 }
1357 uvm_physseg_graph.nentries++;
1358 #else /* UVM_HOTPLUG */
1359 panic("%s: can't unplug() from the middle of a segment without"
1360 " UVM_HOTPLUG\n", __func__);
1361 /* NOTREACHED */
1362 #endif /* UVM_HOTPLUG */
1363 return true;
1364 }
1365
1366 if (off == 0 && (pfn + pages) < end) {
1367 /* Remove front chunk */
1368 if (__predict_true(uvm.page_init_done == true)) {
1369 /* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1370 /*
1371 * We assume that the unplug will succeed from
1372 * this point onwards
1373 */
1374 uvmexp.npages -= (int) pages;
1375 }
1376
1377 /* Truncate */
1378 seg->start = pfn + pages;
1379 seg->avail_start = seg->start; /* XXX: Legacy */
1380
1381 /*
1382 * Move the pgs array start to the beginning of the
1383 * tail end.
1384 */
1385 if (__predict_true(uvm.page_init_done == true))
1386 seg->pgs += pages;
1387
1388 return true;
1389 }
1390
1391 if (off > 0 && (pfn + pages) == end) {
1392 /* back chunk */
1393
1394
1395 /* Truncate! */
1396 seg->end = pfn;
1397 seg->avail_end = seg->end; /* XXX: Legacy */
1398
1399 uvmexp.npages -= (int) pages;
1400
1401 return true;
1402 }
1403
1404 printf("%s: Tried to unplug unknown range \n", __func__);
1405
1406 return false;
1407 }
1408