/*	$NetBSD: uvm_physseg.c,v 1.2 2016/12/22 08:15:20 cherry Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h	7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Consolidated API from uvm_page.c and others.
 * Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
 * rbtree(3) backing implementation by:
 * Santhosh N. Raju <santhosh.raju@gmail.com>
 */

#ifdef _KERNEL_OPT
#include "opt_uvm.h"
#endif

#include <sys/param.h>
#include <sys/types.h>
#include <sys/extent.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_physseg.h>

/*
 * uvm_physseg: describes one segment of physical memory
 */
struct uvm_physseg {
	struct rb_node rb_node;		/* tree information */
	paddr_t start;			/* PF# of first page in segment */
	paddr_t end;			/* (PF# of last page in segment) + 1 */
	paddr_t avail_start;		/* PF# of first free page in segment */
	paddr_t avail_end;		/* (PF# of last free page in segment) +1 */
	struct vm_page *pgs;		/* vm_page structures (from start) */
	struct extent *ext;		/* extent(9) structure to manage pgs[] */
	int free_list;			/* which free list they belong on */
	u_int start_hint;		/* start looking for free pages here */
					/* protected by uvm_fpageqlock */
#ifdef __HAVE_PMAP_PHYSSEG
	struct pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};

/*
 * These functions are reserved for uvm(9) internal use and are not
 * exported in the header file uvm_physseg.h
 *
 * Thus they are redefined here.
 */
void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);

/* returns a pgs array */
struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);

#if defined(UVM_HOTPLUG) /* rbtree implementation */

#define	HANDLE_TO_PHYSSEG_NODE(h)	((struct uvm_physseg *)(h))
#define	PHYSSEG_NODE_TO_HANDLE(u)	((uvm_physseg_t)(u))

struct uvm_physseg_graph {
	struct rb_tree rb_tree;		/* Tree for entries */
	int nentries;			/* Number of entries */
};

static struct uvm_physseg_graph uvm_physseg_graph;

/*
 * Note on kmem(9) allocator usage:
 * We take the conservative approach that plug/unplug are allowed to
 * fail in high memory stress situations.
 *
 * We want to avoid re-entrant situations in which one plug/unplug
 * operation is waiting on a previous one to complete, since this
 * makes the design more complicated than necessary.
 *
 * We may review this and change its behaviour, once the use cases
 * become more obvious.
 */

/*
 * Special alloc()/free() functions for boot time support:
 * We assume that alloc() at boot time is only for new 'vm_physseg's
 * This allows us to use a static array for memory allocation at boot
 * time. Thus we avoid using kmem(9) which is not ready at this point
 * in boot.
 *
 * After kmem(9) is ready, we use it. We currently discard any free()s
 * to this static array, since the size is small enough to be a
 * trivial waste on all architectures we run on.
 */

static size_t nseg = 0;
static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];

static void *
uvm_physseg_alloc(size_t sz)
{
	/*
	 * During boot time, we only support allocating vm_physseg
	 * entries from the static array.
	 * We need to assert for this.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to alloc size other than multiple "
			    "of struct uvm_physseg at boot\n", __func__);

		size_t n = sz / sizeof(struct uvm_physseg);
		nseg += n;

		KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);

		return &uvm_physseg[nseg - n];
	}

	return kmem_zalloc(sz, KM_NOSLEEP);
}

static void
uvm_physseg_free(void *p, size_t sz)
{
	/*
	 * This is a bit tricky. We do allow simulation of free()
	 * during boot (for example, when MD code is "steal"ing memory,
	 * and the segment has been exhausted and thus needs to be
	 * free()-ed).
	 * free() also complicates things because we leak the
	 * free(). Therefore calling code can't assume that free()-ed
	 * memory is available for alloc() again, at boot time.
	 *
	 * Thus we can't explicitly disallow free()s during
	 * boot time. However, the same restriction for alloc()
	 * applies to free(). We only allow uvm_physseg related free()s
	 * via this function during boot time.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free size other than struct uvm_physseg "
			    "at boot\n", __func__);

	}

	/*
	 * Could have been in a single if(){} block - split for
	 * clarity
	 */

	if ((struct uvm_physseg *)p >= uvm_physseg &&
	    (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free() other than struct uvm_physseg "
			    "from static array\n", __func__);

		if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
			panic("%s: tried to free() the entire static array!", __func__);
		return; /* Nothing to free */
	}

	kmem_free(p, sz);
}

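/*
 * uvm_physseg_plug: add the span [pfn, pfn + pages) as a new segment of
 * the rbtree based physical memory map.
 *
 * => may be called both at boot (preload) and after uvm_page_init(); in
 *    the latter case vm_page structures are carved from a slab managed
 *    via extent(9) and the pages are handed to the free pool.
 * => returns true on success and, if psp is not NULL, hands back the new
 *    segment handle via *psp.
 */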
/* XXX: Multi page size */
bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int preload;
	size_t slabpages;
	struct uvm_physseg *ps, *current_ps = NULL;
	struct vm_page *slab = NULL, *pgs = NULL;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	if (ps != NULL) /* XXX; do we allow "update" plugs ? */
		return false;
#endif

	/*
	 * do we have room?
	 */

	ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
	if (ps == NULL) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
		    VM_PHYSSEG_MAX, pfn, pfn + pages + 1);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return false;
	}

	/* span init */
	ps->start = pfn;
	ps->end = pfn + pages;

	/*
	 * XXX: Ugly hack, because uvmexp.npages is accounted only for
	 * pages within the avail range set below - this is legacy
	 * behaviour and should be removed.
	 */

	ps->avail_start = ps->start;
	ps->avail_end = ps->end;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	preload = 1; /* We are going to assume it is a preload */

	RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
		/* If there are non NULL pages then we are not in a preload */
		if (current_ps->pgs != NULL) {
			preload = 0;
			/* Try to scavenge from earlier unplug()s. */
			pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);

			if (pgs != NULL) {
				break;
			}
		}
	}


	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		if (pgs == NULL) { /* Brand new */
			/* Iteratively try alloc down from uvmexp.npages */
			for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
				slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
				if (slab != NULL)
					break;
			}

			if (slab == NULL) {
				uvm_physseg_free(ps, sizeof(struct uvm_physseg));
				return false;
			}

			uvm_physseg_seg_chomp_slab(ps, slab, (size_t) slabpages);
			/* We allocate enough for this plug */
			pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);

			if (pgs == NULL) {
				printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
				return false;
			}
		} else {
			/* Reuse scavenged extent */
			ps->ext = current_ps->ext;
		}

		physmem += pages;
		uvmpdpol_reinit();
	} else { /* Boot time - see uvm_page.c:uvm_page_init() */
		pgs = NULL;
		ps->pgs = pgs;
	}

	/*
	 * now insert us in the proper place in uvm_physseg_graph.rb_tree
	 */

	current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
	if (current_ps != ps) {
		panic("uvm_page_physload: Duplicate address range detected!");
	}
	uvm_physseg_graph.nentries++;

	/*
	 * uvm_pagefree() requires the PHYS_TO_VM_PAGE(pgs[i]) on the
	 * newly allocated pgs[] to return the correct value. This is
	 * a bit of a chicken and egg problem, since it needs
	 * uvm_physseg_find() to succeed. For this, the node needs to
	 * be inserted *before* uvm_physseg_init_seg() happens.
	 *
	 * During boot, this happens anyway, since
	 * uvm_physseg_init_seg() is called later on and separately
	 * from uvm_page.c:uvm_page_init().
	 * In the case of hotplug we need to ensure this.
	 */

	if (__predict_true(!preload))
		uvm_physseg_init_seg(ps, pgs);

	if (psp != NULL)
		*psp = ps;

	return true;
}

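/*
 * rb_tree(3) node comparison: segments are ordered by their start frame.
 * Overlapping segments are a bug; the KASSERT()s below catch them.
 */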
static int
uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
{
	const struct uvm_physseg *enode1 = nnode1;
	const struct uvm_physseg *enode2 = nnode2;

	KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
	KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);

	if (enode1->start < enode2->start)
		return -1;
	if (enode1->start >= enode2->end)
		return 1;
	return 0;
}

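/*
 * rb_tree(3) key comparison: a physical address key matches a node if it
 * falls within that segment's [start, end) range.
 */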
static int
uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
{
	const struct uvm_physseg *enode = nnode;
	const paddr_t pa = *(const paddr_t *) pkey;

	if (enode->start <= pa && pa < enode->end)
		return 0;
	if (enode->start < pa)
		return -1;
	if (enode->end > pa)
		return 1;

	return 0;
}

static const rb_tree_ops_t uvm_physseg_tree_ops = {
	.rbto_compare_nodes = uvm_physseg_compare_nodes,
	.rbto_compare_key = uvm_physseg_compare_key,
	.rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
	.rbto_context = NULL
};

/*
 * uvm_physseg_init: init the physmem
 *
 * => physmem unit should not be in use at this point
 */

void
uvm_physseg_init(void)
{
	rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
	uvm_physseg_graph.nentries = 0;
}

uvm_physseg_t
uvm_physseg_get_next(uvm_physseg_t upm)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_RIGHT);
}

uvm_physseg_t
uvm_physseg_get_prev(uvm_physseg_t upm)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_LEFT);
}

uvm_physseg_t
uvm_physseg_get_last(void)
{
	return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
}

uvm_physseg_t
uvm_physseg_get_first(void)
{
	return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	struct uvm_physseg *ps =
	    (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));

	return ps->end - 1;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (seg->free_list != freelist) {
		paddrp = NULL;
		return false;
	}

	/*
	 * During cold boot, what we're about to unplug hasn't been
	 * put on the uvm freelist, nor has uvmexp.npages been
	 * updated. (This happens in uvm_page.c:uvm_page_init())
	 *
	 * For hotplug, we assume here that the pages being unloaded
	 * here are completely out of sight of uvm (ie; not on any uvm
	 * lists), and that uvmexp.npages has been suitably
	 * decremented before we're called.
	 *
	 * XXX: will avail_end == start if avail_start < avail_end?
	 */

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		return uvm_physseg_unplug(seg->avail_start, 1);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		return uvm_physseg_unplug(seg->avail_end - 1, 1);
	}

	return false;
}

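/*
 * uvm_page_physunload_force: like uvm_page_physunload(), but ignores the
 * freelist constraint and always unplugs a single page from the front of
 * the segment's available range.
 */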
bool
uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);
	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		paddrp = NULL;
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);

	/* Always unplug from front */
	return uvm_physseg_unplug(seg->avail_start, 1);
}


/*
 * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
 */
uvm_physseg_t
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{
	struct uvm_physseg *ps = NULL;

	ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);

	if (ps != NULL && offp != NULL)
		*offp = pframe - ps->start;

	return ps;
}

#if defined(PMAP_STEAL_MEMORY)
void
uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_end;
	avail_end = uvm_physseg_get_avail_end(upm);
#endif
	KASSERT(avail_start < avail_end && avail_start >= ps->start);
	ps->avail_start = avail_start;
}

void
uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_start;
	avail_start = uvm_physseg_get_avail_start(upm);
#endif

	KASSERT(avail_end > avail_start && avail_end <= ps->end);

	ps->avail_end = avail_end;
}

#endif /* PMAP_STEAL_MEMORY */
#else  /* UVM_HOTPLUG */

/*
 * physical memory config is stored in vm_physmem.
 */

#define	VM_PHYSMEM_PTR(i)	(&vm_physmem[i])
#if VM_PHYSSEG_MAX == 1
#define	VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
#else
#define	VM_PHYSMEM_PTR_SWAP(i, j)					\
	do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
#endif

#define	HANDLE_TO_PHYSSEG_NODE(h)	(VM_PHYSMEM_PTR((int)h))
#define	PHYSSEG_NODE_TO_HANDLE(u)	((int)((vsize_t) (u - vm_physmem) / sizeof(struct uvm_physseg)))

static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
static int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
#define	vm_nphysmem	vm_nphysseg

void
uvm_physseg_init(void)
{
	/* XXX: Provisioning for rb_tree related init(s) */
	return;
}

int
uvm_physseg_get_next(uvm_physseg_t lcv)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv + 1);
}

int
uvm_physseg_get_prev(uvm_physseg_t lcv)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv - 1);
}

int
uvm_physseg_get_last(void)
{
	return (vm_nphysseg - 1);
}

int
uvm_physseg_get_first(void)
{
	return 0;
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	int lcv;
	paddr_t last = 0;
	struct uvm_physseg *ps;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps = VM_PHYSMEM_PTR(lcv);
		if (last < ps->end)
			last = ps->end;
	}

	return last;
}


static struct vm_page *
uvm_post_preload_check(void)
{
	int preload, lcv;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
		if (VM_PHYSMEM_PTR(lcv)->pgs)
			break;
	}
	preload = (lcv == vm_nphysmem);

	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		panic("Tried to add RAM after uvm_page_init");
	}

	return NULL;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	if (seg->free_list != freelist) {
		paddrp = NULL;
		return false;
	}

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		seg->avail_start++;
		seg->start++;
		/* nothing left? nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		seg->avail_end--;
		seg->end--;
		/* nothing left? nuke it */
		if (seg->avail_end == seg->start) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		paddrp = NULL;
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);
	seg->avail_start++;
	/* truncate! */
	seg->start = seg->avail_start;

	/* nothing left? nuke it */
	if (seg->avail_start == seg->end) {
		if (vm_nphysmem == 1)
			panic("uvm_page_physget: out of memory!");
		vm_nphysmem--;
		for (x = psi ; x < vm_nphysmem ; x++)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x + 1);
	}
	return (true);
}

bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int lcv;
	struct vm_page *pgs;
	struct uvm_physseg *ps;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	if (uvm_physseg_valid_p(upm)) /* XXX; do we allow "update" plugs ? */
		return false;
#endif

	paddr_t start = pfn;
	paddr_t end = pfn + pages;
	paddr_t avail_start = start;
	paddr_t avail_end = end;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	/*
	 * do we have room?
	 */

	if (vm_nphysmem == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		if (psp != NULL)
			*psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
		return false;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */
	pgs = uvm_post_preload_check();

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = VM_PHYSMEM_PTR(vm_nphysmem);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if (start < VM_PHYSMEM_PTR(lcv)->start)
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if ((end - start) >
			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->pgs = pgs;

	vm_nphysmem++;

	if (psp != NULL)
		*psp = lcv;

	return true;
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX == 1
static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
#else
static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
#endif

/*
 * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
 */
int
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{

#if VM_PHYSSEG_MAX == 1
	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}

#if VM_PHYSSEG_MAX == 1
static inline int
vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{

	/* 'contig' case */
	if (pframe >= segs[0].start && pframe < segs[0].end) {
		if (offp)
			*offp = pframe - segs[0].start;
		return(0);
	}
	return(-1);
}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

static inline int
vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* binary search for it */
	int	start, len, guess;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1). this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
		guess = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= segs[guess].start) {
			/* was try correct? */
			if (pframe < segs[guess].end) {
				if (offp)
					*offp = pframe - segs[guess].start;
				return guess;		/* got it */
			}
			start = guess + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);
}

#else

static inline int
vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < nsegs; lcv++) {
		if (pframe >= segs[lcv].start &&
		    pframe < segs[lcv].end) {
			if (offp)
				*offp = pframe - segs[lcv].start;
			return(lcv);		/* got it */
		}
	}
	return(-1);
}
#endif
#endif /* UVM_HOTPLUG */

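/*
 * uvm_physseg_valid_p: check whether a handle refers to a usable segment.
 */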
bool
uvm_physseg_valid_p(uvm_physseg_t upm)
{
	struct uvm_physseg *ps;

	if (upm == UVM_PHYSSEG_TYPE_INVALID ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
		return false;

	/*
	 * This is part of the delicate boot-time init dance: before
	 * uvm_page_init() is done, segments are considered valid even
	 * though they are only partially set up.
	 */
	if (uvm.page_init_done != true)
		return true;

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	/* Extra checks needed only post uvm_page_init() */
	if (ps->pgs == NULL)
		return false;

	/* XXX: etc. */

	return true;
}

/*
 * Boot protocol dictates that these must be able to return partially
 * initialised segments.
 */
paddr_t
uvm_physseg_get_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->start;
}

paddr_t
uvm_physseg_get_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->end;
}

paddr_t
uvm_physseg_get_avail_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
}

paddr_t
uvm_physseg_get_avail_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
}

struct vm_page *
uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
}

#ifdef __HAVE_PMAP_PHYSSEG
struct pmap_physseg *
uvm_physseg_get_pmseg(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
}
#endif

int
uvm_physseg_get_free_list(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
}

u_int
uvm_physseg_get_start_hint(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
}

bool
uvm_physseg_set_start_hint(uvm_physseg_t upm, u_int start_hint)
{
	if (uvm_physseg_valid_p(upm) == false)
		return false;

	HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
	return true;
}

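/*
 * uvm_physseg_init_seg: attach the given vm_page array to the segment and
 * hand every page within the available range over to the free page pool.
 */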
void
uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
{
	psize_t i;
	psize_t n;
	paddr_t paddr;
	struct uvm_physseg *seg;

	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);
	KASSERT(seg != NULL);
	KASSERT(seg->pgs == NULL);

	n = seg->end - seg->start;
	seg->pgs = pgs;

	/* init and free vm_pages (we've already zeroed them) */
	paddr = ctob(seg->start);
	for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
		seg->pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
		VM_MDPAGE_INIT(&seg->pgs[i]);
#endif
		if (atop(paddr) >= seg->avail_start &&
		    atop(paddr) < seg->avail_end) {
			uvmexp.npages++;
			mutex_enter(&uvm_pageqlock);
			/* add page to free pool */
			uvm_pagefree(&seg->pgs[i]);
			mutex_exit(&uvm_pageqlock);
		}
	}
}

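/*
 * uvm_physseg_seg_chomp_slab: set up an extent(9) arena over the given
 * slab of vm_page structures, so that ranges of it can be handed out to
 * plugs and given back by unplugs. At boot time the extent uses static
 * storage, since kmem(9) is not available yet.
 */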
void
uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
{
	struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);

	/* max number of pre-boot unplug()s allowed */
#define	UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX

	static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];

	if (__predict_false(uvm.page_init_done == false)) {
		seg->ext = extent_create("Boot time slab", (u_long) pgs, (u_long) (pgs + n),
		    (void *)btslab_ex_storage, sizeof(btslab_ex_storage), 0);
	} else {
		seg->ext = extent_create("Hotplug slab", (u_long) pgs, (u_long) (pgs + n), NULL, 0, 0);
	}

	KASSERT(seg->ext != NULL);

}

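/*
 * uvm_physseg_seg_alloc_from_slab: carve out 'pages' worth of vm_page
 * structures from the segment's extent(9) arena. Returns NULL if the
 * arena cannot satisfy the request.
 */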
struct vm_page *
uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
{
	int err;
	struct uvm_physseg *seg;
	struct vm_page *pgs = NULL;

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	KASSERT(pages > 0);

	if (__predict_false(seg->ext == NULL)) {
		/*
		 * This is a situation unique to boot time.
		 * It shouldn't happen at any point other than from
		 * the first uvm_page.c:uvm_page_init() call
		 * Since we're in a loop, we can get away with the
		 * below.
		 */
		KASSERT(uvm.page_init_done != true);

		seg->ext = HANDLE_TO_PHYSSEG_NODE(uvm_physseg_get_prev(upm))->ext;

		KASSERT(seg->ext != NULL);
	}

	/* We allocate enough for this segment */
	err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0, EX_BOUNDZERO, (u_long *)&pgs);

	if (err != 0) {
#ifdef DEBUG
		printf("%s: extent_alloc failed with error: %d\n",
		    __func__, err);
#endif
	}

	return pgs;
}

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

uvm_physseg_t
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
	struct uvm_physseg *ps;
	uvm_physseg_t upm;

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: load attempted after uvm_page_init()\n", __func__);
	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");
	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);
	if (start >= end)
		panic("uvm_page_physload: start >= end");

	if (uvm_physseg_plug(start, end - start, &upm) == false) {
		panic("uvm_physseg_plug() failed at boot.");
		/* NOTREACHED */
		return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
	}

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	/* Legacy */
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->free_list = free_list; /* XXX: */


	return upm;
}

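/*
 * uvm_physseg_unplug: remove the span [pfn, pfn + pages) from the
 * physical memory map. Handles removal of a complete segment as well as
 * truncation at the front or back, and splitting when the span lies in
 * the middle of a segment (rbtree/UVM_HOTPLUG case only).
 */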
bool
uvm_physseg_unplug(paddr_t pfn, size_t pages)
{
	uvm_physseg_t upm;
	paddr_t off = 0, start, end;
	struct uvm_physseg *seg;

	upm = uvm_physseg_find(pfn, &off);

	if (!uvm_physseg_valid_p(upm)) {
		printf("%s: Tried to unplug from unknown offset\n", __func__);
		return false;
	}

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	start = uvm_physseg_get_start(upm);
	end = uvm_physseg_get_end(upm);

	if (end < (pfn + pages)) {
		printf("%s: Tried to unplug oversized span\n", __func__);
		return false;
	}

#ifndef DIAGNOSTIC
	(void) start;
#endif
	KASSERT(pfn == start + off); /* sanity */

	if (__predict_true(uvm.page_init_done == true)) {
		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
		if (extent_free(seg->ext, (u_long)(seg->pgs + off), sizeof(struct vm_page) * pages, EX_MALLOCOK | EX_NOWAIT) != 0)
			return false;
	}

	if (off == 0 && (pfn + pages) == end) {
#if defined(UVM_HOTPLUG) /* rbtree implementation */
		int segcount = 0;
		struct uvm_physseg *current_ps;
		/* Complete segment */
		if (uvm_physseg_graph.nentries == 1)
			panic("%s: out of memory!", __func__);

		if (__predict_true(uvm.page_init_done == true)) {
			RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
				if (seg->ext == current_ps->ext)
					segcount++;
			}
			KASSERT(segcount > 0);

			if (segcount == 1) {
				extent_destroy(seg->ext);
			}

			/*
			 * We assume that the unplug will succeed from
			 * this point onwards
			 */
			uvmexp.npages -= (int) pages;
		}

		rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
		memset(seg, 0, sizeof(struct uvm_physseg));
		uvm_physseg_free(seg, sizeof(struct uvm_physseg));
		uvm_physseg_graph.nentries--;
#else /* UVM_HOTPLUG */
		int x;
		if (vm_nphysmem == 1)
			panic("uvm_page_physget: out of memory!");
		vm_nphysmem--;
		for (x = upm ; x < vm_nphysmem ; x++)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x + 1);
#endif /* UVM_HOTPLUG */
		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
		return true;
	}

	if (off > 0 &&
	    (pfn + pages) < end) {
#if defined(UVM_HOTPLUG) /* rbtree implementation */
		/* middle chunk - need a new segment */
		struct uvm_physseg *ps, *current_ps;
		ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
		if (ps == NULL) {
			printf("%s: Unable to allocate new fragment vm_physseg\n",
			    __func__);
			return false;
		}

		/* Remove middle chunk */
		if (__predict_true(uvm.page_init_done == true)) {
			KASSERT(seg->ext != NULL);
			ps->ext = seg->ext;

			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
			/*
			 * We assume that the unplug will succeed from
			 * this point onwards
			 */
			uvmexp.npages -= (int) pages;
		}

		ps->start = pfn + pages;
		ps->avail_start = ps->start; /* XXX: Legacy */

		ps->end = seg->end;
		ps->avail_end = ps->end; /* XXX: Legacy */

		seg->end = pfn;
		seg->avail_end = seg->end; /* XXX: Legacy */


		/*
		 * The new pgs array points to the beginning of the
		 * tail fragment.
		 */
		if (__predict_true(uvm.page_init_done == true))
			ps->pgs = seg->pgs + off + pages;

		current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
		if (current_ps != ps) {
			panic("uvm_page_physload: Duplicate address range detected!");
		}
		uvm_physseg_graph.nentries++;
#else /* UVM_HOTPLUG */
		panic("%s: can't unplug() from the middle of a segment without "
		    "UVM_HOTPLUG\n", __func__);
		/* NOTREACHED */
#endif /* UVM_HOTPLUG */
		return true;
	}

	if (off == 0 && (pfn + pages) < end) {
		/* Remove front chunk */
		if (__predict_true(uvm.page_init_done == true)) {
			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
			/*
			 * We assume that the unplug will succeed from
			 * this point onwards
			 */
			uvmexp.npages -= (int) pages;
		}

		/* Truncate */
		seg->start = pfn + pages;
		seg->avail_start = seg->start; /* XXX: Legacy */

		/*
		 * Move the pgs array start to the beginning of the
		 * tail end.
		 */
		if (__predict_true(uvm.page_init_done == true))
			seg->pgs += pages;

		return true;
	}

	if (off > 0 && (pfn + pages) == end) {
		/* back chunk */


		/* Truncate! */
		seg->end = pfn;
		seg->avail_end = seg->end; /* XXX: Legacy */

		uvmexp.npages -= (int) pages;

		return true;
	}

	printf("%s: Tried to unplug unknown range\n", __func__);

	return false;
}