/* $NetBSD: uvm_physseg.c,v 1.9.4.1 2020/04/08 14:09:05 martin Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Consolidated API from uvm_page.c and others.
 * Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
 * rbtree(3) backing implementation by:
 * Santhosh N. Raju <santhosh.raju@gmail.com>
 */

#ifdef _KERNEL_OPT
#include "opt_uvm.h"
#endif

#include <sys/param.h>
#include <sys/types.h>
#include <sys/extent.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_physseg.h>

/*
 * uvm_physseg: describes one segment of physical memory
 */
struct uvm_physseg {
	/* used during RB tree lookup for PHYS_TO_VM_PAGE(). */
	struct  rb_node rb_node;	/* tree information */
	paddr_t	start;			/* PF# of first page in segment */
	paddr_t	end;			/* (PF# of last page in segment) + 1 */
	struct	vm_page *pgs;		/* vm_page structures (from start) */

	/* less performance sensitive fields. */
	paddr_t	avail_start;		/* PF# of first free page in segment */
	paddr_t	avail_end;		/* (PF# of last free page in segment) + 1 */
	struct  extent *ext;		/* extent(9) structure to manage pgs[] */
	int	free_list;		/* which free list they belong on */
	u_int	start_hint;		/* start looking for free pages here */
#ifdef __HAVE_PMAP_PHYSSEG
	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};
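
/*
 * Illustrative sketch (not part of the API): for a segment covering
 * page frames [start, end), the vm_page for frame 'pfn' is
 * pgs[pfn - start]. E.g. with hypothetical values start == 0x100 and
 * end == 0x200, frame 0x105 maps to pgs[5].
 */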

/*
 * These functions are reserved for uvm(9) internal use and are not
 * exported in the header file uvm_physseg.h
 *
 * Thus they are declared here.
 */
void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);

/* returns a pgs array */
struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);

#if defined(UVM_HOTPLUG) /* rbtree implementation */

#define		HANDLE_TO_PHYSSEG_NODE(h)	((struct uvm_physseg *)(h))
#define		PHYSSEG_NODE_TO_HANDLE(u)	((uvm_physseg_t)(u))

struct uvm_physseg_graph {
	struct rb_tree rb_tree;		/* Tree for entries */
	int            nentries;	/* Number of entries */
} __aligned(COHERENCY_UNIT);

static struct uvm_physseg_graph uvm_physseg_graph __read_mostly;

/*
 * Note on kmem(9) allocator usage:
 * We take the conservative approach that plug/unplug are allowed to
 * fail in high memory stress situations.
 *
 * We want to avoid re-entrant situations in which one plug/unplug
 * operation is waiting on a previous one to complete, since this
 * makes the design more complicated than necessary.
 *
 * We may review this and change its behaviour, once the use cases
 * become more obvious.
 */

/*
 * Special alloc()/free() functions for boot time support:
 * We assume that alloc() at boot time is only for new 'vm_physseg's.
 * This allows us to use a static array for memory allocation at boot
 * time. Thus we avoid using kmem(9), which is not ready at this point
 * in boot.
 *
 * After kmem(9) is ready, we use it. We currently discard any free()s
 * to this static array, since the size is small enough to be a
 * trivial waste on all architectures we run on.
 */

static size_t nseg = 0;
static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];

static void *
uvm_physseg_alloc(size_t sz)
{
	/*
	 * During boot time, we only support allocating vm_physseg
	 * entries from the static array.
	 * We need to assert for this.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to alloc size other than multiple"
			    " of struct uvm_physseg at boot\n", __func__);

		size_t n = sz / sizeof(struct uvm_physseg);
		nseg += n;

		KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);

		return &uvm_physseg[nseg - n];
	}

	return kmem_zalloc(sz, KM_NOSLEEP);
}
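
/*
 * Example (sketch): boot-time callers must request whole multiples of
 * struct uvm_physseg; anything else panics. E.g.:
 *
 *	struct uvm_physseg *ps;
 *
 *	ps = uvm_physseg_alloc(sizeof(struct uvm_physseg));
 */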

static void
uvm_physseg_free(void *p, size_t sz)
{
	/*
	 * This is a bit tricky. We do allow simulation of free()
	 * during boot (e.g. when MD code is "steal"ing memory,
	 * and the segment has been exhausted and thus needs to be
	 * free()-ed).
	 * free() also complicates things because we leak the
	 * free()d memory. Therefore calling code can't assume that
	 * free()-ed memory is available for alloc() again at boot
	 * time.
	 *
	 * Thus we can't explicitly disallow free()s during
	 * boot time. However, the same restriction as for alloc()
	 * applies to free(). We only allow uvm_physseg related free()s
	 * via this function during boot time.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free size other than struct uvm_physseg"
			    " at boot\n", __func__);

	}

	/*
	 * Could have been in a single if(){} block - split for
	 * clarity
	 */

	if ((struct uvm_physseg *)p >= uvm_physseg &&
	    (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free() other than struct uvm_physseg"
			    " from static array\n", __func__);

		if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
			panic("%s: tried to free() the entire static array!", __func__);
		return; /* Nothing to free */
	}

	kmem_free(p, sz);
}

/* XXX: Multi page size */
bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int preload;
	size_t slabpages;
	struct uvm_physseg *ps, *current_ps = NULL;
	struct vm_page *slab = NULL, *pgs = NULL;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	if (ps != NULL) /* XXX; do we allow "update" plugs ? */
		return false;
#endif

	/*
	 * do we have room?
	 */

	ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
	if (ps == NULL) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
		    VM_PHYSSEG_MAX, pfn, pfn + pages);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return false;
	}

	/* span init */
	ps->start = pfn;
	ps->end = pfn + pages;

	/*
	 * XXX: Ugly hack: uvmexp.npages accounts only for the pages
	 * within the avail range set below - this should be
	 * considered legacy and removed.
	 */

	ps->avail_start = ps->start;
	ps->avail_end = ps->end;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	preload = 1; /* We are going to assume it is a preload */

	RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
		/* If there are non NULL pages then we are not in a preload */
		if (current_ps->pgs != NULL) {
			preload = 0;
			/* Try to scavenge from earlier unplug()s. */
			pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);

			if (pgs != NULL) {
				break;
			}
		}
	}

	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		if (pgs == NULL) { /* Brand new */
			/* Iteratively try alloc down from uvmexp.npages */
			for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
				slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
				if (slab != NULL)
					break;
			}

			if (slab == NULL) {
				uvm_physseg_free(ps, sizeof(struct uvm_physseg));
				return false;
			}

			uvm_physseg_seg_chomp_slab(ps, slab, (size_t) slabpages);
			/* We allocate enough for this plug */
			pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);

			if (pgs == NULL) {
				printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
				return false;
			}
		} else {
			/* Reuse scavenged extent */
			ps->ext = current_ps->ext;
		}

		physmem += pages;
		uvmpdpol_reinit();
	} else { /* Boot time - see uvm_page.c:uvm_page_init() */
		pgs = NULL;
		ps->pgs = pgs;
	}

	/*
	 * now insert us in the proper place in uvm_physseg_graph.rb_tree
	 */

	current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
	if (current_ps != ps) {
		panic("uvm_page_physload: Duplicate address range detected!");
	}
	uvm_physseg_graph.nentries++;

	/*
	 * uvm_pagefree() requires PHYS_TO_VM_PAGE() on each of the
	 * newly allocated pgs[] to return the correct value. This is
	 * a bit of a chicken and egg problem, since it needs
	 * uvm_physseg_find() to succeed. For this, the node needs to
	 * be inserted *before* uvm_physseg_init_seg() happens.
	 *
	 * During boot, this happens anyway, since
	 * uvm_physseg_init_seg() is called later on and separately
	 * from uvm_page.c:uvm_page_init().
	 * In the case of hotplug we need to ensure this.
	 */

	if (__predict_true(!preload))
		uvm_physseg_init_seg(ps, pgs);

	if (psp != NULL)
		*psp = ps;

	return true;
}
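
/*
 * Example (sketch): hotplugging a hypothetical bank of 'npages' pages
 * of RAM discovered at physical address 'pa' by MD code; the names and
 * error handling here are illustrative only:
 *
 *	uvm_physseg_t upm;
 *
 *	if (!uvm_physseg_plug(atop(pa), npages, &upm))
 *		printf("physseg plug failed\n");
 */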

static int
uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
{
	const struct uvm_physseg *enode1 = nnode1;
	const struct uvm_physseg *enode2 = nnode2;

	KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
	KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);

	if (enode1->start < enode2->start)
		return -1;
	if (enode1->start >= enode2->end)
		return 1;
	return 0;
}

static int
uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
{
	const struct uvm_physseg *enode = nnode;
	const paddr_t pa = *(const paddr_t *) pkey;

	if (enode->start <= pa && pa < enode->end)
		return 0;
	if (enode->start < pa)
		return -1;
	if (enode->end > pa)
		return 1;

	return 0;
}

static const rb_tree_ops_t uvm_physseg_tree_ops = {
	.rbto_compare_nodes = uvm_physseg_compare_nodes,
	.rbto_compare_key = uvm_physseg_compare_key,
	.rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
	.rbto_context = NULL
};

/*
 * uvm_physseg_init: init the physmem
 *
 * => physmem unit should not be in use at this point
 */

void
uvm_physseg_init(void)
{
	rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
	uvm_physseg_graph.nentries = 0;
}

uvm_physseg_t
uvm_physseg_get_next(uvm_physseg_t upm)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_RIGHT);
}

uvm_physseg_t
uvm_physseg_get_prev(uvm_physseg_t upm)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_LEFT);
}

uvm_physseg_t
uvm_physseg_get_last(void)
{
	return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
}

uvm_physseg_t
uvm_physseg_get_first(void)
{
	return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	struct uvm_physseg *ps =
	    (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));

	return ps->end - 1;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	if (__predict_false(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (seg->free_list != freelist) {
		paddrp = NULL;
		return false;
	}

	/*
	 * During cold boot, what we're about to unplug hasn't been
	 * put on the uvm freelist, nor has uvmexp.npages been
	 * updated. (This happens in uvm_page.c:uvm_page_init())
	 *
	 * For hotplug, we assume that the pages being unloaded
	 * here are completely out of sight of uvm (i.e. not on any uvm
	 * lists), and that uvmexp.npages has been suitably
	 * decremented before we're called.
	 *
	 * XXX: will avail_end == start if avail_start < avail_end?
	 */

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		return uvm_physseg_unplug(seg->avail_start, 1);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		return uvm_physseg_unplug(seg->avail_end - 1, 1);
	}

	return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (__predict_false(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);
	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		paddrp = NULL;
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);

	/* Always unplug from front */
	return uvm_physseg_unplug(seg->avail_start, 1);
}
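
/*
 * Example (sketch): MD boot code "steal"ing a single page from segment
 * 'upm' on free list 'fl'; the names are hypothetical:
 *
 *	paddr_t pa;
 *
 *	if (uvm_page_physunload(upm, fl, &pa))
 *		... the page at 'pa' is now owned by the caller ...
 */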


/*
 * uvm_physseg_find: find the uvm_physseg structure to which a PA belongs
 */
uvm_physseg_t
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{
	struct uvm_physseg *ps = NULL;

	ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);

	if (ps != NULL && offp != NULL)
		*offp = pframe - ps->start;

	return ps;
}
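
/*
 * Example (sketch): a PHYS_TO_VM_PAGE()-style lookup built on top of
 * uvm_physseg_find(); 'pa' is a hypothetical physical address:
 *
 *	psize_t off;
 *	struct vm_page *pg = NULL;
 *	uvm_physseg_t upm = uvm_physseg_find(atop(pa), &off);
 *
 *	if (uvm_physseg_valid_p(upm))
 *		pg = uvm_physseg_get_pg(upm, off);
 */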

#else  /* UVM_HOTPLUG */

/*
 * physical memory config is stored in vm_physmem.
 */

#define	VM_PHYSMEM_PTR(i)	(&vm_physmem[i])
#if VM_PHYSSEG_MAX == 1
#define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
#else
#define VM_PHYSMEM_PTR_SWAP(i, j)					      \
	do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
#endif

#define		HANDLE_TO_PHYSSEG_NODE(h)	(VM_PHYSMEM_PTR((int)h))
#define		PHYSSEG_NODE_TO_HANDLE(u)	((int)((vsize_t) (u - vm_physmem) / sizeof(struct uvm_physseg)))

static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
static int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
#define	vm_nphysmem	vm_nphysseg

void
uvm_physseg_init(void)
{
	/* XXX: Provisioning for rb_tree related init(s) */
	return;
}

int
uvm_physseg_get_next(uvm_physseg_t lcv)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv + 1);
}

int
uvm_physseg_get_prev(uvm_physseg_t lcv)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv - 1);
}

int
uvm_physseg_get_last(void)
{
	return (vm_nphysseg - 1);
}

int
uvm_physseg_get_first(void)
{
	return 0;
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	int lcv;
	paddr_t last = 0;
	struct uvm_physseg *ps;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps = VM_PHYSMEM_PTR(lcv);
		if (last < ps->end - 1)
			last = ps->end - 1;
	}

	return last;
}


static struct vm_page *
uvm_post_preload_check(void)
{
	int preload, lcv;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
		if (VM_PHYSMEM_PTR(lcv)->pgs)
			break;
	}
	preload = (lcv == vm_nphysmem);

	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		panic("Tried to add RAM after uvm_page_init");
	}

	return NULL;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	if (seg->free_list != freelist) {
		paddrp = NULL;
		return false;
	}

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		seg->avail_start++;
		seg->start++;
		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		seg->avail_end--;
		seg->end--;
		/* nothing left?   nuke it */
		if (seg->avail_end == seg->start) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		paddrp = NULL;
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);
	seg->avail_start++;
	/* truncate! */
	seg->start = seg->avail_start;

	/* nothing left?   nuke it */
	if (seg->avail_start == seg->end) {
		if (vm_nphysmem == 1)
			panic("uvm_page_physget: out of memory!");
		vm_nphysmem--;
		for (x = psi ; x < vm_nphysmem ; x++)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x + 1);
	}
	return (true);
}

bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int lcv;
	struct vm_page *pgs;
	struct uvm_physseg *ps;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	if (uvm_physseg_valid_p(upm)) /* XXX; do we allow "update" plugs ? */
		return false;
#endif

	paddr_t start = pfn;
	paddr_t end = pfn + pages;
	paddr_t avail_start = start;
	paddr_t avail_end = end;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	/*
	 * do we have room?
	 */

	if (vm_nphysmem == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		if (psp != NULL)
			*psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
		return false;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */
	pgs = uvm_post_preload_check();

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = VM_PHYSMEM_PTR(vm_nphysmem);
	lcv = vm_nphysmem;
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if (start < VM_PHYSMEM_PTR(lcv)->start)
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if ((end - start) >
			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->pgs = pgs;

	vm_nphysmem++;

	if (psp != NULL)
		*psp = lcv;

	return true;
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX == 1
static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
#else
static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
#endif

/*
 * uvm_physseg_find: find the uvm_physseg structure to which a PA belongs
 */
int
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{

#if VM_PHYSSEG_MAX == 1
	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}

#if VM_PHYSSEG_MAX == 1
static inline int
vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{

	/* 'contig' case */
	if (pframe >= segs[0].start && pframe < segs[0].end) {
		if (offp)
			*offp = pframe - segs[0].start;
		return(0);
	}
	return(-1);
}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

static inline int
vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* binary search for it */
	int	start, len, guess;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */
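	/*
	 * e.g. (illustrative): len == 5 gives round(5/2) - 1 == 3 - 1
	 * == 2 == trunc((5 - 1) / 2); len == 4 gives 2 - 1 == 1
	 * == trunc(3 / 2).
	 */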

	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
		guess = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= segs[guess].start) {
			/* was try correct? */
			if (pframe < segs[guess].end) {
				if (offp)
					*offp = pframe - segs[guess].start;
				return guess;            /* got it */
			}
			start = guess + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);
}

#else

static inline int
vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < nsegs; lcv++) {
		if (pframe >= segs[lcv].start &&
		    pframe < segs[lcv].end) {
			if (offp)
				*offp = pframe - segs[lcv].start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);
}
#endif
#endif /* UVM_HOTPLUG */

bool
uvm_physseg_valid_p(uvm_physseg_t upm)
{
	struct uvm_physseg *ps;

	if (upm == UVM_PHYSSEG_TYPE_INVALID ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
		return false;

	/*
	 * This is part of the delicate boot-time init dance: before
	 * uvm_page_init() has completed, a segment may be valid even
	 * though its pgs[] has not been set up yet.
	 */
	if (uvm.page_init_done != true)
		return true;

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	/* Extra checks needed only post uvm_page_init() */
	if (ps->pgs == NULL)
		return false;

	/* XXX: etc. */

	return true;
}

/*
 * Boot protocol dictates that these must be able to return partially
 * initialised segments.
 */
paddr_t
uvm_physseg_get_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->start;
}

paddr_t
uvm_physseg_get_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->end;
}

paddr_t
uvm_physseg_get_avail_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
}

#if defined(UVM_PHYSSEG_LEGACY)
void
uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_end;
	avail_end = uvm_physseg_get_avail_end(upm);
	KASSERT(uvm_physseg_valid_p(upm));
	KASSERT(avail_start < avail_end && avail_start >= ps->start);
#endif

	ps->avail_start = avail_start;
}

void
uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_start;
	avail_start = uvm_physseg_get_avail_start(upm);
	KASSERT(uvm_physseg_valid_p(upm));
	KASSERT(avail_end > avail_start && avail_end <= ps->end);
#endif

	ps->avail_end = avail_end;
}

#endif /* UVM_PHYSSEG_LEGACY */
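
/*
 * Example (sketch, UVM_PHYSSEG_LEGACY only): MD boot code that wants
 * to reserve the first available page of an otherwise valid segment
 * 'upm' could do:
 *
 *	uvm_physseg_set_avail_start(upm,
 *	    uvm_physseg_get_avail_start(upm) + 1);
 */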
   1041      1.5    cherry 
   1042      1.1    cherry paddr_t
   1043      1.1    cherry uvm_physseg_get_avail_end(uvm_physseg_t upm)
   1044      1.1    cherry {
   1045      1.2    cherry 	if (uvm_physseg_valid_p(upm) == false)
   1046      1.1    cherry 		return (paddr_t) -1;
   1047      1.1    cherry 
   1048      1.1    cherry 	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
   1049      1.1    cherry }
   1050      1.1    cherry 
   1051      1.1    cherry struct vm_page *
   1052      1.1    cherry uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
   1053      1.1    cherry {
   1054      1.2    cherry 	KASSERT(uvm_physseg_valid_p(upm));
   1055      1.1    cherry 	return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
   1056      1.1    cherry }
   1057      1.1    cherry 
   1058      1.1    cherry #ifdef __HAVE_PMAP_PHYSSEG
   1059      1.1    cherry struct pmap_physseg *
   1060      1.1    cherry uvm_physseg_get_pmseg(uvm_physseg_t upm)
   1061      1.1    cherry {
   1062      1.2    cherry 	KASSERT(uvm_physseg_valid_p(upm));
   1063      1.1    cherry 	return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
   1064      1.1    cherry }
   1065      1.1    cherry #endif
   1066      1.1    cherry 
   1067      1.1    cherry int
   1068      1.1    cherry uvm_physseg_get_free_list(uvm_physseg_t upm)
   1069      1.1    cherry {
   1070      1.2    cherry 	KASSERT(uvm_physseg_valid_p(upm));
   1071      1.1    cherry 	return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
   1072      1.1    cherry }
   1073      1.1    cherry 
   1074      1.1    cherry u_int
   1075      1.1    cherry uvm_physseg_get_start_hint(uvm_physseg_t upm)
   1076      1.1    cherry {
   1077      1.2    cherry 	KASSERT(uvm_physseg_valid_p(upm));
   1078      1.1    cherry 	return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
   1079      1.1    cherry }
   1080      1.1    cherry 
   1081      1.1    cherry bool
   1082      1.1    cherry uvm_physseg_set_start_hint(uvm_physseg_t upm, u_int start_hint)
   1083      1.1    cherry {
   1084      1.2    cherry 	if (uvm_physseg_valid_p(upm) == false)
   1085      1.1    cherry 		return false;
   1086      1.1    cherry 
   1087      1.1    cherry 	HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
   1088      1.1    cherry 	return true;
   1089      1.1    cherry }
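
                            /*
                             * The start hint is a scan cursor: a page-allocation loop over
                             * a segment can record how far its last search got, so the next
                             * search resumes there instead of at avail_start.  A sketch of
                             * the pattern (the scan itself is assumed):
                             *
                             *	u_int hint = uvm_physseg_get_start_hint(bank);
                             *	(scan for free pages from avail_start + hint, wrapping
                             *	 around before avail_end)
                             *	uvm_physseg_set_start_hint(bank, where_the_scan_stopped);
                             */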
   1090      1.1    cherry 
   1091      1.1    cherry void
   1092      1.1    cherry uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
   1093      1.1    cherry {
   1094      1.1    cherry 	psize_t i;
   1095      1.1    cherry 	psize_t n;
   1096      1.1    cherry 	paddr_t paddr;
   1097      1.1    cherry 	struct uvm_physseg *seg;
   1098  1.9.4.1    martin 	struct vm_page *pg;
   1099      1.1    cherry 
   1100      1.1    cherry 	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);
   1101      1.1    cherry 
   1102      1.1    cherry 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
   1103      1.1    cherry 	KASSERT(seg != NULL);
   1104      1.1    cherry 	KASSERT(seg->pgs == NULL);
   1105      1.1    cherry 
   1106      1.1    cherry 	n = seg->end - seg->start;
   1107      1.1    cherry 	seg->pgs = pgs;
   1108      1.1    cherry 
   1109      1.1    cherry 	/* init and free vm_pages (we've already zeroed them) */
   1110      1.1    cherry 	paddr = ctob(seg->start);
   1111      1.1    cherry 	for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
   1112  1.9.4.1    martin 		pg = &seg->pgs[i];
   1113  1.9.4.1    martin 		pg->phys_addr = paddr;
   1114      1.1    cherry #ifdef __HAVE_VM_PAGE_MD
   1115  1.9.4.1    martin 		VM_MDPAGE_INIT(pg);
   1116      1.1    cherry #endif
   1117      1.1    cherry 		if (atop(paddr) >= seg->avail_start &&
   1118      1.1    cherry 		    atop(paddr) < seg->avail_end) {
   1119      1.1    cherry 			uvmexp.npages++;
   1120      1.1    cherry 			/* add page to free pool */
   1121  1.9.4.1    martin 			uvm_page_set_freelist(pg,
   1122  1.9.4.1    martin 			    uvm_page_lookup_freelist(pg));
   1123  1.9.4.1    martin 			/* Disable LOCKDEBUG: too many and too early. */
   1124  1.9.4.1    martin 			mutex_init(&pg->interlock, MUTEX_NODEBUG, IPL_NONE);
   1125  1.9.4.1    martin 			uvm_pagefree(pg);
   1126      1.1    cherry 		}
   1127      1.1    cherry 	}
   1128      1.1    cherry }
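
                            /*
                             * Units in the loop above: seg->start and seg->end are page
                             * frame numbers, so ctob(seg->start) yields the first byte
                             * address and each iteration advances paddr by one page.  For
                             * example, assuming a 4 KiB page size, a segment starting at
                             * PFN 0x100 begins at physical address 0x100000, and
                             * atop(0x100000) == 0x100 recovers the PFN that is compared
                             * against avail_start/avail_end.
                             */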
   1129      1.1    cherry 
   1130      1.1    cherry void
   1131      1.1    cherry uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
   1132      1.1    cherry {
   1133      1.1    cherry 	struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);
   1134      1.1    cherry 
   1135      1.1    cherry 	/* max number of pre-boot unplug()s allowed */
   1136      1.1    cherry #define UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX
   1137      1.1    cherry 
   1138      1.1    cherry 	static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];
   1139      1.1    cherry 
   1140      1.1    cherry 	if (__predict_false(uvm.page_init_done == false)) {
    1141      1.1    cherry 		seg->ext = extent_create("Boot time slab", (u_long) pgs,
    1142      1.1    cherry 		    (u_long) (pgs + n), (void *)btslab_ex_storage,
    1142      1.1    cherry 		    sizeof(btslab_ex_storage), 0);
    1143      1.1    cherry 	} else {
    1144      1.1    cherry 		seg->ext = extent_create("Hotplug slab", (u_long) pgs,
    1144      1.1    cherry 		    (u_long) (pgs + n), NULL, 0, 0);
   1145      1.1    cherry 	}
   1146      1.1    cherry 
   1147      1.1    cherry 	KASSERT(seg->ext != NULL);
    1149      1.1    cherry }
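
                            /*
                             * The extent created above arbitrates the address range of the
                             * vm_page slab itself: [pgs, pgs + n), i.e. n * sizeof(struct
                             * vm_page) bytes, so later allocations are carved out of that
                             * range in units of whole vm_page structures; see the sketch
                             * after uvm_physseg_seg_alloc_from_slab() below.
                             */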
   1150      1.1    cherry 
   1151      1.1    cherry struct vm_page *
   1152      1.1    cherry uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
   1153      1.1    cherry {
   1154      1.1    cherry 	int err;
   1155      1.1    cherry 	struct uvm_physseg *seg;
   1156      1.1    cherry 	struct vm_page *pgs = NULL;
   1157      1.1    cherry 
   1158      1.9  christos 	KASSERT(pages > 0);
   1159      1.9  christos 
   1160      1.1    cherry 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
   1161      1.1    cherry 
   1162      1.1    cherry 	if (__predict_false(seg->ext == NULL)) {
    1163      1.1    cherry 		/*
    1164      1.1    cherry 		 * This is a situation unique to boot time.
    1165      1.1    cherry 		 * It shouldn't happen at any point other than the
    1166      1.1    cherry 		 * first uvm_page.c:uvm_page_init() call.  Since that
    1167      1.1    cherry 		 * call iterates over every segment, we can get away
    1168      1.1    cherry 		 * with borrowing the previous segment's extent below.
    1169      1.1    cherry 		 */
   1170      1.1    cherry 		KASSERT(uvm.page_init_done != true);
   1171      1.1    cherry 
   1172      1.9  christos 		uvm_physseg_t upmp = uvm_physseg_get_prev(upm);
   1173      1.9  christos 		KASSERT(upmp != UVM_PHYSSEG_TYPE_INVALID);
   1174      1.9  christos 
   1175      1.9  christos 		seg->ext = HANDLE_TO_PHYSSEG_NODE(upmp)->ext;
   1176      1.1    cherry 
   1177      1.1    cherry 		KASSERT(seg->ext != NULL);
   1178      1.1    cherry 	}
   1179      1.1    cherry 
    1180      1.1    cherry 	/* Carve this request's vm_page structures out of the slab */
    1181      1.1    cherry 	err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0,
    1181      1.1    cherry 	    EX_BOUNDZERO, (u_long *)&pgs);
   1182      1.1    cherry 
   1183      1.1    cherry 	if (err != 0) {
   1184      1.1    cherry #ifdef DEBUG
    1185      1.1    cherry 		printf("%s: extent_alloc failed with error: %d\n",
   1186      1.1    cherry 		    __func__, err);
   1187      1.1    cherry #endif
   1188      1.1    cherry 	}
   1189      1.1    cherry 
   1190      1.1    cherry 	return pgs;
   1191      1.1    cherry }
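
                            /*
                             * A minimal sketch of how the slab routines fit together when a
                             * new segment is plugged (this mirrors what uvm_physseg_plug()
                             * is expected to do; "slab" and "npages" are assumptions of the
                             * sketch):
                             *
                             *	uvm_physseg_seg_chomp_slab(upm, slab, npages);
                             *	struct vm_page *pgs =
                             *	    uvm_physseg_seg_alloc_from_slab(upm, npages);
                             *	uvm_physseg_init_seg(upm, pgs);
                             */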
   1192      1.1    cherry 
   1193      1.1    cherry /*
   1194      1.1    cherry  * uvm_page_physload: load physical memory into VM system
   1195      1.1    cherry  *
   1196      1.1    cherry  * => all args are PFs
   1197      1.1    cherry  * => all pages in start/end get vm_page structures
   1198      1.1    cherry  * => areas marked by avail_start/avail_end get added to the free page pool
   1199      1.1    cherry  * => we are limited to VM_PHYSSEG_MAX physical memory segments
   1200      1.1    cherry  */
   1201      1.1    cherry 
   1202      1.1    cherry uvm_physseg_t
   1203      1.1    cherry uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
   1204      1.1    cherry     paddr_t avail_end, int free_list)
   1205      1.1    cherry {
   1206      1.1    cherry 	struct uvm_physseg *ps;
   1207      1.1    cherry 	uvm_physseg_t upm;
   1208      1.1    cherry 
   1209      1.1    cherry 	if (__predict_true(uvm.page_init_done == true))
    1210      1.1    cherry 		panic("%s: load attempted after uvm_page_init()\n", __func__);
   1211      1.1    cherry 	if (uvmexp.pagesize == 0)
   1212      1.1    cherry 		panic("uvm_page_physload: page size not set!");
   1213      1.1    cherry 	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
   1214      1.1    cherry 		panic("uvm_page_physload: bad free list %d", free_list);
   1215      1.1    cherry 	if (start >= end)
   1216      1.1    cherry 		panic("uvm_page_physload: start >= end");
   1217      1.1    cherry 
   1218      1.1    cherry 	if (uvm_physseg_plug(start, end - start, &upm) == false) {
   1219      1.1    cherry 		panic("uvm_physseg_plug() failed at boot.");
   1220      1.1    cherry 		/* NOTREACHED */
   1221      1.1    cherry 		return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
   1222      1.1    cherry 	}
   1223      1.1    cherry 
   1224      1.1    cherry 	ps = HANDLE_TO_PHYSSEG_NODE(upm);
   1225      1.1    cherry 
   1226      1.1    cherry 	/* Legacy */
   1227      1.1    cherry 	ps->avail_start = avail_start;
   1228      1.1    cherry 	ps->avail_end = avail_end;
   1229      1.1    cherry 
   1230      1.1    cherry 	ps->free_list = free_list; /* XXX: */
    1231      1.1    cherry 
   1233      1.1    cherry 	return upm;
   1234      1.1    cherry }
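
                            /*
                             * A minimal usage sketch for uvm_page_physload(), of the sort
                             * an MD pmap_bootstrap() might contain.  The firmware-provided
                             * range and the symbols "ram_start", "ram_end" and
                             * "first_avail" are assumptions of the sketch; all arguments
                             * are page frame numbers:
                             *
                             *	uvm_page_physload(atop(ram_start), atop(ram_end),
                             *	    atop(first_avail), atop(ram_end), VM_FREELIST_DEFAULT);
                             */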
   1235      1.1    cherry 
   1236      1.1    cherry bool
   1237      1.1    cherry uvm_physseg_unplug(paddr_t pfn, size_t pages)
   1238      1.1    cherry {
   1239      1.1    cherry 	uvm_physseg_t upm;
   1240      1.8  riastrad 	paddr_t off = 0, start __diagused, end;
   1241      1.1    cherry 	struct uvm_physseg *seg;
   1242      1.1    cherry 
   1243      1.1    cherry 	upm = uvm_physseg_find(pfn, &off);
   1244      1.1    cherry 
   1245      1.2    cherry 	if (!uvm_physseg_valid_p(upm)) {
   1246      1.1    cherry 		printf("%s: Tried to unplug from unknown offset\n", __func__);
   1247      1.1    cherry 		return false;
   1248      1.1    cherry 	}
   1249      1.1    cherry 
   1250      1.1    cherry 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
   1251      1.1    cherry 
   1252      1.1    cherry 	start = uvm_physseg_get_start(upm);
   1253      1.1    cherry 	end = uvm_physseg_get_end(upm);
   1254      1.1    cherry 
   1255      1.1    cherry 	if (end < (pfn + pages)) {
    1256      1.1    cherry 		printf("%s: Tried to unplug oversized span\n", __func__);
   1257      1.1    cherry 		return false;
   1258      1.1    cherry 	}
   1259      1.1    cherry 
   1260      1.1    cherry 	KASSERT(pfn == start + off); /* sanity */
   1261      1.1    cherry 
   1262      1.1    cherry 	if (__predict_true(uvm.page_init_done == true)) {
   1263      1.1    cherry 		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
    1264      1.1    cherry 		if (extent_free(seg->ext, (u_long)(seg->pgs + off),
    1264      1.1    cherry 		    sizeof(struct vm_page) * pages,
    1264      1.1    cherry 		    EX_MALLOCOK | EX_NOWAIT) != 0)
   1265      1.1    cherry 			return false;
   1266      1.1    cherry 	}
   1267      1.1    cherry 
   1268      1.1    cherry 	if (off == 0 && (pfn + pages) == end) {
   1269      1.1    cherry #if defined(UVM_HOTPLUG) /* rbtree implementation */
   1270      1.1    cherry 		int segcount = 0;
   1271      1.1    cherry 		struct uvm_physseg *current_ps;
   1272      1.1    cherry 		/* Complete segment */
   1273      1.1    cherry 		if (uvm_physseg_graph.nentries == 1)
   1274      1.1    cherry 			panic("%s: out of memory!", __func__);
   1275      1.1    cherry 
   1276      1.1    cherry 		if (__predict_true(uvm.page_init_done == true)) {
   1277      1.1    cherry 			RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
   1278      1.1    cherry 				if (seg->ext == current_ps->ext)
   1279      1.1    cherry 					segcount++;
   1280      1.1    cherry 			}
   1281      1.1    cherry 			KASSERT(segcount > 0);
   1282      1.1    cherry 
   1283      1.1    cherry 			if (segcount == 1) {
   1284      1.1    cherry 				extent_destroy(seg->ext);
   1285      1.1    cherry 			}
   1286      1.1    cherry 
   1287      1.1    cherry 			/*
   1288      1.1    cherry 			 * We assume that the unplug will succeed from
   1289      1.1    cherry 			 *  this point onwards
   1290      1.1    cherry 			 */
   1291      1.1    cherry 			uvmexp.npages -= (int) pages;
   1292      1.1    cherry 		}
   1293      1.1    cherry 
   1294      1.1    cherry 		rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
   1295      1.1    cherry 		memset(seg, 0, sizeof(struct uvm_physseg));
   1296      1.1    cherry 		uvm_physseg_free(seg, sizeof(struct uvm_physseg));
   1297      1.1    cherry 		uvm_physseg_graph.nentries--;
   1298      1.1    cherry #else /* UVM_HOTPLUG */
   1299      1.1    cherry 		int x;
   1300      1.1    cherry 		if (vm_nphysmem == 1)
    1301      1.1    cherry 			panic("%s: out of memory!", __func__);
   1302      1.1    cherry 		vm_nphysmem--;
   1303      1.1    cherry 		for (x = upm ; x < vm_nphysmem ; x++)
   1304      1.1    cherry 			/* structure copy */
   1305      1.1    cherry 			VM_PHYSMEM_PTR_SWAP(x, x + 1);
   1306      1.1    cherry #endif /* UVM_HOTPLUG */
   1307      1.1    cherry 		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
   1308      1.1    cherry 		return true;
   1309      1.1    cherry 	}
   1310      1.1    cherry 
    1311      1.1    cherry 	if (off > 0 && (pfn + pages) < end) {
   1313      1.1    cherry #if defined(UVM_HOTPLUG) /* rbtree implementation */
   1314      1.1    cherry 		/* middle chunk - need a new segment */
   1315      1.1    cherry 		struct uvm_physseg *ps, *current_ps;
   1316      1.1    cherry 		ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
   1317      1.1    cherry 		if (ps == NULL) {
    1318      1.1    cherry 			printf("%s: Unable to allocate new fragment vm_physseg\n",
    1319      1.1    cherry 			    __func__);
   1320      1.1    cherry 			return false;
   1321      1.1    cherry 		}
   1322      1.1    cherry 
   1323      1.1    cherry 		/* Remove middle chunk */
   1324      1.1    cherry 		if (__predict_true(uvm.page_init_done == true)) {
   1325      1.1    cherry 			KASSERT(seg->ext != NULL);
   1326      1.1    cherry 			ps->ext = seg->ext;
   1327      1.1    cherry 
   1328      1.1    cherry 			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
   1329      1.1    cherry 			/*
   1330      1.1    cherry 			 * We assume that the unplug will succeed from
   1331      1.1    cherry 			 *  this point onwards
   1332      1.1    cherry 			 */
   1333      1.1    cherry 			uvmexp.npages -= (int) pages;
   1334      1.1    cherry 		}
   1335      1.1    cherry 
   1336      1.1    cherry 		ps->start = pfn + pages;
   1337      1.1    cherry 		ps->avail_start = ps->start; /* XXX: Legacy */
   1338      1.1    cherry 
   1339      1.1    cherry 		ps->end = seg->end;
   1340      1.1    cherry 		ps->avail_end = ps->end; /* XXX: Legacy */
   1341      1.1    cherry 
   1342      1.1    cherry 		seg->end = pfn;
   1343      1.1    cherry 		seg->avail_end = seg->end; /* XXX: Legacy */
    1344      1.1    cherry 
   1346      1.1    cherry 		/*
   1347      1.1    cherry 		 * The new pgs array points to the beginning of the
   1348      1.1    cherry 		 * tail fragment.
   1349      1.1    cherry 		 */
   1350      1.1    cherry 		if (__predict_true(uvm.page_init_done == true))
   1351      1.1    cherry 			ps->pgs = seg->pgs + off + pages;
   1352      1.1    cherry 
   1353      1.1    cherry 		current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
   1354      1.1    cherry 		if (current_ps != ps) {
    1355      1.1    cherry 			panic("%s: Duplicate address range detected!", __func__);
   1356      1.1    cherry 		}
   1357      1.1    cherry 		uvm_physseg_graph.nentries++;
   1358      1.1    cherry #else /* UVM_HOTPLUG */
   1359      1.1    cherry 		panic("%s: can't unplug() from the middle of a segment without"
   1360      1.7       uwe 		    " UVM_HOTPLUG\n",  __func__);
   1361      1.1    cherry 		/* NOTREACHED */
   1362      1.1    cherry #endif /* UVM_HOTPLUG */
   1363      1.1    cherry 		return true;
   1364      1.1    cherry 	}
   1365      1.1    cherry 
   1366      1.1    cherry 	if (off == 0 && (pfn + pages) < end) {
   1367      1.1    cherry 		/* Remove front chunk */
   1368      1.1    cherry 		if (__predict_true(uvm.page_init_done == true)) {
   1369      1.1    cherry 			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
   1370      1.1    cherry 			/*
   1371      1.1    cherry 			 * We assume that the unplug will succeed from
   1372      1.1    cherry 			 *  this point onwards
   1373      1.1    cherry 			 */
   1374      1.1    cherry 			uvmexp.npages -= (int) pages;
   1375      1.1    cherry 		}
   1376      1.1    cherry 
   1377      1.1    cherry 		/* Truncate */
   1378      1.1    cherry 		seg->start = pfn + pages;
   1379      1.1    cherry 		seg->avail_start = seg->start; /* XXX: Legacy */
   1380      1.1    cherry 
   1381      1.1    cherry 		/*
   1382      1.1    cherry 		 * Move the pgs array start to the beginning of the
   1383      1.1    cherry 		 * tail end.
   1384      1.1    cherry 		 */
   1385      1.1    cherry 		if (__predict_true(uvm.page_init_done == true))
   1386      1.1    cherry 			seg->pgs += pages;
   1387      1.1    cherry 
   1388      1.1    cherry 		return true;
   1389      1.1    cherry 	}
   1390      1.1    cherry 
    1391      1.1    cherry 	if (off > 0 && (pfn + pages) == end) {
    1392      1.1    cherry 		/* back chunk */
    1393      1.1    cherry 
    1394      1.1    cherry 		/* Truncate! */
    1395      1.1    cherry 		seg->end = pfn;
    1396      1.1    cherry 		seg->avail_end = seg->end; /* XXX: Legacy */
    1397      1.1    cherry 
    1398      1.1    cherry 		if (__predict_true(uvm.page_init_done == true))
    1399      1.1    cherry 			uvmexp.npages -= (int) pages;
    1400      1.1    cherry 
    1401      1.1    cherry 		return true;
    1402      1.1    cherry 	}
   1403      1.1    cherry 
    1404      1.1    cherry 	printf("%s: Tried to unplug unknown range\n", __func__);
   1405      1.1    cherry 
   1406      1.1    cherry 	return false;
   1407      1.1    cherry }
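
                            /*
                             * Summary of the four unplug geometries handled above, for a
                             * segment spanning [start, end) and a request
                             * [pfn, pfn + pages), where off == pfn - start:
                             *
                             *	whole segment:	pfn == start, pfn + pages == end
                             *			-> remove the segment outright
                             *	middle chunk:	start < pfn, pfn + pages < end
                             *			-> truncate the original and insert a
                             *			   new tail segment (UVM_HOTPLUG only)
                             *	front chunk:	pfn == start, pfn + pages < end
                             *			-> advance seg->start (and pgs)
                             *	back chunk:	start < pfn, pfn + pages == end
                             *			-> truncate seg->end
                             */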
   1408