/* $NetBSD: uvm_physseg.c,v 1.13 2019/12/21 14:41:44 ad Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Consolidated API from uvm_page.c and others.
 * Consolidated and designed by Cherry G. Mathew <cherry (at) zyx.in>
 * rbtree(3) backing implementation by:
 * Santhosh N. Raju <santhosh.raju (at) gmail.com>
 */

#ifdef _KERNEL_OPT
#include "opt_uvm.h"
#endif

#include <sys/param.h>
#include <sys/types.h>
#include <sys/extent.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_physseg.h>

/*
 * uvm_physseg: describes one segment of physical memory
 */
struct uvm_physseg {
	struct rb_node rb_node;		/* tree information */
	paddr_t start;			/* PF# of first page in segment */
	paddr_t end;			/* (PF# of last page in segment) + 1 */
	paddr_t avail_start;		/* PF# of first free page in segment */
	paddr_t avail_end;		/* (PF# of last free page in segment) + 1 */
	struct vm_page *pgs;		/* vm_page structures (from start) */
	struct extent *ext;		/* extent(9) structure to manage pgs[] */
	int free_list;			/* which free list they belong on */
	u_int start_hint;		/* start looking for free pages here */
#ifdef __HAVE_PMAP_PHYSSEG
	struct pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};

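/*
 * Example (sketch, not from the original source; 'seg' and 'pa' are
 * hypothetical): all fields above are in units of page frames, so a
 * segment holds (end - start) pages and the vm_page for a physical
 * address 'pa' that falls inside segment 'seg' is:
 *
 *	struct vm_page *pg = &seg->pgs[atop(pa) - seg->start];
 */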

/*
 * These functions are reserved for uvm(9) internal use and are not
 * exported in the header file uvm_physseg.h
 *
 * Thus they are redefined here.
 */
void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);

/* returns a pgs array */
struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);

#if defined(UVM_HOTPLUG) /* rbtree implementation */

#define	HANDLE_TO_PHYSSEG_NODE(h)	((struct uvm_physseg *)(h))
#define	PHYSSEG_NODE_TO_HANDLE(u)	((uvm_physseg_t)(u))

struct uvm_physseg_graph {
	struct rb_tree rb_tree;		/* Tree for entries */
	int nentries;			/* Number of entries */
};

static struct uvm_physseg_graph uvm_physseg_graph;

/*
 * Note on kmem(9) allocator usage:
 * We take the conservative approach that plug/unplug are allowed to
 * fail in high memory stress situations.
 *
 * We want to avoid re-entrant situations in which one plug/unplug
 * operation is waiting on a previous one to complete, since this
 * makes the design more complicated than necessary.
 *
 * We may review this and change its behaviour, once the use cases
 * become more obvious.
 */

/*
 * Special alloc()/free() functions for boot time support:
 * We assume that alloc() at boot time is only for new 'vm_physseg's.
 * This allows us to use a static array for memory allocation at boot
 * time. Thus we avoid using kmem(9), which is not ready at this point
 * in boot.
 *
 * After kmem(9) is ready, we use it. We currently discard any free()s
 * to this static array, since the size is small enough to be a
 * trivial waste on all architectures we run on.
 */

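/*
 * Sketch of the resulting behaviour (hypothetical request size): at
 * boot, a call such as
 *
 *	ps = uvm_physseg_alloc(2 * sizeof(struct uvm_physseg));
 *
 * hands out the next two unused slots of the static uvm_physseg[]
 * array below; once uvm.page_init_done is set, the same call is
 * serviced by kmem_zalloc(sz, KM_NOSLEEP) and may thus return NULL.
 */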

static size_t nseg = 0;
static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];

static void *
uvm_physseg_alloc(size_t sz)
{
	/*
	 * During boot time, we only support allocating vm_physseg
	 * entries from the static array.
	 * We need to assert for this.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to alloc size other than multiple"
			    " of struct uvm_physseg at boot\n", __func__);

		size_t n = sz / sizeof(struct uvm_physseg);
		nseg += n;

		KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);

		return &uvm_physseg[nseg - n];
	}

	return kmem_zalloc(sz, KM_NOSLEEP);
}

static void
uvm_physseg_free(void *p, size_t sz)
{
	/*
	 * This is a bit tricky. We do allow simulation of free()
	 * during boot (e.g. when MD code is "steal"ing memory, and
	 * the segment has been exhausted and thus needs to be
	 * free()-ed).
	 * free() also complicates things because we leak the
	 * free()-ed memory. Therefore calling code can't assume that
	 * free()-ed memory is available for alloc() again, at boot
	 * time.
	 *
	 * Thus we can't explicitly disallow free()s during
	 * boot time. However, the same restriction for alloc()
	 * applies to free(). We only allow uvm_physseg related free()s
	 * via this function during boot time.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free size other than multiple"
			    " of struct uvm_physseg at boot\n", __func__);
	}

	/*
	 * Could have been in a single if(){} block - split for
	 * clarity
	 */

	if ((struct uvm_physseg *)p >= uvm_physseg &&
	    (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free() other than struct uvm_physseg"
			    " from static array\n", __func__);

		if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
			panic("%s: tried to free() the entire static array!", __func__);
		return; /* Nothing to free */
	}

	kmem_free(p, sz);
}

/* XXX: Multi page size */
bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int preload;
	size_t slabpages;
	struct uvm_physseg *ps, *current_ps = NULL;
	struct vm_page *slab = NULL, *pgs = NULL;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	if (ps != NULL) /* XXX; do we allow "update" plugs ? */
		return false;
#endif

	/*
	 * do we have room?
	 */

	ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
	if (ps == NULL) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
		    VM_PHYSSEG_MAX, pfn, pfn + pages);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return false;
	}

	/* span init */
	ps->start = pfn;
	ps->end = pfn + pages;

	/*
	 * XXX: Ugly hack because uvmexp.npages accounts for only
	 * those pages in the segment included below as well - this
	 * should be legacy and removed.
	 */

	ps->avail_start = ps->start;
	ps->avail_end = ps->end;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	preload = 1; /* We are going to assume it is a preload */

	RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
		/* If there are non NULL pages then we are not in a preload */
		if (current_ps->pgs != NULL) {
			preload = 0;
			/* Try to scavenge from earlier unplug()s. */
			pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);

			if (pgs != NULL) {
				break;
			}
		}
	}

	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		if (pgs == NULL) { /* Brand new */
			/* Iteratively try alloc down from uvmexp.npages */
			for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
				slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
				if (slab != NULL)
					break;
			}

			if (slab == NULL) {
				uvm_physseg_free(ps, sizeof(struct uvm_physseg));
				return false;
			}

			uvm_physseg_seg_chomp_slab(ps, slab, (size_t) slabpages);
			/* We allocate enough for this plug */
			pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);

			if (pgs == NULL) {
				printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
				return false;
			}
		} else {
			/* Reuse scavenged extent */
			ps->ext = current_ps->ext;
		}

		physmem += pages;
		uvmpdpol_reinit();
	} else { /* Boot time - see uvm_page.c:uvm_page_init() */
		pgs = NULL;
		ps->pgs = pgs;
	}

	/*
	 * now insert us in the proper place in uvm_physseg_graph.rb_tree
	 */

	current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
	if (current_ps != ps) {
		panic("uvm_page_physload: Duplicate address range detected!");
	}
	uvm_physseg_graph.nentries++;

	/*
	 * uvm_pagefree() requires the PHYS_TO_VM_PAGE(pgs[i]) on the
	 * newly allocated pgs[] to return the correct value. This is
	 * a bit of a chicken and egg problem, since it needs
	 * uvm_physseg_find() to succeed. For this, the node needs to
	 * be inserted *before* uvm_physseg_init_seg() happens.
	 *
	 * During boot, this happens anyway, since
	 * uvm_physseg_init_seg() is called later on and separately
	 * from uvm_page.c:uvm_page_init().
	 * In the case of hotplug we need to ensure this.
	 */

	if (__predict_true(!preload))
		uvm_physseg_init_seg(ps, pgs);

	if (psp != NULL)
		*psp = ps;

	return true;
}

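/*
 * Example usage (sketch; 'pfn', 'npages' and the error path are
 * hypothetical MD code): per the kmem(9) note above, callers must
 * tolerate failure under memory pressure:
 *
 *	uvm_physseg_t upm;
 *
 *	if (uvm_physseg_plug(pfn, npages, &upm) == false)
 *		return ENOMEM;	(table full, or allocation failed)
 */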

static int
uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
{
	const struct uvm_physseg *enode1 = nnode1;
	const struct uvm_physseg *enode2 = nnode2;

	KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
	KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);

	if (enode1->start < enode2->start)
		return -1;
	if (enode1->start >= enode2->end)
		return 1;
	return 0;
}

static int
uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
{
	const struct uvm_physseg *enode = nnode;
	const paddr_t pa = *(const paddr_t *) pkey;

	if (enode->start <= pa && pa < enode->end)
		return 0;
	if (enode->start < pa)
		return -1;
	if (enode->end > pa)
		return 1;

	return 0;
}

static const rb_tree_ops_t uvm_physseg_tree_ops = {
	.rbto_compare_nodes = uvm_physseg_compare_nodes,
	.rbto_compare_key = uvm_physseg_compare_key,
	.rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
	.rbto_context = NULL
};

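/*
 * Sketch of the lookup semantics (hypothetical frame numbers): the
 * key comparator above treats a key as "equal" to a node whenever the
 * frame falls inside [start, end), so for a segment covering frames
 * [0x100, 0x200):
 *
 *	paddr_t pfn = 0x180;
 *	ps = rb_tree_find_node(&uvm_physseg_graph.rb_tree, &pfn);
 *
 * yields that segment; uvm_physseg_find() below relies on this.
 */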

/*
 * uvm_physseg_init: init the physmem
 *
 * => physmem unit should not be in use at this point
 */

void
uvm_physseg_init(void)
{
	rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
	uvm_physseg_graph.nentries = 0;
}

uvm_physseg_t
uvm_physseg_get_next(uvm_physseg_t upm)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_RIGHT);
}

uvm_physseg_t
uvm_physseg_get_prev(uvm_physseg_t upm)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_LEFT);
}

uvm_physseg_t
uvm_physseg_get_last(void)
{
	return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
}

uvm_physseg_t
uvm_physseg_get_first(void)
{
	return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	struct uvm_physseg *ps =
	    (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));

	return ps->end - 1;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (seg->free_list != freelist) {
		return false;
	}

	/*
	 * During cold boot, what we're about to unplug hasn't been
	 * put on the uvm freelist, nor has uvmexp.npages been
	 * updated. (This happens in uvm_page.c:uvm_page_init())
	 *
	 * For hotplug, we assume here that the pages being unloaded
	 * here are completely out of sight of uvm (i.e. not on any uvm
	 * lists), and that uvmexp.npages has been suitably
	 * decremented before we're called.
	 *
	 * XXX: will avail_end == start if avail_start < avail_end?
	 */

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		return uvm_physseg_unplug(seg->avail_start, 1);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		return uvm_physseg_unplug(seg->avail_end - 1, 1);
	}

	return false;
}

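/*
 * Example usage (sketch, boot time only): MD code stealing one page
 * from a given freelist typically walks the segments backwards, so
 * that the loop terminates on an invalid handle:
 *
 *	paddr_t pa;
 *	uvm_physseg_t bank;
 *
 *	for (bank = uvm_physseg_get_last();
 *	    uvm_physseg_valid_p(bank);
 *	    bank = uvm_physseg_get_prev(bank)) {
 *		if (uvm_page_physunload(bank, VM_FREELIST_DEFAULT, &pa))
 *			break;
 *	}
 */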

bool
uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);
	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);

	/* Always unplug from front */
	return uvm_physseg_unplug(seg->avail_start, 1);
}

/*
 * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
 */
uvm_physseg_t
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{
	struct uvm_physseg *ps = NULL;

	ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);

	if (ps != NULL && offp != NULL)
		*offp = pframe - ps->start;

	return ps;
}

#else  /* UVM_HOTPLUG */

/*
 * physical memory config is stored in vm_physmem.
 */

#define	VM_PHYSMEM_PTR(i)	(&vm_physmem[i])
#if VM_PHYSSEG_MAX == 1
#define	VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
#else
#define	VM_PHYSMEM_PTR_SWAP(i, j)					      \
	do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
#endif

#define	HANDLE_TO_PHYSSEG_NODE(h)	(VM_PHYSMEM_PTR((int)h))
#define	PHYSSEG_NODE_TO_HANDLE(u)	((int)((vsize_t) (u - vm_physmem) / sizeof(struct uvm_physseg)))

static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
static int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
#define	vm_nphysmem	vm_nphysseg

void
uvm_physseg_init(void)
{
	/* XXX: Provisioning for rb_tree related init(s) */
	return;
}

int
uvm_physseg_get_next(uvm_physseg_t lcv)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv + 1);
}

int
uvm_physseg_get_prev(uvm_physseg_t lcv)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv - 1);
}

int
uvm_physseg_get_last(void)
{
	return (vm_nphysseg - 1);
}

int
uvm_physseg_get_first(void)
{
	return 0;
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	int lcv;
	paddr_t last = 0;
	struct uvm_physseg *ps;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps = VM_PHYSMEM_PTR(lcv);
		if (last < ps->end)
			last = ps->end;
	}

	return last;
}

static struct vm_page *
uvm_post_preload_check(void)
{
	int preload, lcv;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
		if (VM_PHYSMEM_PTR(lcv)->pgs)
			break;
	}
	preload = (lcv == vm_nphysmem);

	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		panic("Tried to add RAM after uvm_page_init");
	}

	return NULL;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	if (seg->free_list != freelist) {
		return false;
	}

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		seg->avail_start++;
		seg->start++;
		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		seg->avail_end--;
		seg->end--;
		/* nothing left?   nuke it */
		if (seg->avail_end == seg->start) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);
	seg->avail_start++;
	/* truncate! */
	seg->start = seg->avail_start;

	/* nothing left?   nuke it */
	if (seg->avail_start == seg->end) {
		if (vm_nphysmem == 1)
			panic("uvm_page_physget: out of memory!");
		vm_nphysmem--;
		for (x = psi ; x < vm_nphysmem ; x++)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x + 1);
	}
	return (true);
}

bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int lcv;
	struct vm_page *pgs;
	struct uvm_physseg *ps;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	if (uvm_physseg_valid_p(upm)) /* XXX; do we allow "update" plugs ? */
		return false;
#endif

	paddr_t start = pfn;
	paddr_t end = pfn + pages;
	paddr_t avail_start = start;
	paddr_t avail_end = end;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	/*
	 * do we have room?
	 */

	if (vm_nphysmem == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		if (psp != NULL)
			*psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
		return false;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */
	pgs = uvm_post_preload_check();

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = VM_PHYSMEM_PTR(vm_nphysmem);
	lcv = vm_nphysmem;
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if (start < VM_PHYSMEM_PTR(lcv)->start)
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if ((end - start) >
			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->pgs = pgs;

	vm_nphysmem++;

	if (psp != NULL)
		*psp = lcv;

	return true;
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX == 1
static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
#else
static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
#endif

/*
 * uvm_physseg_find: find the vm_physseg structure that a PA belongs to
 */
int
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{

#if VM_PHYSSEG_MAX == 1
	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}

#if VM_PHYSSEG_MAX == 1
static inline int
vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{

	/* 'contig' case */
	if (pframe >= segs[0].start && pframe < segs[0].end) {
		if (offp)
			*offp = pframe - segs[0].start;
		return(0);
	}
	return(-1);
}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

static inline int
vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* binary search for it */
	int	start, len, guess;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

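	/*
	 * Worked example (hypothetical layout): with nsegs = 3 and
	 * segments covering frames [0x00,0x10), [0x10,0x40) and
	 * [0x40,0x80), a lookup of pframe 0x50 goes:
	 *
	 *	start = 0, len = 3: guess = 1; 0x50 >= 0x10 but not
	 *	    < 0x40, so start = 2 and len-- ("adjust"); the
	 *	    loop update then leaves len = 2 / 2 = 1
	 *	start = 2, len = 1: guess = 2; 0x40 <= 0x50 < 0x80,
	 *	    so return 2
	 */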

	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
		guess = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= segs[guess].start) {
			/* was try correct? */
			if (pframe < segs[guess].end) {
				if (offp)
					*offp = pframe - segs[guess].start;
				return guess;		/* got it */
			}
			start = guess + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);
}

#else

static inline int
vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < nsegs; lcv++) {
		if (pframe >= segs[lcv].start &&
		    pframe < segs[lcv].end) {
			if (offp)
				*offp = pframe - segs[lcv].start;
			return(lcv);		/* got it */
		}
	}
	return(-1);
}
#endif
#endif /* UVM_HOTPLUG */

bool
uvm_physseg_valid_p(uvm_physseg_t upm)
{
	struct uvm_physseg *ps;

	if (upm == UVM_PHYSSEG_TYPE_INVALID ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
		return false;

	/*
	 * This is part of the delicate boot-time init dance -
	 * before uvm_page_init() is done, a handle may be valid even
	 * though its pgs[] has not been set up yet.
	 */
	if (uvm.page_init_done != true)
		return true;

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	/* Extra checks needed only post uvm_page_init() */
	if (ps->pgs == NULL)
		return false;

	/* XXX: etc. */

	return true;
}

/*
 * Boot protocol dictates that these must be able to return partially
 * initialised segments.
 */
paddr_t
uvm_physseg_get_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->start;
}

paddr_t
uvm_physseg_get_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->end;
}

paddr_t
uvm_physseg_get_avail_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
}

#if defined(UVM_PHYSSEG_LEGACY)
void
uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_end;
	avail_end = uvm_physseg_get_avail_end(upm);
	KASSERT(uvm_physseg_valid_p(upm));
	KASSERT(avail_start < avail_end && avail_start >= ps->start);
#endif

	ps->avail_start = avail_start;
}

void
uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_start;
	avail_start = uvm_physseg_get_avail_start(upm);
	KASSERT(uvm_physseg_valid_p(upm));
	KASSERT(avail_end > avail_start && avail_end <= ps->end);
#endif

	ps->avail_end = avail_end;
}

#endif /* UVM_PHYSSEG_LEGACY */

paddr_t
uvm_physseg_get_avail_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
}

struct vm_page *
uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
}

#ifdef __HAVE_PMAP_PHYSSEG
struct pmap_physseg *
uvm_physseg_get_pmseg(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
}
#endif

int
uvm_physseg_get_free_list(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
}

u_int
uvm_physseg_get_start_hint(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
}

bool
uvm_physseg_set_start_hint(uvm_physseg_t upm, u_int start_hint)
{
	if (uvm_physseg_valid_p(upm) == false)
		return false;

	HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
	return true;
}

void
uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
{
	psize_t i;
	psize_t n;
	paddr_t paddr;
	struct uvm_physseg *seg;
	struct vm_page *pg;

	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);
	KASSERT(seg != NULL);
	KASSERT(seg->pgs == NULL);

	n = seg->end - seg->start;
	seg->pgs = pgs;

	/* init and free vm_pages (we've already zeroed them) */
	paddr = ctob(seg->start);
	for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
		pg = &seg->pgs[i];
		pg->phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
		VM_MDPAGE_INIT(pg);
#endif
		if (atop(paddr) >= seg->avail_start &&
		    atop(paddr) < seg->avail_end) {
			uvmexp.npages++;
			/* add page to free pool */
			uvm_page_set_freelist(pg,
			    uvm_page_lookup_freelist(pg));
			/* Disable LOCKDEBUG: too many and too early. */
			mutex_init(&pg->interlock, MUTEX_NODEBUG, IPL_NONE);
			uvm_pagefree(pg);
		}
	}
}

void
uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
{
	struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);

	/* max number of pre-boot unplug()s allowed */
#define UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX

	static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];

	if (__predict_false(uvm.page_init_done == false)) {
		seg->ext = extent_create("Boot time slab", (u_long) pgs, (u_long) (pgs + n),
		    (void *)btslab_ex_storage, sizeof(btslab_ex_storage), 0);
	} else {
		seg->ext = extent_create("Hotplug slab", (u_long) pgs, (u_long) (pgs + n), NULL, 0, 0);
	}

	KASSERT(seg->ext != NULL);
}

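/*
 * Sketch of the slab protocol (hypothetical sizes): a slab of, say,
 * 1024 vm_page entries is first handed to the extent, then carved up
 * on behalf of individual plugs:
 *
 *	uvm_physseg_seg_chomp_slab(upm, slab, 1024);
 *	pgs = uvm_physseg_seg_alloc_from_slab(upm, npages);
 *
 * where uvm_physseg_seg_alloc_from_slab() below reserves npages
 * entries out of the extent now managing slab[].
 */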
   1143   1.1    cherry 
   1144   1.1    cherry struct vm_page *
   1145   1.1    cherry uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
   1146   1.1    cherry {
   1147   1.1    cherry 	int err;
   1148   1.1    cherry 	struct uvm_physseg *seg;
   1149   1.1    cherry 	struct vm_page *pgs = NULL;
   1150   1.1    cherry 
   1151   1.9  christos 	KASSERT(pages > 0);
   1152   1.9  christos 
   1153   1.1    cherry 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
   1154   1.1    cherry 
   1155   1.1    cherry 	if (__predict_false(seg->ext == NULL)) {
    1156   1.1    cherry 		/*
    1157   1.1    cherry 		 * This is a situation unique to boot time.
    1158   1.1    cherry 		 * It shouldn't happen at any point other than from
    1159   1.1    cherry 		 * the first uvm_page.c:uvm_page_init() call.
    1160   1.1    cherry 		 * Since uvm_page_init() loops over segments in order,
    1161   1.1    cherry 		 * the previous segment's extent can be borrowed here.
    1162   1.1    cherry 		 */
   1163   1.1    cherry 		KASSERT(uvm.page_init_done != true);
   1164   1.1    cherry 
   1165   1.9  christos 		uvm_physseg_t upmp = uvm_physseg_get_prev(upm);
   1166   1.9  christos 		KASSERT(upmp != UVM_PHYSSEG_TYPE_INVALID);
   1167   1.9  christos 
   1168   1.9  christos 		seg->ext = HANDLE_TO_PHYSSEG_NODE(upmp)->ext;
   1169   1.1    cherry 
   1170   1.1    cherry 		KASSERT(seg->ext != NULL);
   1171   1.1    cherry 	}
   1172   1.1    cherry 
    1173   1.1    cherry 	/* Allocate enough vm_page[] space for this segment */
    1174   1.1    cherry 	err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0,
                         	    EX_BOUNDZERO, (u_long *)&pgs);
   1175   1.1    cherry 
   1176   1.1    cherry 	if (err != 0) {
   1177   1.1    cherry #ifdef DEBUG
    1178   1.1    cherry 		printf("%s: extent_alloc failed with error: %d\n",
   1179   1.1    cherry 		    __func__, err);
   1180   1.1    cherry #endif
   1181   1.1    cherry 	}
   1182   1.1    cherry 
   1183   1.1    cherry 	return pgs;
   1184   1.1    cherry }
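                         
                         /*
                          * Illustrative sketch only: carving a vm_page[] out of the slab
                          * and initialising it.  "upm" is a hypothetical handle whose slab
                          * was seeded with uvm_physseg_seg_chomp_slab() above:
                          *
                          *	psize_t npages = uvm_physseg_get_end(upm) -
                          *	    uvm_physseg_get_start(upm);
                          *	struct vm_page *pgs =
                          *	    uvm_physseg_seg_alloc_from_slab(upm, npages);
                          *	if (pgs != NULL)
                          *		uvm_physseg_init_seg(upm, pgs);
                          *
                          * On success, every page frame in [avail_start, avail_end) ends
                          * up in the free pool via uvm_pagefree().
                          */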
   1185   1.1    cherry 
   1186   1.1    cherry /*
   1187   1.1    cherry  * uvm_page_physload: load physical memory into VM system
   1188   1.1    cherry  *
   1189   1.1    cherry  * => all args are PFs
   1190   1.1    cherry  * => all pages in start/end get vm_page structures
   1191   1.1    cherry  * => areas marked by avail_start/avail_end get added to the free page pool
   1192   1.1    cherry  * => we are limited to VM_PHYSSEG_MAX physical memory segments
   1193   1.1    cherry  */
   1194   1.1    cherry 
   1195   1.1    cherry uvm_physseg_t
   1196   1.1    cherry uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
   1197   1.1    cherry     paddr_t avail_end, int free_list)
   1198   1.1    cherry {
   1199   1.1    cherry 	struct uvm_physseg *ps;
   1200   1.1    cherry 	uvm_physseg_t upm;
   1201   1.1    cherry 
   1202   1.1    cherry 	if (__predict_true(uvm.page_init_done == true))
    1203   1.1    cherry 		panic("%s: load attempted after uvm_page_init()\n", __func__);
   1204   1.1    cherry 	if (uvmexp.pagesize == 0)
   1205   1.1    cherry 		panic("uvm_page_physload: page size not set!");
   1206   1.1    cherry 	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
   1207   1.1    cherry 		panic("uvm_page_physload: bad free list %d", free_list);
   1208   1.1    cherry 	if (start >= end)
   1209   1.1    cherry 		panic("uvm_page_physload: start >= end");
   1210   1.1    cherry 
   1211   1.1    cherry 	if (uvm_physseg_plug(start, end - start, &upm) == false) {
   1212   1.1    cherry 		panic("uvm_physseg_plug() failed at boot.");
   1213   1.1    cherry 		/* NOTREACHED */
   1214   1.1    cherry 		return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
   1215   1.1    cherry 	}
   1216   1.1    cherry 
   1217   1.1    cherry 	ps = HANDLE_TO_PHYSSEG_NODE(upm);
   1218   1.1    cherry 
   1219   1.1    cherry 	/* Legacy */
   1220   1.1    cherry 	ps->avail_start = avail_start;
   1221   1.1    cherry 	ps->avail_end = avail_end;
   1222   1.1    cherry 
   1223   1.1    cherry 	ps->free_list = free_list; /* XXX: */
    1224   1.1    cherry 
   1226   1.1    cherry 	return upm;
   1227   1.1    cherry }
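                         
                         /*
                          * Illustrative sketch only: machine-dependent boot code would
                          * typically register each RAM range it discovers.  The variables
                          * (first_pfn, last_pfn, avail_first, avail_last) are hypothetical:
                          *
                          *	uvm_page_physload(first_pfn, last_pfn, avail_first,
                          *	    avail_last, VM_FREELIST_DEFAULT);
                          *
                          * All arguments are page frame numbers, not byte addresses, and
                          * the call must be made before uvm_page_init() runs.
                          */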
   1228   1.1    cherry 
   1229   1.1    cherry bool
   1230   1.1    cherry uvm_physseg_unplug(paddr_t pfn, size_t pages)
   1231   1.1    cherry {
   1232   1.1    cherry 	uvm_physseg_t upm;
   1233   1.8  riastrad 	paddr_t off = 0, start __diagused, end;
   1234   1.1    cherry 	struct uvm_physseg *seg;
   1235   1.1    cherry 
   1236   1.1    cherry 	upm = uvm_physseg_find(pfn, &off);
   1237   1.1    cherry 
   1238   1.2    cherry 	if (!uvm_physseg_valid_p(upm)) {
   1239   1.1    cherry 		printf("%s: Tried to unplug from unknown offset\n", __func__);
   1240   1.1    cherry 		return false;
   1241   1.1    cherry 	}
   1242   1.1    cherry 
   1243   1.1    cherry 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
   1244   1.1    cherry 
   1245   1.1    cherry 	start = uvm_physseg_get_start(upm);
   1246   1.1    cherry 	end = uvm_physseg_get_end(upm);
   1247   1.1    cherry 
   1248   1.1    cherry 	if (end < (pfn + pages)) {
    1249   1.1    cherry 		printf("%s: Tried to unplug oversized span\n", __func__);
   1250   1.1    cherry 		return false;
   1251   1.1    cherry 	}
   1252   1.1    cherry 
   1253   1.1    cherry 	KASSERT(pfn == start + off); /* sanity */
   1254   1.1    cherry 
   1255   1.1    cherry 	if (__predict_true(uvm.page_init_done == true)) {
   1256   1.1    cherry 		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
    1257   1.1    cherry 		if (extent_free(seg->ext, (u_long)(seg->pgs + off),
                         		    sizeof(struct vm_page) * pages,
                         		    EX_MALLOCOK | EX_NOWAIT) != 0)
    1258   1.1    cherry 			return false;
   1259   1.1    cherry 	}
   1260   1.1    cherry 
   1261   1.1    cherry 	if (off == 0 && (pfn + pages) == end) {
   1262   1.1    cherry #if defined(UVM_HOTPLUG) /* rbtree implementation */
   1263   1.1    cherry 		int segcount = 0;
   1264   1.1    cherry 		struct uvm_physseg *current_ps;
   1265   1.1    cherry 		/* Complete segment */
   1266   1.1    cherry 		if (uvm_physseg_graph.nentries == 1)
    1267   1.1    cherry 			panic("%s: can't unplug the last segment!", __func__);
   1268   1.1    cherry 
   1269   1.1    cherry 		if (__predict_true(uvm.page_init_done == true)) {
   1270   1.1    cherry 			RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
   1271   1.1    cherry 				if (seg->ext == current_ps->ext)
   1272   1.1    cherry 					segcount++;
   1273   1.1    cherry 			}
   1274   1.1    cherry 			KASSERT(segcount > 0);
   1275   1.1    cherry 
   1276   1.1    cherry 			if (segcount == 1) {
   1277   1.1    cherry 				extent_destroy(seg->ext);
   1278   1.1    cherry 			}
   1279   1.1    cherry 
   1280   1.1    cherry 			/*
   1281   1.1    cherry 			 * We assume that the unplug will succeed from
   1282   1.1    cherry 			 *  this point onwards
   1283   1.1    cherry 			 */
   1284   1.1    cherry 			uvmexp.npages -= (int) pages;
   1285   1.1    cherry 		}
   1286   1.1    cherry 
   1287   1.1    cherry 		rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
   1288   1.1    cherry 		memset(seg, 0, sizeof(struct uvm_physseg));
   1289   1.1    cherry 		uvm_physseg_free(seg, sizeof(struct uvm_physseg));
   1290   1.1    cherry 		uvm_physseg_graph.nentries--;
   1291   1.1    cherry #else /* UVM_HOTPLUG */
   1292   1.1    cherry 		int x;
   1293   1.1    cherry 		if (vm_nphysmem == 1)
    1294   1.1    cherry 			panic("%s: can't unplug the last segment!", __func__);
   1295   1.1    cherry 		vm_nphysmem--;
   1296   1.1    cherry 		for (x = upm ; x < vm_nphysmem ; x++)
   1297   1.1    cherry 			/* structure copy */
   1298   1.1    cherry 			VM_PHYSMEM_PTR_SWAP(x, x + 1);
   1299   1.1    cherry #endif /* UVM_HOTPLUG */
   1300   1.1    cherry 		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
   1301   1.1    cherry 		return true;
   1302   1.1    cherry 	}
   1303   1.1    cherry 
   1304   1.1    cherry 	if (off > 0 &&
   1305   1.1    cherry 	    (pfn + pages) < end) {
   1306   1.1    cherry #if defined(UVM_HOTPLUG) /* rbtree implementation */
   1307   1.1    cherry 		/* middle chunk - need a new segment */
   1308   1.1    cherry 		struct uvm_physseg *ps, *current_ps;
   1309   1.1    cherry 		ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
   1310   1.1    cherry 		if (ps == NULL) {
    1311   1.1    cherry 			printf("%s: Unable to allocate new fragment vm_physseg\n",
   1312   1.1    cherry 			    __func__);
   1313   1.1    cherry 			return false;
   1314   1.1    cherry 		}
   1315   1.1    cherry 
   1316   1.1    cherry 		/* Remove middle chunk */
   1317   1.1    cherry 		if (__predict_true(uvm.page_init_done == true)) {
   1318   1.1    cherry 			KASSERT(seg->ext != NULL);
   1319   1.1    cherry 			ps->ext = seg->ext;
   1320   1.1    cherry 
   1321   1.1    cherry 			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
   1322   1.1    cherry 			/*
   1323   1.1    cherry 			 * We assume that the unplug will succeed from
   1324   1.1    cherry 			 *  this point onwards
   1325   1.1    cherry 			 */
   1326   1.1    cherry 			uvmexp.npages -= (int) pages;
   1327   1.1    cherry 		}
   1328   1.1    cherry 
   1329   1.1    cherry 		ps->start = pfn + pages;
   1330   1.1    cherry 		ps->avail_start = ps->start; /* XXX: Legacy */
   1331   1.1    cherry 
   1332   1.1    cherry 		ps->end = seg->end;
   1333   1.1    cherry 		ps->avail_end = ps->end; /* XXX: Legacy */
   1334   1.1    cherry 
   1335   1.1    cherry 		seg->end = pfn;
   1336   1.1    cherry 		seg->avail_end = seg->end; /* XXX: Legacy */
    1337   1.1    cherry 
   1339   1.1    cherry 		/*
   1340   1.1    cherry 		 * The new pgs array points to the beginning of the
   1341   1.1    cherry 		 * tail fragment.
   1342   1.1    cherry 		 */
   1343   1.1    cherry 		if (__predict_true(uvm.page_init_done == true))
   1344   1.1    cherry 			ps->pgs = seg->pgs + off + pages;
   1345   1.1    cherry 
   1346   1.1    cherry 		current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
   1347   1.1    cherry 		if (current_ps != ps) {
    1348   1.1    cherry 			panic("%s: Duplicate address range detected!", __func__);
   1349   1.1    cherry 		}
   1350   1.1    cherry 		uvm_physseg_graph.nentries++;
   1351   1.1    cherry #else /* UVM_HOTPLUG */
   1352   1.1    cherry 		panic("%s: can't unplug() from the middle of a segment without"
   1353   1.7       uwe 		    " UVM_HOTPLUG\n",  __func__);
   1354   1.1    cherry 		/* NOTREACHED */
   1355   1.1    cherry #endif /* UVM_HOTPLUG */
   1356   1.1    cherry 		return true;
   1357   1.1    cherry 	}
   1358   1.1    cherry 
   1359   1.1    cherry 	if (off == 0 && (pfn + pages) < end) {
   1360   1.1    cherry 		/* Remove front chunk */
   1361   1.1    cherry 		if (__predict_true(uvm.page_init_done == true)) {
   1362   1.1    cherry 			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
   1363   1.1    cherry 			/*
   1364   1.1    cherry 			 * We assume that the unplug will succeed from
   1365   1.1    cherry 			 *  this point onwards
   1366   1.1    cherry 			 */
   1367   1.1    cherry 			uvmexp.npages -= (int) pages;
   1368   1.1    cherry 		}
   1369   1.1    cherry 
   1370   1.1    cherry 		/* Truncate */
   1371   1.1    cherry 		seg->start = pfn + pages;
   1372   1.1    cherry 		seg->avail_start = seg->start; /* XXX: Legacy */
   1373   1.1    cherry 
   1374   1.1    cherry 		/*
   1375   1.1    cherry 		 * Move the pgs array start to the beginning of the
   1376   1.1    cherry 		 * tail end.
   1377   1.1    cherry 		 */
   1378   1.1    cherry 		if (__predict_true(uvm.page_init_done == true))
   1379   1.1    cherry 			seg->pgs += pages;
   1380   1.1    cherry 
   1381   1.1    cherry 		return true;
   1382   1.1    cherry 	}
   1383   1.1    cherry 
    1384   1.1    cherry 	if (off > 0 && (pfn + pages) == end) {
    1385   1.1    cherry 		/* Back chunk - truncate the segment */
    1386   1.1    cherry 		seg->end = pfn;
    1387   1.1    cherry 		seg->avail_end = seg->end; /* XXX: Legacy */
    1388   1.1    cherry 
    1389   1.1    cherry 		/* As above, pages are only accounted after init */
    1390   1.1    cherry 		if (__predict_true(uvm.page_init_done == true))
    1391   1.1    cherry 			uvmexp.npages -= (int) pages;
    1392   1.1    cherry 
    1393   1.1    cherry 		return true;
    1394   1.1    cherry 	}
   1396   1.1    cherry 
    1397   1.1    cherry 	printf("%s: Tried to unplug unknown range\n", __func__);
   1398   1.1    cherry 
   1399   1.1    cherry 	return false;
   1400   1.1    cherry }
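                         
                         /*
                          * Illustrative sketch only: undoing a previous plug of "pages"
                          * page frames starting at pfn "pfn" (both values hypothetical,
                          * supplied by the caller):
                          *
                          *	if (uvm_physseg_unplug(pfn, pages) == false)
                          *		printf("unplug at %" PRIxPADDR " failed\n", pfn);
                          *
                          * Depending on where the span falls within its segment, this
                          * either removes the whole segment, truncates it at the front or
                          * back, or (with UVM_HOTPLUG) splits it in two.
                          */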
   1401