/* $NetBSD: uvm_physseg.c,v 1.12 2019/12/20 19:03:17 ad Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Consolidated API from uvm_page.c and others.
 * Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
 * rbtree(3) backing implementation by:
 * Santhosh N. Raju <santhosh.raju@gmail.com>
 */

#ifdef _KERNEL_OPT
#include "opt_uvm.h"
#endif

#include <sys/param.h>
#include <sys/types.h>
#include <sys/extent.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_physseg.h>

/*
 * uvm_physseg: describes one segment of physical memory
 */
struct uvm_physseg {
	struct	rb_node rb_node;	/* tree information */
	paddr_t	start;			/* PF# of first page in segment */
	paddr_t	end;			/* (PF# of last page in segment) + 1 */
	paddr_t	avail_start;		/* PF# of first free page in segment */
	paddr_t	avail_end;		/* (PF# of last free page in segment) + 1 */
	struct	vm_page *pgs;		/* vm_page structures (from start) */
	struct	extent *ext;		/* extent(9) structure to manage pgs[] */
	int	free_list;		/* which free list they belong on */
	u_int	start_hint;		/* start looking for free pages here */
					/* protected by uvm_fpageqlock */
#ifdef __HAVE_PMAP_PHYSSEG
	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};

/*
 * These functions are reserved for uvm(9) internal use and are not
 * exported in the header file uvm_physseg.h.
 *
 * Thus they are declared here instead.
 */
void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);

/* returns a pgs array */
struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);

#if defined(UVM_HOTPLUG) /* rbtree implementation */

#define		HANDLE_TO_PHYSSEG_NODE(h)	((struct uvm_physseg *)(h))
#define		PHYSSEG_NODE_TO_HANDLE(u)	((uvm_physseg_t)(u))

struct uvm_physseg_graph {
	struct rb_tree rb_tree;		/* Tree for entries */
	int            nentries;	/* Number of entries */
};

static struct uvm_physseg_graph uvm_physseg_graph;

/*
 * Note on kmem(9) allocator usage:
 * We take the conservative approach that plug/unplug are allowed to
 * fail in high memory stress situations.
 *
 * We want to avoid re-entrant situations in which one plug/unplug
 * operation is waiting on a previous one to complete, since this
 * makes the design more complicated than necessary.
 *
 * We may review this and change its behaviour, once the use cases
 * become more obvious.
 */
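
/*
 * Since plug/unplug may fail, callers must treat a false return as a
 * normal, recoverable event. An illustrative sketch (hypothetical
 * hotplug caller, not part of this file) plugging "sz" bytes of new
 * RAM at physical address "base":
 *
 *	uvm_physseg_t upm;
 *
 *	if (!uvm_physseg_plug(atop(base), atop(sz), &upm))
 *		return ENOMEM;
 *
 * where a false return simply means the backing allocation failed.
 */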

/*
 * Special alloc()/free() functions for boot time support:
 * We assume that alloc() at boot time is only for new 'vm_physseg's.
 * This allows us to use a static array for memory allocation at boot
 * time. Thus we avoid using kmem(9) which is not ready at this point
 * in boot.
 *
 * After kmem(9) is ready, we use it. We currently discard any free()s
 * to this static array, since the size is small enough to be a
 * trivial waste on all architectures we run on.
 */
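
/*
 * For example (illustrative of the boot time regime described above):
 * a call of uvm_physseg_alloc(2 * sizeof(struct uvm_physseg)) before
 * uvm_page_init() hands out the next two consecutive entries of the
 * static uvm_physseg[] array below, while the same call afterwards is
 * serviced by kmem(9).
 */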

static size_t nseg = 0;
static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];

static void *
uvm_physseg_alloc(size_t sz)
{
	/*
	 * During boot time, we only support allocating vm_physseg
	 * entries from the static array.
	 * We need to assert for this.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to alloc size other than multiple"
			    " of struct uvm_physseg at boot\n", __func__);

		size_t n = sz / sizeof(struct uvm_physseg);
		nseg += n;

		KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);

		return &uvm_physseg[nseg - n];
	}

	return kmem_zalloc(sz, KM_NOSLEEP);
}

static void
uvm_physseg_free(void *p, size_t sz)
{
	/*
	 * This is a bit tricky. We do allow simulation of free()
	 * during boot (e.g. when MD code is "steal"ing memory,
	 * and the segment has been exhausted and thus needs to be
	 * free()-ed).
	 * free() also complicates things because we leak the
	 * free(). Therefore calling code can't assume that free()-ed
	 * memory is available for alloc() again, at boot time.
	 *
	 * Thus we can't explicitly disallow free()s during
	 * boot time. However, the same restriction for alloc()
	 * applies to free(). We only allow uvm_physseg related free()s
	 * via this function during boot time.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free size other than multiple"
			    " of struct uvm_physseg at boot\n", __func__);
	}

	/*
	 * Could have been in a single if(){} block - split for
	 * clarity
	 */

	if ((struct uvm_physseg *)p >= uvm_physseg &&
	    (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free() other than struct uvm_physseg"
			    " from static array\n", __func__);

		if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
			panic("%s: tried to free() the entire static array!", __func__);
		return; /* Nothing to free */
	}

	kmem_free(p, sz);
}

/* XXX: Multi page size */
bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int preload;
	size_t slabpages;
	struct uvm_physseg *ps, *current_ps = NULL;
	struct vm_page *slab = NULL, *pgs = NULL;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	if (ps != NULL) /* XXX: do we allow "update" plugs ? */
		return false;
#endif

	/*
	 * do we have room?
	 */

	ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
	if (ps == NULL) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
		    VM_PHYSSEG_MAX, pfn, pfn + pages);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return false;
	}

	/* span init */
	ps->start = pfn;
	ps->end = pfn + pages;

	/*
	 * XXX: Ugly hack because uvmexp.npages accounts for only
	 * those pages in the segment included below as well - this
	 * should be legacy and removed.
	 */

	ps->avail_start = ps->start;
	ps->avail_end = ps->end;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	preload = 1; /* We are going to assume it is a preload */

	RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
		/* If there are non NULL pages then we are not in a preload */
		if (current_ps->pgs != NULL) {
			preload = 0;
			/* Try to scavenge from earlier unplug()s. */
			pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);

			if (pgs != NULL) {
				break;
			}
		}
	}

	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		if (pgs == NULL) { /* Brand new */
			/* Iteratively try alloc down from uvmexp.npages */
			for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
				slab = kmem_zalloc(sizeof(*pgs) * slabpages, KM_NOSLEEP);
				if (slab != NULL)
					break;
			}

			if (slab == NULL) {
				uvm_physseg_free(ps, sizeof(struct uvm_physseg));
				return false;
			}

			uvm_physseg_seg_chomp_slab(ps, slab, slabpages);
			/* We allocate enough for this plug */
			pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);

			if (pgs == NULL) {
				printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
				return false;
			}
		} else {
			/* Reuse scavenged extent */
			ps->ext = current_ps->ext;
		}

		physmem += pages;
		uvmpdpol_reinit();
	} else { /* Boot time - see uvm_page.c:uvm_page_init() */
		pgs = NULL;
		ps->pgs = pgs;
	}

	/*
	 * now insert us in the proper place in uvm_physseg_graph.rb_tree
	 */

	current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
	if (current_ps != ps) {
		panic("uvm_page_physload: Duplicate address range detected!");
	}
	uvm_physseg_graph.nentries++;

	/*
	 * uvm_pagefree() requires the PHYS_TO_VM_PAGE(pgs[i]) on the
	 * newly allocated pgs[] to return the correct value. This is
	 * a bit of a chicken and egg problem, since it needs
	 * uvm_physseg_find() to succeed. For this, the node needs to
	 * be inserted *before* uvm_physseg_init_seg() happens.
	 *
	 * During boot, this happens anyway, since
	 * uvm_physseg_init_seg() is called later on and separately
	 * from uvm_page.c:uvm_page_init().
	 * In the case of hotplug we need to ensure this.
	 */

	if (__predict_true(!preload))
		uvm_physseg_init_seg(ps, pgs);

	if (psp != NULL)
		*psp = ps;

	return true;
}

static int
uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
{
	const struct uvm_physseg *enode1 = nnode1;
	const struct uvm_physseg *enode2 = nnode2;

	KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
	KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);

	if (enode1->start < enode2->start)
		return -1;
	if (enode1->start >= enode2->end)
		return 1;
	return 0;
}

static int
uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
{
	const struct uvm_physseg *enode = nnode;
	const paddr_t pa = *(const paddr_t *) pkey;

	if (enode->start <= pa && pa < enode->end)
		return 0;
	if (enode->start < pa)
		return -1;
	if (enode->end > pa)
		return 1;

	return 0;
}
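
/*
 * Worked example for the key comparator above (illustrative values):
 * against a node spanning page frames [16, 32), a key of 20 returns 0
 * (the frame falls within the node), a key of 40 returns -1 (the node
 * sorts before the key) and a key of 8 returns 1 (the node sorts
 * after the key).
 */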

static const rb_tree_ops_t uvm_physseg_tree_ops = {
	.rbto_compare_nodes = uvm_physseg_compare_nodes,
	.rbto_compare_key = uvm_physseg_compare_key,
	.rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
	.rbto_context = NULL
};

/*
 * uvm_physseg_init: init the physmem
 *
 * => physmem unit should not be in use at this point
 */

void
uvm_physseg_init(void)
{
	rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
	uvm_physseg_graph.nentries = 0;
}

uvm_physseg_t
uvm_physseg_get_next(uvm_physseg_t upm)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_RIGHT);
}

uvm_physseg_t
uvm_physseg_get_prev(uvm_physseg_t upm)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_LEFT);
}

uvm_physseg_t
uvm_physseg_get_last(void)
{
	return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
}

uvm_physseg_t
uvm_physseg_get_first(void)
{
	return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	struct uvm_physseg *ps =
	    (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));

	return ps->end - 1;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	if (__predict_false(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (seg->free_list != freelist) {
		return false;
	}

	/*
	 * During cold boot, what we're about to unplug hasn't been
	 * put on the uvm freelist, nor has uvmexp.npages been
	 * updated. (This happens in uvm_page.c:uvm_page_init())
	 *
	 * For hotplug, we assume here that the pages being unloaded
	 * here are completely out of sight of uvm (i.e. not on any uvm
	 * lists), and that uvmexp.npages has been suitably
	 * decremented before we're called.
	 *
	 * XXX: will avail_end == start if avail_start < avail_end?
	 */

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		return uvm_physseg_unplug(seg->avail_start, 1);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		return uvm_physseg_unplug(seg->avail_end - 1, 1);
	}

	return false;
}
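
/*
 * Illustrative sketch (hypothetical MD caller, not part of this
 * file): early pmap bootstrap code "stealing" one page from the
 * default freelist before uvm_page_init() might do:
 *
 *	paddr_t pa;
 *
 *	if (uvm_page_physunload(upm, VM_FREELIST_DEFAULT, &pa))
 *		use_page(pa);
 *
 * where use_page() stands in for whatever the caller does with the
 * stolen page; on success "pa" holds its physical address.
 */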

bool
uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (__predict_false(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);
	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);

	/* Always unplug from front */
	return uvm_physseg_unplug(seg->avail_start, 1);
}

/*
 * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
 */
uvm_physseg_t
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{
	struct uvm_physseg *ps = NULL;

	ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);

	if (ps != NULL && offp != NULL)
		*offp = pframe - ps->start;

	return ps;
}
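
/*
 * A sketch of how a PA-to-vm_page lookup can be composed from this
 * API (the variable names here are illustrative only):
 *
 *	psize_t off;
 *	uvm_physseg_t upm = uvm_physseg_find(atop(pa), &off);
 *
 *	if (uvm_physseg_valid_p(upm))
 *		pg = uvm_physseg_get_pg(upm, off);
 */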

#else  /* UVM_HOTPLUG */

/*
 * physical memory config is stored in vm_physmem.
 */

#define	VM_PHYSMEM_PTR(i)	(&vm_physmem[i])
#if VM_PHYSSEG_MAX == 1
#define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
#else
#define VM_PHYSMEM_PTR_SWAP(i, j)					      \
	do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
#endif
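
/*
 * Note that despite its name, VM_PHYSMEM_PTR_SWAP() is a one-way
 * structure copy of entry (j) onto entry (i). It is only used below
 * to shift the tail of vm_physmem[] up or down when an entry is
 * inserted or deleted, where the slot being overwritten is dead.
 */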

#define		HANDLE_TO_PHYSSEG_NODE(h)	(VM_PHYSMEM_PTR((int)h))
#define		PHYSSEG_NODE_TO_HANDLE(u)	((int)(u - vm_physmem))

static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
static int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
#define	vm_nphysmem	vm_nphysseg

void
uvm_physseg_init(void)
{
	/* XXX: Provisioning for rb_tree related init(s) */
	return;
}

int
uvm_physseg_get_next(uvm_physseg_t lcv)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv + 1);
}

int
uvm_physseg_get_prev(uvm_physseg_t lcv)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv - 1);
}

int
uvm_physseg_get_last(void)
{
	return (vm_nphysseg - 1);
}

int
uvm_physseg_get_first(void)
{
	return 0;
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	int lcv;
	paddr_t last = 0;
	struct uvm_physseg *ps;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps = VM_PHYSMEM_PTR(lcv);
		if (last < ps->end)
			last = ps->end;
	}

	/* ps->end is one past the last frame; match the rbtree version */
	return last - 1;
}

static struct vm_page *
uvm_post_preload_check(void)
{
	int preload, lcv;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
		if (VM_PHYSMEM_PTR(lcv)->pgs)
			break;
	}
	preload = (lcv == vm_nphysmem);

	/*
	 * if VM is already running, adding RAM is not supported in
	 * this legacy (non-UVM_HOTPLUG) configuration.
	 */

	if (!preload) {
		panic("Tried to add RAM after uvm_page_init");
	}

	return NULL;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	if (seg->free_list != freelist) {
		return false;
	}

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		seg->avail_start++;
		seg->start++;
		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physunload: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		seg->avail_end--;
		seg->end--;
		/* nothing left?   nuke it */
		if (seg->avail_end == seg->start) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physunload: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);
	seg->avail_start++;
	/* truncate! */
	seg->start = seg->avail_start;

	/* nothing left?   nuke it */
	if (seg->avail_start == seg->end) {
		if (vm_nphysmem == 1)
			panic("uvm_page_physunload_force: out of memory!");
		vm_nphysmem--;
		for (x = psi ; x < vm_nphysmem ; x++)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x + 1);
	}
	return (true);
}

bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int lcv;
	struct vm_page *pgs;
	struct uvm_physseg *ps;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	if (uvm_physseg_valid_p(upm)) /* XXX: do we allow "update" plugs ? */
		return false;
#endif

	paddr_t start = pfn;
	paddr_t end = pfn + pages;
	paddr_t avail_start = start;
	paddr_t avail_end = end;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	/*
	 * do we have room?
	 */

	if (vm_nphysmem == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		if (psp != NULL)
			*psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
		return false;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */
	pgs = uvm_post_preload_check();

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = VM_PHYSMEM_PTR(vm_nphysmem);
	lcv = vm_nphysmem;
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if (start < VM_PHYSMEM_PTR(lcv)->start)
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if ((end - start) >
			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->pgs = pgs;

	vm_nphysmem++;

	if (psp != NULL)
		*psp = lcv;

	return true;
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX == 1
static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
#else
static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
#endif

/*
 * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
 */
int
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{

#if VM_PHYSSEG_MAX == 1
	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}

#if VM_PHYSSEG_MAX == 1
static inline int
vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{

	/* 'contig' case */
	if (pframe >= segs[0].start && pframe < segs[0].end) {
		if (offp)
			*offp = pframe - segs[0].start;
		return(0);
	}
	return(-1);
}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

static inline int
vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* binary search for it */
	int	start, len, guess;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */
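
	/*
	 * Worked example: with len == 7 the try sits at start + 3. If
	 * the target is above it, the upper region holds
	 * round(7/2) - 1 == 3 entries, which is exactly what the
	 * "len--" below combined with the "len = len / 2" in the for
	 * loop leaves us: trunc((7 - 1) / 2) == 3.
	 */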

	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
		guess = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= segs[guess].start) {
			/* was try correct? */
			if (pframe < segs[guess].end) {
				if (offp)
					*offp = pframe - segs[guess].start;
				return guess;            /* got it */
			}
			start = guess + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);
}

#else

static inline int
vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < nsegs; lcv++) {
		if (pframe >= segs[lcv].start &&
		    pframe < segs[lcv].end) {
			if (offp)
				*offp = pframe - segs[lcv].start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);
}
#endif
#endif /* UVM_HOTPLUG */

bool
uvm_physseg_valid_p(uvm_physseg_t upm)
{
	struct uvm_physseg *ps;

	if (upm == UVM_PHYSSEG_TYPE_INVALID ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
		return false;

	/*
	 * This is part of the delicate boot time init dance -
	 * before uvm_page_init() has completed, segments are only
	 * partially initialised, so a mere existence check has to
	 * suffice here.
	 */
	if (uvm.page_init_done != true)
		return true;

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	/* Extra checks needed only post uvm_page_init() */
	if (ps->pgs == NULL)
		return false;

	/* XXX: etc. */

	return true;
}

/*
 * Boot protocol dictates that these must be able to return partially
 * initialised segments.
 */
paddr_t
uvm_physseg_get_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->start;
}

paddr_t
uvm_physseg_get_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->end;
}

paddr_t
uvm_physseg_get_avail_start(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
}

#if defined(UVM_PHYSSEG_LEGACY)
void
uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_end;
	avail_end = uvm_physseg_get_avail_end(upm);
	KASSERT(uvm_physseg_valid_p(upm));
	KASSERT(avail_start < avail_end && avail_start >= ps->start);
#endif

	ps->avail_start = avail_start;
}

void
uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
{
	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);

#if defined(DIAGNOSTIC)
	paddr_t avail_start;
	avail_start = uvm_physseg_get_avail_start(upm);
	KASSERT(uvm_physseg_valid_p(upm));
	KASSERT(avail_end > avail_start && avail_end <= ps->end);
#endif

	ps->avail_end = avail_end;
}

#endif /* UVM_PHYSSEG_LEGACY */

paddr_t
uvm_physseg_get_avail_end(uvm_physseg_t upm)
{
	if (uvm_physseg_valid_p(upm) == false)
		return (paddr_t) -1;

	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
}

struct vm_page *
uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
}

#ifdef __HAVE_PMAP_PHYSSEG
struct pmap_physseg *
uvm_physseg_get_pmseg(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
}
#endif

int
uvm_physseg_get_free_list(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
}

u_int
uvm_physseg_get_start_hint(uvm_physseg_t upm)
{
	KASSERT(uvm_physseg_valid_p(upm));
	return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
}

bool
uvm_physseg_set_start_hint(uvm_physseg_t upm, u_int start_hint)
{
	if (uvm_physseg_valid_p(upm) == false)
		return false;

	HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
	return true;
}

void
uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
{
	psize_t i;
	psize_t n;
	paddr_t paddr;
	struct uvm_physseg *seg;
	struct vm_page *pg;

	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);
	KASSERT(seg != NULL);
	KASSERT(seg->pgs == NULL);

	n = seg->end - seg->start;
	seg->pgs = pgs;

	/* init and free vm_pages (we've already zeroed them) */
	paddr = ctob(seg->start);
	for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
		seg->pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
		VM_MDPAGE_INIT(&seg->pgs[i]);
#endif
		if (atop(paddr) >= seg->avail_start &&
		    atop(paddr) < seg->avail_end) {
			uvmexp.npages++;
			/* add page to free pool */
			pg = &seg->pgs[i];
			/* Disable LOCKDEBUG: too many and too early. */
			mutex_init(&pg->interlock, MUTEX_NODEBUG, IPL_NONE);
			uvm_pagefree(pg);
		}
	}
}

void
uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
{
	struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);

	/* max number of pre-boot unplug()s allowed */
#define UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX

	static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];

	if (__predict_false(uvm.page_init_done == false)) {
		seg->ext = extent_create("Boot time slab", (u_long) pgs, (u_long) (pgs + n),
		    (void *)btslab_ex_storage, sizeof(btslab_ex_storage), 0);
	} else {
		seg->ext = extent_create("Hotplug slab", (u_long) pgs, (u_long) (pgs + n), NULL, 0, 0);
	}

	KASSERT(seg->ext != NULL);
}
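
/*
 * A sketch of the intended pairing, mirroring the calls made from
 * uvm_physseg_plug() above: a freshly allocated slab of "slabpages"
 * zeroed vm_page structures is handed to the extent(9) arena, and the
 * plug then carves its own pgs[] out of it:
 *
 *	uvm_physseg_seg_chomp_slab(ps, slab, slabpages);
 *	pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);
 */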
   1142   1.1    cherry 
   1143   1.1    cherry struct vm_page *
   1144   1.1    cherry uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
   1145   1.1    cherry {
   1146   1.1    cherry 	int err;
   1147   1.1    cherry 	struct uvm_physseg *seg;
   1148   1.1    cherry 	struct vm_page *pgs = NULL;
   1149   1.1    cherry 
   1150   1.9  christos 	KASSERT(pages > 0);
   1151   1.9  christos 
   1152   1.1    cherry 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
   1153   1.1    cherry 
   1154   1.1    cherry 	if (__predict_false(seg->ext == NULL)) {
    1155   1.1    cherry 		/*
    1156   1.1    cherry 		 * This is a situation unique to boot time.
    1157   1.1    cherry 		 * It shouldn't happen at any point other than from the
    1158   1.1    cherry 		 * first uvm_page.c:uvm_page_init() call.  Since that
    1159   1.1    cherry 		 * call iterates over all segments in order, the previous
    1160   1.1    cherry 		 * segment's extent already exists, so borrow it below.
    1161   1.1    cherry 		 */
   1162   1.1    cherry 		KASSERT(uvm.page_init_done != true);
   1163   1.1    cherry 
   1164   1.9  christos 		uvm_physseg_t upmp = uvm_physseg_get_prev(upm);
   1165   1.9  christos 		KASSERT(upmp != UVM_PHYSSEG_TYPE_INVALID);
   1166   1.9  christos 
   1167   1.9  christos 		seg->ext = HANDLE_TO_PHYSSEG_NODE(upmp)->ext;
   1168   1.1    cherry 
   1169   1.1    cherry 		KASSERT(seg->ext != NULL);
   1170   1.1    cherry 	}
   1171   1.1    cherry 
    1172   1.1    cherry 	/* Allocate enough vm_page structures for this request from the slab */
    1173   1.1    cherry 	err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0,
                         	    EX_BOUNDZERO, (u_long *)&pgs);
   1174   1.1    cherry 
   1175   1.1    cherry 	if (err != 0) {
   1176   1.1    cherry #ifdef DEBUG
    1177   1.1    cherry 		printf("%s: extent_alloc failed with error: %d\n",
   1178   1.1    cherry 		    __func__, err);
   1179   1.1    cherry #endif
   1180   1.1    cherry 	}
   1181   1.1    cherry 
   1182   1.1    cherry 	return pgs;
   1183   1.1    cherry }
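                         
                         /*
                          * Sketch of a caller (names hypothetical): the return value must
                          * be checked, since on extent exhaustion this returns NULL rather
                          * than panicking.
                          *
                          *	struct vm_page *pgs;
                          *
                          *	pgs = uvm_physseg_seg_alloc_from_slab(upm, pages);
                          *	if (pgs == NULL)
                          *		return false;
                          *	uvm_physseg_init_seg(upm, pgs);
                          */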
   1184   1.1    cherry 
   1185   1.1    cherry /*
   1186   1.1    cherry  * uvm_page_physload: load physical memory into VM system
   1187   1.1    cherry  *
   1188   1.1    cherry  * => all args are PFs
   1189   1.1    cherry  * => all pages in start/end get vm_page structures
   1190   1.1    cherry  * => areas marked by avail_start/avail_end get added to the free page pool
   1191   1.1    cherry  * => we are limited to VM_PHYSSEG_MAX physical memory segments
   1192   1.1    cherry  */
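                         
                         /*
                          * Sketch of a typical boot-time call from machine-dependent code.
                          * All arguments are page frame numbers, hence atop(); seg_start,
                          * seg_end, avail_start and avail_end are hypothetical bootstrap
                          * values, not symbols defined in this file.
                          *
                          *	uvm_page_physload(atop(seg_start), atop(seg_end),
                          *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
                          */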
   1193   1.1    cherry 
   1194   1.1    cherry uvm_physseg_t
   1195   1.1    cherry uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
   1196   1.1    cherry     paddr_t avail_end, int free_list)
   1197   1.1    cherry {
   1198   1.1    cherry 	struct uvm_physseg *ps;
   1199   1.1    cherry 	uvm_physseg_t upm;
   1200   1.1    cherry 
   1201   1.1    cherry 	if (__predict_true(uvm.page_init_done == true))
    1202   1.1    cherry 		panic("%s: load attempted after uvm_page_init()", __func__);
   1203   1.1    cherry 	if (uvmexp.pagesize == 0)
   1204   1.1    cherry 		panic("uvm_page_physload: page size not set!");
   1205   1.1    cherry 	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
   1206   1.1    cherry 		panic("uvm_page_physload: bad free list %d", free_list);
   1207   1.1    cherry 	if (start >= end)
   1208   1.1    cherry 		panic("uvm_page_physload: start >= end");
   1209   1.1    cherry 
   1210   1.1    cherry 	if (uvm_physseg_plug(start, end - start, &upm) == false) {
   1211   1.1    cherry 		panic("uvm_physseg_plug() failed at boot.");
   1212   1.1    cherry 		/* NOTREACHED */
   1213   1.1    cherry 		return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
   1214   1.1    cherry 	}
   1215   1.1    cherry 
   1216   1.1    cherry 	ps = HANDLE_TO_PHYSSEG_NODE(upm);
   1217   1.1    cherry 
   1218   1.1    cherry 	/* Legacy */
   1219   1.1    cherry 	ps->avail_start = avail_start;
   1220   1.1    cherry 	ps->avail_end = avail_end;
   1221   1.1    cherry 
   1222   1.1    cherry 	ps->free_list = free_list; /* XXX: */
   1223   1.1    cherry 
   1225   1.1    cherry 	return upm;
   1226   1.1    cherry }
   1227   1.1    cherry 
   1228   1.1    cherry bool
   1229   1.1    cherry uvm_physseg_unplug(paddr_t pfn, size_t pages)
   1230   1.1    cherry {
   1231   1.1    cherry 	uvm_physseg_t upm;
   1232   1.8  riastrad 	paddr_t off = 0, start __diagused, end;
   1233   1.1    cherry 	struct uvm_physseg *seg;
   1234   1.1    cherry 
   1235   1.1    cherry 	upm = uvm_physseg_find(pfn, &off);
   1236   1.1    cherry 
   1237   1.2    cherry 	if (!uvm_physseg_valid_p(upm)) {
   1238   1.1    cherry 		printf("%s: Tried to unplug from unknown offset\n", __func__);
   1239   1.1    cherry 		return false;
   1240   1.1    cherry 	}
   1241   1.1    cherry 
   1242   1.1    cherry 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
   1243   1.1    cherry 
   1244   1.1    cherry 	start = uvm_physseg_get_start(upm);
   1245   1.1    cherry 	end = uvm_physseg_get_end(upm);
   1246   1.1    cherry 
   1247   1.1    cherry 	if (end < (pfn + pages)) {
    1248   1.1    cherry 		printf("%s: Tried to unplug oversized span\n", __func__);
   1249   1.1    cherry 		return false;
   1250   1.1    cherry 	}
   1251   1.1    cherry 
   1252   1.1    cherry 	KASSERT(pfn == start + off); /* sanity */
   1253   1.1    cherry 
   1254   1.1    cherry 	if (__predict_true(uvm.page_init_done == true)) {
   1255   1.1    cherry 		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
    1256   1.1    cherry 		if (extent_free(seg->ext, (u_long)(seg->pgs + off),
                         		    sizeof(struct vm_page) * pages, EX_MALLOCOK | EX_NOWAIT) != 0)
   1257   1.1    cherry 			return false;
   1258   1.1    cherry 	}
   1259   1.1    cherry 
   1260   1.1    cherry 	if (off == 0 && (pfn + pages) == end) {
   1261   1.1    cherry #if defined(UVM_HOTPLUG) /* rbtree implementation */
   1262   1.1    cherry 		int segcount = 0;
   1263   1.1    cherry 		struct uvm_physseg *current_ps;
   1264   1.1    cherry 		/* Complete segment */
   1265   1.1    cherry 		if (uvm_physseg_graph.nentries == 1)
   1266   1.1    cherry 			panic("%s: out of memory!", __func__);
   1267   1.1    cherry 
   1268   1.1    cherry 		if (__predict_true(uvm.page_init_done == true)) {
   1269   1.1    cherry 			RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
   1270   1.1    cherry 				if (seg->ext == current_ps->ext)
   1271   1.1    cherry 					segcount++;
   1272   1.1    cherry 			}
   1273   1.1    cherry 			KASSERT(segcount > 0);
   1274   1.1    cherry 
   1275   1.1    cherry 			if (segcount == 1) {
   1276   1.1    cherry 				extent_destroy(seg->ext);
   1277   1.1    cherry 			}
   1278   1.1    cherry 
   1279   1.1    cherry 			/*
   1280   1.1    cherry 			 * We assume that the unplug will succeed from
   1281   1.1    cherry 			 *  this point onwards
   1282   1.1    cherry 			 */
   1283   1.1    cherry 			uvmexp.npages -= (int) pages;
   1284   1.1    cherry 		}
   1285   1.1    cherry 
   1286   1.1    cherry 		rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
   1287   1.1    cherry 		memset(seg, 0, sizeof(struct uvm_physseg));
   1288   1.1    cherry 		uvm_physseg_free(seg, sizeof(struct uvm_physseg));
   1289   1.1    cherry 		uvm_physseg_graph.nentries--;
   1290   1.1    cherry #else /* UVM_HOTPLUG */
   1291   1.1    cherry 		int x;
   1292   1.1    cherry 		if (vm_nphysmem == 1)
    1293   1.1    cherry 			panic("%s: out of memory!", __func__);
   1294   1.1    cherry 		vm_nphysmem--;
   1295   1.1    cherry 		for (x = upm ; x < vm_nphysmem ; x++)
   1296   1.1    cherry 			/* structure copy */
   1297   1.1    cherry 			VM_PHYSMEM_PTR_SWAP(x, x + 1);
   1298   1.1    cherry #endif /* UVM_HOTPLUG */
   1299   1.1    cherry 		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
   1300   1.1    cherry 		return true;
   1301   1.1    cherry 	}
   1302   1.1    cherry 
   1303   1.1    cherry 	if (off > 0 &&
   1304   1.1    cherry 	    (pfn + pages) < end) {
   1305   1.1    cherry #if defined(UVM_HOTPLUG) /* rbtree implementation */
   1306   1.1    cherry 		/* middle chunk - need a new segment */
   1307   1.1    cherry 		struct uvm_physseg *ps, *current_ps;
    1308   1.1    cherry 		ps = uvm_physseg_alloc(sizeof(struct uvm_physseg));
    1309   1.1    cherry 		if (ps == NULL) {
    1310   1.1    cherry 			printf("%s: Unable to allocate new fragment vm_physseg\n",
    1311   1.1    cherry 			    __func__);
   1312   1.1    cherry 			return false;
   1313   1.1    cherry 		}
   1314   1.1    cherry 
   1315   1.1    cherry 		/* Remove middle chunk */
   1316   1.1    cherry 		if (__predict_true(uvm.page_init_done == true)) {
   1317   1.1    cherry 			KASSERT(seg->ext != NULL);
   1318   1.1    cherry 			ps->ext = seg->ext;
   1319   1.1    cherry 
   1320   1.1    cherry 			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
   1321   1.1    cherry 			/*
   1322   1.1    cherry 			 * We assume that the unplug will succeed from
   1323   1.1    cherry 			 *  this point onwards
   1324   1.1    cherry 			 */
   1325   1.1    cherry 			uvmexp.npages -= (int) pages;
   1326   1.1    cherry 		}
   1327   1.1    cherry 
   1328   1.1    cherry 		ps->start = pfn + pages;
   1329   1.1    cherry 		ps->avail_start = ps->start; /* XXX: Legacy */
   1330   1.1    cherry 
   1331   1.1    cherry 		ps->end = seg->end;
   1332   1.1    cherry 		ps->avail_end = ps->end; /* XXX: Legacy */
   1333   1.1    cherry 
   1334   1.1    cherry 		seg->end = pfn;
   1335   1.1    cherry 		seg->avail_end = seg->end; /* XXX: Legacy */
   1336   1.1    cherry 
   1338   1.1    cherry 		/*
   1339   1.1    cherry 		 * The new pgs array points to the beginning of the
   1340   1.1    cherry 		 * tail fragment.
   1341   1.1    cherry 		 */
   1342   1.1    cherry 		if (__predict_true(uvm.page_init_done == true))
   1343   1.1    cherry 			ps->pgs = seg->pgs + off + pages;
   1344   1.1    cherry 
   1345   1.1    cherry 		current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
   1346   1.1    cherry 		if (current_ps != ps) {
    1347   1.1    cherry 			panic("%s: Duplicate address range detected!", __func__);
   1348   1.1    cherry 		}
   1349   1.1    cherry 		uvm_physseg_graph.nentries++;
   1350   1.1    cherry #else /* UVM_HOTPLUG */
   1351   1.1    cherry 		panic("%s: can't unplug() from the middle of a segment without"
   1352   1.7       uwe 		    " UVM_HOTPLUG\n",  __func__);
   1353   1.1    cherry 		/* NOTREACHED */
   1354   1.1    cherry #endif /* UVM_HOTPLUG */
   1355   1.1    cherry 		return true;
   1356   1.1    cherry 	}
   1357   1.1    cherry 
   1358   1.1    cherry 	if (off == 0 && (pfn + pages) < end) {
   1359   1.1    cherry 		/* Remove front chunk */
   1360   1.1    cherry 		if (__predict_true(uvm.page_init_done == true)) {
   1361   1.1    cherry 			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
   1362   1.1    cherry 			/*
   1363   1.1    cherry 			 * We assume that the unplug will succeed from
   1364   1.1    cherry 			 *  this point onwards
   1365   1.1    cherry 			 */
   1366   1.1    cherry 			uvmexp.npages -= (int) pages;
   1367   1.1    cherry 		}
   1368   1.1    cherry 
   1369   1.1    cherry 		/* Truncate */
   1370   1.1    cherry 		seg->start = pfn + pages;
   1371   1.1    cherry 		seg->avail_start = seg->start; /* XXX: Legacy */
   1372   1.1    cherry 
   1373   1.1    cherry 		/*
   1374   1.1    cherry 		 * Move the pgs array start to the beginning of the
   1375   1.1    cherry 		 * tail end.
   1376   1.1    cherry 		 */
   1377   1.1    cherry 		if (__predict_true(uvm.page_init_done == true))
   1378   1.1    cherry 			seg->pgs += pages;
   1379   1.1    cherry 
   1380   1.1    cherry 		return true;
   1381   1.1    cherry 	}
   1382   1.1    cherry 
    1383   1.1    cherry 	if (off > 0 && (pfn + pages) == end) {
    1384   1.1    cherry 		/* back chunk */
    1385   1.1    cherry 
    1386   1.1    cherry 		/* Truncate! */
    1387   1.1    cherry 		seg->end = pfn;
    1388   1.1    cherry 		seg->avail_end = seg->end; /* XXX: Legacy */
    1389   1.1    cherry 
                         		if (__predict_true(uvm.page_init_done == true)) {
                         			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
                         			/*
                         			 * We assume that the unplug will succeed from
                         			 *  this point onwards
                         			 */
    1391   1.1    cherry 			uvmexp.npages -= (int) pages;
                         		}
    1392   1.1    cherry 
    1393   1.1    cherry 		return true;
    1394   1.1    cherry 	}
   1395   1.1    cherry 
    1396   1.1    cherry 	printf("%s: Tried to unplug unknown range\n", __func__);
   1397   1.1    cherry 
   1398   1.1    cherry 	return false;
   1399   1.1    cherry }
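                         
                         /*
                          * The four cases handled above are: whole-segment removal, a middle
                          * chunk (splits the segment; UVM_HOTPLUG only), front truncation and
                          * back truncation.  Sketch of a hypothetical hotplug driver removing
                          * "size" bytes at physical address "base":
                          *
                          *	if (!uvm_physseg_unplug(atop(base), atop(size)))
                          *		printf("%s: unplug at %#" PRIxPADDR " failed\n",
                          *		    __func__, base);
                          */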
   1400