/* $NetBSD: uvm_physseg.c,v 1.20 2024/01/13 09:44:42 tnn Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Consolidated API from uvm_page.c and others.
 * Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
 * rbtree(3) backing implementation by:
 * Santhosh N. Raju <santhosh.raju@gmail.com>
 */

#ifdef _KERNEL_OPT
#include "opt_uvm.h"
#endif

#include <sys/param.h>
#include <sys/types.h>
#include <sys/extent.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_physseg.h>

/*
 * uvm_physseg: describes one segment of physical memory
 */
struct uvm_physseg {
	/* used during RB tree lookup for PHYS_TO_VM_PAGE(). */
#if defined(UVM_HOTPLUG)
	struct  rb_node rb_node;	/* tree information */
#endif
	paddr_t	start;			/* PF# of first page in segment */
	paddr_t	end;			/* (PF# of last page in segment) + 1 */
	struct	vm_page *pgs;		/* vm_page structures (from start) */

	/* less performance sensitive fields. */
	paddr_t	avail_start;		/* PF# of first free page in segment */
	paddr_t	avail_end;		/* (PF# of last free page in segment) +1  */
	struct  extent *ext;		/* extent(9) structure to manage pgs[] */
	int	free_list;		/* which free list they belong on */
	u_long	start_hint;		/* start looking for free pages here */
#ifdef __HAVE_PMAP_PHYSSEG
	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};

/*
 * These functions are reserved for uvm(9) internal use and are not
 * exported in the header file uvm_physseg.h
 *
 * Thus they are redefined here.
 */
void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);

/* returns a pgs array */
struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
#if defined(UVM_HOTPLUG) /* rbtree implementation */

#define		HANDLE_TO_PHYSSEG_NODE(h)	((struct uvm_physseg *)(h))
#define		PHYSSEG_NODE_TO_HANDLE(u)	((uvm_physseg_t)(u))

struct uvm_physseg_graph {
	struct rb_tree rb_tree;		/* Tree for entries */
	int            nentries;	/* Number of entries */
} __aligned(COHERENCY_UNIT);

static struct uvm_physseg_graph uvm_physseg_graph __read_mostly;

/*
 * Note on kmem(9) allocator usage:
 * We take the conservative approach that plug/unplug are allowed to
 * fail in high memory stress situations.
 *
 * We want to avoid re-entrant situations in which one plug/unplug
 * operation is waiting on a previous one to complete, since this
 * makes the design more complicated than necessary.
 *
 * We may review this and change its behaviour, once the use cases
 * become more obvious.
 */

/*
 * Special alloc()/free() functions for boot time support:
 * We assume that alloc() at boot time is only for new 'vm_physseg's
 * This allows us to use a static array for memory allocation at boot
 * time. Thus we avoid using kmem(9) which is not ready at this point
 * in boot.
 *
 * After kmem(9) is ready, we use it. We currently discard any free()s
 * to this static array, since the size is small enough to be a
 * trivial waste on all architectures we run on.
 */

static size_t nseg = 0;
static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];

static void *
uvm_physseg_alloc(size_t sz)
{
	/*
	 * During boot time, we only support allocating vm_physseg
	 * entries from the static array.
	 * We need to assert for this.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to alloc size other than multiple"
			    " of struct uvm_physseg at boot\n", __func__);

		size_t n = sz / sizeof(struct uvm_physseg);
		nseg += n;

		KASSERT(nseg > 0);
		KASSERT(nseg <= VM_PHYSSEG_MAX);

		return &uvm_physseg[nseg - n];
	}

	return kmem_zalloc(sz, KM_NOSLEEP);
}

static void
uvm_physseg_free(void *p, size_t sz)
{
	/*
	 * This is a bit tricky. We do allow simulation of free()
	 * during boot (e.g. when MD code is "steal"ing memory, and
	 * the segment has been exhausted and thus needs to be
	 * free()-ed).
	 * free() also complicates things because we leak the
	 * free(). Therefore calling code can't assume that free()-ed
	 * memory is available for alloc() again, at boot time.
	 *
	 * Thus we can't explicitly disallow free()s during
	 * boot time. However, the same restriction for alloc()
	 * applies to free(). We only allow uvm_physseg related free()s
	 * via this function during boot time.
	 */

	if (__predict_false(uvm.page_init_done == false)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free size other than struct uvm_physseg"
			    " at boot\n", __func__);

	}

	/*
	 * Could have been in a single if(){} block - split for
	 * clarity
	 */

	if ((struct uvm_physseg *)p >= uvm_physseg &&
	    (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
		if (sz % sizeof(struct uvm_physseg))
			panic("%s: tried to free() other than struct uvm_physseg"
			    " from static array\n", __func__);

		if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
			panic("%s: tried to free() the entire static array!", __func__);
		return; /* Nothing to free */
	}

	kmem_free(p, sz);
}
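
/*
 * Illustrative sketch (editorial, not compiled - a hypothetical MD
 * caller): the intended boot time discipline for the pair above.
 * Before uvm.page_init_done, alloc() hands out slots from the static
 * uvm_physseg[] array and free() of a static slot simply leaks it:
 *
 *	struct uvm_physseg *ps;
 *
 *	ps = uvm_physseg_alloc(sizeof(struct uvm_physseg));
 *	...
 *	uvm_physseg_free(ps, sizeof(struct uvm_physseg));
 *	ps = uvm_physseg_alloc(sizeof(struct uvm_physseg));
 *
 * The second alloc() will not reuse the free()d slot at boot time;
 * callers must not rely on free()d memory being recycled until
 * kmem(9) is ready.
 */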

/* XXX: Multi page size */
bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int preload;
	size_t slabpages;
	struct uvm_physseg *ps, *current_ps = NULL;
	struct vm_page *slab = NULL, *pgs = NULL;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	if (ps != NULL) /* XXX; do we allow "update" plugs ? */
		return false;
#endif

	/*
	 * do we have room?
	 */

	ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
	if (ps == NULL) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
		    VM_PHYSSEG_MAX, pfn, pfn + pages);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return false;
	}

	/* span init */
	ps->start = pfn;
	ps->end = pfn + pages;

	/*
	 * XXX: Ugly hack - uvmexp.npages accounts only for the pages
	 * in the available range set below; this should be considered
	 * legacy and removed.
	 */

	ps->avail_start = ps->start;
	ps->avail_end = ps->end;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	preload = 1; /* We are going to assume it is a preload */

	RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
		/* If there are non NULL pages then we are not in a preload */
		if (current_ps->pgs != NULL) {
			preload = 0;
			/* Try to scavenge from earlier unplug()s. */
			pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);

			if (pgs != NULL) {
				break;
			}
		}
	}


	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		if (pgs == NULL) { /* Brand new */
			/* Iteratively try alloc down from uvmexp.npages */
			for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
				slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
				if (slab != NULL)
					break;
			}

			if (slab == NULL) {
				uvm_physseg_free(ps, sizeof(struct uvm_physseg));
				return false;
			}

			uvm_physseg_seg_chomp_slab(ps, slab, (size_t) slabpages);
			/* We allocate enough for this plug */
			pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);

			if (pgs == NULL) {
				printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
				return false;
			}
		} else {
			/* Reuse scavenged extent */
			ps->ext = current_ps->ext;
		}

		physmem += pages;
		uvmpdpol_reinit();
	} else { /* Boot time - see uvm_page.c:uvm_page_init() */
		pgs = NULL;
		ps->pgs = pgs;
	}

	/*
	 * now insert us in the proper place in uvm_physseg_graph.rb_tree
	 */

	current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
	if (current_ps != ps) {
		panic("uvm_page_physload: Duplicate address range detected!");
	}
	uvm_physseg_graph.nentries++;

	/*
	 * uvm_pagefree() requires the PHYS_TO_VM_PAGE(pgs[i]) on the
	 * newly allocated pgs[] to return the correct value. This is
	 * a bit of a chicken and egg problem, since it needs
	 * uvm_physseg_find() to succeed. For this, the node needs to
	 * be inserted *before* uvm_physseg_init_seg() happens.
	 *
	 * During boot, this happens anyway, since
	 * uvm_physseg_init_seg() is called later on and separately
	 * from uvm_page.c:uvm_page_init().
	 * In the case of hotplug we need to ensure this.
	 */
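	/*
	 * A sketch of that invariant (editorial, not extra checks
	 * performed here): once the node is inserted, for every page
	 * in the pgs[] handed to uvm_physseg_init_seg() below,
	 *
	 *	KASSERT(PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(pg)) == pg);
	 *
	 * must already hold, since uvm_pagefree() relies on it.
	 */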

	if (__predict_true(!preload))
		uvm_physseg_init_seg(ps, pgs);

	if (psp != NULL)
		*psp = ps;

	return true;
}
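
/*
 * Usage sketch (a hypothetical caller, not part of this file): an MD
 * hotplug driver that has discovered "npages" pages of new RAM at
 * physical page frame "pfn" might do:
 *
 *	uvm_physseg_t upm;
 *
 *	if (!uvm_physseg_plug(pfn, npages, &upm))
 *		return ENOMEM;	// no segment slot, or pgs[] alloc failed
 *
 * On success the segment is inserted in the tree and, post boot, its
 * pages have been handed to the freelists via uvm_physseg_init_seg().
 */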

static int
uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
{
	const struct uvm_physseg *enode1 = nnode1;
	const struct uvm_physseg *enode2 = nnode2;

	KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
	KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);

	if (enode1->start < enode2->start)
		return -1;
	if (enode1->start >= enode2->end)
		return 1;
	return 0;
}

static int
uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
{
	const struct uvm_physseg *enode = nnode;
	const paddr_t pa = *(const paddr_t *) pkey;

	if (enode->start <= pa && pa < enode->end)
		return 0;
	if (enode->start < pa)
		return -1;
	if (enode->end > pa)
		return 1;

	return 0;
}

static const rb_tree_ops_t uvm_physseg_tree_ops = {
	.rbto_compare_nodes = uvm_physseg_compare_nodes,
	.rbto_compare_key = uvm_physseg_compare_key,
	.rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
	.rbto_context = NULL
};
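
/*
 * Example of the key comparator semantics above (editorial sketch,
 * assuming a segment spanning PF# [0x100, 0x200)): a lookup of
 * pa = 0x180 returns 0 (a match); pa = 0x80 returns 1 (the node sorts
 * after the key); pa = 0x280 returns -1 (the node sorts before the
 * key).  rb_tree_find_node() in uvm_physseg_find() uses the sign to
 * steer its descent left or right.
 */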

/*
 * uvm_physseg_init: init the physmem
 *
 * => physmem unit should not be in use at this point
 */

void
uvm_physseg_init(void)
{
	rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
	uvm_physseg_graph.nentries = 0;
}

uvm_physseg_t
uvm_physseg_get_next(uvm_physseg_t upm)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_RIGHT);
}

uvm_physseg_t
uvm_physseg_get_prev(uvm_physseg_t upm)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(upm) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
	    RB_DIR_LEFT);
}

uvm_physseg_t
uvm_physseg_get_last(void)
{
	return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
}

uvm_physseg_t
uvm_physseg_get_first(void)
{
	return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	struct uvm_physseg *ps =
	    (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));

	return ps->end - 1;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (seg->free_list != freelist) {
		return false;
	}

	/*
	 * During cold boot, what we're about to unplug hasn't been
	 * put on the uvm freelist, nor has uvmexp.npages been
	 * updated. (This happens in uvm_page.c:uvm_page_init())
	 *
	 * For hotplug, we assume here that the pages being unloaded
	 * here are completely out of sight of uvm (i.e. not on any
	 * uvm lists), and that uvmexp.npages has been suitably
	 * decremented before we're called.
	 *
	 * XXX: will avail_end == start if avail_start < avail_end?
	 */

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		return uvm_physseg_unplug(seg->avail_start, 1);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		return uvm_physseg_unplug(seg->avail_end - 1, 1);
	}

	return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
{
	struct uvm_physseg *seg;

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: unload attempted after uvm_page_init()\n", __func__);
	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);

	/* Always unplug from front */
	return uvm_physseg_unplug(seg->avail_start, 1);
}


/*
 * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
 */
uvm_physseg_t
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{
	struct uvm_physseg * ps = NULL;

	ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);

	if (ps != NULL && offp != NULL)
		*offp = pframe - ps->start;

	return ps;
}

#else  /* UVM_HOTPLUG */

/*
 * physical memory config is stored in vm_physmem.
 */

#define	VM_PHYSMEM_PTR(i)	(&vm_physmem[i])
#if VM_PHYSSEG_MAX == 1
#define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
#else
#define VM_PHYSMEM_PTR_SWAP(i, j)					      \
	do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
#endif

#define		HANDLE_TO_PHYSSEG_NODE(h)	(VM_PHYSMEM_PTR((int)h))
#define		PHYSSEG_NODE_TO_HANDLE(u)	((int)((vsize_t) (u - vm_physmem) / sizeof(struct uvm_physseg)))

/* XXXCDC: uvm.physmem */
static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX] __read_mostly;
/* XXXCDC: uvm.nphysseg */
static int vm_nphysseg __read_mostly = 0;
#define	vm_nphysmem	vm_nphysseg

void
uvm_physseg_init(void)
{
	/* XXX: Provisioning for rb_tree related init(s) */
	return;
}

int
uvm_physseg_get_next(uvm_physseg_t lcv)
{
	/* next of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv + 1);
}

int
uvm_physseg_get_prev(uvm_physseg_t lcv)
{
	/* prev of invalid is invalid, not fatal */
	if (uvm_physseg_valid_p(lcv) == false)
		return UVM_PHYSSEG_TYPE_INVALID;

	return (lcv - 1);
}

int
uvm_physseg_get_last(void)
{
	return (vm_nphysseg - 1);
}

int
uvm_physseg_get_first(void)
{
	return 0;
}

paddr_t
uvm_physseg_get_highest_frame(void)
{
	int lcv;
	paddr_t last = 0;
	struct uvm_physseg *ps;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps = VM_PHYSMEM_PTR(lcv);
		if (last < ps->end)
			last = ps->end;
	}

	return last;
}


static struct vm_page *
uvm_post_preload_check(void)
{
	int preload, lcv;

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
		if (VM_PHYSMEM_PTR(lcv)->pgs)
			break;
	}
	preload = (lcv == vm_nphysmem);

	/*
	 * if VM is already running, attempt to kmem_alloc vm_page structures
	 */

	if (!preload) {
		panic("Tried to add RAM after uvm_page_init");
	}

	return NULL;
}

/*
 * uvm_page_physunload: unload physical memory and return it to
 * caller.
 */
bool
uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	if (seg->free_list != freelist) {
		return false;
	}

	/* try from front */
	if (seg->avail_start == seg->start &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_start);
		seg->avail_start++;
		seg->start++;
		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	/* try from rear */
	if (seg->avail_end == seg->end &&
	    seg->avail_start < seg->avail_end) {
		*paddrp = ctob(seg->avail_end - 1);
		seg->avail_end--;
		seg->end--;
		/* nothing left?   nuke it */
		if (seg->avail_end == seg->start) {
			if (vm_nphysmem == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysmem--;
			for (x = psi ; x < vm_nphysmem ; x++)
				/* structure copy */
				VM_PHYSMEM_PTR_SWAP(x, x + 1);
		}
		return (true);
	}

	return false;
}

bool
uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
{
	int x;
	struct uvm_physseg *seg;

	uvm_post_preload_check();

	seg = VM_PHYSMEM_PTR(psi);

	/* any room in this bank? */
	if (seg->avail_start >= seg->avail_end) {
		return false; /* nope */
	}

	*paddrp = ctob(seg->avail_start);
	seg->avail_start++;
	/* truncate! */
	seg->start = seg->avail_start;

	/* nothing left?   nuke it */
	if (seg->avail_start == seg->end) {
		if (vm_nphysmem == 1)
			panic("uvm_page_physget: out of memory!");
		vm_nphysmem--;
		for (x = psi ; x < vm_nphysmem ; x++)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x + 1);
	}
	return (true);
}

bool
uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
{
	int lcv;
	struct vm_page *pgs;
	struct uvm_physseg *ps;

#ifdef DEBUG
	paddr_t off;
	uvm_physseg_t upm;
	upm = uvm_physseg_find(pfn, &off);

	if (uvm_physseg_valid_p(upm)) /* XXX; do we allow "update" plugs ? */
		return false;
#endif

	paddr_t start = pfn;
	paddr_t end = pfn + pages;
	paddr_t avail_start = start;
	paddr_t avail_end = end;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	/*
	 * do we have room?
	 */

	if (vm_nphysmem == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		if (psp != NULL)
			*psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
		return false;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
	 * called yet, so kmem is not available).
	 */
	pgs = uvm_post_preload_check();

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = VM_PHYSMEM_PTR(vm_nphysmem);
	lcv = vm_nphysmem;
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if (start < VM_PHYSMEM_PTR(lcv)->start)
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
			if ((end - start) >
			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
				break;
		ps = VM_PHYSMEM_PTR(lcv);
		/* move back other entries, if necessary ... */
		for (x = vm_nphysmem ; x > lcv ; x--)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x - 1);
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->pgs = pgs;

	vm_nphysmem++;

	if (psp != NULL)
		*psp = lcv;

	return true;
}

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX == 1
static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
#else
static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
#endif

/*
 * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
 */
inline int
uvm_physseg_find(paddr_t pframe, psize_t *offp)
{

#if VM_PHYSSEG_MAX == 1
	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
#else
	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
#endif
}

#if VM_PHYSSEG_MAX == 1
static inline int
vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{

	/* 'contig' case */
	if (pframe >= segs[0].start && pframe < segs[0].end) {
		if (offp)
			*offp = pframe - segs[0].start;
		return(0);
	}
	return(-1);
}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

static inline int
vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* binary search for it */
	int	start, len, guess;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */
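	/*
	 * Worked example of the "adjust" (editorial): with len == 5
	 * and the target greater than "try", start moves to
	 * (guess + 1) and the 2 entries above it must be kept.
	 * round(5/2) - 1 == 2, and len-- followed by the loop's
	 * len = len / 2 yields (5 - 1) / 2 == 2, as required.
	 */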

	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
		guess = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= segs[guess].start) {
			/* was try correct? */
			if (pframe < segs[guess].end) {
				if (offp)
					*offp = pframe - segs[guess].start;
				return guess;            /* got it */
			}
			start = guess + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);
}

#else

static inline int
vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
{
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < nsegs; lcv++) {
		if (pframe >= segs[lcv].start &&
		    pframe < segs[lcv].end) {
			if (offp)
				*offp = pframe - segs[lcv].start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);
}
#endif
#endif /* UVM_HOTPLUG */

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.  used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).  used in some MD code as well.  it can
 * be prominent in flamegraphs, so optimise it and try to make it easy for
 * the compiler by including next to the inline lookup routines.
 */
struct vm_page *
uvm_phys_to_vm_page(paddr_t pa)
{
#if VM_PHYSSEG_STRAT != VM_PSTRAT_BSEARCH
	/* 'contig' and linear cases */
	KASSERT(vm_nphysseg > 0);
	struct uvm_physseg *ps = &vm_physmem[0];
	struct uvm_physseg *end = &vm_physmem[vm_nphysseg];
	paddr_t pframe = atop(pa);
	do {
		if (pframe >= ps->start && pframe < ps->end) {
			return &ps->pgs[pframe - ps->start];
		}
	} while (VM_PHYSSEG_MAX > 1 && __predict_false(++ps < end));
	return NULL;
#else
	/* binary search for it */
	paddr_t pf = atop(pa);
	paddr_t	off;
	uvm_physseg_t	upm;

	upm = uvm_physseg_find(pf, &off);
	if (upm != UVM_PHYSSEG_TYPE_INVALID)
		return uvm_physseg_get_pg(upm, off);
	return(NULL);
#endif
}
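
/*
 * Usage sketch (editorial, hypothetical caller): MD code holding a
 * physical byte address back from an I/O mapping can recover the
 * managing vm_page, or learn that the address is not managed RAM:
 *
 *	struct vm_page *pg = uvm_phys_to_vm_page(pa);
 *
 *	if (pg == NULL)
 *		...	// pa is not in any segment (e.g. device memory)
 *
 * Note pa is a byte address; the atop() above converts it to a page
 * frame number before the segment lookup.
 */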

bool
uvm_physseg_valid_p(uvm_physseg_t upm)
{
	struct uvm_physseg *ps;

	if (upm == UVM_PHYSSEG_TYPE_INVALID ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
	    upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
		return false;

	/*
	 * This is the delicate init dance - the checks below need to
	 * keep in step with it.
	 */
    998   1.1    cherry 	if (uvm.page_init_done != true)
    999   1.1    cherry 		return true;
   1000   1.1    cherry 
   1001   1.1    cherry 	ps = HANDLE_TO_PHYSSEG_NODE(upm);
   1002   1.1    cherry 
   1003   1.1    cherry 	/* Extra checks needed only post uvm_page_init() */
   1004   1.1    cherry 	if (ps->pgs == NULL)
   1005   1.1    cherry 		return false;
   1006   1.1    cherry 
   1007   1.1    cherry 	/* XXX: etc. */
   1008   1.1    cherry 
   1009   1.1    cherry 	return true;
   1010   1.1    cherry 
   1011   1.1    cherry }
   1012   1.1    cherry 
   1013   1.1    cherry /*
   1014   1.1    cherry  * Boot protocol dictates that these must be able to return partially
   1015   1.1    cherry  * initialised segments.
   1016   1.1    cherry  */
   1017   1.1    cherry paddr_t
   1018   1.1    cherry uvm_physseg_get_start(uvm_physseg_t upm)
   1019   1.1    cherry {
   1020   1.2    cherry 	if (uvm_physseg_valid_p(upm) == false)
   1021   1.1    cherry 		return (paddr_t) -1;
   1022   1.1    cherry 
   1023   1.1    cherry 	return HANDLE_TO_PHYSSEG_NODE(upm)->start;
   1024   1.1    cherry }
   1025   1.1    cherry 
   1026   1.1    cherry paddr_t
   1027   1.1    cherry uvm_physseg_get_end(uvm_physseg_t upm)
   1028   1.1    cherry {
   1029   1.2    cherry 	if (uvm_physseg_valid_p(upm) == false)
   1030   1.1    cherry 		return (paddr_t) -1;
   1031   1.1    cherry 
   1032   1.1    cherry 	return HANDLE_TO_PHYSSEG_NODE(upm)->end;
   1033   1.1    cherry }
   1034   1.1    cherry 
   1035   1.1    cherry paddr_t
   1036   1.1    cherry uvm_physseg_get_avail_start(uvm_physseg_t upm)
   1037   1.1    cherry {
   1038   1.2    cherry 	if (uvm_physseg_valid_p(upm) == false)
   1039   1.1    cherry 		return (paddr_t) -1;
   1040   1.1    cherry 
   1041   1.1    cherry 	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
   1042   1.1    cherry }
   1043   1.1    cherry 
   1044   1.6       rin #if defined(UVM_PHYSSEG_LEGACY)
   1045   1.4  christos void
   1046   1.4  christos uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
   1047   1.4  christos {
   1048   1.5    cherry 	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);
   1049   1.5    cherry 
   1050   1.5    cherry #if defined(DIAGNOSTIC)
   1051   1.5    cherry 	paddr_t avail_end;
   1052   1.5    cherry 	avail_end = uvm_physseg_get_avail_end(upm);
   1053   1.4  christos 	KASSERT(uvm_physseg_valid_p(upm));
   1054  1.18  riastrad 	KASSERT(avail_start < avail_end);
   1055  1.18  riastrad 	KASSERT(avail_start >= ps->start);
   1056   1.5    cherry #endif
   1057   1.5    cherry 
   1058   1.5    cherry 	ps->avail_start = avail_start;
   1059   1.4  christos }
   1060  1.12        ad 
   1061  1.12        ad void
   1062  1.12        ad uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
   1063   1.5    cherry {
   1064   1.5    cherry 	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);
   1065   1.5    cherry 
   1066   1.5    cherry #if defined(DIAGNOSTIC)
   1067   1.5    cherry 	paddr_t avail_start;
   1068   1.5    cherry 	avail_start = uvm_physseg_get_avail_start(upm);
   1069   1.5    cherry 	KASSERT(uvm_physseg_valid_p(upm));
   1070  1.18  riastrad 	KASSERT(avail_end > avail_start);
   1071  1.18  riastrad 	KASSERT(avail_end <= ps->end);
   1072   1.4  christos #endif
   1073   1.4  christos 
   1074   1.5    cherry 	ps->avail_end = avail_end;
   1075   1.5    cherry }
   1076   1.5    cherry 
   1077   1.6       rin #endif /* UVM_PHYSSEG_LEGACY */
   1078   1.5    cherry 
   1079   1.1    cherry paddr_t
   1080   1.1    cherry uvm_physseg_get_avail_end(uvm_physseg_t upm)
   1081   1.1    cherry {
   1082   1.2    cherry 	if (uvm_physseg_valid_p(upm) == false)
   1083   1.1    cherry 		return (paddr_t) -1;
   1084   1.1    cherry 
   1085   1.1    cherry 	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
   1086   1.1    cherry }
   1087   1.1    cherry 
   1088  1.19        ad inline struct vm_page *
   1089   1.1    cherry uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
   1090   1.1    cherry {
   1091   1.2    cherry 	KASSERT(uvm_physseg_valid_p(upm));
   1092   1.1    cherry 	return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
   1093   1.1    cherry }
   1094   1.1    cherry 
   1095   1.1    cherry #ifdef __HAVE_PMAP_PHYSSEG
   1096   1.1    cherry struct pmap_physseg *
   1097   1.1    cherry uvm_physseg_get_pmseg(uvm_physseg_t upm)
   1098   1.1    cherry {
   1099   1.2    cherry 	KASSERT(uvm_physseg_valid_p(upm));
   1100   1.1    cherry 	return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
   1101   1.1    cherry }
   1102   1.1    cherry #endif
   1103   1.1    cherry 
   1104   1.1    cherry int
   1105   1.1    cherry uvm_physseg_get_free_list(uvm_physseg_t upm)
   1106   1.1    cherry {
   1107   1.2    cherry 	KASSERT(uvm_physseg_valid_p(upm));
   1108   1.1    cherry 	return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
   1109   1.1    cherry }
   1110   1.1    cherry 
   1111  1.20       tnn u_long
   1112   1.1    cherry uvm_physseg_get_start_hint(uvm_physseg_t upm)
   1113   1.1    cherry {
   1114   1.2    cherry 	KASSERT(uvm_physseg_valid_p(upm));
   1115   1.1    cherry 	return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
   1116   1.1    cherry }
   1117   1.1    cherry 
   1118   1.1    cherry bool
   1119  1.20       tnn uvm_physseg_set_start_hint(uvm_physseg_t upm, u_long start_hint)
   1120   1.1    cherry {
   1121   1.2    cherry 	if (uvm_physseg_valid_p(upm) == false)
   1122   1.1    cherry 		return false;
   1123   1.1    cherry 
   1124   1.1    cherry 	HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
   1125   1.1    cherry 	return true;
   1126   1.1    cherry }

void
uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
{
	psize_t i;
	psize_t n;
	paddr_t paddr;
	struct uvm_physseg *seg;
	struct vm_page *pg;

	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID);
	KASSERT(pgs != NULL);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);
	KASSERT(seg != NULL);
	KASSERT(seg->pgs == NULL);

	n = seg->end - seg->start;
	seg->pgs = pgs;

	/* init and free vm_pages (we've already zeroed them) */
	paddr = ctob(seg->start);
	for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
		pg = &seg->pgs[i];
		pg->phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
		VM_MDPAGE_INIT(pg);
#endif
		if (atop(paddr) >= seg->avail_start &&
		    atop(paddr) < seg->avail_end) {
			uvmexp.npages++;
			/* add page to free pool */
			uvm_page_set_freelist(pg,
			    uvm_page_lookup_freelist(pg));
			/* Disable LOCKDEBUG: too many and too early. */
			mutex_init(&pg->interlock, MUTEX_NODEBUG, IPL_NONE);
			uvm_pagefree(pg);
		}
	}
}

void
uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
{
	struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);

	/* max number of pre-boot unplug()s allowed */
#define UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX

	static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];

	/*
	 * At boot time the kernel allocators are not up yet, so the
	 * boot time extent lives in static storage; once the VM
	 * system is running, the extent subsystem can allocate for
	 * itself.
	 */
	if (__predict_false(uvm.page_init_done == false)) {
		seg->ext = extent_create("Boot time slab", (u_long) pgs, (u_long) (pgs + n),
		    (void *)btslab_ex_storage, sizeof(btslab_ex_storage), 0);
	} else {
		seg->ext = extent_create("Hotplug slab", (u_long) pgs, (u_long) (pgs + n), NULL, 0, 0);
	}

	KASSERT(seg->ext != NULL);
}

struct vm_page *
uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
{
	int err;
	struct uvm_physseg *seg;
	struct vm_page *pgs = NULL;

	KASSERT(pages > 0);

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	if (__predict_false(seg->ext == NULL)) {
		/*
		 * This is a situation unique to boot time.
		 * It shouldn't happen at any point other than from
		 * the first uvm_page.c:uvm_page_init() call.  Since
		 * that call works through the segments in a loop, we
		 * can get away with borrowing the previous segment's
		 * extent.
		 */
		KASSERT(uvm.page_init_done != true);

		uvm_physseg_t upmp = uvm_physseg_get_prev(upm);
		KASSERT(upmp != UVM_PHYSSEG_TYPE_INVALID);

		seg->ext = HANDLE_TO_PHYSSEG_NODE(upmp)->ext;

		KASSERT(seg->ext != NULL);
	}

	/* We allocate enough for this segment */
	err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0, EX_BOUNDZERO, (u_long *)&pgs);

	if (err != 0) {
#ifdef DEBUG
		printf("%s: extent_alloc failed with error: %d\n",
		    __func__, err);
#endif
	}

	return pgs;
}
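
/*
 * Illustrative sketch (not part of the original source): the slab
 * routines above are meant to be used together, roughly as below.
 * "pgslab" and "npages" are hypothetical names for a backing array
 * of struct vm_page and the number of pages the segment spans:
 *
 *	uvm_physseg_seg_chomp_slab(upm, pgslab, npages);
 *	pgs = uvm_physseg_seg_alloc_from_slab(upm, npages);
 *	if (pgs != NULL)
 *		uvm_physseg_init_seg(upm, pgs);
 */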

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFNs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

uvm_physseg_t
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
	struct uvm_physseg *ps;
	uvm_physseg_t upm;

	if (__predict_true(uvm.page_init_done == true))
		panic("%s: load attempted after uvm_page_init()\n", __func__);
	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");
	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);
	if (start >= end)
		panic("uvm_page_physload: start[%" PRIxPADDR "] >= end[%"
		    PRIxPADDR "]", start, end);

	if (uvm_physseg_plug(start, end - start, &upm) == false) {
		panic("uvm_physseg_plug() failed at boot.");
		/* NOTREACHED */
		return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
	}

	ps = HANDLE_TO_PHYSSEG_NODE(upm);

	/* Legacy */
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;

	ps->free_list = free_list; /* XXX: */

	return upm;
}
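
/*
 * Illustrative sketch (not part of the original source):
 * machine-dependent boot code typically calls uvm_page_physload()
 * once per RAM range it discovers, before uvm_page_init() runs.
 * The addresses below are made up for the example:
 *
 *	paddr_t first = atop(0x00100000);	first usable PFN
 *	paddr_t last = atop(0x08000000);	one past the last PFN
 *
 *	uvm_page_physload(first, last, first, last, VM_FREELIST_DEFAULT);
 */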

bool
uvm_physseg_unplug(paddr_t pfn, size_t pages)
{
	uvm_physseg_t upm;
	paddr_t off = 0, start __diagused, end;
	struct uvm_physseg *seg;

	upm = uvm_physseg_find(pfn, &off);

	if (!uvm_physseg_valid_p(upm)) {
		printf("%s: Tried to unplug from unknown offset\n", __func__);
		return false;
	}

	seg = HANDLE_TO_PHYSSEG_NODE(upm);

	start = uvm_physseg_get_start(upm);
	end = uvm_physseg_get_end(upm);

	if (end < (pfn + pages)) {
		printf("%s: Tried to unplug oversized span\n", __func__);
		return false;
	}

	KASSERT(pfn == start + off); /* sanity */

	if (__predict_true(uvm.page_init_done == true)) {
		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
		if (extent_free(seg->ext, (u_long)(seg->pgs + off), sizeof(struct vm_page) * pages, EX_MALLOCOK | EX_NOWAIT) != 0)
			return false;
	}

	if (off == 0 && (pfn + pages) == end) {
#if defined(UVM_HOTPLUG) /* rbtree implementation */
		int segcount = 0;
		struct uvm_physseg *current_ps;
		/* Complete segment */
		if (uvm_physseg_graph.nentries == 1)
			panic("%s: out of memory!", __func__);

		if (__predict_true(uvm.page_init_done == true)) {
			RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
				if (seg->ext == current_ps->ext)
					segcount++;
			}
			KASSERT(segcount > 0);

			/* Destroy the slab extent iff we were its last user */
			if (segcount == 1) {
				extent_destroy(seg->ext);
			}

			/*
			 * We assume that the unplug will succeed from
			 * this point onwards
			 */
			uvmexp.npages -= (int) pages;
		}

		rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
		memset(seg, 0, sizeof(struct uvm_physseg));
		uvm_physseg_free(seg, sizeof(struct uvm_physseg));
		uvm_physseg_graph.nentries--;
#else /* UVM_HOTPLUG */
		int x;
		if (vm_nphysmem == 1)
			panic("%s: out of memory!", __func__);
		vm_nphysmem--;
		for (x = upm ; x < vm_nphysmem ; x++)
			/* structure copy */
			VM_PHYSMEM_PTR_SWAP(x, x + 1);
#endif /* UVM_HOTPLUG */
		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
		return true;
	}

	if (off > 0 && (pfn + pages) < end) {
#if defined(UVM_HOTPLUG) /* rbtree implementation */
		/* middle chunk - need a new segment */
		struct uvm_physseg *ps, *current_ps;
		ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
		if (ps == NULL) {
			printf("%s: Unable to allocate new fragment vm_physseg\n",
			    __func__);
			return false;
		}

		/* Remove middle chunk */
		if (__predict_true(uvm.page_init_done == true)) {
			KASSERT(seg->ext != NULL);
			ps->ext = seg->ext;

			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
			/*
			 * We assume that the unplug will succeed from
			 * this point onwards
			 */
			uvmexp.npages -= (int) pages;
		}

		ps->start = pfn + pages;
		ps->avail_start = ps->start; /* XXX: Legacy */

		ps->end = seg->end;
		ps->avail_end = ps->end; /* XXX: Legacy */

		seg->end = pfn;
		seg->avail_end = seg->end; /* XXX: Legacy */

		/*
		 * The new pgs array points to the beginning of the
		 * tail fragment.
		 */
		if (__predict_true(uvm.page_init_done == true))
			ps->pgs = seg->pgs + off + pages;

		current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
		if (current_ps != ps) {
			panic("%s: Duplicate address range detected!", __func__);
		}
		uvm_physseg_graph.nentries++;
#else /* UVM_HOTPLUG */
		panic("%s: can't unplug() from the middle of a segment without"
		    " UVM_HOTPLUG\n", __func__);
		/* NOTREACHED */
#endif /* UVM_HOTPLUG */
		return true;
	}

	if (off == 0 && (pfn + pages) < end) {
		/* Remove front chunk */
		if (__predict_true(uvm.page_init_done == true)) {
			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
			/*
			 * We assume that the unplug will succeed from
			 * this point onwards
			 */
			uvmexp.npages -= (int) pages;
		}

		/* Truncate */
		seg->start = pfn + pages;
		seg->avail_start = seg->start; /* XXX: Legacy */

		/*
		 * Move the pgs array start to the beginning of the
		 * remaining tail.
		 */
		if (__predict_true(uvm.page_init_done == true))
			seg->pgs += pages;

		return true;
	}

	if (off > 0 && (pfn + pages) == end) {
		/* Remove back chunk - truncate the segment */
		seg->end = pfn;
		seg->avail_end = seg->end; /* XXX: Legacy */

		/*
		 * As in the other cases, the pages are only accounted
		 * in uvmexp.npages once uvm_page_init() has run.
		 */
		if (__predict_true(uvm.page_init_done == true))
			uvmexp.npages -= (int) pages;

		return true;
	}

	printf("%s: Tried to unplug unknown range\n", __func__);

	return false;
}
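
/*
 * Illustrative sketch (not part of the original source): the four
 * geometries uvm_physseg_unplug() distinguishes, each shown against
 * a fresh hypothetical segment spanning PFNs [100, 200):
 *
 *	uvm_physseg_unplug(100, 100);	whole segment: removed outright
 *	uvm_physseg_unplug(100, 25);	front chunk: start becomes 125
 *	uvm_physseg_unplug(175, 25);	back chunk: end becomes 175
 *	uvm_physseg_unplug(125, 50);	middle chunk: splits the segment
 *					in two (UVM_HOTPLUG only)
 */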