/*	$NetBSD: uvm_page.c,v 1.24 1999/07/22 22:58:38 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include "opt_pmap_new.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGE                /* pull in uvm_page.h functions */
#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * local variables
 */

/*
 * these variables record the start and end of the kernel virtual
 * address range handed back by the pmap (via pmap_virtual_space or
 * pmap_steal_memory).  uvm_pageboot_alloc uses them internally, and
 * uvm_page_init passes them up to the upper layers of the VM.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once malloc() is ready.
 * we statically allocate the bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * local prototypes
 */

static void uvm_pageinsert __P((struct vm_page *));

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if (pg->flags & PG_TABLED)
		panic("uvm_pageinsert: already inserted");
#endif

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	pg->flags |= PG_TABLED;
	pg->uobject->uo_npages++;
}
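
/*
 * Editor's note -- an illustrative sketch (not part of the original
 * source) of the calling contract documented above: lock, set the
 * page's identity, bump the version counter, then insert.  "uobj" and
 * "off" are hypothetical locals, and the lock names (vmobjlock, the
 * uvm_lock_pageq() macro) are assumptions from the surrounding UVM
 * headers.  uvm_pagealloc_strat() below follows this same pattern.
 */
#if 0
	simple_lock(&uobj->vmobjlock);	/* per header: object locked */
	uvm_lock_pageq();		/* per header: page queues locked */
	pg->uobject = uobj;		/* set identity ... */
	pg->offset = off;
	pg->version++;			/* ... and bump the version counter */
	uvm_pageinsert(pg);		/* now hash/list the page */
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
#endif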

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

void __inline
uvm_pageremove(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if ((pg->flags & (PG_FAULTING)) != 0)
		panic("uvm_pageremove: page is faulting");
#endif

	if ((pg->flags & PG_TABLED) == 0)
		return;				/* XXX: log */

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	pg->flags &= ~PG_TABLED;
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->version++;
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	int freepages, pagecount;
	vm_page_t pagearray;
	int lcv, n, i;
	paddr_t paddr;

	/*
	 * step 1: init the page queues and page queue locks
	 */

	for (lcv = 0; lcv < VM_NFREELIST; lcv++)
		TAILQ_INIT(&uvm.page_free[lcv]);
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * step 2: init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will malloc() new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * step 3: allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("vm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.  for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.  we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */
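
	/*
	 * Editor's note -- a worked example of the calculation below,
	 * using assumed (hypothetical) numbers: PAGE_SIZE = 4096,
	 * sizeof(struct vm_page) = 96, and freepages = 16384 (64MB of
	 * managed RAM).  then:
	 *
	 *	pagecount = ((16384 + 1) << 12) / (4096 + 96)
	 *	          = 67112960 / 4192 = 16009
	 *
	 * i.e. roughly 375 of the 16384 physical pages end up holding
	 * the vm_page array itself (16009 * 96 bytes ~= 375 pages).
	 */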

	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * step 4: init the vm_page structures and put them in the correct
	 * place...
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {

		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %d page(s) in init\n",
			    n - pagecount);
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}
		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * step 5: pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * step 6: init pagedaemon lock
	 */

	simple_lock_init(&uvm.pagedaemon_lock);

	/*
	 * step 7: init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = 5;

	/*
	 * done!
	 */
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 * => XXXCDC: move global vars.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
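
/*
 * Editor's note -- example of the computation above: with
 * uvmexp.pagesize = 4096, uvm_setpagesize() yields pagemask = 0xfff
 * and pageshift = 12 (4096 & 0xfff == 0, and 1 << 12 == 4096).
 */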

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/* XXX: should be wired, but some pmaps don't like that ... */
#if defined(PMAP_NEW)
		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
#else
		pmap_enter(pmap_kernel(), vaddr, paddr,
		    VM_PROT_READ|VM_PROT_WRITE, FALSE,
		    VM_PROT_READ|VM_PROT_WRITE);
#endif
	}
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
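
/*
 * Example (editor's illustrative sketch, not from the original source):
 * how early boot code might steal memory for a fixed-size table before
 * uvm_page_init() runs.  "pv_table", "struct pv_entry" and "npvs" are
 * hypothetical names; the returned VA is mapped read/write and ready
 * for use.
 */
#if 0
	struct pv_entry *pv_table;

	pv_table = (struct pv_entry *)
	    uvm_pageboot_alloc(npvs * sizeof(struct pv_entry));
#endif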

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.  if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{
		if (vm_physmem[lcv].pgs)
			panic("vm_page_physget: called _after_ bootstrap");

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{
		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("vm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are page frame numbers (PFNs), not byte addresses
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	vaddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("vm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d\n", free_list);

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("vm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
		    VM_PHYSSEG_MAX, start, end);
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("vm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;

		npages = end - start;  /* # of pages */
		MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
		    M_VMPAGE, M_NOWAIT);
		if (pgs == NULL) {
			printf("vm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {
		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;
	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("vm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}
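
/*
 * Example (editor's illustrative sketch, not from the original source):
 * how MD startup code might register a single 64MB RAM segment whose
 * first "kernend" bytes are already occupied by the kernel.  all
 * arguments are page frame numbers (hence the atop() conversions);
 * "kernend" is a hypothetical name.
 */
#if 0
	uvm_page_physload(atop(0), atop(64 * 1024 * 1024),
	    atop(round_page(kernend)), atop(64 * 1024 * 1024),
	    VM_FREELIST_DEFAULT);
#endif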

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;
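
	/*
	 * Editor's note -- example: for freepages = 20000 the loop above
	 * picks the next power of two, bucketcount = 32768, so the mask
	 * installed below is 0x7fff (roughly one bucket per free page).
	 */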

	/*
	 * malloc new buckets
	 */

	MALLOC(newbuckets, struct pglist *, sizeof(struct pglist) * bucketcount,
	    M_VMPBUCKET, M_NOWAIT);
	if (newbuckets == NULL) {
		printf("uvm_page_rehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splimp();
	simple_lock(&uvm.hashlock);
	/* swap old for new ... */
	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			  &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			  pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if we malloc'd it previously
	 */

	if (oldbuckets != &uvm_bootbucket)
		FREE(oldbuckets, M_VMPBUCKET);

	/*
	 * done
	 */
	return;
}

#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump __P((void)); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
		    vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
		    vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 */

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
	struct uvm_object *obj;
	vaddr_t off;
	int flags;
	struct vm_anon *anon;
	int strat, free_list;
{
	int lcv, s;
	struct vm_page *pg;
	struct pglist *freeq;
	boolean_t use_reserve;

#ifdef DIAGNOSTIC
	/* sanity check */
	if (obj && anon)
		panic("uvm_pagealloc: obj and anon != NULL");
#endif

	s = uvm_lock_fpageq();		/* lock free page queue */

	/*
	 * check to see if we need to wake the pagedaemon to generate
	 * some free pages.
	 */

	if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object, or
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
		(obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	     !(use_reserve && curproc == uvm.pagedaemon_proc)))
		goto fail;

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			freeq = &uvm.page_free[lcv];
			if ((pg = freeq->tqh_first) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
#ifdef DIAGNOSTIC
		if (free_list >= VM_NFREELIST || free_list < 0)
			panic("uvm_pagealloc_strat: bad free list %d",
			    free_list);
#endif
		freeq = &uvm.page_free[free_list];
		if ((pg = freeq->tqh_first) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	uvm_unlock_fpageq(s);		/* unlock free page queue */

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->version++;
	pg->wire_count = 0;
	pg->loan_count = 0;
	if (anon) {
		anon->u.an_page = pg;
		pg->pqflags = PQ_ANON;
	} else {
		if (obj)
			uvm_pageinsert(pg);
		pg->pqflags = 0;
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	return(pg);

 fail:
	uvm_unlock_fpageq(s);
	return (NULL);
}
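
/*
 * Example (editor's illustrative sketch, not from the original source):
 * a typical allocation into a locked object, preferring a specific
 * free list but falling back to the normal search order.  "uobj",
 * "off" and "mdfreelist" are hypothetical, and the object lock field
 * name (vmobjlock) is assumed from struct uvm_object.
 */
#if 0
	struct vm_page *pg;

	simple_lock(&uobj->vmobjlock);		/* obj must be locked */
	pg = uvm_pagealloc_strat(uobj, off, NULL, 0,
	    UVM_PGA_STRAT_FALLBACK, mdfreelist);
	simple_unlock(&uobj->vmobjlock);
	if (pg == NULL) {
		/* no free pages: caller might sleep and retry */
	}
#endif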

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	vaddr_t newoff;
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->version++;
		uvm_pageinsert(pg);
	}

	return;
}

/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(pg)
	struct vm_page *pg;
{
	int s;
	int saved_loan_count = pg->loan_count;

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.   the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		if (saved_loan_count)
			pg->flags &= ~PG_CLEAN;	/* in case an anon takes over */

		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]).  so
		 * we just return: when the last loan is dropped, the page
		 * can be freed by whatever is holding the last loan.
		 */
		if (saved_loan_count)
			return;

	} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {

		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		pg->pqflags &= ~PQ_ANON;
		pg->uanon = NULL;
		return;
	}

#ifdef DIAGNOSTIC
	if (saved_loan_count) {
		printf("uvm_pagefree: warning: freeing page with a loan "
		    "count of %d\n", saved_loan_count);
		panic("uvm_pagefree: loan count");
	}
#endif

	/*
	 * now remove the page from the queues
	 */

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		pg->pqflags &= ~PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pqflags & PQ_INACTIVE) {
		if (pg->pqflags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		pg->pqflags &= ~PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}

	/*
	 * and put on free queue
	 */

	s = uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(pg)],
	    pg, pageq);
	pg->pqflags = PQ_FREE;
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;
	uvm_unlock_fpageq(s);
}
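
/*
 * Example (editor's illustrative sketch, not from the original source):
 * the locking a caller typically needs around uvm_pagefree() for an
 * object-owned page.  uvm_lock_pageq()/uvm_unlock_pageq() are assumed
 * to be the page-queue lock macros from uvm_page.h; "uobj" is a
 * hypothetical object, and pg's mappings must already be gone.
 */
#if 0
	simple_lock(&uobj->vmobjlock);	/* owning object */
	uvm_lock_pageq();		/* page queues */
	uvm_pagefree(pg);
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
#endif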

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif