/*	$NetBSD: uvm_page.c,v 1.20 1999/05/20 23:03:23 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include "opt_pmap_new.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGE                /* pull in uvm_page.h functions */
#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * local variables
 */

/*
 * these variables record the range of kernel virtual memory handed up
 * to the rest of the VM by uvm_page_init, for debugging purposes.  the
 * implementation of uvm_pageboot_alloc here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once malloc() is ready.
 * we statically allocate the bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

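/*
 * an illustrative lookup sketch (not a function in this file): a page
 * is found by hashing its <obj,offset> pair to a bucket and walking
 * that bucket's list.  uvm_pagehash() is assumed here to mix the
 * object pointer with the offset and mask the result with
 * uvm.page_hashmask (see uvm_page.h for the real definition):
 *
 *	buck = &uvm.page_hash[uvm_pagehash(obj, off)];
 *	for (pg = buck->tqh_first; pg != NULL; pg = pg->hashq.tqe_next)
 *		if (pg->uobject == obj && pg->offset == off)
 *			break;		(found it)
 */
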
/*
 * local prototypes
 */

static void uvm_pageinsert __P((struct vm_page *));


/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if (pg->flags & PG_TABLED)
		panic("uvm_pageinsert: already inserted");
#endif

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	pg->flags |= PG_TABLED;
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

void __inline
uvm_pageremove(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if ((pg->flags & (PG_FAULTING)) != 0)
		panic("uvm_pageremove: page is faulting");
#endif

	if ((pg->flags & PG_TABLED) == 0)
		return;				/* XXX: log */

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	pg->flags &= ~PG_TABLED;
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->version++;
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	int freepages, pagecount;
	vm_page_t pagearray;
	int lcv, n, i;
	paddr_t paddr;

	/*
	 * step 1: init the page queues and page queue locks
	 */

	for (lcv = 0; lcv < VM_NFREELIST; lcv++)
		TAILQ_INIT(&uvm.page_free[lcv]);
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * step 2: init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will malloc() new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * step 3: allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_init: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
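
	/*
	 * a worked example, for illustration only (the sizes are
	 * hypothetical): with 4096-byte pages and a 96-byte struct
	 * vm_page, 32768 pages (128MB) of managed RAM give
	 * pagecount = ((32768 + 1) * 4096) / (4096 + 96) = 32018,
	 * i.e. roughly 2% of the pages are consumed by the vm_page
	 * array itself.
	 */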

	/*
	 * step 4: init the vm_page structures and put them in the correct
	 * place...
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {

		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %d page(s) in init\n",
			    n - pagecount);
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}
		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
			/* note: avail_end is exclusive, hence "<" */
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) < vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * step 5: pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * step 6: init pagedaemon lock
	 */

	simple_lock_init(&uvm.pagedaemon_lock);

	/*
	 * step 7: init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */

	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = 5;

	/*
	 * done!
	 */
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 * => XXXCDC: move global vars.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
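
/*
 * for illustration: with uvmexp.pagesize = 4096 this computes
 * uvmexp.pagemask = 0xfff and uvmexp.pageshift = 12, and the
 * power-of-two check holds since 0xfff & 0x1000 == 0.
 */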

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */

	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */

	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */

	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/* XXX: should be wired, but some pmaps don't like that ... */
#if defined(PMAP_NEW)
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
#else
		pmap_enter(pmap_kernel(), vaddr, paddr,
		    VM_PROT_READ|VM_PROT_WRITE, FALSE,
		    VM_PROT_READ|VM_PROT_WRITE);
#endif
	}

	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
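
/*
 * note: uvm_page_init() above is the main consumer of this interface;
 * it grabs the entire vm_page array in one early call:
 *
 *	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
 *	    sizeof(struct vm_page));
 */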

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{
		if (vm_physmem[lcv].pgs)
			panic("uvm_page_physget: called _after_ bootstrap");

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{
		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all address args are page frame numbers (PFs), not byte addresses
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */
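
/*
 * a call sketch, for illustration only (the segment names are
 * hypothetical): MD startup code typically registers each chunk of
 * managed RAM with something like
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 *
 * before calling uvm_page_init().
 */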

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	vaddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);

	/*
	 * do we have room?
	 */

	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
		    VM_PHYSSEG_MAX, start, end);
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */

	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;

		npages = end - start;  /* # of pages */
		MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
		    M_VMPAGE, M_NOWAIT);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			/* note: avail_end is exclusive, hence "<" */
			if (atop(paddr) >= avail_start &&
			    atop(paddr) < avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {
		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;
	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */
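
/*
 * for illustration: the bucket count is the number of free pages
 * rounded up to the next power of two, so e.g. freepages = 3000 yields
 * bucketcount = 4096 and page_hashmask = 0xfff (a power-of-two size is
 * what lets the hash function mask rather than divide).
 */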

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * malloc new buckets
	 */

	MALLOC(newbuckets, struct pglist *, sizeof(struct pglist) * bucketcount,
	    M_VMPBUCKET, M_NOWAIT);
	if (newbuckets == NULL) {
		printf("uvm_page_rehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splimp();
	simple_lock(&uvm.hashlock);
	/* swap old for new ... */
	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			  &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			  pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if we malloc'd it previously
	 */

	if (oldbuckets != &uvm_bootbucket)
		FREE(oldbuckets, M_VMPBUCKET);

	/*
	 * done
	 */

	return;
}


#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump __P((void)); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
		    vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
		    vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return NULL if no pages are free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 */
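
/*
 * note (assumption, see uvm_page.h): most callers are not expected to
 * use this function directly; uvm_pagealloc(obj, off, anon, flags) is
 * assumed to be a wrapper that expands to
 *
 *	uvm_pagealloc_strat((obj), (off), (anon), (flags),
 *	    UVM_PGA_STRAT_NORMAL, 0);
 */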

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
	struct uvm_object *obj;
	vaddr_t off;
	struct vm_anon *anon;
	int flags;
	int strat, free_list;
{
	int lcv, s;
	struct vm_page *pg;
	struct pglist *freeq;
	boolean_t use_reserve;

#ifdef DIAGNOSTIC
	/* sanity check */
	if (obj && anon)
		panic("uvm_pagealloc: obj and anon != NULL");
#endif

	s = splimp();

	uvm_lock_fpageq();		/* lock free page queue */

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		thread_wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && obj->uo_refs == UVM_OBJ_KERN);
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	     !(use_reserve && curproc == uvm.pagedaemon_proc)))
		goto fail;

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			freeq = &uvm.page_free[lcv];
			if ((pg = freeq->tqh_first) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
#ifdef DIAGNOSTIC
		if (free_list >= VM_NFREELIST || free_list < 0)
			panic("uvm_pagealloc_strat: bad free list %d",
			    free_list);
#endif
		freeq = &uvm.page_free[free_list];
		if ((pg = freeq->tqh_first) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	uvm_unlock_fpageq();		/* unlock free page queue */
	splx(s);

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->version++;
	pg->wire_count = 0;
	pg->loan_count = 0;
	if (anon) {
		anon->u.an_page = pg;
		pg->pqflags = PQ_ANON;
	} else {
		if (obj)
			uvm_pageinsert(pg);
		pg->pqflags = 0;
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	return(pg);

 fail:
	uvm_unlock_fpageq();
	splx(s);
	return (NULL);
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	vaddr_t newoff;
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->version++;
		uvm_pageinsert(pg);
	}

	return;
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */
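
/*
 * a typical caller pattern, for illustration only (uvm_lock_pageq() /
 * uvm_unlock_pageq() are assumed to be the page queue lock macros from
 * uvm_page.h):
 *
 *	uvm_lock_pageq();
 *	uvm_pagefree(pg);
 *	uvm_unlock_pageq();
 */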

void
uvm_pagefree(pg)
	struct vm_page *pg;
{
	int s;
	int saved_loan_count = pg->loan_count;

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.   the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		if (saved_loan_count)
			pg->flags &= ~PG_CLEAN;	/* in case an anon takes over */

		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]), so we
		 * just return.  when the last loan is dropped, the page can
		 * be freed by whatever is holding that last loan.
		 */

		if (saved_loan_count)
			return;

	} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {

		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */

		pg->pqflags &= ~PQ_ANON;
		pg->uanon = NULL;
		return;
	}

#ifdef DIAGNOSTIC
	if (saved_loan_count) {
		printf("uvm_pagefree: warning: freeing page with a loan "
		    "count of %d\n", saved_loan_count);
		panic("uvm_pagefree: loan count");
	}
#endif

	/*
	 * now remove the page from the queues
	 */

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		pg->pqflags &= ~PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pqflags & PQ_INACTIVE) {
		if (pg->pqflags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		pg->pqflags &= ~PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}

	/*
	 * and put on free queue
	 */

	s = splimp();
	uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(pg)],
	    pg, pageq);
	pg->pqflags = PQ_FREE;
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;
	uvm_unlock_fpageq();
	splx(s);
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */

void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif