/*	$NetBSD: uvm_page.c,v 1.78 2002/06/20 15:05:29 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.78 2002/06/20 15:05:29 chs Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/proc.h>

#define UVM_PAGE                /* pull in uvm_page.h functions */
#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */
/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the boot-time kernel virtual address range
 * obtained from pmap_virtual_space().  uvm_pageboot_alloc() carves
 * its allocations out of this range, and uvm_page_init() hands the
 * final values up to the higher layers of the VM.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we statically allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * we allocate an initial number of page colors in uvm_page_init(),
 * and remember them.  We may re-color pages as cache sizes are
 * discovered during the autoconfiguration phase.  But we can never
 * free the initial set of buckets, since they are allocated using
 * uvm_pageboot_alloc().
 */

static boolean_t have_recolored_pages /* = FALSE */;

/*
 * local prototypes
 */

static void uvm_pageinsert __P((struct vm_page *));
static void uvm_pageremove __P((struct vm_page *));

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

static __inline void
uvm_pageinsert(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	struct uvm_object *uobj = pg->uobject;

	KASSERT((pg->flags & PG_TABLED) == 0);
	buck = &uvm.page_hash[uvm_pagehash(uobj, pg->offset)];
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);

	if (UVM_OBJ_IS_VTEXT(uobj)) {
		uvmexp.execpages++;
	} else if (UVM_OBJ_IS_VNODE(uobj)) {
		uvmexp.filepages++;
	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
		uvmexp.anonpages++;
	}

	TAILQ_INSERT_TAIL(&uobj->memq, pg, listq);
	pg->flags |= PG_TABLED;
	uobj->uo_npages++;
}
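
/*
 * the lookup side walks the same bucket.  uvm_pagehash() (a macro in
 * uvm_page.h) folds the object pointer and offset into a bucket index,
 * roughly ((u_long)uobj + (u_long)atop(off)) & uvm.page_hashmask, so a
 * sketch of a lookup is:
 *
 *	struct vm_page *pg;
 *
 *	simple_lock(&uvm.hashlock);
 *	TAILQ_FOREACH(pg, &uvm.page_hash[uvm_pagehash(uobj, off)], hashq)
 *		if (pg->uobject == uobj && pg->offset == off)
 *			break;
 *	simple_unlock(&uvm.hashlock);
 */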

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	struct uvm_object *uobj = pg->uobject;

	KASSERT(pg->flags & PG_TABLED);
	buck = &uvm.page_hash[uvm_pagehash(uobj, pg->offset)];
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);

	if (UVM_OBJ_IS_VTEXT(uobj)) {
		uvmexp.execpages--;
	} else if (UVM_OBJ_IS_VNODE(uobj)) {
		uvmexp.filepages--;
	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
		uvmexp.anonpages--;
	}

	/* object should be locked */
	uobj->uo_npages--;
	TAILQ_REMOVE(&uobj->memq, pg, listq);
	pg->flags &= ~PG_TABLED;
	pg->uobject = NULL;
}

static void
uvm_page_init_buckets(struct pgfreelist *pgfl)
{
	int color, i;

	for (color = 0; color < uvmexp.ncolors; color++) {
		for (i = 0; i < PGFL_NQUEUES; i++) {
			TAILQ_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]);
		}
	}
}
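
/*
 * the free page pool is thus indexed three levels deep:
 *
 *	uvm.page_free[freelist]         MD allocation priority
 *	    .pgfl_buckets[color]        cache color of the page
 *	        .pgfl_queues[queue]     PGFL_ZEROS or PGFL_UNKNOWN
 *
 * e.g. the list of known-zero pages of color 2 on the default freelist
 * (the color value here is just illustrative) is:
 *
 *	&uvm.page_free[VM_FREELIST_DEFAULT].pgfl_buckets[2].
 *	    pgfl_queues[PGFL_ZEROS]
 */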

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	vsize_t freepages, pagecount, bucketcount, n;
	struct pgflbucket *bucketarray;
	struct vm_page *pagearray;
	int lcv, i;
	paddr_t paddr;

	/*
	 * init the page queues and page queue locks, except the free
	 * list; we allocate that later (with the initial vm_page
	 * structures).
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_init: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * Let MD code initialize the number of colors, or default
	 * to 1 color if MD code doesn't care.
	 */
	if (uvmexp.ncolors == 0)
		uvmexp.ncolors = 1;
	uvmexp.colormask = uvmexp.ncolors - 1;

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	bucketcount = uvmexp.ncolors * VM_NFREELIST;
	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));

	bucketarray = (void *)uvm_pageboot_alloc((bucketcount *
	    sizeof(struct pgflbucket)) + (pagecount *
	    sizeof(struct vm_page)));
	pagearray = (struct vm_page *)(bucketarray + bucketcount);

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		uvm.page_free[lcv].pgfl_buckets =
		    (bucketarray + (lcv * uvmexp.ncolors));
		uvm_page_init_buckets(&uvm.page_free[lcv]);
	}
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) < vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */

	simple_lock_init(&uvm.pagedaemon_lock);
	simple_lock_init(&uvm.aiodoned_lock);

	/*
	 * init various thresholds.
	 */

	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = 5;
	uvmexp.anonminpct = 10;
	uvmexp.fileminpct = 10;
	uvmexp.execminpct = 5;
	uvmexp.anonmaxpct = 80;
	uvmexp.filemaxpct = 50;
	uvmexp.execmaxpct = 30;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.filemin = uvmexp.fileminpct * 256 / 100;
	uvmexp.execmin = uvmexp.execminpct * 256 / 100;
	uvmexp.anonmax = uvmexp.anonmaxpct * 256 / 100;
	uvmexp.filemax = uvmexp.filemaxpct * 256 / 100;
	uvmexp.execmax = uvmexp.execmaxpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
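
/*
 * e.g. uvmexp.pagesize == 4096 yields pagemask 0xfff and pageshift 12.
 * the power-of-two check works because pagesize == 1 << n implies
 * pagesize - 1 has only bits below bit n set:
 *
 *	4096 & 4095 == 0		power of two, accepted
 *	6144 & 6143 == 0x1000		not a power of two, panics
 */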

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
	static boolean_t initialized = FALSE;
	vaddr_t addr;
#if !defined(PMAP_STEAL_MEMORY)
	vaddr_t vaddr;
	paddr_t paddr;
#endif

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/* round to page size */
	size = round_page(size);

#if defined(PMAP_STEAL_MEMORY)

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should adjust
	 * virtual_space_start/virtual_space_end if necessary.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
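
/*
 * MD code calls this very early; a typical use is carving out space
 * for the kernel message buffer before the VM system is up (a sketch;
 * the particular caller and size are illustrative):
 *
 *	vaddr_t va = uvm_pageboot_alloc(round_page(MSGBUFSIZE));
 *
 * note there is no error return and no way to free the memory: the
 * function panics if the request cannot be satisfied.
 */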

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist __P((paddr_t *, int));

static boolean_t
uvm_page_physget_freelist(paddrp, freelist)
	paddr_t *paddrp;
	int freelist;
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
				    panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
				    panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int i;

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	paddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");
	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d\n", free_list);
	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */

	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */

	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		pgs = malloc(sizeof(struct vm_page) * npages,
		    M_VMPAGE, M_NOWAIT);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%llx -> 0x%llx\n",
			    (long long)start, (long long)end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) < avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {
		pgs = NULL;
		npages = 0;
	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	if (!preload)
		uvm_page_rehash();
}
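
/*
 * since the arguments are page frame numbers, MD callers convert byte
 * addresses with atop().  a typical registration of one RAM segment
 * looks like (the addresses are illustrative):
 *
 *	uvm_page_physload(atop(0x0), atop(0x4000000),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 *
 * where [avail_start, avail_end) excludes memory already occupied by
 * the kernel image and boot data.
 */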

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;
	size_t newsize, oldsize;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * compute the size of the current table and new table.
	 */

	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	oldsize = round_page(sizeof(struct pglist) * oldcount);
	newsize = round_page(sizeof(struct pglist) * bucketcount);

	/*
	 * allocate the new buckets
	 */

	newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
	if (newbuckets == NULL) {
		printf("uvm_page_rehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	simple_lock(&uvm.hashlock);
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = TAILQ_FIRST(&oldbuckets[lcv])) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			  &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			  pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);

	/*
	 * free old bucket array if it is not the boot-time table
	 */

	if (oldbuckets != &uvm_bootbucket)
		uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);
}
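
/*
 * e.g. with 60000 free pages the loop above selects bucketcount 65536,
 * the first power of two >= freepages, so page_hashmask becomes 0xffff
 * and the table averages under one page per bucket.
 */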

/*
 * uvm_page_recolor: Recolor the pages if the new bucket count is
 * larger than the old one.
 */

void
uvm_page_recolor(int newncolors)
{
	struct pgflbucket *bucketarray, *oldbucketarray;
	struct pgfreelist pgfl;
	struct vm_page *pg;
	vsize_t bucketcount;
	int s, lcv, color, i, ocolors;

	if (newncolors <= uvmexp.ncolors)
		return;

	if (uvm.page_init_done == FALSE) {
		uvmexp.ncolors = newncolors;
		return;
	}

	bucketcount = newncolors * VM_NFREELIST;
	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
	    M_VMPAGE, M_NOWAIT);
	if (bucketarray == NULL) {
		printf("WARNING: unable to allocate %ld page color buckets\n",
		    (long) bucketcount);
		return;
	}

	s = uvm_lock_fpageq();

	/* Make sure we should still do this. */
	if (newncolors <= uvmexp.ncolors) {
		uvm_unlock_fpageq(s);
		free(bucketarray, M_VMPAGE);
		return;
	}

	oldbucketarray = uvm.page_free[0].pgfl_buckets;
	ocolors = uvmexp.ncolors;

	uvmexp.ncolors = newncolors;
	uvmexp.colormask = uvmexp.ncolors - 1;

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		pgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
		uvm_page_init_buckets(&pgfl);
		for (color = 0; color < ocolors; color++) {
			for (i = 0; i < PGFL_NQUEUES; i++) {
				while ((pg = TAILQ_FIRST(&uvm.page_free[
				    lcv].pgfl_buckets[color].pgfl_queues[i]))
				    != NULL) {
					TAILQ_REMOVE(&uvm.page_free[
					    lcv].pgfl_buckets[
					    color].pgfl_queues[i], pg, pageq);
					TAILQ_INSERT_TAIL(&pgfl.pgfl_buckets[
					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
					    i], pg, pageq);
				}
			}
		}
		uvm.page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
	}

	if (have_recolored_pages) {
		uvm_unlock_fpageq(s);
		free(oldbucketarray, M_VMPAGE);
		return;
	}

	have_recolored_pages = TRUE;
	uvm_unlock_fpageq(s);
}
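
/*
 * MD cache-probe code is the expected caller.  for a physically
 * indexed cache, the usual color count is the number of page-sized
 * spans in one way of the cache (a sketch; the variable names are
 * illustrative):
 *
 *	uvm_page_recolor(cache_size / (PAGE_SIZE * cache_assoc));
 *
 * e.g. a 1MB two-way set-associative cache with 4KB pages yields
 * 1048576 / (4096 * 2) == 128 colors.
 */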

/*
 * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
 */

static __inline struct vm_page *
uvm_pagealloc_pgfl(struct pgfreelist *pgfl, int try1, int try2,
    int *trycolorp)
{
	struct pglist *freeq;
	struct vm_page *pg;
	int color, trycolor = *trycolorp;

	color = trycolor;
	do {
		if ((pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL)
			goto gotit;
		if ((pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL)
			goto gotit;
		color = (color + 1) & uvmexp.colormask;
	} while (color != trycolor);

	return (NULL);

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	/* update zero'd page count */
	if (pg->flags & PG_ZERO)
		uvmexp.zeropages--;

	if (color == trycolor)
		uvmexp.colorhit++;
	else {
		uvmexp.colormiss++;
		*trycolorp = color;
	}

	return (pg);
}
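
/*
 * e.g. with uvmexp.colormask == 3 and *trycolorp == 2 the search
 * visits colors 2, 3, 0, 1, trying the preferred queue (try1) before
 * the other (try2) within each color; colorhit/colormiss record
 * whether the caller's preferred color was satisfied.
 */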

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *	appropriate priority free list than it is to get a zero'd or
 *	unknown contents page.  This is because we live with the
 *	consequences of a bad free list decision for the entire
 *	lifetime of the page, e.g. if the page comes from memory that
 *	is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
	struct uvm_object *obj;
	voff_t off;
	struct vm_anon *anon;
	int flags;
	int strat, free_list;
{
	int lcv, try1, try2, s, zeroit = 0, color;
	struct vm_page *pg;
	boolean_t use_reserve;

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));
	LOCK_ASSERT(obj == NULL || simple_lock_held(&obj->vmobjlock));
	LOCK_ASSERT(anon == NULL || simple_lock_held(&anon->an_lock));

	s = uvm_lock_fpageq();

	/*
	 * This implements a global round-robin page coloring
	 * algorithm.
	 *
	 * XXXJRT: Should we make the `nextcolor' per-cpu?
	 * XXXJRT: What about virtually-indexed caches?
	 */

	color = uvm.page_free_nextcolor;

	/*
	 * check to see if we need to generate some free pages by waking
	 * up the pagedaemon.
	 */

	UVM_KICK_PDAEMON();

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object, or
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
		(obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	     !(use_reserve && curproc == uvm.pagedaemon_proc)))
		goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

	/*
	 * If we want a zero'd page, try the ZEROS queue first, otherwise
	 * we try the UNKNOWN queue first.
	 */
	if (flags & UVM_PGA_ZERO) {
		try1 = PGFL_ZEROS;
		try2 = PGFL_UNKNOWN;
	} else {
		try1 = PGFL_UNKNOWN;
		try2 = PGFL_ZEROS;
	}

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pg = uvm_pagealloc_pgfl(&uvm.page_free[lcv],
			    try1, try2, &color);
			if (pg != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
		pg = uvm_pagealloc_pgfl(&uvm.page_free[free_list],
		    try1, try2, &color);
		if (pg != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	/*
	 * We now know which color we actually allocated from; set
	 * the next color accordingly.
	 */

	uvm.page_free_nextcolor = (color + 1) & uvmexp.colormask;

	/*
	 * update allocation statistics and remember if we have to
	 * zero the page
	 */

	if (flags & UVM_PGA_ZERO) {
		if (pg->flags & PG_ZERO) {
			uvmexp.pga_zerohit++;
			zeroit = 0;
		} else {
			uvmexp.pga_zeromiss++;
			zeroit = 1;
		}
	}
	uvm_unlock_fpageq(s);

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	if (anon) {
		anon->u.an_page = pg;
		pg->pqflags = PQ_ANON;
		uvmexp.anonpages++;
	} else {
		if (obj) {
			uvm_pageinsert(pg);
		}
		pg->pqflags = 0;
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	if (flags & UVM_PGA_ZERO) {
		/*
		 * A zero'd page is not clean.  If we got a page not already
		 * zero'd, then we have to zero it ourselves.
		 */
		pg->flags &= ~PG_CLEAN;
		if (zeroit)
			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
	}

	return(pg);

 fail:
	uvm_unlock_fpageq(s);
	return (NULL);
}
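
/*
 * most callers use the uvm_pagealloc() wrapper from uvm_page.h, which
 * is this function with UVM_PGA_STRAT_NORMAL, and sleep on failure.
 * a common shape (a sketch; locking shown for the object case):
 *
 *	struct vm_page *pg;
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL) {
 *		simple_unlock(&uobj->vmobjlock);
 *		uvm_wait("pgalloc");
 *		simple_lock(&uobj->vmobjlock);
 *	}
 *
 * the page comes back PG_BUSY|PG_CLEAN|PG_FAKE and on no paging
 * queue; the caller must clear PG_BUSY when done with the page and
 * activate or wire it if it should stay resident.
 */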

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	voff_t newoff;
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		uvm_pageinsert(pg);
	}
}

/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(pg)
	struct vm_page *pg;
{
	int s;

	KASSERT((pg->flags & PG_PAGEOUT) == 0);
	LOCK_ASSERT(simple_lock_held(&uvm.pageqlock) ||
		    (pg->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) == 0);
	LOCK_ASSERT(pg->uobject == NULL ||
		    simple_lock_held(&pg->uobject->vmobjlock));
	LOCK_ASSERT(pg->uobject != NULL || pg->uanon == NULL ||
		    simple_lock_held(&pg->uanon->an_lock));

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p\n", pg);
	}
#endif

	/*
	 * if the page is loaned, resolve the loan instead of freeing.
	 */

	if (pg->loan_count) {
		KASSERT(pg->wire_count == 0);

		/*
		 * if the page is owned by an anon then we just want to
		 * drop anon ownership.  the kernel will free the page when
		 * it is done with it.  if the page is owned by an object,
		 * remove it from the object and mark it dirty for the benefit
		 * of possible anon owners.
		 *
		 * regardless of previous ownership, wakeup any waiters,
		 * unbusy the page, and we're done.
		 */

		if (pg->uobject != NULL) {
			uvm_pageremove(pg);
			pg->flags &= ~PG_CLEAN;
		} else if (pg->uanon != NULL) {
			if ((pg->pqflags & PQ_ANON) == 0) {
				pg->loan_count--;
			} else {
				pg->pqflags &= ~PQ_ANON;
			}
			pg->uanon = NULL;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED);
#ifdef UVM_PAGE_TRKOWN
		pg->owner_tag = NULL;
#endif
		if (pg->loan_count) {
			uvm_pagedequeue(pg);
			return;
		}
	}

	/*
	 * remove page from its object or anon.
	 */

	if (pg->uobject != NULL) {
		uvm_pageremove(pg);
	} else if (pg->uanon != NULL) {
		pg->uanon->u.an_page = NULL;
		uvmexp.anonpages--;
	}

	/*
	 * now remove the page from the queues.
	 */

	uvm_pagedequeue(pg);

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}

	/*
	 * and put on free queue
	 */

	pg->flags &= ~PG_ZERO;

	s = uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[
	    uvm_page_lookup_freelist(pg)].pgfl_buckets[
	    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
	pg->pqflags = PQ_FREE;
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;

	uvm_unlock_fpageq(s);
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be locked.
 * => caller must lock page queues if pages may be released.
 */

void
uvm_page_unbusy(pgs, npgs)
	struct vm_page **pgs;
	int npgs;
{
	struct vm_page *pg;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->flags & PG_RELEASED) {
			UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
			pg->flags &= ~PG_RELEASED;
			uvm_pagefree(pg);
		} else {
			UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
			pg->flags &= ~(PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);

	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	KASSERT((pg->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) ||
	    (pg->uanon == NULL && pg->uobject == NULL) ||
	    pg->uobject == uvm.kernel_object ||
	    pg->wire_count > 0 ||
	    (pg->loan_count == 1 && pg->uanon == NULL) ||
	    pg->loan_count > 1);
	pg->owner_tag = NULL;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => try to complete one color bucket at a time, to reduce our impact
 *	on the CPU cache.
 * => we loop until we either reach the target or whichqs indicates that
 *	there is a process ready to run.
 */
void
uvm_pageidlezero()
{
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list, s, firstbucket;
	static int nextbucket;

	s = uvm_lock_fpageq();
	firstbucket = nextbucket;
	do {
		if (sched_whichqs != 0) {
			uvm_unlock_fpageq(s);
			return;
		}
		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq(s);
			return;
		}
		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			while ((pg = TAILQ_FIRST(&pgfl->pgfl_buckets[
			    nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
				if (sched_whichqs != 0) {
					uvm_unlock_fpageq(s);
					return;
				}

				TAILQ_REMOVE(&pgfl->pgfl_buckets[
				    nextbucket].pgfl_queues[PGFL_UNKNOWN],
				    pg, pageq);
				uvmexp.free--;
				uvm_unlock_fpageq(s);
#ifdef PMAP_PAGEIDLEZERO
				if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) {

					/*
					 * The machine-dependent code detected
					 * some reason for us to abort zeroing
					 * pages, probably because there is a
					 * process now ready to run.
					 */

					s = uvm_lock_fpageq();
					TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
					    nextbucket].pgfl_queues[
					    PGFL_UNKNOWN], pg, pageq);
					uvmexp.free++;
					uvmexp.zeroaborts++;
					uvm_unlock_fpageq(s);
					return;
				}
#else
				pmap_zero_page(VM_PAGE_TO_PHYS(pg));
#endif /* PMAP_PAGEIDLEZERO */
				pg->flags |= PG_ZERO;

				s = uvm_lock_fpageq();
				TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
				    nextbucket].pgfl_queues[PGFL_ZEROS],
				    pg, pageq);
				uvmexp.free++;
				uvmexp.zeropages++;
			}
		}
		nextbucket = (nextbucket + 1) & uvmexp.colormask;
	} while (nextbucket != firstbucket);
	uvm_unlock_fpageq(s);
}
   1476