/*	$NetBSD: uvm_page.c,v 1.35 2000/05/26 21:20:34 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGE                /* pull in uvm_page.h functions */
#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * for testing the idle page zero loop.
 */

boolean_t vm_page_zero_enable = TRUE;

/*
 * local variables
 */

/*
 * these variables record the values returned by uvm_page_init,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * (and pmap_steal_memory, if the port uses it) also uses them
 * internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we statically allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * local prototypes
 */

static void uvm_pageinsert __P((struct vm_page *));

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if (pg->flags & PG_TABLED)
		panic("uvm_pageinsert: already inserted");
#endif

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	pg->flags |= PG_TABLED;
	pg->uobject->uo_npages++;
}
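
/*
 * illustrative sketch (not compiled code): the calling sequence the
 * contract above implies.  uvm_pagerealloc() below follows it exactly;
 * "uobj" and "off" here are placeholder names, not real variables:
 *
 *	pg->uobject = uobj;	(set identity first, object locked)
 *	pg->offset = off;
 *	pg->version++;		(bump version, then insert)
 *	uvm_pageinsert(pg);
 */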

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

void __inline
uvm_pageremove(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_FAULTING) != 0)
		panic("uvm_pageremove: page is faulting");
#endif

	if ((pg->flags & PG_TABLED) == 0)
		return;				/* XXX: log */

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	pg->flags &= ~PG_TABLED;
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->version++;
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;

	/*
	 * step 1: init the page queues and page queue locks
	 */
	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		for (i = 0; i < PGFL_NQUEUES; i++)
			TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
	}
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * step 2: init the <obj,offset> => <page> hash table. for now
	 * we just have one bucket (the bootstrap bucket).   later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * step 3: allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_init: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by (PAGE_SIZE plus the size of the vm_page
	 * structure).   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */
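
	/*
	 * worked example (numbers purely illustrative; the real size of
	 * struct vm_page is MD-dependent): assume a 4096-byte page and a
	 * 64-byte struct vm_page.  with freepages == 16384 (64MB of RAM),
	 * the formula below gives
	 *
	 *	pagecount = (16385 << 12) / (4096 + 64) == 16132
	 *
	 * i.e. roughly 252 pages' worth of space is left over to hold
	 * the vm_page array itself (16132 * 64 bytes).
	 */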

	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * step 4: init the vm_page structures and put them in the correct
	 * place...
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {

		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}
		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * step 5: pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * step 6: init pagedaemon lock
	 */

	simple_lock_init(&uvm.pagedaemon_lock);

	/*
	 * step 7: init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = 5;

	/*
	 * step 8: determine if we should zero pages in the idle
	 * loop.
	 *
	 * XXXJRT - might consider zero'ing up to the target *now*,
	 *	    but that could take an awfully long time if you
	 *	    have a lot of memory.
	 */
	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 * => XXXCDC: move global vars.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
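
/*
 * example of the power-of-two check above (illustrative only): with
 * uvmexp.pagesize == 4096 (0x1000), pagemask is 0xfff and
 * (0xfff & 0x1000) == 0, so the check passes and the loop sets
 * pageshift to 12.  a bogus size such as 4100 (0x1004) gives
 * (0x1003 & 0x1004) != 0 and panics.
 */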

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
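
/*
 * a sketch of a typical early-boot use (hypothetical caller -- real
 * callers live in MD startup code, and "bootstrap_buf"/"BOOTSTRAP_SIZE"
 * are made-up names for illustration):
 *
 *	vaddr_t bootstrap_buf;
 *
 *	bootstrap_buf = uvm_pageboot_alloc(BOOTSTRAP_SIZE);
 *
 * the returned KVA is already backed by physical pages and mapped
 * read/write, but the memory is permanently stolen: it never gets
 * vm_page structures and can never be freed back to UVM.
 */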

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist __P((paddr_t *, int));

static boolean_t
uvm_page_physget_freelist(paddrp, freelist)
	paddr_t *paddrp;
	int freelist;
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("vm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
				    panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
				    panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("vm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int i;

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs (page frame numbers)
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */
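
/*
 * a sketch of a typical call from MD startup code (hypothetical --
 * "first" and "last" are made-up names, and real ports may load
 * several segments).  note the arguments are page frame numbers,
 * hence the atop() conversions:
 *
 *	paddr_t first = ...;	(lowest managed physical address)
 *	paddr_t last = ...;	(highest managed physical address + 1)
 *
 *	uvm_page_physload(atop(first), atop(last),
 *	    atop(first), atop(last), VM_FREELIST_DEFAULT);
 *
 * the avail range may be a strict subset of start/end when some pages
 * (e.g. those holding the kernel image) need vm_page structures but
 * must stay off the free list.
 */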

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	paddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
		    VM_PHYSSEG_MAX, start, end);
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
		    M_VMPAGE, M_NOWAIT);
		if (pgs == NULL) {
			printf("uvm_page_physload: cannot malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;
	size_t newsize, oldsize;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;
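
	/*
	 * e.g. (illustrative): freepages == 3000 yields bucketcount ==
	 * 4096, the smallest power of two >= freepages, which keeps
	 * page_hashmask (bucketcount - 1) a valid bit mask.
	 */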

	/*
	 * compute the size of the current table and new table.
	 */

	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	oldsize = round_page(sizeof(struct pglist) * oldcount);
	newsize = round_page(sizeof(struct pglist) * bucketcount);

	/*
	 * allocate the new buckets
	 */

	newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
	if (newbuckets == NULL) {
		printf("uvm_page_rehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splimp();
	simple_lock(&uvm.hashlock);
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			  &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			  pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if it is not the boot-time table
	 */

	if (oldbuckets != &uvm_bootbucket)
		uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);

	/*
	 * done
	 */
	return;
}

#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump __P((void)); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
		    vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
		    vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *	appropriate priority free list than it is to get a zero'd or
 *	unknown contents page.  This is because we live with the
 *	consequences of a bad free list decision for the entire
 *	lifetime of the page, e.g. if the page comes from memory that
 *	is slower to access.
 */
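
/*
 * a minimal usage sketch (illustrative only, error handling elided):
 * most callers go through the uvm_pagealloc() wrapper (cf. uvm_page.h),
 * which is this function with UVM_PGA_STRAT_NORMAL.  "uobj" and "off"
 * are placeholder names, and the object is assumed locked:
 *
 *	pg = uvm_pagealloc(uobj, off, NULL, 0);
 *	if (pg == NULL)
 *		...sleep for memory and retry, or fail...
 *
 * the returned page comes back busy (PG_BUSY) and on no paging queue;
 * the caller must eventually clear PG_BUSY and, if the page is not
 * wired, activate or deactivate it.
 */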

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
	struct uvm_object *obj;
	voff_t off;
	struct vm_anon *anon;
	int flags;
	int strat, free_list;
{
	int lcv, try1, try2, s, zeroit = 0;
	struct vm_page *pg;
	struct pglist *freeq;
	struct pgfreelist *pgfl;
	boolean_t use_reserve;

#ifdef DIAGNOSTIC
	/* sanity check */
	if (obj && anon)
		panic("uvm_pagealloc: obj and anon != NULL");
#endif

	s = uvm_lock_fpageq();		/* lock free page queue */

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
		(obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	     !(use_reserve && curproc == uvm.pagedaemon_proc)))
		goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

	/*
	 * If we want a zero'd page, try the ZEROS queue first, otherwise
	 * we try the UNKNOWN queue first.
	 */
	if (flags & UVM_PGA_ZERO) {
		try1 = PGFL_ZEROS;
		try2 = PGFL_UNKNOWN;
	} else {
		try1 = PGFL_UNKNOWN;
		try2 = PGFL_ZEROS;
	}

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pgfl = &uvm.page_free[lcv];
			if ((pg = TAILQ_FIRST((freeq =
			      &pgfl->pgfl_queues[try1]))) != NULL ||
			    (pg = TAILQ_FIRST((freeq =
			      &pgfl->pgfl_queues[try2]))) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
#ifdef DIAGNOSTIC
		if (free_list >= VM_NFREELIST || free_list < 0)
			panic("uvm_pagealloc_strat: bad free list %d",
			    free_list);
#endif
		pgfl = &uvm.page_free[free_list];
		if ((pg = TAILQ_FIRST((freeq =
		      &pgfl->pgfl_queues[try1]))) != NULL ||
		    (pg = TAILQ_FIRST((freeq =
		      &pgfl->pgfl_queues[try2]))) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	/* update zero'd page count */
	if (pg->flags & PG_ZERO)
		uvmexp.zeropages--;

	/*
	 * update allocation statistics and remember if we have to
	 * zero the page
	 */
	if (flags & UVM_PGA_ZERO) {
		if (pg->flags & PG_ZERO) {
			uvmexp.pga_zerohit++;
			zeroit = 0;
		} else {
			uvmexp.pga_zeromiss++;
			zeroit = 1;
		}
	}

	uvm_unlock_fpageq(s);		/* unlock free page queue */

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->version++;
	pg->wire_count = 0;
	pg->loan_count = 0;
	if (anon) {
		anon->u.an_page = pg;
		pg->pqflags = PQ_ANON;
	} else {
		if (obj)
			uvm_pageinsert(pg);
		pg->pqflags = 0;
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	if (flags & UVM_PGA_ZERO) {
		/*
		 * A zero'd page is not clean.  If we got a page not already
		 * zero'd, then we have to zero it ourselves.
		 */
		pg->flags &= ~PG_CLEAN;
		if (zeroit)
			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
	}

	return(pg);

 fail:
	uvm_unlock_fpageq(s);
	return (NULL);
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	voff_t newoff;
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->version++;
		uvm_pageinsert(pg);
	}

	return;
}

/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(pg)
	struct vm_page *pg;
{
	int s;
	int saved_loan_count = pg->loan_count;

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.   the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		if (saved_loan_count)
			pg->flags &= ~PG_CLEAN;	/* in case an anon takes over */

		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]), so
		 * we just return (when the last loan is dropped, the page
		 * can be freed by whatever was holding the last loan).
		 */
		if (saved_loan_count)
			return;

	} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {

		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		pg->pqflags &= ~PQ_ANON;
		pg->uanon = NULL;
		return;
	}

#ifdef DIAGNOSTIC
	if (saved_loan_count) {
		printf("uvm_pagefree: warning: freeing page with a loan "
		    "count of %d\n", saved_loan_count);
		panic("uvm_pagefree: loan count");
	}
#endif

	/*
	 * now remove the page from the queues
	 */

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		pg->pqflags &= ~PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pqflags & PQ_INACTIVE) {
		if (pg->pqflags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		pg->pqflags &= ~PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */
	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}

	/*
	 * and put on free queue
	 */

	pg->flags &= ~PG_ZERO;

	s = uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[
	    uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
	pg->pqflags = PQ_FREE;
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;

	uvm_unlock_fpageq(s);
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or whichqs indicates that
 *	there is a process ready to run.
 */
void
uvm_pageidlezero()
{
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list, s;

	do {
		s = uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq(s);
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq(s);
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq(s);

#ifdef PMAP_PAGEIDLEZERO
		PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg));
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(VM_PAGE_TO_PHYS(pg));
#endif
		pg->flags |= PG_ZERO;

		s = uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq(s);
	} while (sched_whichqs == 0);
}