/*	$NetBSD: uvm_page.c,v 1.9 1998/04/16 03:54:35 thorpej Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include "opt_pmap_new.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <sys/syscallargs.h>

#define UVM_PAGE                /* pull in uvm_page.h functions */
#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vm_offset_t      virtual_space_start;
static vm_offset_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once malloc() is ready.
 * we statically allocate the bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * local prototypes
 */

static void uvm_pageinsert __P((struct vm_page *));
#if !defined(PMAP_STEAL_MEMORY)
static boolean_t uvm_page_physget __P((vm_offset_t *));
#endif


/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if (pg->flags & PG_TABLED)
		panic("uvm_pageinsert: already inserted");
#endif

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	pg->flags |= PG_TABLED;
	pg->uobject->uo_npages++;

}

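/*
 * a minimal caller sketch (illustrative, not compiled): the setup
 * protocol described above, essentially what uvm_pagealloc() does
 * below; "uobj" and "off" are hypothetical names.
 */
#if 0
	pg->uobject = uobj;	/* uobj: the (locked) owning object */
	pg->offset = off;	/* offset of the page in uobj */
	pg->version++;		/* bump the version counter */
	uvm_pageinsert(pg);	/* now enter pg in the hash and object */
#endif
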
/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

void __inline
uvm_pageremove(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if ((pg->flags & (PG_FAULTING)) != 0)
		panic("uvm_pageremove: page is faulting");
#endif

	if ((pg->flags & PG_TABLED) == 0)
		return;				/* XXX: log */

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	pg->flags &= ~PG_TABLED;
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->version++;

}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vm_offset_t *kvm_startp, *kvm_endp;
{
	int freepages, pagecount;
	vm_page_t pagearray;
	int lcv, n, i;
	vm_offset_t paddr;


	/*
	 * step 1: init the page queues and page queue locks
	 */

	TAILQ_INIT(&uvm.page_free);
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * step 2: init the <obj,offset> => <page> hash table. for now
	 * we just have one bucket (the bootstrap bucket).   later on we
	 * will malloc() new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;		/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * step 3: allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("vm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by (PAGE_SIZE plus the size of the vm_page
	 * structure).   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */
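
	/*
	 * worked example (made-up numbers): with 4096-byte pages and a
	 * 64-byte struct vm_page, freepages = 1000 gives pagecount =
	 * (4096 * (1000 + 1)) / (4096 + 64) = 985; about 1.5% of the
	 * pages are sacrificed to hold the page array itself.
	 */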

	pagecount = (PAGE_SIZE * (freepages + 1)) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	bzero(pagearray, pagecount * sizeof(struct vm_page));

	/*
	 * step 4: init the vm_page structures and put them in the correct
	 * place...
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {

		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %d page(s) in init\n",
			    n - pagecount);
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}
		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already bzero'd them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) < vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}
	/*
	 * step 5: pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * step 6: init pagedaemon lock
	 */

	simple_lock_init(&uvm.pagedaemon_lock);

	/*
	 * step 7: init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = 5;

	/*
	 * done!
	 */

}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 * => XXXCDC: move global vars.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
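
/*
 * for instance (illustrative numbers): uvmexp.pagesize == 4096 leaves
 * uvmexp.pagemask == 0xfff and uvmexp.pageshift == 12, since 4096 ==
 * (1 << 12); the (pagemask & pagesize) test catches sizes that are
 * not powers of two, because those share a set bit with (size - 1).
 */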

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vm_offset_t
uvm_pageboot_alloc(size)
	vm_size_t size;
{
#if defined(PMAP_STEAL_MEMORY)
	vm_offset_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	vm_offset_t addr, vaddr, paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function init ourselves.   we detect this
	 * by checking virtual_space_start/end which are in the zero'd BSS area.
	 */

	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);
	}

	/*
	 * allocate virtual memory for this request
	 */

	addr = virtual_space_start;
	virtual_space_start += size;

	/*
	 * allocate and map in physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/* XXX: should be wired, but some pmaps don't like that ... */
#if defined(PMAP_NEW)
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
#else
		pmap_enter(pmap_kernel(), vaddr, paddr,
		    VM_PROT_READ|VM_PROT_WRITE, FALSE);
#endif

	}

	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
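
/*
 * illustrative use (hypothetical caller, not compiled): early boot
 * code that wants "n" zeroed vm_page structures before the VM is up
 * could do what uvm_page_init() does above:
 */
#if 0
	struct vm_page *pgs;

	pgs = (struct vm_page *)uvm_pageboot_alloc(n * sizeof(*pgs));
	bzero(pgs, n * sizeof(*pgs));	/* bootstrap memory isn't zeroed */
#endif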

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

static boolean_t
uvm_page_physget(paddrp)
	vm_offset_t *paddrp;
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (vm_physmem[lcv].pgs)
			panic("vm_page_physget: called _after_ bootstrap");

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
				    panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
				    panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("vm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}
#endif /* PMAP_STEAL_MEMORY */

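/*
 * worked example of the pass 1 arithmetic above (made-up frame
 * numbers): a segment with start == avail_start == 0x100 and
 * end == avail_end == 0x200 is stolen from the front: pass 1
 * returns ptoa(0x100) and bumps both start and avail_start to
 * 0x101; once avail_start reaches end, the entry is copied away
 * and vm_nphysseg drops by one.
 */
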
/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs (page frame numbers)
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(start, end, avail_start, avail_end)
	vm_offset_t start, end, avail_start, avail_end;
{
	int preload, lcv, npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("vm_page_physload: page size not set!");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("vm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
		    VM_PHYSSEG_MAX, start, end);
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("vm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		vm_offset_t paddr;
		npages = end - start;  /* # of pages */
		MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
					 M_VMPAGE, M_NOWAIT);
		if (pgs == NULL) {
			printf("vm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr, and free pages */
		bzero(pgs, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
				 lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) < avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("vm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}
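
/*
 * illustrative sketch (hypothetical, not compiled): machine-dependent
 * bootstrap code is expected to register its RAM this way before
 * uvm_page_init() runs.  the addresses are made up, and atop()
 * converts byte addresses to page frame numbers.
 */
#if 0
	/* "first_avail" (hypothetical): first free byte above the kernel */
	uvm_page_physload(atop(0x00100000), atop(0x02000000),
	    atop(first_avail), atop(0x02000000));
#endif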

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;
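
	/*
	 * e.g. (illustrative numbers) freepages = 3000 leaves
	 * bucketcount = 4096, the smallest power of two >= freepages,
	 * so the hash mask set below is simply 4095.
	 */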

	/*
	 * malloc new buckets
	 */

	MALLOC(newbuckets, struct pglist *, sizeof(struct pglist) * bucketcount,
					 M_VMPBUCKET, M_NOWAIT);
	if (newbuckets == NULL) {
		printf("vm_page_physrehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splimp();
	simple_lock(&uvm.hashlock);
	/* swap old for new ... */
	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			  &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			  pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if we malloc'd it previously
	 */

	if (oldbuckets != &uvm_bootbucket)
		FREE(oldbuckets, M_VMPBUCKET);

	/*
	 * done
	 */
	return;
}


#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump __P((void)); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("rehash: physical memory config [segs=%d of %d]:\n",
				 vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
		    vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
		    vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc: allocate vm_page.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 */

struct vm_page *
uvm_pagealloc(obj, off, anon)
	struct uvm_object *obj;
	vm_offset_t off;
	struct vm_anon *anon;
{
	int s;
	struct vm_page *pg;

#ifdef DIAGNOSTIC
	/* sanity check */
	if (obj && anon)
		panic("uvm_pagealloc: obj and anon != NULL");
#endif

	s = splimp();

	uvm_lock_fpageq();		/* lock free page queue */

	/*
	 * check to see if we need to wake the pagedaemon so that it can
	 * generate some free pages.
	 */

	if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		thread_wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object, or
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	pg = uvm.page_free.tqh_first;
	if (pg == NULL ||
	    (uvmexp.free <= uvmexp.reserve_kernel &&
	     !(obj && obj->uo_refs == UVM_OBJ_KERN)) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	     !(obj == uvmexp.kmem_object && curproc == uvm.pagedaemon_proc))) {
		uvm_unlock_fpageq();
		splx(s);
		return(NULL);
	}

	TAILQ_REMOVE(&uvm.page_free, pg, pageq);
	uvmexp.free--;

	uvm_unlock_fpageq();		/* unlock free page queue */
	splx(s);

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->version++;
	pg->wire_count = 0;
	pg->loan_count = 0;
	if (anon) {
		anon->u.an_page = pg;
		pg->pqflags = PQ_ANON;
	} else {
		if (obj)
			uvm_pageinsert(pg);
		pg->pqflags = 0;
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	return(pg);
}
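
/*
 * illustrative caller sketch (hypothetical names, not compiled):
 * allocate a page for a locked object, sleeping for more memory when
 * the free list is depleted; uvm_wait() is assumed to be the usual
 * "wait for the pagedaemon" helper.
 */
#if 0
	while ((pg = uvm_pagealloc(uobj, off, NULL)) == NULL) {
		simple_unlock(&uobj->vmobjlock);	/* can't sleep locked */
		uvm_wait("pgalloc");
		simple_lock(&uobj->vmobjlock);		/* relock and retry */
	}
	pg->flags &= ~(PG_BUSY|PG_FAKE);	/* pages come back busy */
	UVM_PAGE_OWN(pg, NULL);
#endif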

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	vm_offset_t newoff;
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->version++;
		uvm_pageinsert(pg);
	}

	return;
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(pg)
	struct vm_page *pg;
{
	int s;
	int saved_loan_count = pg->loan_count;

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.   the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		if (saved_loan_count)
			pg->flags &= ~PG_CLEAN;	/* in case an anon takes over */

		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]), so we
		 * just return (when the last loan is dropped, the page will be
		 * freed by whatever is holding the last loan).
		 */
		if (saved_loan_count)
			return;

	} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {

		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		pg->pqflags &= ~PQ_ANON;
		pg->uanon = NULL;
		return;
	}

#ifdef DIAGNOSTIC
	if (saved_loan_count) {
		printf("uvm_pagefree: warning: freeing page with a loan "
		    "count of %d\n", saved_loan_count);
		panic("uvm_pagefree: loan count");
	}
#endif

	/*
	 * now remove the page from the queues
	 */

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		pg->pqflags &= ~PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pqflags & PQ_INACTIVE) {
		if (pg->pqflags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		pg->pqflags &= ~PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */
	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}

	/*
	 * and put on free queue
	 */

	s = splimp();
	uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free, pg, pageq);
	pg->pqflags = PQ_FREE;
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;
	uvm_unlock_fpageq();
	splx(s);
}
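
/*
 * illustrative sketch (not compiled): the free protocol described
 * above for a page owned by a locked object.  pmap_page_protect()
 * with VM_PROT_NONE is one way a caller might remove any remaining
 * mappings first (PMAP_PGARG is assumed from elsewhere in UVM).
 */
#if 0
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	uvm_lock_pageq();		/* lock page queues */
	uvm_pagefree(pg);		/* pg's identity is gone now */
	uvm_unlock_pageq();
#endif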

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			     pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif