/*	$NetBSD: uvm_page.h,v 1.109 2020/12/20 16:38:26 skrll Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_PAGE_H_
#define _UVM_UVM_PAGE_H_

#ifdef _KERNEL_OPT
#include "opt_uvm_page_trkown.h"
#endif

#include <sys/rwlock.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_pglist.h>

/*
 * Management of resident (logical) pages.
 *
 * Each resident page has a vm_page structure, indexed by page number.
 * There are several lists in the structure:
 *
 * - A red-black tree rooted with the containing object is used to
 *   quickly perform object+offset lookups.
 * - A list of all pages for a given object, for quick deactivation
 *   at the time of deallocation.
 * - An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object and offset to which
 * this page belongs (for pageout) and sundry status bits.
 *
 * Note that the page structure has no lock of its own.  The page is
 * generally protected by its owner's lock (UVM object or amap/anon).
 * It should be noted that UVM has to serialize pmap(9) operations on
 * the managed pages, e.g. for pmap_enter() calls.  Hence, the lock
 * order is as follows:
 *
 *	[vmpage-owner-lock] ->
 *		any pmap locks (e.g. PV hash lock)
 *
 * (An illustrative sketch of this locking follows the structure
 * definition below.)
 *
 * Since the kernel is always self-consistent, no serialization is
 * required for unmanaged mappings, e.g. for pmap_kenter_pa() calls.
 *
 * Field markings and the corresponding locks:
 *
 * f:	free page queue lock, uvm_fpageqlock
 * o:	page owner (uvm_object::vmobjlock, vm_amap::am_lock, vm_anon::an_lock)
 * i:	vm_page::interlock
 *        => flags set and cleared only with o&i held can
 *           safely be tested for with only o held.
 * o,i:	o|i for read, o&i for write (depends on context, e.g. whether the
 *	  page could be loaned)
 *	  => see uvm_loan.c
 * w:	wired page queue or uvm_pglistalloc:
 *	  => wired page queue: o&i to change, stable from wire to unwire
 *		XXX What about concurrent or nested wire?
 *	  => uvm_pglistalloc: owned by caller
 * ?:	locked by pmap or assumed page owner's lock
 * p:	locked by pagedaemon policy module (pdpolicy)
 * c:	cpu private
 * s:	stable, does not change
 *
 * UVM and pmap(9) may use uvm_page_owner_locked_p() to assert that the
 * page owner's lock is held.
 *
 * A page can have one of four identities:
 *
 * o free
 *   => pageq.list is entry on global free page queue
 *   => uanon is unused (or (void *)0xdeadbeef for DEBUG)
 *   => uobject is unused (or (void *)0xdeadbeef for DEBUG)
 *   => PG_FREE is set in flags
 * o owned by a uvm_object
 *   => pageq.queue is entry on wired page queue, if any
 *   => uanon is NULL or the vm_anon to which it has been O->A loaned
 *   => uobject is owner
 * o owned by a vm_anon
 *   => pageq is unused (XXX correct?)
 *   => uanon is owner
 *   => uobject is NULL
 *   => PG_ANON is set in flags
 * o allocated by uvm_pglistalloc
 *   => pageq.queue is entry on resulting pglist, owned by caller
 *   => uanon is unused
 *   => uobject is unused
 *
 * The following transitions are allowed:
 *
 * - uvm_pagealloc: free -> owned by a uvm_object/vm_anon
 * - uvm_pagefree: owned by a uvm_object/vm_anon -> free
 * - uvm_pglistalloc: free -> allocated by uvm_pglistalloc
 * - uvm_pglistfree: allocated by uvm_pglistalloc -> free
 *
 * On the ordering of fields:
 *
 * The fields most heavily used during fault processing are clustered
 * together at the start of the structure to reduce cache misses.
 * XXX This entire thing should be shrunk to fit in one cache line.
 */

struct vm_page {
	/* _LP64: first cache line */
	union {
		TAILQ_ENTRY(vm_page) queue;	/* w: wired page queue
						 * or uvm_pglistalloc output */
		LIST_ENTRY(vm_page) list;	/* f: global free page queue */
	} pageq;
	uint32_t		pqflags;	/* i: pagedaemon flags */
	uint32_t		flags;		/* o: object flags */
	paddr_t			phys_addr;	/* o: physical address of pg */
	uint32_t		loan_count;	/* o,i: num. active loans */
	uint32_t		wire_count;	/* o,i: wired down map refs */
	struct vm_anon		*uanon;		/* o,i: anon */
	struct uvm_object	*uobject;	/* o,i: object */
	voff_t			offset;		/* o: offset into object */

	/* _LP64: second cache line */
	kmutex_t		interlock;	/* s: lock on identity */
	TAILQ_ENTRY(vm_page)	pdqueue;	/* p: pagedaemon queue */

#ifdef __HAVE_VM_PAGE_MD
	struct vm_page_md	mdpage;		/* ?: pmap-specific data */
#endif

#if defined(UVM_PAGE_TRKOWN)
	/* debugging fields to track page ownership */
	pid_t			owner;		/* proc that set PG_BUSY */
	lwpid_t			lowner;		/* lwp that set PG_BUSY */
	const char		*owner_tag;	/* why it was set busy */
#endif
};
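
/*
 * An illustrative sketch of the locking notes above; it is not part of
 * this header, and "uobj" and "pg" are placeholders.  The owner's lock
 * ("o") is taken before the page interlock ("i"), and fields or queues
 * marked "o,i" are only changed with both held.  One plausible sequence
 * under those rules:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);	(owner lock first)
 *	uvm_pagelock(pg);			(then pg->interlock)
 *	uvm_pageactivate(pg);			(a page queue operation)
 *	uvm_pageunlock(pg);
 *	rw_exit(uobj->vmobjlock);
 */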

/*
 * Overview of UVM page flags, stored in pg->flags.
 *
 * Locking notes:
 *
 * PG_, struct vm_page::flags	=> locked by owner
 * PG_AOBJ			=> additionally locked by vm_page::interlock
 * PG_ANON			=> additionally locked by vm_page::interlock
 * PG_FREE			=> additionally locked by uvm_fpageqlock
 *				   for uvm_pglistalloc()
 *
 * Flag descriptions:
 *
 * PG_CLEAN:
 *	Page is known clean.
 *	The contents of the page are consistent with its backing store.
 *
 * PG_DIRTY:
 *	Page is known dirty.
 *	To avoid losing data, the contents of the page should be written
 *	back to the backing store before freeing the page.
 *
 * PG_BUSY:
 *	Page is long-term locked, usually because I/O (a transfer between
 *	the page and its backing store) is in progress.  An LWP attempting
 *	to access the page shall set PQ_WANTED and wait.  PG_BUSY may only
 *	be set with a write lock held on the object.  (An illustrative
 *	sketch of this protocol follows the flag definitions below.)
 *
 * PG_PAGEOUT:
 *	Indicates that the page is being paged-out in preparation for
 *	being freed.
 *
 * PG_RELEASED:
 *	Indicates that the page, which is currently PG_BUSY, should be freed
 *	after the long-term lock is released.  It is the responsibility of
 *	the owning LWP (i.e. the one which set PG_BUSY) to do so.
 *
 * PG_FAKE:
 *	Page has been allocated, but not yet initialised.  The flag is used
 *	to avoid overwriting valid data, e.g. to prevent a read from the
 *	backing store when the in-core data is newer.
 *
 * PG_RDONLY:
 *	Indicates that the page must be mapped read-only.
 *
 * PG_MARKER:
 *	Dummy marker page, generally used for list traversal.
 */

/*
 * If you want to renumber PG_CLEAN and PG_DIRTY, check the __CTASSERTs in
 * uvm_page_status.c first.
 */

#define	PG_CLEAN	0x00000001	/* page is known clean */
#define	PG_DIRTY	0x00000002	/* page is known dirty */
#define	PG_BUSY		0x00000004	/* page is locked */
#define	PG_PAGEOUT	0x00000010	/* page to be freed for pagedaemon */
#define	PG_RELEASED	0x00000020	/* page to be freed when unbusied */
#define	PG_FAKE		0x00000040	/* page is not yet initialized */
#define	PG_RDONLY	0x00000080	/* page must be mapped read-only */
#define	PG_TABLED	0x00000200	/* page is tabled in object */
#define	PG_AOBJ		0x00000400	/* page is part of an anonymous
					   uvm_object */
#define	PG_ANON		0x00000800	/* page is part of an anon, rather
					   than an uvm_object */
#define	PG_FILE		0x00001000	/* file backed (non-anonymous) */
#define	PG_READAHEAD	0x00002000	/* read-ahead but not "hit" yet */
#define	PG_FREE		0x00004000	/* page is on free list */
#define	PG_MARKER	0x00008000	/* dummy marker page */
#define	PG_PAGER1	0x00010000	/* pager-specific flag */
#define	PG_PGLCA	0x00020000	/* allocated by uvm_pglistalloc_contig */

#define	PG_STAT		(PG_ANON|PG_AOBJ|PG_FILE)
#define	PG_SWAPBACKED	(PG_ANON|PG_AOBJ)

#define	UVM_PGFLAGBITS \
	"\20\1CLEAN\2DIRTY\3BUSY" \
	"\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
	"\11ZERO\12TABLED\13AOBJ\14ANON" \
	"\15FILE\16READAHEAD\17FREE\20MARKER" \
	"\21PAGER1\22PGLCA"
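
/*
 * An illustrative sketch of the PG_BUSY protocol described above; it is
 * not part of this header, and "uobj"/"pg" are placeholders.  The owner
 * busies the page with the object write-locked, drops the lock for the
 * long-term operation, and finally unbusies the page (uvm_page_unbusy()
 * is expected to also deal with PG_RELEASED and any PQ_WANTED waiters):
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	pg->flags |= PG_BUSY;
 *	rw_exit(uobj->vmobjlock);
 *	... perform the long-term operation, e.g. I/O ...
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	uvm_page_unbusy(&pg, 1);
 *	rw_exit(uobj->vmobjlock);
 */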

/*
 * Flags stored in pg->pqflags, which is protected by pg->interlock.
 *
 * PQ_PRIVATE:
 *	... is for uvmpdpol to do whatever it wants with.
 *
 * PQ_INTENT_SET:
 *	Indicates that the intent set on the page has not yet been realized.
 *
 * PQ_INTENT_QUEUED:
 *	Indicates that the page is, or will soon be, on a per-CPU queue for
 *	the intent to be realized.
 *
 * PQ_WANTED:
 *	Indicates that the page, which is currently PG_BUSY, is wanted by
 *	some other LWP.  The page owner (i.e. the LWP which set PG_BUSY) is
 *	responsible for clearing both flags and waking any waiters once it
 *	has released the long-term lock (PG_BUSY).
 */

#define	PQ_INTENT_A		0x00000000	/* intend activation */
#define	PQ_INTENT_I		0x00000001	/* intend deactivation */
#define	PQ_INTENT_E		0x00000002	/* intend enqueue */
#define	PQ_INTENT_D		0x00000003	/* intend dequeue */
#define	PQ_INTENT_MASK		0x00000003	/* mask of intended state */
#define	PQ_INTENT_SET		0x00000004	/* not realized yet */
#define	PQ_INTENT_QUEUED	0x00000008	/* queued for processing */
#define	PQ_PRIVATE		0x00000ff0	/* private for pdpolicy */
#define	PQ_WANTED		0x00001000	/* someone is waiting for page */

#define	UVM_PQFLAGBITS \
	"\20\1INTENT_0\2INTENT_1\3INTENT_SET\4INTENT_QUEUED" \
	"\5PRIVATE1\6PRIVATE2\7PRIVATE3\10PRIVATE4" \
	"\11PRIVATE5\12PRIVATE6\13PRIVATE7\14PRIVATE8" \
	"\15WANTED"
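
/*
 * An illustrative sketch of the PQ_WANTED handshake from the waiter's
 * side; it is not part of this header, and "uobj"/"off"/"retry" are
 * placeholders.  uvm_pagewait() is assumed to mark the page wanted,
 * release the given owner lock and sleep, so the caller must re-lock
 * and look the page up again afterwards:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	pg = uvm_pagelookup(uobj, off);
 *	if (pg != NULL && (pg->flags & PG_BUSY) != 0) {
 *		uvm_pagewait(pg, uobj->vmobjlock, "pgwait");
 *		goto retry;
 *	}
 */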

/*
 * physical memory layout structure
 *
 * MD vmparam.h must #define:
 *   VM_PHYSSEG_MAX = max number of physical memory segments we support
 *		   (if this is "1" then we revert to a "contig" case)
 *   VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
 * 	- VM_PSTRAT_RANDOM:   linear search (random order)
 *	- VM_PSTRAT_BSEARCH:  binary search (sorted by address)
 *	- VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
 *      - others?
 *   XXXCDC: eventually we should purge all left-over global variables...
 */
#define VM_PSTRAT_RANDOM	1
#define VM_PSTRAT_BSEARCH	2
#define VM_PSTRAT_BIGFIRST	3
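
/*
 * For example (values are illustrative only), a port's machine-dependent
 * <machine/vmparam.h> might contain:
 *
 *	#define	VM_PHYSSEG_MAX		16
 *	#define	VM_PHYSSEG_STRAT	VM_PSTRAT_BSEARCH
 */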

#ifdef _KERNEL

/*
 * prototypes: the following define the interface to pages
 */

void uvm_page_init(vaddr_t *, vaddr_t *);
void uvm_pglistalloc_init(void);
#if defined(UVM_PAGE_TRKOWN)
void uvm_page_own(struct vm_page *, const char *);
#endif
#if !defined(PMAP_STEAL_MEMORY)
bool uvm_page_physget(paddr_t *);
#endif
void uvm_page_recolor(int);
void uvm_page_rebucket(void);

void uvm_pageactivate(struct vm_page *);
vaddr_t uvm_pageboot_alloc(vsize_t);
void uvm_pagecopy(struct vm_page *, struct vm_page *);
void uvm_pagedeactivate(struct vm_page *);
void uvm_pagedequeue(struct vm_page *);
void uvm_pageenqueue(struct vm_page *);
void uvm_pagefree(struct vm_page *);
void uvm_pagelock(struct vm_page *);
void uvm_pagelock2(struct vm_page *, struct vm_page *);
void uvm_pageunlock(struct vm_page *);
void uvm_pageunlock2(struct vm_page *, struct vm_page *);
void uvm_page_unbusy(struct vm_page **, int);
struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
void uvm_pageunwire(struct vm_page *);
void uvm_pagewire(struct vm_page *);
void uvm_pagezero(struct vm_page *);
bool uvm_pageismanaged(paddr_t);
bool uvm_page_owner_locked_p(struct vm_page *, bool);
void uvm_pgfl_lock(void);
void uvm_pgfl_unlock(void);
unsigned int uvm_pagegetdirty(struct vm_page *);
void uvm_pagemarkdirty(struct vm_page *, unsigned int);
bool uvm_pagecheckdirty(struct vm_page *, bool);
bool uvm_pagereadonly_p(struct vm_page *);
bool uvm_page_locked_p(struct vm_page *);
void uvm_pagewakeup(struct vm_page *);
bool uvm_pagewanted_p(struct vm_page *);
void uvm_pagewait(struct vm_page *, krwlock_t *, const char *);

int uvm_page_lookup_freelist(struct vm_page *);

struct vm_page *uvm_phys_to_vm_page(paddr_t);
paddr_t uvm_vm_page_to_phys(const struct vm_page *);

#if defined(PMAP_DIRECT)
extern bool ubc_direct;
int uvm_direct_process(struct vm_page **, u_int, voff_t, vsize_t,
	    int (*)(void *, size_t, void *), void *);
#endif

/*
 * page dirtiness status for uvm_pagegetdirty and uvm_pagemarkdirty
 *
 * UNKNOWN means that we need to consult pmap(9) to learn whether the page
 * is dirty or not.
 * Basically, UVM_PAGE_STATUS_CLEAN implies that the page has no writable
 * mapping.
 *
 * If you want to renumber these, check the __CTASSERTs in
 * uvm_page_status.c first.
 */

#define	UVM_PAGE_STATUS_UNKNOWN	0
#define	UVM_PAGE_STATUS_CLEAN	1
#define	UVM_PAGE_STATUS_DIRTY	2
#define	UVM_PAGE_NUM_STATUS	3
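
/*
 * An illustrative sketch, not part of this header: with the page owner's
 * lock held, a caller that has just modified a page's contents could
 * record that fact with:
 *
 *	if (uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_DIRTY)
 *		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
 */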

/*
 * macros
 */

#define VM_PAGE_TO_PHYS(entry)	uvm_vm_page_to_phys(entry)

#ifdef __HAVE_VM_PAGE_MD
#define	VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)
#define	VM_MD_TO_PAGE(md)	(container_of((md), struct vm_page, mdpage))
#endif

/*
 * Compute the page color for a given page.
 */
#define	VM_PGCOLOR(pg) \
	(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)
#define	PHYS_TO_VM_PAGE(pa)	uvm_phys_to_vm_page(pa)

/*
 * VM_PAGE_IS_FREE() can't tell whether the page is on the global free list
 * or in a per-CPU cache.  If you need to be certain, pause caching.
 */
#define VM_PAGE_IS_FREE(entry)  ((entry)->flags & PG_FREE)

/*
 * Use the lower 10 bits of pg->phys_addr to cache some locators for
 * the page.  This implies that the smallest possible page size is 1kB,
 * and that nobody should use pg->phys_addr directly (use
 * VM_PAGE_TO_PHYS()).
 *
 * - 5 bits for the freelist index, because uvm_page_lookup_freelist()
 *   traverses an rbtree and therefore features prominently in traces
 *   captured during performance tests.  It would probably be more useful
 *   to cache the physseg index here because the freelist can be inferred
 *   from the physseg, but it requires changes to allocation for
 *   UVM_HOTPLUG, so for now we'll go with freelist.
 *
 * - 5 bits for "bucket", a way for us to categorise pages further as
 *   needed (e.g. NUMA node).
 *
 * None of this is set in stone; it can be adjusted as needed.
 */

#define	UVM_PHYSADDR_FREELIST	__BITS(0,4)
#define	UVM_PHYSADDR_BUCKET	__BITS(5,9)

static inline unsigned
uvm_page_get_freelist(struct vm_page *pg)
{
	unsigned fl = __SHIFTOUT(pg->phys_addr, UVM_PHYSADDR_FREELIST);
	KASSERT(fl == (unsigned)uvm_page_lookup_freelist(pg));
	return fl;
}

static inline unsigned
uvm_page_get_bucket(struct vm_page *pg)
{
	return __SHIFTOUT(pg->phys_addr, UVM_PHYSADDR_BUCKET);
}

static inline void
uvm_page_set_freelist(struct vm_page *pg, unsigned fl)
{
	KASSERT(fl < 32);
	pg->phys_addr &= ~UVM_PHYSADDR_FREELIST;
	pg->phys_addr |= __SHIFTIN(fl, UVM_PHYSADDR_FREELIST);
}

static inline void
uvm_page_set_bucket(struct vm_page *pg, unsigned b)
{
	KASSERT(b < 32);
	pg->phys_addr &= ~UVM_PHYSADDR_BUCKET;
	pg->phys_addr |= __SHIFTIN(b, UVM_PHYSADDR_BUCKET);
}
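
/*
 * An illustrative sketch, not part of this header: the accessors above
 * hide the phys_addr encoding, so callers simply read and write the
 * cached locators ("pg" is a placeholder for some managed page):
 *
 *	uvm_page_set_bucket(pg, 3);
 *	KASSERT(uvm_page_get_bucket(pg) == 3);
 *	fl = uvm_page_get_freelist(pg);
 */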

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGE_H_ */
    479