/*	$NetBSD: uvm_page.h,v 1.98 2020/02/23 15:46:43 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_PAGE_H_
#define _UVM_UVM_PAGE_H_

#include <uvm/uvm_extern.h>
#include <uvm/uvm_pglist.h>

/*
 * Management of resident (logical) pages.
 *
 * Each resident page has a vm_page structure, indexed by page number.
 * There are several lists in the structure:
 *
 * - A red-black tree rooted at the containing object is used to
 *   quickly perform object+offset lookups.
 * - A list of all pages for a given object, so that they can all be
 *   deactivated quickly at deallocation time.
 * - An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object and offset to which
 * this page belongs (for pageout) and sundry status bits.
 *
 * Note that the page structure has no lock of its own.  The page is
 * generally protected by its owner's lock (UVM object or amap/anon).
 * It should be noted that UVM has to serialize pmap(9) operations on
 * the managed pages, e.g. for pmap_enter() calls.  Hence, the lock
 * order is as follows:
 *
 *	[vmpage-owner-lock] ->
 *		any pmap locks (e.g. PV hash lock)
 *
 * Since the kernel is always self-consistent, no serialization is
 * required for unmanaged mappings, e.g. for pmap_kenter_pa() calls.
 *
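 * For example (an illustrative sketch only; locking primitives and
 * error handling elided), entering a managed mapping follows that order:
 *
 *	lock(uobj->vmobjlock);			vmpage-owner lock first
 *	pg = uvm_pagelookup(uobj, off);
 *	pmap_enter(pmap, va, VM_PAGE_TO_PHYS(pg), prot, flags);
 *						pmap locks taken internally
 *	unlock(uobj->vmobjlock);
 *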
 * Field markings and the corresponding locks:
 *
 * f:	free page queue lock, uvm_fpageqlock
 * o:	page owner (uvm_object::vmobjlock, vm_amap::am_lock, vm_anon::an_lock)
 * i:	vm_page::interlock
 *        => flags set and cleared only with o&i held can
 *           safely be tested for with only o held.
 * o,i:	o|i for read, o&i for write (depends on context, e.g. if the
 *	  page could be loaned)
 *	  => see uvm_loan.c
 * w:	wired page queue or uvm_pglistalloc:
 *	  => wired page queue: o&i to change, stable from wire to unwire
 *		XXX What about concurrent or nested wire?
 *	  => uvm_pglistalloc: owned by caller
 * ?:	locked by pmap or assumed page owner's lock
 * p:	locked by pagedaemon policy module (pdpolicy)
 * c:	cpu private
 * s:	stable, does not change
 *
 * UVM and pmap(9) may use uvm_page_owner_locked_p() to assert whether the
 * page owner's lock is acquired.
 *
 * A page can have one of four identities:
 *
 * o free
 *   => pageq.list is entry on global free page queue
 *   => uanon is unused (or (void *)0xdeadbeef for DEBUG)
 *   => uobject is unused (or (void *)0xdeadbeef for DEBUG)
 *   => PG_FREE is set in flags
 * o owned by a uvm_object
 *   => pageq.queue is entry on wired page queue, if any
 *   => uanon is NULL or the vm_anon to which it has been O->A loaned
 *   => uobject is owner
 * o owned by a vm_anon
 *   => pageq is unused (XXX correct?)
 *   => uanon is owner
 *   => uobject is NULL
 *   => PG_ANON is set in flags
 * o allocated by uvm_pglistalloc
 *   => pageq.queue is entry on resulting pglist, owned by caller
 *   => uanon is unused
 *   => uobject is unused
 *
 * The following transitions are allowed:
 *
 * - uvm_pagealloc: free -> owned by a uvm_object/vm_anon
 * - uvm_pagefree: owned by a uvm_object/vm_anon -> free
 * - uvm_pglistalloc: free -> allocated by uvm_pglistalloc
 * - uvm_pglistfree: allocated by uvm_pglistalloc -> free
 *
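 * For example (a sketch only; the appropriate locks must be held), a
 * page's identity can be read back from its fields:
 *
 *	if (pg->flags & PG_FREE)	=> free
 *	else if (pg->flags & PG_ANON)	=> owned by pg->uanon
 *	else if (pg->uobject != NULL)	=> owned by pg->uobject
 *	else				=> uvm_pglistalloc output, owned
 *					   by the caller
 *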
 * On the ordering of fields:
 *
 * The fields most heavily used by the page allocator and uvmpdpol are
 * clustered together at the start of the structure, so that while under
 * a global lock it is more likely that only one cache line per page
 * needs to be touched.
 */

struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) queue;	/* w: wired page queue
						 * or uvm_pglistalloc output */
		LIST_ENTRY(vm_page) list;	/* f: global free page queue */
	} pageq;
	TAILQ_ENTRY(vm_page)	pdqueue;	/* p: pagedaemon queue */
	kmutex_t		interlock;	/* s: lock on identity */
	uint32_t		pqflags;	/* i: pagedaemon flags */
	uint32_t		flags;		/* o: object flags */
	paddr_t			phys_addr;	/* o: physical address of pg */
	uint32_t		loan_count;	/* o,i: num. active loans */
	uint32_t		wire_count;	/* o,i: wired down map refs */
	struct vm_anon		*uanon;		/* o,i: anon */
	struct uvm_object	*uobject;	/* o,i: object */
	voff_t			offset;		/* o: offset into object */

#ifdef __HAVE_VM_PAGE_MD
	struct vm_page_md	mdpage;		/* ?: pmap-specific data */
#endif

#if defined(UVM_PAGE_TRKOWN)
	/* debugging fields to track page ownership */
	pid_t			owner;		/* proc that set PG_BUSY */
	lwpid_t			lowner;		/* lwp that set PG_BUSY */
	const char		*owner_tag;	/* why it was set busy */
#endif
};

/*
 * Overview of UVM page flags, stored in pg->flags.
 *
 * Locking notes:
 *
 * PG_, struct vm_page::flags	=> locked by owner
 * PG_AOBJ			=> additionally locked by vm_page::interlock
 * PG_ANON			=> additionally locked by vm_page::interlock
 * PG_FREE			=> additionally locked by uvm_fpageqlock
 *				   for uvm_pglistalloc()
 *
 * Flag descriptions:
 *
 * PG_CLEAN:
 *	Page is known clean.
 *	The contents of the page are consistent with its backing store.
 *
 * PG_DIRTY:
 *	Page is known dirty.
 *	To avoid losing data, the contents of the page should be written
 *	back to the backing store before the page is freed.
 *
 * PG_BUSY:
 *	Page is long-term locked, usually because I/O (a transfer between
 *	the page's memory and the backing store) is in progress.  An LWP
 *	attempting to access the page shall set PG_WANTED and wait.
 *
 * PG_WANTED:
 *	Indicates that the page, which is currently PG_BUSY, is wanted by
 *	some other LWP.  The page owner (i.e. the LWP which set PG_BUSY) is
 *	responsible for clearing both flags and waking up any waiters once
 *	it has released the long-term lock (PG_BUSY).
 *
 * PG_PAGEOUT:
 *	Indicates that the page is being paged-out in preparation for
 *	being freed.
 *
 * PG_RELEASED:
 *	Indicates that the page, which is currently PG_BUSY, should be freed
 *	after the release of the long-term lock.  It is the responsibility
 *	of the owning LWP (i.e. the one which set PG_BUSY) to do so.
 *
 * PG_FAKE:
 *	Page has been allocated, but not yet initialised.  The flag is used
 *	to avoid overwriting valid data, e.g. to prevent a read from the
 *	backing store when the in-core data is newer.
 *
 * PG_RDONLY:
 *	Indicates that the page must be mapped read-only.
 *
 * PG_ZERO:
 *	Indicates that the page has been pre-zeroed.  This flag is only
 *	set when the page is not in the queues and is cleared when the
 *	page is placed on the free list.
 *
 * PG_MARKER:
 *	Dummy marker page, generally used for list traversal.
 */

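/*
 * As a sketch of the common owner-side PG_BUSY/PG_WANTED pattern (the
 * owner's lock is held throughout; illustrative only, see the pagers
 * and uvm_page_unbusy() for the real thing):
 *
 *	if (pg->flags & PG_WANTED)
 *		wakeup(pg);
 *	pg->flags &= ~(PG_BUSY | PG_WANTED);
 */
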
/*
 * if you want to renumber PG_CLEAN and PG_DIRTY, check __CTASSERTs in
 * uvm_page_status.c first.
 */

#define	PG_CLEAN	0x00000001	/* page is known clean */
#define	PG_DIRTY	0x00000002	/* page is known dirty */
#define	PG_BUSY		0x00000004	/* page is locked */
#define	PG_WANTED	0x00000008	/* someone is waiting for page */
#define	PG_PAGEOUT	0x00000010	/* page to be freed for pagedaemon */
#define	PG_RELEASED	0x00000020	/* page to be freed when unbusied */
#define	PG_FAKE		0x00000040	/* page is not yet initialized */
#define	PG_RDONLY	0x00000080	/* page must be mapped read-only */
#define	PG_ZERO		0x00000100	/* page is pre-zero'd */
#define	PG_TABLED	0x00000200	/* page is tabled in object */
#define	PG_AOBJ		0x00000400	/* page is part of an anonymous
					   uvm_object */
#define	PG_ANON		0x00000800	/* page is part of an anon, rather
					   than an uvm_object */
#define	PG_FILE		0x00001000	/* file backed (non-anonymous) */
#define	PG_READAHEAD	0x00002000	/* read-ahead but not "hit" yet */
#define	PG_FREE		0x00004000	/* page is on free list */
#define	PG_MARKER	0x00008000	/* dummy marker page */
#define	PG_PAGER1	0x00010000	/* pager-specific flag */

#define	PG_STAT		(PG_ANON|PG_AOBJ|PG_FILE)
#define	PG_SWAPBACKED	(PG_ANON|PG_AOBJ)

#define	UVM_PGFLAGBITS \
	"\20\1CLEAN\2DIRTY\3BUSY\4WANTED" \
	"\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
	"\11ZERO\12TABLED\13AOBJ\14ANON" \
	"\15FILE\16READAHEAD\17FREE\20MARKER" \
	"\21PAGER1"

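/*
 * UVM_PGFLAGBITS is a snprintb(3) format string; a minimal sketch of
 * formatting a page's flags for debug output:
 *
 *	char buf[128];
 *
 *	snprintb(buf, sizeof(buf), UVM_PGFLAGBITS, pg->flags);
 *	printf("pg %p flags %s\n", pg, buf);
 */
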
/*
 * Flags stored in pg->pqflags, which is protected by pg->interlock.
 *
 * PQ_PRIVATE is for uvmpdpol to do whatever it wants with.
 */

#define	PQ_INTENT_A		0x00000000	/* intend activation */
#define	PQ_INTENT_I		0x00000001	/* intend deactivation */
#define	PQ_INTENT_E		0x00000002	/* intend enqueue */
#define	PQ_INTENT_D		0x00000003	/* intend dequeue */
#define	PQ_INTENT_MASK		0x00000003	/* mask of intended state */
#define	PQ_INTENT_SET		0x00000004	/* not realized yet */
#define	PQ_INTENT_QUEUED	0x00000008	/* queued for processing */
#define	PQ_PRIVATE		0x00000ff0	/* private for pdpolicy */

#define	UVM_PQFLAGBITS \
	"\20\1INTENT_0\2INTENT_1\3INTENT_SET\4INTENT_QUEUED" \
	"\5PRIVATE1\6PRIVATE2\7PRIVATE3\10PRIVATE4" \
	"\11PRIVATE5\12PRIVATE6\13PRIVATE7\14PRIVATE8"

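/*
 * Sketch (illustrative only): reading back a pending, not yet realized
 * intent with the interlock held:
 *
 *	mutex_enter(&pg->interlock);
 *	if ((pg->pqflags & PQ_INTENT_SET) != 0) {
 *		unsigned intent = pg->pqflags & PQ_INTENT_MASK;
 *		... PQ_INTENT_A/I/E/D tells what is pending ...
 *	}
 *	mutex_exit(&pg->interlock);
 */
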
/*
 * physical memory layout structure
 *
 * MD vmparam.h must #define:
 *   VM_PHYSSEG_MAX = max number of physical memory segments we support
 *		   (if this is "1" then we revert to a "contig" case)
 *   VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
 * 	- VM_PSTRAT_RANDOM:   linear search (random order)
 *	- VM_PSTRAT_BSEARCH:  binary search (sorted by address)
 *	- VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
 *      - others?
 *   XXXCDC: eventually we should purge all left-over global variables...
 */
#define VM_PSTRAT_RANDOM	1
#define VM_PSTRAT_BSEARCH	2
#define VM_PSTRAT_BIGFIRST	3

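/*
 * For example, a machine-dependent vmparam.h might contain (the values
 * here are illustrative only):
 *
 *	#define VM_PHYSSEG_MAX		16
 *	#define VM_PHYSSEG_STRAT	VM_PSTRAT_BSEARCH
 */
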
#ifdef _KERNEL

/*
 * globals
 */

extern bool vm_page_zero_enable;

/*
 * prototypes: the following prototypes define the interface to pages
 */

void uvm_page_init(vaddr_t *, vaddr_t *);
#if defined(UVM_PAGE_TRKOWN)
void uvm_page_own(struct vm_page *, const char *);
#endif
#if !defined(PMAP_STEAL_MEMORY)
bool uvm_page_physget(paddr_t *);
#endif
void uvm_page_recolor(int);
void uvm_page_rebucket(void);
void uvm_pageidlezero(void);

void uvm_pageactivate(struct vm_page *);
vaddr_t uvm_pageboot_alloc(vsize_t);
void uvm_pagecopy(struct vm_page *, struct vm_page *);
void uvm_pagedeactivate(struct vm_page *);
void uvm_pagedequeue(struct vm_page *);
void uvm_pageenqueue(struct vm_page *);
void uvm_pagefree(struct vm_page *);
void uvm_pagelock(struct vm_page *);
void uvm_pagelock2(struct vm_page *, struct vm_page *);
void uvm_pageunlock(struct vm_page *);
void uvm_pageunlock2(struct vm_page *, struct vm_page *);
void uvm_page_unbusy(struct vm_page **, int);
struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
void uvm_pageunwire(struct vm_page *);
void uvm_pagewire(struct vm_page *);
void uvm_pagezero(struct vm_page *);
bool uvm_pageismanaged(paddr_t);
bool uvm_page_owner_locked_p(struct vm_page *, bool);
void uvm_pgfl_lock(void);
void uvm_pgfl_unlock(void);
unsigned int uvm_pagegetdirty(struct vm_page *);
void uvm_pagemarkdirty(struct vm_page *, unsigned int);
bool uvm_pagecheckdirty(struct vm_page *, bool);
bool uvm_pagereadonly_p(struct vm_page *);
bool uvm_page_locked_p(struct vm_page *);

int uvm_page_lookup_freelist(struct vm_page *);

struct vm_page *uvm_phys_to_vm_page(paddr_t);
paddr_t uvm_vm_page_to_phys(const struct vm_page *);

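/*
 * Sketch (illustrative only): looking up a page in an object and
 * reactivating it, with the object's lock held:
 *
 *	pg = uvm_pagelookup(uobj, offset);
 *	if (pg != NULL && (pg->flags & PG_BUSY) == 0) {
 *		uvm_pagelock(pg);
 *		uvm_pageactivate(pg);
 *		uvm_pageunlock(pg);
 *	}
 */
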
#if defined(PMAP_DIRECT)
extern bool ubc_direct;
int uvm_direct_process(struct vm_page **, u_int, voff_t, vsize_t,
	    int (*)(void *, size_t, void *), void *);
#endif

/*
 * page dirtiness status for uvm_pagegetdirty and uvm_pagemarkdirty
 *
 * UNKNOWN means that we need to consult pmap to know if the page is
 * dirty or not.
 * basically, UVM_PAGE_STATUS_CLEAN implies that the page has no writable
 * mapping.
 *
 * if you want to renumber these, check __CTASSERTs in
 * uvm_page_status.c first.
 */

#define	UVM_PAGE_STATUS_UNKNOWN	0
#define	UVM_PAGE_STATUS_CLEAN	1
#define	UVM_PAGE_STATUS_DIRTY	2
#define	UVM_PAGE_NUM_STATUS	3

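/*
 * Sketch (illustrative only; roughly what uvm_pagecheckdirty() does):
 * deciding whether a page must be written back, consulting pmap only
 * when the status is UNKNOWN:
 *
 *	switch (uvm_pagegetdirty(pg)) {
 *	case UVM_PAGE_STATUS_DIRTY:
 *		dirty = true;
 *		break;
 *	case UVM_PAGE_STATUS_UNKNOWN:
 *		dirty = pmap_clear_modify(pg);
 *		break;
 *	default:
 *		dirty = false;
 *		break;
 *	}
 */
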
/*
 * macros
 */

#define VM_PAGE_TO_PHYS(entry)	uvm_vm_page_to_phys(entry)

#ifdef __HAVE_VM_PAGE_MD
#define	VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)
#endif

/*
 * Compute the page color for a given page.
 */
#define	VM_PGCOLOR(pg) \
	(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)
#define	PHYS_TO_VM_PAGE(pa)	uvm_phys_to_vm_page(pa)

/*
 * VM_PAGE_IS_FREE() can't tell if the page is on the global free list or
 * in a per-CPU cache.  If you need to be certain, pause caching.
 */
#define VM_PAGE_IS_FREE(entry)  ((entry)->flags & PG_FREE)

/*
 * Use the lower 10 bits of pg->phys_addr to cache some locators for
 * the page.  This implies that the smallest possible page size is 1kB,
 * and that nobody should use pg->phys_addr directly (use
 * VM_PAGE_TO_PHYS()).
 *
 * - 5 bits for the freelist index, because uvm_page_lookup_freelist()
 *   traverses an rbtree and therefore features prominently in traces
 *   captured during performance tests.  It would probably be more useful
 *   to cache the physseg index here because the freelist can be inferred
 *   from the physseg, but that requires changes to allocation for
 *   UVM_HOTPLUG, so for now we'll go with the freelist.
 *
 * - 5 bits for the "bucket", a way for us to categorise pages further as
 *   needed (e.g. NUMA node).
 *
 * None of this is set in stone; it can be adjusted as needed.
 */

#define	UVM_PHYSADDR_FREELIST	__BITS(0,4)
#define	UVM_PHYSADDR_BUCKET	__BITS(5,9)

static inline unsigned
uvm_page_get_freelist(struct vm_page *pg)
{
	unsigned fl = __SHIFTOUT(pg->phys_addr, UVM_PHYSADDR_FREELIST);
	KASSERT(fl == (unsigned)uvm_page_lookup_freelist(pg));
	return fl;
}

static inline unsigned
uvm_page_get_bucket(struct vm_page *pg)
{
	return __SHIFTOUT(pg->phys_addr, UVM_PHYSADDR_BUCKET);
}

static inline void
uvm_page_set_freelist(struct vm_page *pg, unsigned fl)
{
	KASSERT(fl < 32);
	pg->phys_addr &= ~UVM_PHYSADDR_FREELIST;
	pg->phys_addr |= __SHIFTIN(fl, UVM_PHYSADDR_FREELIST);
}

static inline void
uvm_page_set_bucket(struct vm_page *pg, unsigned b)
{
	KASSERT(b < 32);
	pg->phys_addr &= ~UVM_PHYSADDR_BUCKET;
	pg->phys_addr |= __SHIFTIN(b, UVM_PHYSADDR_BUCKET);
}

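/*
 * Sketch of how the cached locators are meant to be consumed, e.g. when
 * returning a page to its queue (illustrative only; the real consumers
 * live in uvm_page.c and uvm_pglist.c):
 *
 *	unsigned fl = uvm_page_get_freelist(pg);
 *	unsigned b = uvm_page_get_bucket(pg);
 *
 *	... index the per-freelist, per-bucket structures with (fl, b) ...
 */
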
#ifdef DEBUG
void uvm_pagezerocheck(struct vm_page *);
#endif /* DEBUG */

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGE_H_ */