/*	$NetBSD: uvm_page.h,v 1.55 2008/06/04 15:06:04 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_PAGE_H_
#define _UVM_UVM_PAGE_H_

/*
 * uvm_page.h
 */

/*
 *	Resident memory system definitions.
 */

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A red-black tree rooted at the containing
 *		object is used to quickly perform
 *		object+offset lookups.
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P) [or both].
 */

/*
 * locking note: the mach version of this data structure had bit
 * fields for the flags, and the bit fields were divided into two
 * items (depending on who locked what).  at some point in BSD, the bit
 * fields were dropped and all the flags were lumped into one short.
 * that is fine for a single-threaded uniprocessor OS, but bad if you
 * want to actually make use of locking.  so, we've separated things
 * back out again.
 *
 * note the page structure has no lock of its own.
 */

#include <uvm/uvm_extern.h>
#include <uvm/uvm_pglist.h>

#include <sys/rb.h>

struct vm_page {
	struct rb_node		rb_node;	/* tree of pages in obj (O) */

	union {
		TAILQ_ENTRY(vm_page) queue;
		LIST_ENTRY(vm_page) list;
	} pageq;				/* queue info for FIFO
						 * queue or free list (P) */
	union {
		TAILQ_ENTRY(vm_page) queue;
		LIST_ENTRY(vm_page) list;
	} listq;				/* pages in same object (O) */

	struct vm_anon		*uanon;		/* anon (O,P) */
	struct uvm_object	*uobject;	/* object (O,P) */
	voff_t			offset;		/* offset into object (O,P) */
	uint16_t		flags;		/* object flags [O] */
	uint16_t		loan_count;	/* number of active loans
						 * to read: [O or P]
						 * to modify: [O _and_ P] */
	uint16_t		wire_count;	/* wired down map refs [P] */
	uint16_t		pqflags;	/* page queue flags [P] */
	paddr_t			phys_addr;	/* physical address of page */

#ifdef __HAVE_VM_PAGE_MD
	struct vm_page_md	mdpage;		/* pmap-specific data */
#endif

#if defined(UVM_PAGE_TRKOWN)
	/* debugging fields to track page ownership */
	pid_t			owner;		/* proc that set PG_BUSY */
	lwpid_t			lowner;		/* lwp that set PG_BUSY */
	const char		*owner_tag;	/* why it was set busy */
#endif
};

/*
 * These are the flags defined for vm_page.
 */

/*
 * locking rules:
 *   PG_ ==> locked by the object lock
 *   PQ_ ==> locked by the page queue lock
 *   PQ_FREE is locked by the free queue lock and is mutually exclusive
 *   with all other PQ_ flags
 *
 * PG_ZERO is used to indicate that a page has been pre-zero'd.  This flag
 * is only set when the page is on no queues, and is cleared when the page
 * is placed on the free list.
 */

#define	PG_BUSY		0x0001		/* page is locked */
#define	PG_WANTED	0x0002		/* someone is waiting for page */
#define	PG_TABLED	0x0004		/* page is in VP table */
#define	PG_CLEAN	0x0008		/* page has not been modified */
#define	PG_PAGEOUT	0x0010		/* page to be freed for pagedaemon */
#define	PG_RELEASED	0x0020		/* page to be freed when unbusied */
#define	PG_FAKE		0x0040		/* page is not yet initialized */
#define	PG_RDONLY	0x0080		/* page must be mapped read-only */
#define	PG_ZERO		0x0100		/* page is pre-zero'd */

#define	PG_PAGER1	0x1000		/* pager-specific flag */

#define	UVM_PGFLAGBITS \
	"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
	"\11ZERO\15PAGER1"

#define	PQ_FREE		0x0001		/* page is on free list */
#define	PQ_ANON		0x0002		/* page is part of an anon, rather
					   than an uvm_object */
#define	PQ_AOBJ		0x0004		/* page is part of an anonymous
					   uvm_object */
#define	PQ_SWAPBACKED	(PQ_ANON|PQ_AOBJ)
#define	PQ_READAHEAD	0x0008	/* read-ahead but has not been "hit" yet */

#define	PQ_PRIVATE1	0x0100
#define	PQ_PRIVATE2	0x0200
#define	PQ_PRIVATE3	0x0400
#define	PQ_PRIVATE4	0x0800
#define	PQ_PRIVATE5	0x1000
#define	PQ_PRIVATE6	0x2000
#define	PQ_PRIVATE7	0x4000
#define	PQ_PRIVATE8	0x8000

#define	UVM_PQFLAGBITS \
	"\20\1FREE\2ANON\3AOBJ\4READAHEAD" \
	"\11PRIVATE1\12PRIVATE2\13PRIVATE3\14PRIVATE4" \
	"\15PRIVATE5\16PRIVATE6\17PRIVATE7\20PRIVATE8"
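
/*
 * Illustrative sketch only (not part of this header): the "\20..."
 * strings above use the kernel's old-style bitmask format -- the first
 * byte is the radix to print the value in ('\20' == 16, i.e. hex) and
 * each following entry is a 1-origin bit number followed by that bit's
 * name.  The hand-rolled decoder below (hypothetical function name)
 * simply documents the format; in the kernel a bitmask-printing helper
 * normally does this for you.
 */
#if 0	/* example only */
static void
example_print_pgflags(uint16_t flags)
{
	const unsigned char *p = (const unsigned char *)UVM_PGFLAGBITS;
	int first = 1;

	printf("flags=%#x <", (unsigned int)flags);
	for (p++; *p != '\0'; ) {		/* p[0] was the radix byte */
		unsigned int bit = *p++;	/* 1-origin bit number */
		int set = (flags & (1U << (bit - 1))) != 0;

		if (set && !first)
			printf(",");
		while (*p > ' ') {		/* name: printable chars */
			if (set)
				printf("%c", *p);
			p++;
		}
		if (set)
			first = 0;
	}
	printf(">\n");
}
#endif	/* example only */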

/*
 * physical memory layout structure
 *
 * MD vmparam.h must #define:
 *   VM_PHYSSEG_MAX = max number of physical memory segments we support
 *		   (if this is "1" then we revert to a "contig" case)
 *   VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
 * 	- VM_PSTRAT_RANDOM:   linear search (random order)
 *	- VM_PSTRAT_BSEARCH:  binary search (sorted by address)
 *	- VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
 *      - others?
 *   XXXCDC: eventually we should purge all left-over global variables...
 */
#define	VM_PSTRAT_RANDOM	1
#define	VM_PSTRAT_BSEARCH	2
#define	VM_PSTRAT_BIGFIRST	3
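
/*
 * Illustrative sketch only: roughly what a port's <machine/vmparam.h>
 * provides to satisfy the requirements described above.  The values
 * here are invented for illustration; every port chooses its own.
 */
#if 0	/* example only */
#define	VM_PHYSSEG_MAX		16			/* up to 16 RAM segments */
#define	VM_PHYSSEG_STRAT	VM_PSTRAT_BSEARCH	/* keep segments sorted by address */
#endif	/* example only */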

/*
 * vm_physseg: describes one segment of physical memory
 */
struct vm_physseg {
	paddr_t	start;			/* PF# of first page in segment */
	paddr_t	end;			/* (PF# of last page in segment) + 1 */
	paddr_t	avail_start;		/* PF# of first free page in segment */
	paddr_t	avail_end;		/* (PF# of last free page in segment) + 1 */
	int	free_list;		/* which free list they belong on */
	struct	vm_page *pgs;		/* vm_page structures (from start) */
	struct	vm_page *lastpg;	/* vm_page structure for end */
#ifdef __HAVE_PMAP_PHYSSEG
	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};

#ifdef _KERNEL

/*
 * globals
 */

extern bool vm_page_zero_enable;

/*
 * physical memory config is stored in vm_physmem.
 */

extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
extern int vm_nphysseg;
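
/*
 * Illustrative sketch only (hypothetical function name): the usual way
 * MI code walks the physical segment table.  start/end and
 * avail_start/avail_end are page frame numbers, so their differences
 * are page counts.
 */
#if 0	/* example only */
static void
example_count_pages(paddr_t *totalp, paddr_t *availp)
{
	paddr_t total = 0, avail = 0;
	int lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		total += vm_physmem[lcv].end - vm_physmem[lcv].start;
		avail += vm_physmem[lcv].avail_end -
		    vm_physmem[lcv].avail_start;
	}
	*totalp = total;	/* pages described by vm_page structures */
	*availp = avail;	/* pages handed to the VM system at boot */
}
#endif	/* example only */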

/*
 * prototypes: the following prototypes define the interface to pages
 */

void uvm_page_init(vaddr_t *, vaddr_t *);
#if defined(UVM_PAGE_TRKOWN)
void uvm_page_own(struct vm_page *, const char *);
#endif
#if !defined(PMAP_STEAL_MEMORY)
bool uvm_page_physget(paddr_t *);
#endif
void uvm_page_rehash(void);
void uvm_page_recolor(int);
void uvm_pageidlezero(void);

void uvm_pageactivate(struct vm_page *);
vaddr_t uvm_pageboot_alloc(vsize_t);
void uvm_pagecopy(struct vm_page *, struct vm_page *);
void uvm_pagedeactivate(struct vm_page *);
void uvm_pagedequeue(struct vm_page *);
void uvm_pageenqueue(struct vm_page *);
void uvm_pagefree(struct vm_page *);
void uvm_page_unbusy(struct vm_page **, int);
struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
void uvm_pageunwire(struct vm_page *);
void uvm_pagewait(struct vm_page *, int);
void uvm_pagewake(struct vm_page *);
void uvm_pagewire(struct vm_page *);
void uvm_pagezero(struct vm_page *);

int uvm_page_lookup_freelist(struct vm_page *);
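
/*
 * Illustrative sketch only (hypothetical function name): typical use of
 * uvm_pagelookup().  The lookup and the PG_* checks are only meaningful
 * with the object lock (the "O" lock above) held, and the offset is
 * assumed to be page aligned; the lock calls themselves are left as a
 * comment because the lock lives in struct uvm_object, not here.
 */
#if 0	/* example only */
static bool
example_obj_page_is_clean(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	/* caller holds the object lock; off is page aligned */
	pg = uvm_pagelookup(uobj, off);
	if (pg == NULL)
		return false;		/* not resident */

	/* clean and not currently being operated on by anyone else */
	return (pg->flags & (PG_BUSY | PG_CLEAN)) == PG_CLEAN;
}
#endif	/* example only */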

static struct vm_page *PHYS_TO_VM_PAGE(paddr_t);
static int vm_physseg_find(paddr_t, int *);

/*
 * macros
 */

#define	UVM_PAGE_HASH_PENALTY	4	/* XXX: a guess */

#define	uvm_pagehash(obj,off) \
	(((unsigned long)obj+(unsigned long)atop(off)) & uvm.page_hashmask)

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * Compute the page color bucket for a given page.
 */
#define	VM_PGCOLOR_BUCKET(pg) \
	(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

/*
 * vm_physseg_find: find the vm_physseg structure that a PA belongs to
 */
static __inline int
vm_physseg_find(paddr_t pframe, int *offp)
{
#if VM_PHYSSEG_MAX == 1

	/* 'contig' case */
	if (pframe >= vm_physmem[0].start && pframe < vm_physmem[0].end) {
		if (offp)
			*offp = pframe - vm_physmem[0].start;
		return(0);
	}
	return(-1);

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	u_int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */

		/* start past our try? */
		if (pframe >= vm_physmem[try].start) {
			/* was try correct? */
			if (pframe < vm_physmem[try].end) {
				if (offp)
					*offp = pframe - vm_physmem[try].start;
				return(try);            /* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		if (pframe >= vm_physmem[lcv].start &&
		    pframe < vm_physmem[lcv].end) {
			if (offp)
				*offp = pframe - vm_physmem[lcv].start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);

#endif
}


/*
 * IS_VM_PHYSADDR: only used by mips/pmax/pica trap/pmap.
 */

#define	IS_VM_PHYSADDR(PA) (vm_physseg_find(atop(PA), NULL) != -1)

/*
 * PHYS_TO_VM_PAGE: find the vm_page for a PA.   used by MI code to get
 * vm_pages back from an I/O mapping (ugh!).   used in some MD code as well.
 */
static __inline struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);
	if (psi != -1)
		return(&vm_physmem[psi].pgs[off]);
	return(NULL);
}
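
/*
 * Illustrative sketch only (hypothetical function name):
 * PHYS_TO_VM_PAGE() and VM_PAGE_TO_PHYS() are inverses for any physical
 * address inside a managed segment; addresses UVM does not manage
 * (e.g. device memory) come back as NULL.
 */
#if 0	/* example only */
static bool
example_pa_is_managed(paddr_t pa)
{
	struct vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg == NULL)
		return false;		/* not RAM managed by UVM */

	/* the round trip returns the page-aligned physical address */
	return VM_PAGE_TO_PHYS(pg) == trunc_page(pa);
}
#endif	/* example only */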

#define	VM_PAGE_IS_FREE(entry)  ((entry)->pqflags & PQ_FREE)
#define	VM_FREE_PAGE_TO_CPU(pg)	((struct uvm_cpu *)((uintptr_t)pg->offset))

#ifdef DEBUG
void uvm_pagezerocheck(struct vm_page *);
#endif /* DEBUG */

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGE_H_ */