/*	$NetBSD: uvm_page.h,v 1.59.2.16 2010/04/27 08:23:48 uebayasi Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
 * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_PAGE_H_
#define _UVM_UVM_PAGE_H_

/*
 * uvm_page.h
 */

/*
 *	Resident memory system definitions.
 */

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A red-black tree rooted with the containing
 *		object is used to quickly perform object+
 *		offset lookups.
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P) [or both].
 */

/*
 * locking note: the mach version of this data structure had bit
 * fields for the flags, and the bit fields were divided into two
 * items (depending on who locked what).  some time later, in BSD, the bit
 * fields were dumped and all the flags were lumped into one short.
 * that is fine for a single threaded uniprocessor OS, but bad if you
 * want to actually make use of locking.  so, we've separated things
 * back out again.
 *
 * note the page structure has no lock of its own.
 */

#include <uvm/uvm_extern.h>
#include <uvm/uvm_pglist.h>

#include <sys/rb.h>

struct vm_page {
	struct rb_node		rb_node;	/* tree of pages in obj (O) */

	union {
		TAILQ_ENTRY(vm_page) queue;
		LIST_ENTRY(vm_page) list;
	} pageq;				/* queue info for FIFO
						 * queue or free list (P) */
	union {
		TAILQ_ENTRY(vm_page) queue;
		LIST_ENTRY(vm_page) list;
	} listq;				/* pages in same object (O) */

	struct vm_anon		*uanon;		/* anon (O,P) */
	struct uvm_object	*uobject;	/* object (O,P) */
	voff_t			offset;		/* offset into object (O,P) */
	uint16_t		flags;		/* object flags [O] */
	uint16_t		loan_count;	/* number of active loans
						 * to read: [O or P]
						 * to modify: [O _and_ P] */
	uint16_t		wire_count;	/* wired down map refs [P] */
	uint16_t		pqflags;	/* page queue flags [P] */
#if 1
	paddr_t			phys_addr;	/* physical address of page */
#endif

#ifdef __HAVE_VM_PAGE_MD
	struct vm_page_md	mdpage;		/* pmap-specific data */
#endif

#if defined(UVM_PAGE_TRKOWN)
	/* debugging fields to track page ownership */
	pid_t			owner;		/* proc that set PG_BUSY */
	lwpid_t			lowner;		/* lwp that set PG_BUSY */
	const char		*owner_tag;	/* why it was set busy */
#endif
};
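
/*
 * Usage sketch (illustrative only; the lock objects themselves are not
 * declared in this header): the (O)/(P) annotations above mean, for
 * example, that loan_count may be read while holding either the owning
 * object's lock or the page queue lock, but may only be modified while
 * holding both:
 *
 *	(caller holds pg->uobject's lock and the page queue lock)
 *	pg->loan_count++;
 */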

/*
 * These are the flags defined for vm_page.
 */

/*
 * locking rules:
 *   PG_ ==> locked by object lock
 *   PQ_ ==> locked by page queue lock
 *   PQ_FREE is locked by the free queue lock and is mutually exclusive
 *   with all other PQ_ flags.
 *
 * PG_ZERO is used to indicate that a page has been pre-zero'd.  This flag
 * is only set when the page is on no queues, and is cleared when the page
 * is placed on the free list.
 */

#define	PG_BUSY		0x0001		/* page is locked */
#define	PG_WANTED	0x0002		/* someone is waiting for page */
#define	PG_TABLED	0x0004		/* page is in VP table */
#define	PG_CLEAN	0x0008		/* page has not been modified */
#define	PG_PAGEOUT	0x0010		/* page to be freed for pagedaemon */
#define	PG_RELEASED	0x0020		/* page to be freed when unbusied */
#define	PG_FAKE		0x0040		/* page is not yet initialized */
#define	PG_RDONLY	0x0080		/* page must be mapped read-only */
#define	PG_ZERO		0x0100		/* page is pre-zero'd */

#define	PG_PAGER1	0x1000		/* pager-specific flag */

#define	UVM_PGFLAGBITS \
	"\20\1BUSY\2WANTED\3TABLED\4CLEAN\5PAGEOUT\6RELEASED\7FAKE\10RDONLY" \
	"\11ZERO\15PAGER1"

#define	PQ_FREE		0x0001		/* page is on free list */
#define	PQ_ANON		0x0002		/* page is part of an anon, rather
					   than an uvm_object */
#define	PQ_AOBJ		0x0004		/* page is part of an anonymous
					   uvm_object */
#define	PQ_SWAPBACKED	(PQ_ANON|PQ_AOBJ)
#define	PQ_READAHEAD	0x0008		/* read-ahead but has not been "hit" yet */

#define	PQ_PRIVATE1	0x0100
#define	PQ_PRIVATE2	0x0200
#define	PQ_PRIVATE3	0x0400
#define	PQ_PRIVATE4	0x0800
#define	PQ_PRIVATE5	0x1000
#define	PQ_PRIVATE6	0x2000
#define	PQ_PRIVATE7	0x4000
#define	PQ_PRIVATE8	0x8000

#define	UVM_PQFLAGBITS \
	"\20\1FREE\2ANON\3AOBJ\4READAHEAD" \
	"\11PRIVATE1\12PRIVATE2\13PRIVATE3\14PRIVATE4" \
	"\15PRIVATE5\16PRIVATE6\17PRIVATE7\20PRIVATE8"

/*
 * physical memory layout structure
 *
 * MD vmparam.h must #define:
 *   VM_PHYSSEG_MAX = max number of physical memory segments we support
 *		   (if this is "1" then we revert to a "contig" case)
 *   VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
 *	- VM_PSTRAT_RANDOM:   linear search (random order)
 *	- VM_PSTRAT_BSEARCH:  binary search (sorted by address)
 *	- VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
 *	- others?
 *   XXXCDC: eventually we should purge all left-over global variables...
 */
#define	VM_PSTRAT_RANDOM	1
#define	VM_PSTRAT_BSEARCH	2
#define	VM_PSTRAT_BIGFIRST	3
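
/*
 * Example (hypothetical machine-dependent <machine/vmparam.h> fragment;
 * the values are chosen only for illustration):
 *
 *	#define VM_PHYSSEG_MAX		16
 *	#define VM_PHYSSEG_STRAT	VM_PSTRAT_BSEARCH
 */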

/*
 * vm_physseg: describes one segment of physical memory
 */
struct vm_physseg {
	paddr_t	start;			/* PF# of first page in segment */
	paddr_t	end;			/* (PF# of last page in segment) + 1 */
	paddr_t	avail_start;		/* PF# of first free page in segment */
	paddr_t	avail_end;		/* (PF# of last free page in segment) + 1 */
	int	free_list;		/* which free list they belong on */
	struct	vm_page *pgs;		/* vm_page structures (from start) */
	struct	vm_page *endpg;		/* vm_page structure for end */
#ifdef __HAVE_PMAP_PHYSSEG
	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
#endif
};

#ifdef _KERNEL

/*
 * globals
 */

extern bool vm_page_zero_enable;

/*
 * physical memory config is stored in vm_physmem.
 */

extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
extern int vm_nphysmem;
#ifdef XIP
extern struct vm_physseg vm_physdev[VM_PHYSSEG_MAX];
extern int vm_nphysdev;
#endif

#define	vm_nphysseg	vm_nphysmem	/* XXX backward compat */
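
/*
 * Example (illustrative sketch): walking the physical segment table to
 * count the page frames managed by UVM.  vm_physmem[] and vm_nphysmem
 * are only meaningful after uvm_page_init() has run:
 *
 *	psize_t npages = 0;
 *	int lcv;
 *
 *	for (lcv = 0; lcv < vm_nphysmem; lcv++)
 *		npages += vm_physmem[lcv].end - vm_physmem[lcv].start;
 */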

/*
 * prototypes: the following prototypes define the interface to pages
 */

void uvm_page_init(vaddr_t *, vaddr_t *);
#if defined(UVM_PAGE_TRKOWN)
void uvm_page_own(struct vm_page *, const char *);
#endif
#if !defined(PMAP_STEAL_MEMORY)
bool uvm_page_physget(paddr_t *);
#endif
void uvm_page_recolor(int);
void uvm_pageidlezero(void);

void uvm_pageactivate(struct vm_page *);
vaddr_t uvm_pageboot_alloc(vsize_t);
void uvm_pagecopy(struct vm_page *, struct vm_page *);
void uvm_pagedeactivate(struct vm_page *);
void uvm_pagedequeue(struct vm_page *);
void uvm_pageenqueue(struct vm_page *);
void uvm_pagefree(struct vm_page *);
void uvm_page_unbusy(struct vm_page **, int);
struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
void uvm_pageunwire(struct vm_page *);
void uvm_pagewait(struct vm_page *, int);
void uvm_pagewake(struct vm_page *);
void uvm_pagewire(struct vm_page *);
void uvm_pagezero(struct vm_page *);
bool uvm_pageismanaged(paddr_t);
#ifdef DEVICE_PAGE
bool uvm_pageisdevice_p(const struct vm_page *);
#else
#define	uvm_pageisdevice_p(x)	false
#endif

int uvm_page_lookup_freelist(struct vm_page *);

int vm_physseg_find(paddr_t, int *);
struct vm_page *uvm_phys_to_vm_page(paddr_t);
paddr_t uvm_vm_page_to_phys(const struct vm_page *);
#ifdef XIP
int vm_physseg_find_device(paddr_t, int *);
struct vm_page *uvm_phys_to_vm_page_device(paddr_t);
paddr_t uvm_vm_page_to_phys_device(const struct vm_page *);
#endif
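
/*
 * Example (illustrative sketch): translating a physical address into its
 * vm_page, checking first that the address is managed by UVM:
 *
 *	struct vm_page *pg;
 *
 *	if (uvm_pageismanaged(pa)) {
 *		pg = uvm_phys_to_vm_page(pa);
 *		KASSERT(pg != NULL);
 *	}
 */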

/*
 * macros
 */

#define	UVM_PAGE_TREE_PENALTY	4	/* XXX: a guess */

#define	VM_PAGE_TO_PHYS(entry)	uvm_vm_page_to_phys(entry)

#ifdef __HAVE_VM_PAGE_MD
#ifndef XIP
#define	VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)
#else
struct vm_page_md *uvm_vm_page_to_md(struct vm_page *);
#define	VM_PAGE_TO_MD(pg)	uvm_vm_page_to_md(pg)
#endif
#endif

/*
 * Compute the page color bucket for a given page.
 */
#define	VM_PGCOLOR_BUCKET(pg) \
	(atop(VM_PAGE_TO_PHYS((pg))) & uvmexp.colormask)

#define	PHYS_TO_VM_PAGE(pa)	uvm_phys_to_vm_page(pa)

#define	VM_PAGE_IS_FREE(entry)	((entry)->pqflags & PQ_FREE)
#define	VM_FREE_PAGE_TO_CPU(pg)	((struct uvm_cpu *)((uintptr_t)pg->offset))
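
/*
 * Example (illustrative sketch): the macros above convert between a
 * vm_page and its physical address and derive its page color bucket:
 *
 *	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 *	u_int bucket = VM_PGCOLOR_BUCKET(pg);
 *
 *	KASSERT(PHYS_TO_VM_PAGE(pa) == pg);
 */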

#ifdef DEBUG
void uvm_pagezerocheck(struct vm_page *);
#endif /* DEBUG */

#endif /* _KERNEL */

#endif /* _UVM_UVM_PAGE_H_ */