uvm_page.h revision 1.17 1 1.17 mrg /* $NetBSD: uvm_page.h,v 1.17 2000/10/03 20:50:49 mrg Exp $ */
2 1.1 mrg
3 1.1 mrg /*
4 1.1 mrg * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 1.1 mrg * Copyright (c) 1991, 1993, The Regents of the University of California.
6 1.1 mrg *
7 1.1 mrg * All rights reserved.
8 1.1 mrg *
9 1.1 mrg * This code is derived from software contributed to Berkeley by
10 1.1 mrg * The Mach Operating System project at Carnegie-Mellon University.
11 1.1 mrg *
12 1.1 mrg * Redistribution and use in source and binary forms, with or without
13 1.1 mrg * modification, are permitted provided that the following conditions
14 1.1 mrg * are met:
15 1.1 mrg * 1. Redistributions of source code must retain the above copyright
16 1.1 mrg * notice, this list of conditions and the following disclaimer.
17 1.1 mrg * 2. Redistributions in binary form must reproduce the above copyright
18 1.1 mrg * notice, this list of conditions and the following disclaimer in the
19 1.1 mrg * documentation and/or other materials provided with the distribution.
20 1.1 mrg * 3. All advertising materials mentioning features or use of this software
21 1.1 mrg * must display the following acknowledgement:
22 1.1 mrg * This product includes software developed by Charles D. Cranor,
23 1.1 mrg * Washington University, the University of California, Berkeley and
24 1.1 mrg * its contributors.
25 1.1 mrg * 4. Neither the name of the University nor the names of its contributors
26 1.1 mrg * may be used to endorse or promote products derived from this software
27 1.1 mrg * without specific prior written permission.
28 1.1 mrg *
29 1.1 mrg * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 1.1 mrg * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 1.1 mrg * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 1.1 mrg * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 1.1 mrg * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 1.1 mrg * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 1.1 mrg * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 1.1 mrg * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 1.1 mrg * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 1.1 mrg * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 1.1 mrg * SUCH DAMAGE.
40 1.1 mrg *
41 1.1 mrg * @(#)vm_page.h 7.3 (Berkeley) 4/21/91
42 1.3 mrg * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
43 1.1 mrg *
44 1.1 mrg *
45 1.1 mrg * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46 1.1 mrg * All rights reserved.
47 1.1 mrg *
48 1.1 mrg * Permission to use, copy, modify and distribute this software and
49 1.1 mrg * its documentation is hereby granted, provided that both the copyright
50 1.1 mrg * notice and this permission notice appear in all copies of the
51 1.1 mrg * software, derivative works or modified versions, and any portions
52 1.1 mrg * thereof, and that both notices appear in supporting documentation.
53 1.1 mrg *
54 1.1 mrg * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55 1.1 mrg * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56 1.1 mrg * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57 1.1 mrg *
58 1.1 mrg * Carnegie Mellon requests users of this software to return to
59 1.1 mrg *
60 1.1 mrg * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
61 1.1 mrg * School of Computer Science
62 1.1 mrg * Carnegie Mellon University
63 1.1 mrg * Pittsburgh PA 15213-3890
64 1.1 mrg *
65 1.1 mrg * any improvements or extensions that they make and grant Carnegie the
66 1.1 mrg * rights to redistribute these changes.
67 1.1 mrg */
68 1.1 mrg
69 1.4 perry #ifndef _UVM_UVM_PAGE_H_
70 1.4 perry #define _UVM_UVM_PAGE_H_
71 1.4 perry
72 1.1 mrg /*
73 1.1 mrg * uvm_page.h
74 1.1 mrg */
75 1.1 mrg
76 1.16 mrg /*
77 1.16 mrg * Resident memory system definitions.
78 1.16 mrg */
79 1.16 mrg
80 1.16 mrg /*
81 1.16 mrg * Management of resident (logical) pages.
82 1.16 mrg *
83 1.16 mrg * A small structure is kept for each resident
84 1.16 mrg * page, indexed by page number. Each structure
85 1.16 mrg * is an element of several lists:
86 1.16 mrg *
87 1.16 mrg * A hash table bucket used to quickly
88 1.16 mrg * perform object/offset lookups
89 1.16 mrg *
90 1.16 mrg * A list of all pages for a given object,
91 1.16 mrg * so they can be quickly deactivated at
92 1.16 mrg * time of deallocation.
93 1.16 mrg *
94 1.16 mrg * An ordered list of pages due for pageout.
95 1.16 mrg *
96 1.16 mrg * In addition, the structure contains the object
97 1.16 mrg * and offset to which this page belongs (for pageout),
98 1.16 mrg * and sundry status bits.
99 1.16 mrg *
100 1.16 mrg * Fields in this structure are locked either by the lock on the
101 1.16 mrg * object that the page belongs to (O) or by the lock on the page
102 1.16 mrg * queues (P) [or both].
103 1.16 mrg */
104 1.16 mrg
105 1.16 mrg /*
106 1.16 mrg * locking note: the mach version of this data structure had bit
107 1.16 mrg * fields for the flags, and the bit fields were divided into two
108 1.16 mrg * items (depending on who locked what). some time, in BSD, the bit
109 1.16 mrg * fields were dumped and all the flags were lumped into one short.
110 1.16 mrg * that is fine for a single threaded uniprocessor OS, but bad if you
111 1.16 mrg * want to actual make use of locking (simple_lock's). so, we've
 * separated things back out again.
113 1.16 mrg *
114 1.16 mrg * note the page structure has no lock of its own.
115 1.16 mrg */
116 1.16 mrg
117 1.16 mrg #include <uvm/uvm_extern.h>
118 1.16 mrg #include <uvm/uvm_pglist.h>
119 1.16 mrg
/*
 * vm_page: structure kept for each page of resident physical memory.
 *
 * Lock key used in the field comments below:
 *	(O)/[O] = locked by the lock on the object the page belongs to
 *	(P)/[P] = locked by the page queue lock
 *	(O,P)   = either lock suffices to read; see per-field notes to modify
 */
struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO
					 * queue or free list (P) */
	TAILQ_ENTRY(vm_page) hashq;	/* object/offset hash table links (O) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */

	struct vm_anon *uanon;		/* anon owning this page, if any (O,P) */
	struct uvm_object *uobject;	/* object owning this page, if any (O,P) */
	voff_t offset;			/* offset into object (O,P) */

	u_short flags;			/* object flags, PG_* below [O] */
	u_short version;		/* version count [O] */
	u_short wire_count;		/* wired down map refs [P] */
	u_short pqflags;		/* page queue flags, PQ_* below [P] */
	u_int loan_count;		/* number of active loans
					 * to read: [O or P]
					 * to modify: [O _and_ P] */
	paddr_t phys_addr;		/* physical address of page */
#if defined(UVM_PAGE_TRKOWN)
	/* debugging fields to track page ownership */
	pid_t owner;			/* proc that set PG_BUSY */
	char *owner_tag;		/* why it was set busy */
#endif
};
144 1.16 mrg
145 1.16 mrg /*
146 1.16 mrg * These are the flags defined for vm_page.
147 1.16 mrg *
148 1.16 mrg * Note: PG_FILLED and PG_DIRTY are added for the filesystems.
149 1.16 mrg */
150 1.16 mrg
151 1.16 mrg /*
152 1.16 mrg * locking rules:
153 1.16 mrg * PG_ ==> locked by object lock
154 1.16 mrg * PQ_ ==> lock by page queue lock
 *	PQ_FREE is locked by the free queue lock and is mutually exclusive
 *	with all other PQ_ flags
156 1.16 mrg *
157 1.16 mrg * PG_ZERO is used to indicate that a page has been pre-zero'd. This flag
158 1.16 mrg * is only set when the page is on no queues, and is cleared when the page
159 1.16 mrg * is placed on the free list.
160 1.16 mrg *
161 1.16 mrg * possible deadwood: PG_FAULTING, PQ_LAUNDRY
162 1.16 mrg */
163 1.16 mrg #define PG_CLEAN 0x0008 /* page has not been modified */
164 1.16 mrg #define PG_BUSY 0x0010 /* page is in transit */
165 1.16 mrg #define PG_WANTED 0x0020 /* someone is waiting for page */
166 1.16 mrg #define PG_TABLED 0x0040 /* page is in VP table */
167 1.16 mrg #define PG_ZERO 0x0100 /* page is pre-zero'd */
168 1.16 mrg #define PG_FAKE 0x0200 /* page is placeholder for pagein */
169 1.16 mrg #define PG_FILLED 0x0400 /* client flag to set when filled */
170 1.16 mrg #define PG_DIRTY 0x0800 /* client flag to set when dirty */
171 1.16 mrg #define PG_RELEASED 0x1000 /* page released while paging */
172 1.16 mrg #define PG_FAULTING 0x2000 /* page is being faulted in */
173 1.16 mrg #define PG_CLEANCHK 0x4000 /* clean bit has been checked */
174 1.16 mrg
175 1.16 mrg #define PQ_FREE 0x0001 /* page is on free list */
176 1.16 mrg #define PQ_INACTIVE 0x0002 /* page is in inactive list */
177 1.16 mrg #define PQ_ACTIVE 0x0004 /* page is in active list */
178 1.16 mrg #define PQ_LAUNDRY 0x0008 /* page is being cleaned now */
179 1.16 mrg #define PQ_ANON 0x0010 /* page is part of an anon, rather
180 1.16 mrg than an uvm_object */
181 1.16 mrg #define PQ_AOBJ 0x0020 /* page is part of an anonymous
182 1.16 mrg uvm_object */
183 1.16 mrg #define PQ_SWAPBACKED (PQ_ANON|PQ_AOBJ)
184 1.16 mrg
185 1.16 mrg /*
186 1.16 mrg * physical memory layout structure
187 1.16 mrg *
188 1.16 mrg * MD vmparam.h must #define:
 *	VM_PHYSSEG_MAX = max number of physical memory segments we support
 *		   (if this is "1" then we revert to a "contig" case)
 *	VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
192 1.16 mrg * - VM_PSTRAT_RANDOM: linear search (random order)
193 1.16 mrg * - VM_PSTRAT_BSEARCH: binary search (sorted by address)
194 1.16 mrg * - VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
195 1.16 mrg * - others?
196 1.17 mrg * XXXCDC: eventually we should purge all left-over global variables...
197 1.16 mrg */
198 1.16 mrg #define VM_PSTRAT_RANDOM 1
199 1.16 mrg #define VM_PSTRAT_BSEARCH 2
200 1.16 mrg #define VM_PSTRAT_BIGFIRST 3
201 1.16 mrg
/*
 * vm_physseg: describes one contiguous segment of physical memory
 * managed by UVM.  note: start/end and avail_start/avail_end are
 * page frame numbers (PF#), not byte addresses.
 */
struct vm_physseg {
	paddr_t start;			/* PF# of first page in segment */
	paddr_t end;			/* (PF# of last page in segment) + 1 */
	paddr_t avail_start;		/* PF# of first free page in segment */
	paddr_t avail_end;		/* (PF# of last free page in segment) +1 */
	int free_list;			/* which free list they belong on */
	struct vm_page *pgs;		/* vm_page structures (from start) */
	struct vm_page *lastpg;		/* vm_page structure for end */
	struct pmap_physseg pmseg;	/* pmap specific (MD) data */
};
215 1.16 mrg
216 1.13 thorpej #ifdef _KERNEL
217 1.13 thorpej
218 1.1 mrg /*
219 1.15 thorpej * globals
220 1.15 thorpej */
221 1.15 thorpej
222 1.15 thorpej extern boolean_t vm_page_zero_enable;
223 1.15 thorpej
224 1.15 thorpej /*
225 1.16 mrg * Each pageable resident page falls into one of three lists:
226 1.16 mrg *
227 1.16 mrg * free
228 1.16 mrg * Available for allocation now.
229 1.16 mrg * inactive
230 1.16 mrg * Not referenced in any map, but still has an
231 1.16 mrg * object/offset-page mapping, and may be dirty.
232 1.16 mrg * This is the list of pages that should be
233 1.16 mrg * paged out next.
234 1.16 mrg * active
235 1.16 mrg * A list of pages which have been placed in
236 1.16 mrg * at least one physical map. This list is
237 1.16 mrg * ordered, in LRU-like fashion.
238 1.1 mrg */
239 1.1 mrg
240 1.16 mrg extern
241 1.16 mrg struct pglist vm_page_queue_free; /* memory free queue */
242 1.16 mrg extern
243 1.16 mrg struct pglist vm_page_queue_active; /* active memory queue */
244 1.16 mrg extern
245 1.16 mrg struct pglist vm_page_queue_inactive; /* inactive memory queue */
246 1.1 mrg
247 1.16 mrg /*
248 1.16 mrg * physical memory config is stored in vm_physmem.
249 1.16 mrg */
250 1.1 mrg
251 1.16 mrg extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
252 1.16 mrg extern int vm_nphysseg;
253 1.15 thorpej
254 1.1 mrg /*
255 1.1 mrg * handle inline options
256 1.1 mrg */
257 1.1 mrg
258 1.1 mrg #ifdef UVM_PAGE_INLINE
259 1.1 mrg #define PAGE_INLINE static __inline
260 1.1 mrg #else
261 1.1 mrg #define PAGE_INLINE /* nothing */
262 1.1 mrg #endif /* UVM_PAGE_INLINE */
263 1.1 mrg
264 1.1 mrg /*
265 1.8 chuck * prototypes: the following prototypes define the interface to pages
266 1.1 mrg */
267 1.1 mrg
268 1.10 eeh void uvm_page_init __P((vaddr_t *, vaddr_t *));
269 1.1 mrg #if defined(UVM_PAGE_TRKOWN)
270 1.1 mrg void uvm_page_own __P((struct vm_page *, char *));
271 1.1 mrg #endif
272 1.8 chuck #if !defined(PMAP_STEAL_MEMORY)
273 1.10 eeh boolean_t uvm_page_physget __P((paddr_t *));
274 1.8 chuck #endif
275 1.1 mrg void uvm_page_rehash __P((void));
276 1.15 thorpej void uvm_pageidlezero __P((void));
277 1.12 thorpej
278 1.12 thorpej PAGE_INLINE int uvm_lock_fpageq __P((void));
279 1.12 thorpej PAGE_INLINE void uvm_unlock_fpageq __P((int));
280 1.8 chuck
281 1.1 mrg PAGE_INLINE void uvm_pageactivate __P((struct vm_page *));
282 1.10 eeh vaddr_t uvm_pageboot_alloc __P((vsize_t));
283 1.1 mrg PAGE_INLINE void uvm_pagecopy __P((struct vm_page *, struct vm_page *));
284 1.1 mrg PAGE_INLINE void uvm_pagedeactivate __P((struct vm_page *));
285 1.1 mrg void uvm_pagefree __P((struct vm_page *));
286 1.14 kleink PAGE_INLINE struct vm_page *uvm_pagelookup __P((struct uvm_object *, voff_t));
287 1.1 mrg void uvm_pageremove __P((struct vm_page *));
288 1.1 mrg /* uvm_pagerename: not needed */
289 1.1 mrg PAGE_INLINE void uvm_pageunwire __P((struct vm_page *));
290 1.1 mrg PAGE_INLINE void uvm_pagewait __P((struct vm_page *, int));
291 1.1 mrg PAGE_INLINE void uvm_pagewake __P((struct vm_page *));
292 1.7 chuck PAGE_INLINE void uvm_pagewire __P((struct vm_page *));
293 1.1 mrg PAGE_INLINE void uvm_pagezero __P((struct vm_page *));
294 1.9 thorpej
295 1.9 thorpej PAGE_INLINE int uvm_page_lookup_freelist __P((struct vm_page *));
296 1.16 mrg
297 1.16 mrg static struct vm_page *PHYS_TO_VM_PAGE __P((paddr_t));
298 1.16 mrg static int vm_physseg_find __P((paddr_t, int *));
299 1.16 mrg
300 1.16 mrg /*
301 1.16 mrg * macros
302 1.16 mrg */
303 1.16 mrg
304 1.16 mrg #define uvm_lock_pageq() simple_lock(&uvm.pageqlock)
305 1.16 mrg #define uvm_unlock_pageq() simple_unlock(&uvm.pageqlock)
306 1.16 mrg
307 1.16 mrg #define uvm_pagehash(obj,off) \
308 1.16 mrg (((unsigned long)obj+(unsigned long)atop(off)) & uvm.page_hashmask)
309 1.16 mrg
310 1.16 mrg #define UVM_PAGEZERO_TARGET (uvmexp.free)
311 1.16 mrg
312 1.16 mrg #define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
313 1.16 mrg
314 1.16 mrg /*
315 1.16 mrg * when VM_PHYSSEG_MAX is 1, we can simplify these functions
316 1.16 mrg */
317 1.16 mrg
318 1.16 mrg /*
319 1.16 mrg * vm_physseg_find: find vm_physseg structure that belongs to a PA
320 1.16 mrg */
321 1.16 mrg static __inline int
322 1.16 mrg vm_physseg_find(pframe, offp)
323 1.16 mrg paddr_t pframe;
324 1.16 mrg int *offp;
325 1.16 mrg {
326 1.16 mrg #if VM_PHYSSEG_MAX == 1
327 1.16 mrg
328 1.16 mrg /* 'contig' case */
329 1.16 mrg if (pframe >= vm_physmem[0].start && pframe < vm_physmem[0].end) {
330 1.16 mrg if (offp)
331 1.16 mrg *offp = pframe - vm_physmem[0].start;
332 1.16 mrg return(0);
333 1.16 mrg }
334 1.16 mrg return(-1);
335 1.16 mrg
336 1.16 mrg #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
337 1.16 mrg /* binary search for it */
338 1.16 mrg int start, len, try;
339 1.16 mrg
340 1.16 mrg /*
341 1.16 mrg * if try is too large (thus target is less than than try) we reduce
342 1.16 mrg * the length to trunc(len/2) [i.e. everything smaller than "try"]
343 1.16 mrg *
344 1.16 mrg * if the try is too small (thus target is greater than try) then
345 1.16 mrg * we set the new start to be (try + 1). this means we need to
346 1.16 mrg * reduce the length to (round(len/2) - 1).
347 1.16 mrg *
348 1.16 mrg * note "adjust" below which takes advantage of the fact that
349 1.16 mrg * (round(len/2) - 1) == trunc((len - 1) / 2)
350 1.16 mrg * for any value of len we may have
351 1.16 mrg */
352 1.16 mrg
353 1.16 mrg for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
354 1.16 mrg try = start + (len / 2); /* try in the middle */
355 1.16 mrg
356 1.16 mrg /* start past our try? */
357 1.16 mrg if (pframe >= vm_physmem[try].start) {
358 1.16 mrg /* was try correct? */
359 1.16 mrg if (pframe < vm_physmem[try].end) {
360 1.16 mrg if (offp)
361 1.16 mrg *offp = pframe - vm_physmem[try].start;
362 1.16 mrg return(try); /* got it */
363 1.16 mrg }
364 1.16 mrg start = try + 1; /* next time, start here */
365 1.16 mrg len--; /* "adjust" */
366 1.16 mrg } else {
367 1.16 mrg /*
368 1.16 mrg * pframe before try, just reduce length of
369 1.16 mrg * region, done in "for" loop
370 1.16 mrg */
371 1.16 mrg }
372 1.16 mrg }
373 1.16 mrg return(-1);
374 1.16 mrg
375 1.16 mrg #else
376 1.16 mrg /* linear search for it */
377 1.16 mrg int lcv;
378 1.16 mrg
379 1.16 mrg for (lcv = 0; lcv < vm_nphysseg; lcv++) {
380 1.16 mrg if (pframe >= vm_physmem[lcv].start &&
381 1.16 mrg pframe < vm_physmem[lcv].end) {
382 1.16 mrg if (offp)
383 1.16 mrg *offp = pframe - vm_physmem[lcv].start;
384 1.16 mrg return(lcv); /* got it */
385 1.16 mrg }
386 1.16 mrg }
387 1.16 mrg return(-1);
388 1.16 mrg
389 1.16 mrg #endif
390 1.16 mrg }
391 1.16 mrg
392 1.16 mrg
393 1.16 mrg /*
 * IS_VM_PHYSADDR: only used by mips/pmax/pica trap/pmap.
395 1.16 mrg */
396 1.16 mrg
397 1.16 mrg #define IS_VM_PHYSADDR(PA) (vm_physseg_find(atop(PA), NULL) != -1)
398 1.16 mrg
399 1.16 mrg /*
400 1.16 mrg * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
401 1.16 mrg * back from an I/O mapping (ugh!). used in some MD code as well.
402 1.16 mrg */
403 1.16 mrg static __inline struct vm_page *
404 1.16 mrg PHYS_TO_VM_PAGE(pa)
405 1.16 mrg paddr_t pa;
406 1.16 mrg {
407 1.16 mrg paddr_t pf = atop(pa);
408 1.16 mrg int off;
409 1.16 mrg int psi;
410 1.16 mrg
411 1.16 mrg psi = vm_physseg_find(pf, &off);
412 1.16 mrg if (psi != -1)
413 1.16 mrg return(&vm_physmem[psi].pgs[off]);
414 1.16 mrg return(NULL);
415 1.16 mrg }
416 1.16 mrg
417 1.16 mrg #define VM_PAGE_IS_FREE(entry) ((entry)->pqflags & PQ_FREE)
418 1.16 mrg
419 1.16 mrg extern
420 1.16 mrg simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive
421 1.16 mrg page queues */
422 1.16 mrg extern /* lock on free page queue */
423 1.16 mrg simple_lock_data_t vm_page_queue_free_lock;
424 1.16 mrg
425 1.16 mrg #define PAGE_ASSERT_WAIT(m, interruptible) { \
426 1.16 mrg (m)->flags |= PG_WANTED; \
427 1.16 mrg assert_wait((m), (interruptible)); \
428 1.16 mrg }
429 1.16 mrg
430 1.16 mrg #define PAGE_WAKEUP(m) { \
431 1.16 mrg (m)->flags &= ~PG_BUSY; \
432 1.16 mrg if ((m)->flags & PG_WANTED) { \
433 1.16 mrg (m)->flags &= ~PG_WANTED; \
434 1.16 mrg wakeup((m)); \
435 1.16 mrg } \
436 1.16 mrg }
437 1.16 mrg
438 1.16 mrg #define vm_page_lock_queues() simple_lock(&vm_page_queue_lock)
439 1.16 mrg #define vm_page_unlock_queues() simple_unlock(&vm_page_queue_lock)
440 1.16 mrg
441 1.16 mrg #define vm_page_set_modified(m) { (m)->flags &= ~PG_CLEAN; }
442 1.16 mrg
443 1.16 mrg #define VM_PAGE_INIT(mem, obj, offset) { \
444 1.16 mrg (mem)->flags = PG_BUSY | PG_CLEAN | PG_FAKE; \
445 1.16 mrg if (obj) \
446 1.16 mrg vm_page_insert((mem), (obj), (offset)); \
447 1.16 mrg else \
448 1.16 mrg (mem)->object = NULL; \
449 1.16 mrg (mem)->wire_count = 0; \
450 1.16 mrg }
451 1.16 mrg
452 1.16 mrg #if VM_PAGE_DEBUG
453 1.16 mrg
454 1.16 mrg /*
455 1.16 mrg * VM_PAGE_CHECK: debugging check of a vm_page structure
456 1.16 mrg */
457 1.16 mrg static __inline void
458 1.16 mrg VM_PAGE_CHECK(mem)
459 1.16 mrg struct vm_page *mem;
460 1.16 mrg {
461 1.16 mrg int lcv;
462 1.16 mrg
463 1.16 mrg for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
464 1.16 mrg if ((unsigned int) mem >= (unsigned int) vm_physmem[lcv].pgs &&
465 1.16 mrg (unsigned int) mem <= (unsigned int) vm_physmem[lcv].lastpg)
466 1.16 mrg break;
467 1.16 mrg }
468 1.16 mrg if (lcv == vm_nphysseg ||
469 1.16 mrg (mem->flags & (PG_ACTIVE|PG_INACTIVE)) == (PG_ACTIVE|PG_INACTIVE))
470 1.16 mrg panic("vm_page_check: not valid!");
471 1.16 mrg return;
472 1.16 mrg }
473 1.16 mrg
474 1.16 mrg #else /* VM_PAGE_DEBUG */
475 1.16 mrg #define VM_PAGE_CHECK(mem)
476 1.16 mrg #endif /* VM_PAGE_DEBUG */
477 1.13 thorpej
478 1.13 thorpej #endif /* _KERNEL */
479 1.1 mrg
480 1.4 perry #endif /* _UVM_UVM_PAGE_H_ */
481