1 /* $NetBSD: uvm_page.c,v 1.174 2011/06/12 03:36:03 rmind Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
37 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
38 *
39 *
40 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41 * All rights reserved.
42 *
43 * Permission to use, copy, modify and distribute this software and
44 * its documentation is hereby granted, provided that both the copyright
45 * notice and this permission notice appear in all copies of the
46 * software, derivative works or modified versions, and any portions
47 * thereof, and that both notices appear in supporting documentation.
48 *
49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52 *
53 * Carnegie Mellon requests users of this software to return to
54 *
55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
56 * School of Computer Science
57 * Carnegie Mellon University
58 * Pittsburgh PA 15213-3890
59 *
60 * any improvements or extensions that they make and grant Carnegie the
61 * rights to redistribute these changes.
62 */
63
64 /*
65 * uvm_page.c: page ops.
66 */
67
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.174 2011/06/12 03:36:03 rmind Exp $");
70
71 #include "opt_ddb.h"
72 #include "opt_uvmhist.h"
73 #include "opt_readahead.h"
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/malloc.h>
78 #include <sys/sched.h>
79 #include <sys/kernel.h>
80 #include <sys/vnode.h>
81 #include <sys/proc.h>
82 #include <sys/atomic.h>
83 #include <sys/cpu.h>
84
85 #include <uvm/uvm.h>
86 #include <uvm/uvm_ddb.h>
87 #include <uvm/uvm_pdpolicy.h>
88
89 /*
90 * global vars... XXXCDC: move to uvm. structure.
91 */
92
93 /*
94 * physical memory config is stored in vm_physmem.
95 */
96
97 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
98 int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
99 #define vm_nphysmem vm_nphysseg
100
101 /*
102 * Some supported CPUs in a given architecture don't support all
103 * of the things necessary to do idle page zero'ing efficiently.
104 * We therefore provide a way to enable it from machdep code here.
105 */
106 bool vm_page_zero_enable = false;
107
108 /*
109 * number of pages per-CPU to reserve for the kernel.
110 */
111 int vm_page_reserve_kernel = 5;
112
113 /*
114 * physical memory size;
115 */
116 int physmem;
117
118 /*
119 * local variables
120 */
121
122 /*
123 * these variables record the values returned by vm_page_bootstrap,
124 * for debugging purposes. The implementation of uvm_pageboot_alloc
125 * and pmap_startup here also uses them internally.
126 */
127
128 static vaddr_t virtual_space_start;
129 static vaddr_t virtual_space_end;
130
131 /*
132 * we allocate an initial number of page colors in uvm_page_init(),
133 * and remember them. We may re-color pages as cache sizes are
134 * discovered during the autoconfiguration phase. But we can never
135 * free the initial set of buckets, since they are allocated using
136 * uvm_pageboot_alloc().
137 */
138
139 static bool have_recolored_pages /* = false */;
140
141 MALLOC_DEFINE(M_VMPAGE, "VM page", "VM page");
142
143 #ifdef DEBUG
144 vaddr_t uvm_zerocheckkva;
145 #endif /* DEBUG */
146
147 /*
148 * local prototypes
149 */
150
151 static void uvm_pageinsert(struct uvm_object *, struct vm_page *);
152 static void uvm_pageremove(struct uvm_object *, struct vm_page *);
153
154 /*
155 * per-object tree of pages
156 */
157
158 static signed int
159 uvm_page_compare_nodes(void *ctx, const void *n1, const void *n2)
160 {
161 const struct vm_page *pg1 = n1;
162 const struct vm_page *pg2 = n2;
163 const voff_t a = pg1->offset;
164 const voff_t b = pg2->offset;
165
166 if (a < b)
167 return -1;
168 if (a > b)
169 return 1;
170 return 0;
171 }
172
173 static signed int
174 uvm_page_compare_key(void *ctx, const void *n, const void *key)
175 {
176 const struct vm_page *pg = n;
177 const voff_t a = pg->offset;
178 const voff_t b = *(const voff_t *)key;
179
180 if (a < b)
181 return -1;
182 if (a > b)
183 return 1;
184 return 0;
185 }
186
187 const rb_tree_ops_t uvm_page_tree_ops = {
188 .rbto_compare_nodes = uvm_page_compare_nodes,
189 .rbto_compare_key = uvm_page_compare_key,
190 .rbto_node_offset = offsetof(struct vm_page, rb_node),
191 .rbto_context = NULL
192 };
193
194 /*
195 * inline functions
196 */
197
198 /*
199 * uvm_pageinsert: insert a page in the object.
200 *
201 * => caller must lock object
202 * => caller must lock page queues
203 * => caller should have already set pg's object and offset pointers
204 * and bumped the version counter
205 */
206
207 static inline void
208 uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg,
209 struct vm_page *where)
210 {
211
212 KASSERT(uobj == pg->uobject);
213 KASSERT(mutex_owned(uobj->vmobjlock));
214 KASSERT((pg->flags & PG_TABLED) == 0);
215 KASSERT(where == NULL || (where->flags & PG_TABLED));
216 KASSERT(where == NULL || (where->uobject == uobj));
217
218 if (UVM_OBJ_IS_VNODE(uobj)) {
219 if (uobj->uo_npages == 0) {
220 struct vnode *vp = (struct vnode *)uobj;
221
222 vholdl(vp);
223 }
224 if (UVM_OBJ_IS_VTEXT(uobj)) {
225 atomic_inc_uint(&uvmexp.execpages);
226 } else {
227 atomic_inc_uint(&uvmexp.filepages);
228 }
229 } else if (UVM_OBJ_IS_AOBJ(uobj)) {
230 atomic_inc_uint(&uvmexp.anonpages);
231 }
232
233 if (where)
234 TAILQ_INSERT_AFTER(&uobj->memq, where, pg, listq.queue);
235 else
236 TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
237 pg->flags |= PG_TABLED;
238 uobj->uo_npages++;
239 }
240
241
242 static inline void
243 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
244 {
245 struct vm_page *ret;
246
247 KASSERT(uobj == pg->uobject);
248 ret = rb_tree_insert_node(&uobj->rb_tree, pg);
249 KASSERT(ret == pg);
250 }
251
252 static inline void
253 uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg)
254 {
255
256 KDASSERT(uobj != NULL);
257 uvm_pageinsert_tree(uobj, pg);
258 uvm_pageinsert_list(uobj, pg, NULL);
259 }
260
261 /*
262 * uvm_pageremove: remove page from object.
263 *
264 * => caller must lock object
265 * => caller must lock page queues
266 */
267
268 static inline void
269 uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg)
270 {
271
272 KASSERT(uobj == pg->uobject);
273 KASSERT(mutex_owned(uobj->vmobjlock));
274 KASSERT(pg->flags & PG_TABLED);
275
276 if (UVM_OBJ_IS_VNODE(uobj)) {
277 if (uobj->uo_npages == 1) {
278 struct vnode *vp = (struct vnode *)uobj;
279
280 holdrelel(vp);
281 }
282 if (UVM_OBJ_IS_VTEXT(uobj)) {
283 atomic_dec_uint(&uvmexp.execpages);
284 } else {
285 atomic_dec_uint(&uvmexp.filepages);
286 }
287 } else if (UVM_OBJ_IS_AOBJ(uobj)) {
288 atomic_dec_uint(&uvmexp.anonpages);
289 }
290
291 /* object should be locked */
292 uobj->uo_npages--;
293 TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
294 pg->flags &= ~PG_TABLED;
295 pg->uobject = NULL;
296 }
297
298 static inline void
299 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
300 {
301
302 KASSERT(uobj == pg->uobject);
303 rb_tree_remove_node(&uobj->rb_tree, pg);
304 }
305
306 static inline void
307 uvm_pageremove(struct uvm_object *uobj, struct vm_page *pg)
308 {
309
310 KDASSERT(uobj != NULL);
311 uvm_pageremove_tree(uobj, pg);
312 uvm_pageremove_list(uobj, pg);
313 }
314
315 static void
316 uvm_page_init_buckets(struct pgfreelist *pgfl)
317 {
318 int color, i;
319
320 for (color = 0; color < uvmexp.ncolors; color++) {
321 for (i = 0; i < PGFL_NQUEUES; i++) {
322 LIST_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]);
323 }
324 }
325 }
326
327 /*
328 * uvm_page_init: init the page system. called from uvm_init().
329 *
330 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
331 */
332
333 void
334 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
335 {
336 static struct uvm_cpu boot_cpu;
337 psize_t freepages, pagecount, bucketcount, n;
338 struct pgflbucket *bucketarray, *cpuarray;
339 struct vm_physseg *seg;
340 struct vm_page *pagearray;
341 int lcv;
342 u_int i;
343 paddr_t paddr;
344
345 KASSERT(ncpu <= 1);
346 CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));
347
348 /*
349 * init the page queues and page queue locks, except the free
350 * list; we allocate that later (with the initial vm_page
351 * structures).
352 */
353
354 uvm.cpus[0] = &boot_cpu;
355 curcpu()->ci_data.cpu_uvm = &boot_cpu;
356 uvm_reclaim_init();
357 uvmpdpol_init();
358 mutex_init(&uvm_pageqlock, MUTEX_DRIVER, IPL_NONE);
359 mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM);
360
361 /*
362 * allocate vm_page structures.
363 */
364
365 /*
366 * sanity check:
367 * before calling this function the MD code is expected to register
368 * some free RAM with the uvm_page_physload() function. our job
369 * now is to allocate vm_page structures for this memory.
370 */
371
372 if (vm_nphysmem == 0)
373 panic("uvm_page_bootstrap: no memory pre-allocated");
374
375 /*
376 * first calculate the number of free pages...
377 *
378 * note that we use start/end rather than avail_start/avail_end.
379 * this allows us to allocate extra vm_page structures in case we
380 * want to return some memory to the pool after booting.
381 */
382
383 freepages = 0;
384 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
385 seg = VM_PHYSMEM_PTR(lcv);
386 freepages += (seg->end - seg->start);
387 }
388
389 /*
390 * Let MD code initialize the number of colors, or default
391 * to 1 color if MD code doesn't care.
392 */
393 if (uvmexp.ncolors == 0)
394 uvmexp.ncolors = 1;
395 uvmexp.colormask = uvmexp.ncolors - 1;
396
397 /*
398 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
399 * use. for each page of memory we use we need a vm_page structure.
400 * thus, the total number of pages we can use is the total size of
401 * the memory divided by (PAGE_SIZE plus the size of the vm_page
402 * structure). we add one to freepages as a fudge factor to avoid
403 * truncation errors (since we can only allocate in terms of whole
404 * pages).
405 */
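	/*
	 * For example (hypothetical numbers): with PAGE_SIZE == 4096 and
	 * freepages == 262144 (1 GiB of managed RAM), the expression below
	 * computes
	 *	pagecount = ((262144 + 1) * 4096) /
	 *	    (4096 + sizeof(struct vm_page))
	 * which comes out somewhat below 262144, since each usable page must
	 * also fund its own vm_page structure from the same memory.
	 */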
406
407 bucketcount = uvmexp.ncolors * VM_NFREELIST;
408 pagecount = ((freepages + 1) << PAGE_SHIFT) /
409 (PAGE_SIZE + sizeof(struct vm_page));
410
411 bucketarray = (void *)uvm_pageboot_alloc((bucketcount *
412 sizeof(struct pgflbucket) * 2) + (pagecount *
413 sizeof(struct vm_page)));
414 cpuarray = bucketarray + bucketcount;
415 pagearray = (struct vm_page *)(bucketarray + bucketcount * 2);
416
417 for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
418 uvm.page_free[lcv].pgfl_buckets =
419 (bucketarray + (lcv * uvmexp.ncolors));
420 uvm_page_init_buckets(&uvm.page_free[lcv]);
421 uvm.cpus[0]->page_free[lcv].pgfl_buckets =
422 (cpuarray + (lcv * uvmexp.ncolors));
423 uvm_page_init_buckets(&uvm.cpus[0]->page_free[lcv]);
424 }
425 memset(pagearray, 0, pagecount * sizeof(struct vm_page));
426
427 /*
428 * init the vm_page structures and put them in the correct place.
429 */
430
431 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
432 seg = VM_PHYSMEM_PTR(lcv);
433 n = seg->end - seg->start;
434
435 /* set up page array pointers */
436 seg->pgs = pagearray;
437 pagearray += n;
438 pagecount -= n;
439 seg->lastpg = seg->pgs + n;
440
441 /* init and free vm_pages (we've already zeroed them) */
442 paddr = ctob(seg->start);
443 for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
444 seg->pgs[i].phys_addr = paddr;
445 #ifdef __HAVE_VM_PAGE_MD
446 VM_MDPAGE_INIT(&seg->pgs[i]);
447 #endif
448 if (atop(paddr) >= seg->avail_start &&
449 atop(paddr) < seg->avail_end) {
450 uvmexp.npages++;
451 /* add page to free pool */
452 uvm_pagefree(&seg->pgs[i]);
453 }
454 }
455 }
456
457 /*
458 * pass up the values of virtual_space_start and
459 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
460 * layers of the VM.
461 */
462
463 *kvm_startp = round_page(virtual_space_start);
464 *kvm_endp = trunc_page(virtual_space_end);
465 #ifdef DEBUG
466 /*
467 * steal kva for uvm_pagezerocheck().
468 */
469 uvm_zerocheckkva = *kvm_startp;
470 *kvm_startp += PAGE_SIZE;
471 #endif /* DEBUG */
472
473 /*
474 * init various thresholds.
475 */
476
477 uvmexp.reserve_pagedaemon = 1;
478 uvmexp.reserve_kernel = vm_page_reserve_kernel;
479
480 /*
481 * determine if we should zero pages in the idle loop.
482 */
483
484 uvm.cpus[0]->page_idle_zero = vm_page_zero_enable;
485
486 /*
487 * done!
488 */
489
490 uvm.page_init_done = true;
491 }
492
493 /*
494 * uvm_setpagesize: set the page size
495 *
496 * => sets uvmexp.pageshift and uvmexp.pagemask from uvmexp.pagesize.
497 */
498
499 void
500 uvm_setpagesize(void)
501 {
502
503 /*
504 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
505 * to be a constant (indicated by being a non-zero value).
506 */
507 if (uvmexp.pagesize == 0) {
508 if (PAGE_SIZE == 0)
509 panic("uvm_setpagesize: uvmexp.pagesize not set");
510 uvmexp.pagesize = PAGE_SIZE;
511 }
512 uvmexp.pagemask = uvmexp.pagesize - 1;
513 if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
514 panic("uvm_setpagesize: page size %u (%#x) not a power of two",
515 uvmexp.pagesize, uvmexp.pagesize);
516 for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
517 if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
518 break;
519 }
520
521 /*
522 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
523 */
524
525 vaddr_t
526 uvm_pageboot_alloc(vsize_t size)
527 {
528 static bool initialized = false;
529 vaddr_t addr;
530 #if !defined(PMAP_STEAL_MEMORY)
531 vaddr_t vaddr;
532 paddr_t paddr;
533 #endif
534
535 /*
536 * on first call to this function, initialize ourselves.
537 */
538 if (initialized == false) {
539 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
540
541 /* round it the way we like it */
542 virtual_space_start = round_page(virtual_space_start);
543 virtual_space_end = trunc_page(virtual_space_end);
544
545 initialized = true;
546 }
547
548 /* round to page size */
549 size = round_page(size);
550
551 #if defined(PMAP_STEAL_MEMORY)
552
553 /*
554 * defer bootstrap allocation to MD code (it may want to allocate
555 * from a direct-mapped segment). pmap_steal_memory should adjust
556 * virtual_space_start/virtual_space_end if necessary.
557 */
558
559 addr = pmap_steal_memory(size, &virtual_space_start,
560 &virtual_space_end);
561
562 return(addr);
563
564 #else /* !PMAP_STEAL_MEMORY */
565
566 /*
567 * allocate virtual memory for this request
568 */
569 if (virtual_space_start == virtual_space_end ||
570 (virtual_space_end - virtual_space_start) < size)
571 panic("uvm_pageboot_alloc: out of virtual space");
572
573 addr = virtual_space_start;
574
575 #ifdef PMAP_GROWKERNEL
576 /*
577 * If the kernel pmap can't map the requested space,
578 * then allocate more resources for it.
579 */
580 if (uvm_maxkaddr < (addr + size)) {
581 uvm_maxkaddr = pmap_growkernel(addr + size);
582 if (uvm_maxkaddr < (addr + size))
583 panic("uvm_pageboot_alloc: pmap_growkernel() failed");
584 }
585 #endif
586
587 virtual_space_start += size;
588
589 /*
590 * allocate and mapin physical pages to back new virtual pages
591 */
592
593 for (vaddr = round_page(addr) ; vaddr < addr + size ;
594 vaddr += PAGE_SIZE) {
595
596 if (!uvm_page_physget(&paddr))
597 panic("uvm_pageboot_alloc: out of memory");
598
599 /*
600 * Note this memory is no longer managed, so using
601 * pmap_kenter is safe.
602 */
603 pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
604 }
605 pmap_update(pmap_kernel());
606 return(addr);
607 #endif /* PMAP_STEAL_MEMORY */
608 }
609
610 #if !defined(PMAP_STEAL_MEMORY)
611 /*
612 * uvm_page_physget: "steal" one page from the vm_physmem structure.
613 *
614 * => attempt to allocate it off the end of a segment in which the "avail"
615 * values match the start/end values. if we can't do that, then we
616 * will advance both values (making them equal, and removing some
617 * vm_page structures from the non-avail area).
618 * => return false if out of memory.
619 */
620
621 /* subroutine: try to allocate from memory chunks on the specified freelist */
622 static bool uvm_page_physget_freelist(paddr_t *, int);
623
624 static bool
625 uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
626 {
627 struct vm_physseg *seg;
628 int lcv, x;
629
630 /* pass 1: try allocating from a matching end */
631 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
632 for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
633 #else
634 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
635 #endif
636 {
637 seg = VM_PHYSMEM_PTR(lcv);
638
639 if (uvm.page_init_done == true)
640 panic("uvm_page_physget: called _after_ bootstrap");
641
642 if (seg->free_list != freelist)
643 continue;
644
645 /* try from front */
646 if (seg->avail_start == seg->start &&
647 seg->avail_start < seg->avail_end) {
648 *paddrp = ctob(seg->avail_start);
649 seg->avail_start++;
650 seg->start++;
651 /* nothing left? nuke it */
652 if (seg->avail_start == seg->end) {
653 if (vm_nphysmem == 1)
654 panic("uvm_page_physget: out of memory!");
655 vm_nphysmem--;
656 for (x = lcv ; x < vm_nphysmem ; x++)
657 /* structure copy */
658 VM_PHYSMEM_PTR_SWAP(x, x + 1);
659 }
660 return (true);
661 }
662
663 /* try from rear */
664 if (seg->avail_end == seg->end &&
665 seg->avail_start < seg->avail_end) {
666 *paddrp = ctob(seg->avail_end - 1);
667 seg->avail_end--;
668 seg->end--;
669 /* nothing left? nuke it */
670 if (seg->avail_end == seg->start) {
671 if (vm_nphysmem == 1)
672 panic("uvm_page_physget: out of memory!");
673 vm_nphysmem--;
674 for (x = lcv ; x < vm_nphysmem ; x++)
675 /* structure copy */
676 VM_PHYSMEM_PTR_SWAP(x, x + 1);
677 }
678 return (true);
679 }
680 }
681
682 /* pass 2: forget about matching ends, just allocate something */
683 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
684 for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
685 #else
686 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
687 #endif
688 {
689 seg = VM_PHYSMEM_PTR(lcv);
690
691 /* any room in this bank? */
692 if (seg->avail_start >= seg->avail_end)
693 continue; /* nope */
694
695 *paddrp = ctob(seg->avail_start);
696 seg->avail_start++;
697 /* truncate! */
698 seg->start = seg->avail_start;
699
700 /* nothing left? nuke it */
701 if (seg->avail_start == seg->end) {
702 if (vm_nphysmem == 1)
703 panic("uvm_page_physget: out of memory!");
704 vm_nphysmem--;
705 for (x = lcv ; x < vm_nphysmem ; x++)
706 /* structure copy */
707 VM_PHYSMEM_PTR_SWAP(x, x + 1);
708 }
709 return (true);
710 }
711
712 return (false); /* whoops! */
713 }
714
715 bool
716 uvm_page_physget(paddr_t *paddrp)
717 {
718 int i;
719
720 /* try in the order of freelist preference */
721 for (i = 0; i < VM_NFREELIST; i++)
722 if (uvm_page_physget_freelist(paddrp, i) == true)
723 return (true);
724 return (false);
725 }
726 #endif /* PMAP_STEAL_MEMORY */
727
728 /*
729 * uvm_page_physload: load physical memory into VM system
730 *
731 * => all args are PFs
732 * => all pages in start/end get vm_page structures
733 * => areas marked by avail_start/avail_end get added to the free page pool
734 * => we are limited to VM_PHYSSEG_MAX physical memory segments
735 */
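/*
 * Sketch of a typical machine-dependent bootstrap call (the names below are
 * hypothetical; note that the arguments are page frame numbers, not bytes):
 *
 *	uvm_page_physload(atop(seg_start_pa), atop(seg_end_pa),
 *	    atop(avail_start_pa), atop(avail_end_pa), VM_FREELIST_DEFAULT);
 */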
736
737 void
738 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
739 paddr_t avail_end, int free_list)
740 {
741 int preload, lcv;
742 psize_t npages;
743 struct vm_page *pgs;
744 struct vm_physseg *ps;
745
746 if (uvmexp.pagesize == 0)
747 panic("uvm_page_physload: page size not set!");
748 if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
749 panic("uvm_page_physload: bad free list %d", free_list);
750 if (start >= end)
751 panic("uvm_page_physload: start >= end");
752
753 /*
754 * do we have room?
755 */
756
757 if (vm_nphysmem == VM_PHYSSEG_MAX) {
758 printf("uvm_page_physload: unable to load physical memory "
759 "segment\n");
760 printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
761 VM_PHYSSEG_MAX, (long long)start, (long long)end);
762 printf("\tincrease VM_PHYSSEG_MAX\n");
763 return;
764 }
765
766 /*
767 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
768 * called yet, so malloc is not available).
769 */
770
771 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
772 if (VM_PHYSMEM_PTR(lcv)->pgs)
773 break;
774 }
775 preload = (lcv == vm_nphysmem);
776
777 /*
778 * if VM is already running, attempt to malloc() vm_page structures
779 */
780
781 if (!preload) {
782 panic("uvm_page_physload: tried to add RAM after vm_mem_init");
783 } else {
784 pgs = NULL;
785 npages = 0;
786 }
787
788 /*
789 * now insert us in the proper place in vm_physmem[]
790 */
791
792 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
793 /* random: put it at the end (easy!) */
794 ps = VM_PHYSMEM_PTR(vm_nphysmem);
795 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
796 {
797 int x;
798 /* sort by address for binary search */
799 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
800 if (start < VM_PHYSMEM_PTR(lcv)->start)
801 break;
802 ps = VM_PHYSMEM_PTR(lcv);
803 /* move back other entries, if necessary ... */
804 for (x = vm_nphysmem ; x > lcv ; x--)
805 /* structure copy */
806 VM_PHYSMEM_PTR_SWAP(x, x - 1);
807 }
808 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
809 {
810 int x;
811 /* sort by largest segment first */
812 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
813 if ((end - start) >
814 (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
815 break;
816 ps = VM_PHYSMEM_PTR(lcv);
817 /* move back other entries, if necessary ... */
818 for (x = vm_nphysmem ; x > lcv ; x--)
819 /* structure copy */
820 VM_PHYSMEM_PTR_SWAP(x, x - 1);
821 }
822 #else
823 panic("uvm_page_physload: unknown physseg strategy selected!");
824 #endif
825
826 ps->start = start;
827 ps->end = end;
828 ps->avail_start = avail_start;
829 ps->avail_end = avail_end;
830 if (preload) {
831 ps->pgs = NULL;
832 } else {
833 ps->pgs = pgs;
834 ps->lastpg = pgs + npages;
835 }
836 ps->free_list = free_list;
837 vm_nphysmem++;
838
839 if (!preload) {
840 uvmpdpol_reinit();
841 }
842 }
843
844 /*
845 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
846 */
847
848 #if VM_PHYSSEG_MAX == 1
849 static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *);
850 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
851 static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *);
852 #else
853 static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *);
854 #endif
855
856 /*
857 * vm_physseg_find: find vm_physseg structure that belongs to a PA
858 */
859 int
860 vm_physseg_find(paddr_t pframe, int *offp)
861 {
862
863 #if VM_PHYSSEG_MAX == 1
864 return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
865 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
866 return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
867 #else
868 return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
869 #endif
870 }
871
872 #if VM_PHYSSEG_MAX == 1
873 static inline int
874 vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
875 {
876
877 /* 'contig' case */
878 if (pframe >= segs[0].start && pframe < segs[0].end) {
879 if (offp)
880 *offp = pframe - segs[0].start;
881 return(0);
882 }
883 return(-1);
884 }
885
886 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
887
888 static inline int
889 vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
890 {
891 /* binary search for it */
892 u_int start, len, try;
893
894 /*
895 * if try is too large (thus target is less than try) we reduce
896 * the length to trunc(len/2) [i.e. everything smaller than "try"]
897 *
898 * if the try is too small (thus target is greater than try) then
899 * we set the new start to be (try + 1). this means we need to
900 * reduce the length to (round(len/2) - 1).
901 *
902 * note "adjust" below which takes advantage of the fact that
903 * (round(len/2) - 1) == trunc((len - 1) / 2)
904 * for any value of len we may have
905 */
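	/*
	 * Worked example: with start == 0 and len == 5, try == 2.  If the
	 * target lies above segs[2], the surviving candidates are
	 * segs[3..4], i.e. 2 entries, and indeed trunc((5 - 1) / 2) == 2.
	 */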
906
907 for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
908 try = start + (len / 2); /* try in the middle */
909
910 /* start past our try? */
911 if (pframe >= segs[try].start) {
912 /* was try correct? */
913 if (pframe < segs[try].end) {
914 if (offp)
915 *offp = pframe - segs[try].start;
916 return(try); /* got it */
917 }
918 start = try + 1; /* next time, start here */
919 len--; /* "adjust" */
920 } else {
921 /*
922 * pframe before try, just reduce length of
923 * region, done in "for" loop
924 */
925 }
926 }
927 return(-1);
928 }
929
930 #else
931
932 static inline int
933 vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
934 {
935 /* linear search for it */
936 int lcv;
937
938 for (lcv = 0; lcv < nsegs; lcv++) {
939 if (pframe >= segs[lcv].start &&
940 pframe < segs[lcv].end) {
941 if (offp)
942 *offp = pframe - segs[lcv].start;
943 return(lcv); /* got it */
944 }
945 }
946 return(-1);
947 }
948 #endif
949
950 /*
951 * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
952 * back from an I/O mapping (ugh!). used in some MD code as well.
953 */
954 struct vm_page *
955 uvm_phys_to_vm_page(paddr_t pa)
956 {
957 paddr_t pf = atop(pa);
958 int off;
959 int psi;
960
961 psi = vm_physseg_find(pf, &off);
962 if (psi != -1)
963 return(&VM_PHYSMEM_PTR(psi)->pgs[off]);
964 return(NULL);
965 }
966
967 paddr_t
968 uvm_vm_page_to_phys(const struct vm_page *pg)
969 {
970
971 return pg->phys_addr;
972 }
973
974 /*
975 * uvm_page_recolor: Recolor the pages if the new bucket count is
976 * larger than the old one.
977 */
978
979 void
980 uvm_page_recolor(int newncolors)
981 {
982 struct pgflbucket *bucketarray, *cpuarray, *oldbucketarray;
983 struct pgfreelist gpgfl, pgfl;
984 struct vm_page *pg;
985 vsize_t bucketcount;
986 int lcv, color, i, ocolors;
987 struct uvm_cpu *ucpu;
988
989 if (newncolors <= uvmexp.ncolors)
990 return;
991
992 if (uvm.page_init_done == false) {
993 uvmexp.ncolors = newncolors;
994 return;
995 }
996
997 bucketcount = newncolors * VM_NFREELIST;
998 bucketarray = malloc(bucketcount * sizeof(struct pgflbucket) * 2,
999 M_VMPAGE, M_NOWAIT);
1000 cpuarray = bucketarray + bucketcount;
1001 if (bucketarray == NULL) {
1002 printf("WARNING: unable to allocate %ld page color buckets\n",
1003 (long) bucketcount);
1004 return;
1005 }
1006
1007 mutex_spin_enter(&uvm_fpageqlock);
1008
1009 /* Make sure we should still do this. */
1010 if (newncolors <= uvmexp.ncolors) {
1011 mutex_spin_exit(&uvm_fpageqlock);
1012 free(bucketarray, M_VMPAGE);
1013 return;
1014 }
1015
1016 oldbucketarray = uvm.page_free[0].pgfl_buckets;
1017 ocolors = uvmexp.ncolors;
1018
1019 uvmexp.ncolors = newncolors;
1020 uvmexp.colormask = uvmexp.ncolors - 1;
1021
1022 ucpu = curcpu()->ci_data.cpu_uvm;
1023 for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
1024 gpgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
1025 pgfl.pgfl_buckets = (cpuarray + (lcv * uvmexp.ncolors));
1026 uvm_page_init_buckets(&gpgfl);
1027 uvm_page_init_buckets(&pgfl);
1028 for (color = 0; color < ocolors; color++) {
1029 for (i = 0; i < PGFL_NQUEUES; i++) {
1030 while ((pg = LIST_FIRST(&uvm.page_free[
1031 lcv].pgfl_buckets[color].pgfl_queues[i]))
1032 != NULL) {
1033 LIST_REMOVE(pg, pageq.list); /* global */
1034 LIST_REMOVE(pg, listq.list); /* cpu */
1035 LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[
1036 VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
1037 i], pg, pageq.list);
1038 LIST_INSERT_HEAD(&pgfl.pgfl_buckets[
1039 VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
1040 i], pg, listq.list);
1041 }
1042 }
1043 }
1044 uvm.page_free[lcv].pgfl_buckets = gpgfl.pgfl_buckets;
1045 ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
1046 }
1047
1048 if (have_recolored_pages) {
1049 mutex_spin_exit(&uvm_fpageqlock);
1050 free(oldbucketarray, M_VMPAGE);
1051 return;
1052 }
1053
1054 have_recolored_pages = true;
1055 mutex_spin_exit(&uvm_fpageqlock);
1056 }
1057
1058 /*
1059 * uvm_cpu_attach: initialize per-CPU data structures.
1060 */
1061
1062 void
1063 uvm_cpu_attach(struct cpu_info *ci)
1064 {
1065 struct pgflbucket *bucketarray;
1066 struct pgfreelist pgfl;
1067 struct uvm_cpu *ucpu;
1068 vsize_t bucketcount;
1069 int lcv;
1070
1071 if (CPU_IS_PRIMARY(ci)) {
1072 /* Already done in uvm_page_init(). */
1073 return;
1074 }
1075
1076 /* Add more reserve pages for this CPU. */
1077 uvmexp.reserve_kernel += vm_page_reserve_kernel;
1078
1079 /* Configure this CPU's free lists. */
1080 bucketcount = uvmexp.ncolors * VM_NFREELIST;
1081 bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
1082 M_VMPAGE, M_WAITOK);
1083 ucpu = kmem_zalloc(sizeof(*ucpu), KM_SLEEP);
1084 uvm.cpus[cpu_index(ci)] = ucpu;
1085 ci->ci_data.cpu_uvm = ucpu;
1086 for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
1087 pgfl.pgfl_buckets = (bucketarray + (lcv * uvmexp.ncolors));
1088 uvm_page_init_buckets(&pgfl);
1089 ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
1090 }
1091 }
1092
1093 /*
1094 * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
1095 */
1096
1097 static struct vm_page *
1098 uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
1099 int *trycolorp)
1100 {
1101 struct pgflist *freeq;
1102 struct vm_page *pg;
1103 int color, trycolor = *trycolorp;
1104 struct pgfreelist *gpgfl, *pgfl;
1105
1106 KASSERT(mutex_owned(&uvm_fpageqlock));
1107
1108 color = trycolor;
1109 pgfl = &ucpu->page_free[flist];
1110 gpgfl = &uvm.page_free[flist];
1111 do {
1112 /* cpu, try1 */
1113 if ((pg = LIST_FIRST((freeq =
1114 &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
1115 VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
1116 uvmexp.cpuhit++;
1117 goto gotit;
1118 }
1119 /* global, try1 */
1120 if ((pg = LIST_FIRST((freeq =
1121 &gpgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
1122 VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
1123 uvmexp.cpumiss++;
1124 goto gotit;
1125 }
1126 /* cpu, try2 */
1127 if ((pg = LIST_FIRST((freeq =
1128 &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
1129 VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
1130 uvmexp.cpuhit++;
1131 goto gotit;
1132 }
1133 /* global, try2 */
1134 if ((pg = LIST_FIRST((freeq =
1135 &gpgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
1136 VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
1137 uvmexp.cpumiss++;
1138 goto gotit;
1139 }
1140 color = (color + 1) & uvmexp.colormask;
1141 } while (color != trycolor);
1142
1143 return (NULL);
1144
1145 gotit:
1146 LIST_REMOVE(pg, pageq.list); /* global list */
1147 LIST_REMOVE(pg, listq.list); /* per-cpu list */
1148 uvmexp.free--;
1149
1150 /* update zero'd page count */
1151 if (pg->flags & PG_ZERO)
1152 uvmexp.zeropages--;
1153
1154 if (color == trycolor)
1155 uvmexp.colorhit++;
1156 else {
1157 uvmexp.colormiss++;
1158 *trycolorp = color;
1159 }
1160
1161 return (pg);
1162 }
1163
1164 /*
1165 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
1166 *
1167 * => return null if no pages free
1168 * => wake up pagedaemon if number of free pages drops below low water mark
1169 * => if obj != NULL, obj must be locked (to put in obj's tree)
1170 * => if anon != NULL, anon must be locked (to put in anon)
1171 * => only one of obj or anon can be non-null
1172 * => caller must activate/deactivate page if it is not wired.
1173 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
1174 * => policy decision: it is more important to pull a page off of the
1175 * appropriate priority free list than it is to get a zero'd or
1176 * unknown contents page. This is because we live with the
1177 * consequences of a bad free list decision for the entire
1178 * lifetime of the page, e.g. if the page comes from memory that
1179 * is slower to access.
1180 */
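/*
 * Typical use, sketched (most callers go through the uvm_pagealloc()
 * wrapper, which supplies UVM_PGA_STRAT_NORMAL and free list 0).  A NULL
 * return means no page was available; callers that may sleep usually
 * uvm_wait() and retry:
 *
 *	pg = uvm_pagealloc_strat(uobj, off, NULL, UVM_PGA_ZERO,
 *	    UVM_PGA_STRAT_NORMAL, 0);
 *	if (pg == NULL)
 *		uvm_wait("pgalloc");
 */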
1181
1182 struct vm_page *
1183 uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
1184 int flags, int strat, int free_list)
1185 {
1186 int lcv, try1, try2, zeroit = 0, color;
1187 struct uvm_cpu *ucpu;
1188 struct vm_page *pg;
1189 lwp_t *l;
1190
1191 KASSERT(obj == NULL || anon == NULL);
1192 KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0);
1193 KASSERT(off == trunc_page(off));
1194 KASSERT(obj == NULL || mutex_owned(obj->vmobjlock));
1195 KASSERT(anon == NULL || mutex_owned(anon->an_lock));
1196
1197 mutex_spin_enter(&uvm_fpageqlock);
1198
1199 /*
1200 * This implements a global round-robin page coloring
1201 * algorithm.
1202 */
1203
1204 ucpu = curcpu()->ci_data.cpu_uvm;
1205 if (flags & UVM_FLAG_COLORMATCH) {
1206 color = atop(off) & uvmexp.colormask;
1207 } else {
1208 color = ucpu->page_free_nextcolor;
1209 }
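	/*
	 * Example: with uvmexp.ncolors == 4, successive allocations on a CPU
	 * walk the colors 0, 1, 2, 3, 0, ... (the next color is advanced
	 * below once we know which color we actually got), unless
	 * UVM_FLAG_COLORMATCH pins the color to atop(off) & uvmexp.colormask.
	 */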
1210
1211 /*
1212 * check to see if we need to generate some free pages by waking
1213 * the pagedaemon.
1214 */
1215
1216 uvm_kick_pdaemon();
1217
1218 /*
1219 * fail if any of these conditions is true:
1220 * [1] there really are no free pages, or
1221 * [2] only kernel "reserved" pages remain and
1222 * reserved pages have not been requested.
1223 * [3] only pagedaemon "reserved" pages remain and
1224 * the requestor isn't the pagedaemon.
1225 * we make kernel reserve pages available if called by a
1226 * kernel thread or a realtime thread.
1227 */
1228 l = curlwp;
1229 if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
1230 flags |= UVM_PGA_USERESERVE;
1231 }
1232 if ((uvmexp.free <= uvmexp.reserve_kernel &&
1233 (flags & UVM_PGA_USERESERVE) == 0) ||
1234 (uvmexp.free <= uvmexp.reserve_pagedaemon &&
1235 curlwp != uvm.pagedaemon_lwp))
1236 goto fail;
1237
1238 #if PGFL_NQUEUES != 2
1239 #error uvm_pagealloc_strat needs to be updated
1240 #endif
1241
1242 /*
1243 * If we want a zero'd page, try the ZEROS queue first, otherwise
1244 * we try the UNKNOWN queue first.
1245 */
1246 if (flags & UVM_PGA_ZERO) {
1247 try1 = PGFL_ZEROS;
1248 try2 = PGFL_UNKNOWN;
1249 } else {
1250 try1 = PGFL_UNKNOWN;
1251 try2 = PGFL_ZEROS;
1252 }
1253
1254 again:
1255 switch (strat) {
1256 case UVM_PGA_STRAT_NORMAL:
1257 /* Check freelists: descending priority (ascending id) order */
1258 for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
1259 pg = uvm_pagealloc_pgfl(ucpu, lcv,
1260 try1, try2, &color);
1261 if (pg != NULL)
1262 goto gotit;
1263 }
1264
1265 /* No pages free! */
1266 goto fail;
1267
1268 case UVM_PGA_STRAT_ONLY:
1269 case UVM_PGA_STRAT_FALLBACK:
1270 /* Attempt to allocate from the specified free list. */
1271 KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
1272 pg = uvm_pagealloc_pgfl(ucpu, free_list,
1273 try1, try2, &color);
1274 if (pg != NULL)
1275 goto gotit;
1276
1277 /* Fall back, if possible. */
1278 if (strat == UVM_PGA_STRAT_FALLBACK) {
1279 strat = UVM_PGA_STRAT_NORMAL;
1280 goto again;
1281 }
1282
1283 /* No pages free! */
1284 goto fail;
1285
1286 default:
1287 panic("uvm_pagealloc_strat: bad strat %d", strat);
1288 /* NOTREACHED */
1289 }
1290
1291 gotit:
1292 /*
1293 * We now know which color we actually allocated from; set
1294 * the next color accordingly.
1295 */
1296
1297 ucpu->page_free_nextcolor = (color + 1) & uvmexp.colormask;
1298
1299 /*
1300 * update allocation statistics and remember if we have to
1301 * zero the page
1302 */
1303
1304 if (flags & UVM_PGA_ZERO) {
1305 if (pg->flags & PG_ZERO) {
1306 uvmexp.pga_zerohit++;
1307 zeroit = 0;
1308 } else {
1309 uvmexp.pga_zeromiss++;
1310 zeroit = 1;
1311 }
1312 if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
1313 ucpu->page_idle_zero = vm_page_zero_enable;
1314 }
1315 }
1316 KASSERT(pg->pqflags == PQ_FREE);
1317
1318 pg->offset = off;
1319 pg->uobject = obj;
1320 pg->uanon = anon;
1321 pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
1322 if (anon) {
1323 anon->an_page = pg;
1324 pg->pqflags = PQ_ANON;
1325 atomic_inc_uint(&uvmexp.anonpages);
1326 } else {
1327 if (obj) {
1328 uvm_pageinsert(obj, pg);
1329 }
1330 pg->pqflags = 0;
1331 }
1332 mutex_spin_exit(&uvm_fpageqlock);
1333
1334 #if defined(UVM_PAGE_TRKOWN)
1335 pg->owner_tag = NULL;
1336 #endif
1337 UVM_PAGE_OWN(pg, "new alloc");
1338
1339 if (flags & UVM_PGA_ZERO) {
1340 /*
1341 * A zero'd page is not clean. If we got a page not already
1342 * zero'd, then we have to zero it ourselves.
1343 */
1344 pg->flags &= ~PG_CLEAN;
1345 if (zeroit)
1346 pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1347 }
1348
1349 return(pg);
1350
1351 fail:
1352 mutex_spin_exit(&uvm_fpageqlock);
1353 return (NULL);
1354 }
1355
1356 /*
1357 * uvm_pagereplace: replace a page with another
1358 *
1359 * => object must be locked
1360 */
1361
1362 void
1363 uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
1364 {
1365 struct uvm_object *uobj = oldpg->uobject;
1366
1367 KASSERT((oldpg->flags & PG_TABLED) != 0);
1368 KASSERT(uobj != NULL);
1369 KASSERT((newpg->flags & PG_TABLED) == 0);
1370 KASSERT(newpg->uobject == NULL);
1371 KASSERT(mutex_owned(uobj->vmobjlock));
1372
1373 newpg->uobject = uobj;
1374 newpg->offset = oldpg->offset;
1375
1376 uvm_pageremove_tree(uobj, oldpg);
1377 uvm_pageinsert_tree(uobj, newpg);
1378 uvm_pageinsert_list(uobj, newpg, oldpg);
1379 uvm_pageremove_list(uobj, oldpg);
1380 }
1381
1382 /*
1383 * uvm_pagerealloc: reallocate a page from one object to another
1384 *
1385 * => both objects must be locked
1386 */
1387
1388 void
1389 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
1390 {
1391 /*
1392 * remove it from the old object
1393 */
1394
1395 if (pg->uobject) {
1396 uvm_pageremove(pg->uobject, pg);
1397 }
1398
1399 /*
1400 * put it in the new object
1401 */
1402
1403 if (newobj) {
1404 pg->uobject = newobj;
1405 pg->offset = newoff;
1406 uvm_pageinsert(newobj, pg);
1407 }
1408 }
1409
1410 #ifdef DEBUG
1411 /*
1412 * check if page is zero-filled
1413 *
1414 * - called with free page queue lock held.
1415 */
1416 void
1417 uvm_pagezerocheck(struct vm_page *pg)
1418 {
1419 int *p, *ep;
1420
1421 KASSERT(uvm_zerocheckkva != 0);
1422 KASSERT(mutex_owned(&uvm_fpageqlock));
1423
1424 /*
1425 * XXX assuming pmap_kenter_pa and pmap_kremove never call
1426 * uvm page allocator.
1427 *
1428 * it might be better to have "CPU-local temporary map" pmap interface.
1429 */
1430 pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0);
1431 p = (int *)uvm_zerocheckkva;
1432 ep = (int *)((char *)p + PAGE_SIZE);
1433 pmap_update(pmap_kernel());
1434 while (p < ep) {
1435 if (*p != 0)
1436 panic("PG_ZERO page isn't zero-filled");
1437 p++;
1438 }
1439 pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
1440 /*
1441 * pmap_update() is not necessary here because no one except us
1442 * uses this VA.
1443 */
1444 }
1445 #endif /* DEBUG */
1446
1447 /*
1448 * uvm_pagefree: free page
1449 *
1450 * => erase page's identity (i.e. remove from object)
1451 * => put page on free list
1452 * => caller must lock owning object (either anon or uvm_object)
1453 * => caller must lock page queues
1454 * => assumes all valid mappings of pg are gone
1455 */
1456
1457 void
1458 uvm_pagefree(struct vm_page *pg)
1459 {
1460 struct pgflist *pgfl;
1461 struct uvm_cpu *ucpu;
1462 int index, color, queue;
1463 bool iszero;
1464
1465 #ifdef DEBUG
1466 if (pg->uobject == (void *)0xdeadbeef &&
1467 pg->uanon == (void *)0xdeadbeef) {
1468 panic("uvm_pagefree: freeing free page %p", pg);
1469 }
1470 #endif /* DEBUG */
1471
1472 KASSERT((pg->flags & PG_PAGEOUT) == 0);
1473 KASSERT(!(pg->pqflags & PQ_FREE));
1474 KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg));
1475 KASSERT(pg->uobject == NULL || mutex_owned(pg->uobject->vmobjlock));
1476 KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
1477 mutex_owned(pg->uanon->an_lock));
1478
1479 /*
1480 * if the page is loaned, resolve the loan instead of freeing.
1481 */
1482
1483 if (pg->loan_count) {
1484 KASSERT(pg->wire_count == 0);
1485
1486 /*
1487 * if the page is owned by an anon then we just want to
1488 * drop anon ownership. the kernel will free the page when
1489 * it is done with it. if the page is owned by an object,
1490 * remove it from the object and mark it dirty for the benefit
1491 * of possible anon owners.
1492 *
1493 * regardless of previous ownership, wakeup any waiters,
1494 * unbusy the page, and we're done.
1495 */
1496
1497 if (pg->uobject != NULL) {
1498 uvm_pageremove(pg->uobject, pg);
1499 pg->flags &= ~PG_CLEAN;
1500 } else if (pg->uanon != NULL) {
1501 if ((pg->pqflags & PQ_ANON) == 0) {
1502 pg->loan_count--;
1503 } else {
1504 pg->pqflags &= ~PQ_ANON;
1505 atomic_dec_uint(&uvmexp.anonpages);
1506 }
1507 pg->uanon->an_page = NULL;
1508 pg->uanon = NULL;
1509 }
1510 if (pg->flags & PG_WANTED) {
1511 wakeup(pg);
1512 }
1513 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1);
1514 #ifdef UVM_PAGE_TRKOWN
1515 pg->owner_tag = NULL;
1516 #endif
1517 if (pg->loan_count) {
1518 KASSERT(pg->uobject == NULL);
1519 if (pg->uanon == NULL) {
1520 uvm_pagedequeue(pg);
1521 }
1522 return;
1523 }
1524 }
1525
1526 /*
1527 * remove page from its object or anon.
1528 */
1529
1530 if (pg->uobject != NULL) {
1531 uvm_pageremove(pg->uobject, pg);
1532 } else if (pg->uanon != NULL) {
1533 pg->uanon->an_page = NULL;
1534 atomic_dec_uint(&uvmexp.anonpages);
1535 }
1536
1537 /*
1538 * now remove the page from the queues.
1539 */
1540
1541 uvm_pagedequeue(pg);
1542
1543 /*
1544 * if the page was wired, unwire it now.
1545 */
1546
1547 if (pg->wire_count) {
1548 pg->wire_count = 0;
1549 uvmexp.wired--;
1550 }
1551
1552 /*
1553 * and put on free queue
1554 */
1555
1556 iszero = (pg->flags & PG_ZERO);
1557 index = uvm_page_lookup_freelist(pg);
1558 color = VM_PGCOLOR_BUCKET(pg);
1559 queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN);
1560
1561 #ifdef DEBUG
1562 pg->uobject = (void *)0xdeadbeef;
1563 pg->uanon = (void *)0xdeadbeef;
1564 #endif
1565
1566 mutex_spin_enter(&uvm_fpageqlock);
1567 pg->pqflags = PQ_FREE;
1568
1569 #ifdef DEBUG
1570 if (iszero)
1571 uvm_pagezerocheck(pg);
1572 #endif /* DEBUG */
1573
1574
1575 /* global list */
1576 pgfl = &uvm.page_free[index].pgfl_buckets[color].pgfl_queues[queue];
1577 LIST_INSERT_HEAD(pgfl, pg, pageq.list);
1578 uvmexp.free++;
1579 if (iszero) {
1580 uvmexp.zeropages++;
1581 }
1582
1583 /* per-cpu list */
1584 ucpu = curcpu()->ci_data.cpu_uvm;
1585 pg->offset = (uintptr_t)ucpu;
1586 pgfl = &ucpu->page_free[index].pgfl_buckets[color].pgfl_queues[queue];
1587 LIST_INSERT_HEAD(pgfl, pg, listq.list);
1588 ucpu->pages[queue]++;
1589 if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
1590 ucpu->page_idle_zero = vm_page_zero_enable;
1591 }
1592
1593 mutex_spin_exit(&uvm_fpageqlock);
1594 }
1595
1596 /*
1597 * uvm_page_unbusy: unbusy an array of pages.
1598 *
1599 * => pages must either all belong to the same object, or all belong to anons.
1600 * => if pages are object-owned, object must be locked.
1601 * => if pages are anon-owned, anons must be locked.
1602 * => caller must lock page queues if pages may be released.
1603 * => caller must make sure that anon-owned pages are not PG_RELEASED.
1604 */
1605
1606 void
1607 uvm_page_unbusy(struct vm_page **pgs, int npgs)
1608 {
1609 struct vm_page *pg;
1610 int i;
1611 UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
1612
1613 for (i = 0; i < npgs; i++) {
1614 pg = pgs[i];
1615 if (pg == NULL || pg == PGO_DONTCARE) {
1616 continue;
1617 }
1618
1619 KASSERT(pg->uobject == NULL ||
1620 mutex_owned(pg->uobject->vmobjlock));
1621 KASSERT(pg->uobject != NULL ||
1622 (pg->uanon != NULL && mutex_owned(pg->uanon->an_lock)));
1623
1624 KASSERT(pg->flags & PG_BUSY);
1625 KASSERT((pg->flags & PG_PAGEOUT) == 0);
1626 if (pg->flags & PG_WANTED) {
1627 wakeup(pg);
1628 }
1629 if (pg->flags & PG_RELEASED) {
1630 UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
1631 KASSERT(pg->uobject != NULL ||
1632 (pg->uanon != NULL && pg->uanon->an_ref > 0));
1633 pg->flags &= ~PG_RELEASED;
1634 uvm_pagefree(pg);
1635 } else {
1636 UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
1637 KASSERT((pg->flags & PG_FAKE) == 0);
1638 pg->flags &= ~(PG_WANTED|PG_BUSY);
1639 UVM_PAGE_OWN(pg, NULL);
1640 }
1641 }
1642 }
1643
1644 #if defined(UVM_PAGE_TRKOWN)
1645 /*
1646 * uvm_page_own: set or release page ownership
1647 *
1648 * => this is a debugging function that keeps track of who sets PG_BUSY
1649 * and where they do it. it can be used to track down problems
1650 * such as a process setting "PG_BUSY" and never releasing it.
1651 * => page's object [if any] must be locked
1652 * => if "tag" is NULL then we are releasing page ownership
1653 */
1654 void
1655 uvm_page_own(struct vm_page *pg, const char *tag)
1656 {
1657 struct uvm_object *uobj;
1658 struct vm_anon *anon;
1659
1660 KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
1661
1662 uobj = pg->uobject;
1663 anon = pg->uanon;
1664 if (uobj != NULL) {
1665 KASSERT(mutex_owned(uobj->vmobjlock));
1666 } else if (anon != NULL) {
1667 KASSERT(mutex_owned(anon->an_lock));
1668 }
1669
1670 KASSERT((pg->flags & PG_WANTED) == 0);
1671
1672 /* gain ownership? */
1673 if (tag) {
1674 KASSERT((pg->flags & PG_BUSY) != 0);
1675 if (pg->owner_tag) {
1676 printf("uvm_page_own: page %p already owned "
1677 "by proc %d [%s]\n", pg,
1678 pg->owner, pg->owner_tag);
1679 panic("uvm_page_own");
1680 }
1681 pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
1682 pg->lowner = (curlwp) ? curlwp->l_lid : (lwpid_t) -1;
1683 pg->owner_tag = tag;
1684 return;
1685 }
1686
1687 /* drop ownership */
1688 KASSERT((pg->flags & PG_BUSY) == 0);
1689 if (pg->owner_tag == NULL) {
1690 printf("uvm_page_own: dropping ownership of an non-owned "
1691 "page (%p)\n", pg);
1692 panic("uvm_page_own");
1693 }
1694 if (!uvmpdpol_pageisqueued_p(pg)) {
1695 KASSERT((pg->uanon == NULL && pg->uobject == NULL) ||
1696 pg->wire_count > 0);
1697 } else {
1698 KASSERT(pg->wire_count == 0);
1699 }
1700 pg->owner_tag = NULL;
1701 }
1702 #endif
1703
1704 /*
1705 * uvm_pageidlezero: zero free pages while the system is idle.
1706 *
1707 * => try to complete one color bucket at a time, to reduce our impact
1708 * on the CPU cache.
1709 * => we loop until we either reach the target or there is a lwp ready
1710 * to run, or MD code detects a reason to break early.
1711 */
1712 void
1713 uvm_pageidlezero(void)
1714 {
1715 struct vm_page *pg;
1716 struct pgfreelist *pgfl, *gpgfl;
1717 struct uvm_cpu *ucpu;
1718 int free_list, firstbucket, nextbucket;
1719 bool lcont = false;
1720
1721 ucpu = curcpu()->ci_data.cpu_uvm;
1722 if (!ucpu->page_idle_zero ||
1723 ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
1724 ucpu->page_idle_zero = false;
1725 return;
1726 }
1727 if (!mutex_tryenter(&uvm_fpageqlock)) {
1728 /* Contention: let other CPUs use the lock. */
1729 return;
1730 }
1731 firstbucket = ucpu->page_free_nextcolor;
1732 nextbucket = firstbucket;
1733 do {
1734 for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
1735 if (sched_curcpu_runnable_p()) {
1736 goto quit;
1737 }
1738 pgfl = &ucpu->page_free[free_list];
1739 gpgfl = &uvm.page_free[free_list];
1740 while ((pg = LIST_FIRST(&pgfl->pgfl_buckets[
1741 nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
1742 if (lcont || sched_curcpu_runnable_p()) {
1743 goto quit;
1744 }
1745 LIST_REMOVE(pg, pageq.list); /* global list */
1746 LIST_REMOVE(pg, listq.list); /* per-cpu list */
1747 ucpu->pages[PGFL_UNKNOWN]--;
1748 uvmexp.free--;
1749 KASSERT(pg->pqflags == PQ_FREE);
1750 pg->pqflags = 0;
1751 mutex_spin_exit(&uvm_fpageqlock);
1752 #ifdef PMAP_PAGEIDLEZERO
1753 if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) {
1754
1755 /*
1756 * The machine-dependent code detected
1757 * some reason for us to abort zeroing
1758 * pages, probably because there is a
1759 * process now ready to run.
1760 */
1761
1762 mutex_spin_enter(&uvm_fpageqlock);
1763 pg->pqflags = PQ_FREE;
1764 LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
1765 nextbucket].pgfl_queues[
1766 PGFL_UNKNOWN], pg, pageq.list);
1767 LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
1768 nextbucket].pgfl_queues[
1769 PGFL_UNKNOWN], pg, listq.list);
1770 ucpu->pages[PGFL_UNKNOWN]++;
1771 uvmexp.free++;
1772 uvmexp.zeroaborts++;
1773 goto quit;
1774 }
1775 #else
1776 pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1777 #endif /* PMAP_PAGEIDLEZERO */
1778 pg->flags |= PG_ZERO;
1779
1780 if (!mutex_tryenter(&uvm_fpageqlock)) {
1781 lcont = true;
1782 mutex_spin_enter(&uvm_fpageqlock);
1783 } else {
1784 lcont = false;
1785 }
1786 pg->pqflags = PQ_FREE;
1787 LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
1788 nextbucket].pgfl_queues[PGFL_ZEROS],
1789 pg, pageq.list);
1790 LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
1791 nextbucket].pgfl_queues[PGFL_ZEROS],
1792 pg, listq.list);
1793 ucpu->pages[PGFL_ZEROS]++;
1794 uvmexp.free++;
1795 uvmexp.zeropages++;
1796 }
1797 }
1798 if (ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
1799 break;
1800 }
1801 nextbucket = (nextbucket + 1) & uvmexp.colormask;
1802 } while (nextbucket != firstbucket);
1803 ucpu->page_idle_zero = false;
1804 quit:
1805 mutex_spin_exit(&uvm_fpageqlock);
1806 }
1807
1808 /*
1809 * uvm_pagelookup: look up a page
1810 *
1811 * => caller should lock object to keep someone from pulling the page
1812 * out from under it
1813 */
1814
1815 struct vm_page *
1816 uvm_pagelookup(struct uvm_object *obj, voff_t off)
1817 {
1818 struct vm_page *pg;
1819
1820 KASSERT(mutex_owned(obj->vmobjlock));
1821
1822 pg = rb_tree_find_node(&obj->rb_tree, &off);
1823
1824 KASSERT(pg == NULL || obj->uo_npages != 0);
1825 KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
1826 (pg->flags & PG_BUSY) != 0);
1827 return pg;
1828 }
1829
1830 /*
1831 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
1832 *
1833 * => caller must lock page queues
1834 */
1835
1836 void
1837 uvm_pagewire(struct vm_page *pg)
1838 {
1839 KASSERT(mutex_owned(&uvm_pageqlock));
1840 #if defined(READAHEAD_STATS)
1841 if ((pg->pqflags & PQ_READAHEAD) != 0) {
1842 uvm_ra_hit.ev_count++;
1843 pg->pqflags &= ~PQ_READAHEAD;
1844 }
1845 #endif /* defined(READAHEAD_STATS) */
1846 if (pg->wire_count == 0) {
1847 uvm_pagedequeue(pg);
1848 uvmexp.wired++;
1849 }
1850 pg->wire_count++;
1851 }
1852
1853 /*
1854 * uvm_pageunwire: unwire the page.
1855 *
1856 * => activate if wire count goes to zero.
1857 * => caller must lock page queues
1858 */
1859
1860 void
1861 uvm_pageunwire(struct vm_page *pg)
1862 {
1863 KASSERT(mutex_owned(&uvm_pageqlock));
1864 pg->wire_count--;
1865 if (pg->wire_count == 0) {
1866 uvm_pageactivate(pg);
1867 uvmexp.wired--;
1868 }
1869 }
1870
1871 /*
1872 * uvm_pagedeactivate: deactivate page
1873 *
1874 * => caller must lock page queues
1875 * => caller must check to make sure page is not wired
1876 * => object that page belongs to must be locked (so we can adjust pg->flags)
1877 * => caller must clear the reference on the page before calling
1878 */
1879
1880 void
1881 uvm_pagedeactivate(struct vm_page *pg)
1882 {
1883
1884 KASSERT(mutex_owned(&uvm_pageqlock));
1885 KASSERT(uvm_page_locked_p(pg));
1886 KASSERT(pg->wire_count != 0 || uvmpdpol_pageisqueued_p(pg));
1887 uvmpdpol_pagedeactivate(pg);
1888 }
1889
1890 /*
1891 * uvm_pageactivate: activate page
1892 *
1893 * => caller must lock page queues
1894 */
1895
1896 void
1897 uvm_pageactivate(struct vm_page *pg)
1898 {
1899
1900 KASSERT(mutex_owned(&uvm_pageqlock));
1901 KASSERT(uvm_page_locked_p(pg));
1902 #if defined(READAHEAD_STATS)
1903 if ((pg->pqflags & PQ_READAHEAD) != 0) {
1904 uvm_ra_hit.ev_count++;
1905 pg->pqflags &= ~PQ_READAHEAD;
1906 }
1907 #endif /* defined(READAHEAD_STATS) */
1908 if (pg->wire_count != 0) {
1909 return;
1910 }
1911 uvmpdpol_pageactivate(pg);
1912 }
1913
1914 /*
1915 * uvm_pagedequeue: remove a page from any paging queue
1916 */
1917
1918 void
1919 uvm_pagedequeue(struct vm_page *pg)
1920 {
1921
1922 if (uvmpdpol_pageisqueued_p(pg)) {
1923 KASSERT(mutex_owned(&uvm_pageqlock));
1924 }
1925
1926 uvmpdpol_pagedequeue(pg);
1927 }
1928
1929 /*
1930 * uvm_pageenqueue: add a page to a paging queue without activating.
1931 * used where a page is not really demanded (yet). eg. read-ahead
1932 */
1933
1934 void
1935 uvm_pageenqueue(struct vm_page *pg)
1936 {
1937
1938 KASSERT(mutex_owned(&uvm_pageqlock));
1939 if (pg->wire_count != 0) {
1940 return;
1941 }
1942 uvmpdpol_pageenqueue(pg);
1943 }
1944
1945 /*
1946 * uvm_pagezero: zero fill a page
1947 *
1948 * => if page is part of an object then the object should be locked
1949 * to protect pg->flags.
1950 */
1951
1952 void
1953 uvm_pagezero(struct vm_page *pg)
1954 {
1955 pg->flags &= ~PG_CLEAN;
1956 pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1957 }
1958
1959 /*
1960 * uvm_pagecopy: copy a page
1961 *
1962 * => if page is part of an object then the object should be locked
1963 * to protect pg->flags.
1964 */
1965
1966 void
1967 uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
1968 {
1969
1970 dst->flags &= ~PG_CLEAN;
1971 pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
1972 }
1973
1974 /*
1975 * uvm_pageismanaged: test whether a page (specified by PA) is managed.
1976 */
1977
1978 bool
1979 uvm_pageismanaged(paddr_t pa)
1980 {
1981
1982 return (vm_physseg_find(atop(pa), NULL) != -1);
1983 }
1984
1985 /*
1986 * uvm_page_lookup_freelist: look up the free list for the specified page
1987 */
1988
1989 int
1990 uvm_page_lookup_freelist(struct vm_page *pg)
1991 {
1992 int lcv;
1993
1994 lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
1995 KASSERT(lcv != -1);
1996 return (VM_PHYSMEM_PTR(lcv)->free_list);
1997 }
1998
1999 /*
2000 * uvm_page_locked_p: return true if object associated with page is
2001 * locked. this is a weak check for runtime assertions only.
2002 */
2003
2004 bool
2005 uvm_page_locked_p(struct vm_page *pg)
2006 {
2007
2008 if (pg->uobject != NULL) {
2009 return mutex_owned(pg->uobject->vmobjlock);
2010 }
2011 if (pg->uanon != NULL) {
2012 return mutex_owned(pg->uanon->an_lock);
2013 }
2014 return true;
2015 }
2016
2017 #if defined(DDB) || defined(DEBUGPRINT)
2018
2019 /*
2020 * uvm_page_printit: actually print the page
2021 */
2022
2023 static const char page_flagbits[] = UVM_PGFLAGBITS;
2024 static const char page_pqflagbits[] = UVM_PQFLAGBITS;
2025
2026 void
2027 uvm_page_printit(struct vm_page *pg, bool full,
2028 void (*pr)(const char *, ...))
2029 {
2030 struct vm_page *tpg;
2031 struct uvm_object *uobj;
2032 struct pgflist *pgl;
2033 char pgbuf[128];
2034 char pqbuf[128];
2035
2036 (*pr)("PAGE %p:\n", pg);
2037 snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
2038 snprintb(pqbuf, sizeof(pqbuf), page_pqflagbits, pg->pqflags);
2039 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
2040 pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
2041 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
2042 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
2043 #if defined(UVM_PAGE_TRKOWN)
2044 if (pg->flags & PG_BUSY)
2045 (*pr)(" owning process = %d, tag=%s\n",
2046 pg->owner, pg->owner_tag);
2047 else
2048 (*pr)(" page not busy, no owner\n");
2049 #else
2050 (*pr)(" [page ownership tracking disabled]\n");
2051 #endif
2052
2053 if (!full)
2054 return;
2055
2056 /* cross-verify object/anon */
2057 if ((pg->pqflags & PQ_FREE) == 0) {
2058 if (pg->pqflags & PQ_ANON) {
2059 if (pg->uanon == NULL || pg->uanon->an_page != pg)
2060 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
2061 (pg->uanon) ? pg->uanon->an_page : NULL);
2062 else
2063 (*pr)(" anon backpointer is OK\n");
2064 } else {
2065 uobj = pg->uobject;
2066 if (uobj) {
2067 (*pr)(" checking object list\n");
2068 TAILQ_FOREACH(tpg, &uobj->memq, listq.queue) {
2069 if (tpg == pg) {
2070 break;
2071 }
2072 }
2073 if (tpg)
2074 (*pr)(" page found on object list\n");
2075 else
2076 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
2077 }
2078 }
2079 }
2080
2081 /* cross-verify page queue */
2082 if (pg->pqflags & PQ_FREE) {
2083 int fl = uvm_page_lookup_freelist(pg);
2084 int color = VM_PGCOLOR_BUCKET(pg);
2085 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
2086 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
2087 } else {
2088 pgl = NULL;
2089 }
2090
2091 if (pgl) {
2092 (*pr)(" checking pageq list\n");
2093 LIST_FOREACH(tpg, pgl, pageq.list) {
2094 if (tpg == pg) {
2095 break;
2096 }
2097 }
2098 if (tpg)
2099 (*pr)(" page found on pageq list\n");
2100 else
2101 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
2102 }
2103 }
2104
2105 /*
2106 * uvm_page_printall - print a summary of all managed pages
2107 */
2108
2109 void
2110 uvm_page_printall(void (*pr)(const char *, ...))
2111 {
2112 unsigned i;
2113 struct vm_page *pg;
2114
2115 (*pr)("%18s %4s %4s %18s %18s"
2116 #ifdef UVM_PAGE_TRKOWN
2117 " OWNER"
2118 #endif
2119 "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
2120 for (i = 0; i < vm_nphysmem; i++) {
2121 for (pg = VM_PHYSMEM_PTR(i)->pgs; pg < VM_PHYSMEM_PTR(i)->lastpg; pg++) {
2122 (*pr)("%18p %04x %04x %18p %18p",
2123 pg, pg->flags, pg->pqflags, pg->uobject,
2124 pg->uanon);
2125 #ifdef UVM_PAGE_TRKOWN
2126 if (pg->flags & PG_BUSY)
2127 (*pr)(" %d [%s]", pg->owner, pg->owner_tag);
2128 #endif
2129 (*pr)("\n");
2130 }
2131 }
2132 }
2133
2134 #endif /* DDB || DEBUGPRINT */
2135