1 /*	$NetBSD: uvm_page.c,v 1.178 2011/10/06 12:26:03 uebayasi Exp $	*/
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
37 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
38 *
39 *
40 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41 * All rights reserved.
42 *
43 * Permission to use, copy, modify and distribute this software and
44 * its documentation is hereby granted, provided that both the copyright
45 * notice and this permission notice appear in all copies of the
46 * software, derivative works or modified versions, and any portions
47 * thereof, and that both notices appear in supporting documentation.
48 *
49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52 *
53 * Carnegie Mellon requests users of this software to return to
54 *
55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
56 * School of Computer Science
57 * Carnegie Mellon University
58 * Pittsburgh PA 15213-3890
59 *
60 * any improvements or extensions that they make and grant Carnegie the
61 * rights to redistribute these changes.
62 */
63
64 /*
65 * uvm_page.c: page ops.
66 */
67
68 #include <sys/cdefs.h>
69 __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178 2011/10/06 12:26:03 uebayasi Exp $");
70
71 #include "opt_ddb.h"
72 #include "opt_uvmhist.h"
73 #include "opt_readahead.h"
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/malloc.h>
78 #include <sys/sched.h>
79 #include <sys/kernel.h>
80 #include <sys/vnode.h>
81 #include <sys/proc.h>
82 #include <sys/atomic.h>
83 #include <sys/cpu.h>
84
85 #include <uvm/uvm.h>
86 #include <uvm/uvm_ddb.h>
87 #include <uvm/uvm_pdpolicy.h>
88
89 /*
90 * global vars... XXXCDC: move to uvm. structure.
91 */
92
93 /*
94 * physical memory config is stored in vm_physmem.
95 */
96
97 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
98 int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
99 #define vm_nphysmem vm_nphysseg
100
101 /*
102 * Some supported CPUs in a given architecture don't support all
103 * of the things necessary to do idle page zero'ing efficiently.
104 * We therefore provide a way to enable it from machdep code here.
105 */
106 bool vm_page_zero_enable = false;
107
108 /*
109 * number of pages per-CPU to reserve for the kernel.
110 */
111 int vm_page_reserve_kernel = 5;
112
113 /*
114 * physical memory size
115 */
116 int physmem;
117
118 /*
119 * local variables
120 */
121
122 /*
123 * these variables record the values returned by vm_page_bootstrap,
124 * for debugging purposes. The implementation of uvm_pageboot_alloc
125 * and pmap_startup here also uses them internally.
126 */
127
128 static vaddr_t virtual_space_start;
129 static vaddr_t virtual_space_end;
130
131 /*
132 * we allocate an initial number of page colors in uvm_page_init(),
133 * and remember them. We may re-color pages as cache sizes are
134 * discovered during the autoconfiguration phase. But we can never
135 * free the initial set of buckets, since they are allocated using
136 * uvm_pageboot_alloc().
137 */
138
139 static bool have_recolored_pages /* = false */;
140
141 MALLOC_DEFINE(M_VMPAGE, "VM page", "VM page");
142
143 #ifdef DEBUG
144 vaddr_t uvm_zerocheckkva;
145 #endif /* DEBUG */
146
147 /*
148 * local prototypes
149 */
150
151 static void uvm_pageinsert(struct uvm_object *, struct vm_page *);
152 static void uvm_pageremove(struct uvm_object *, struct vm_page *);
153
154 /*
155 * per-object tree of pages
156 */
157
158 static signed int
159 uvm_page_compare_nodes(void *ctx, const void *n1, const void *n2)
160 {
161 const struct vm_page *pg1 = n1;
162 const struct vm_page *pg2 = n2;
163 const voff_t a = pg1->offset;
164 const voff_t b = pg2->offset;
165
166 if (a < b)
167 return -1;
168 if (a > b)
169 return 1;
170 return 0;
171 }
172
173 static signed int
174 uvm_page_compare_key(void *ctx, const void *n, const void *key)
175 {
176 const struct vm_page *pg = n;
177 const voff_t a = pg->offset;
178 const voff_t b = *(const voff_t *)key;
179
180 if (a < b)
181 return -1;
182 if (a > b)
183 return 1;
184 return 0;
185 }
186
187 const rb_tree_ops_t uvm_page_tree_ops = {
188 .rbto_compare_nodes = uvm_page_compare_nodes,
189 .rbto_compare_key = uvm_page_compare_key,
190 .rbto_node_offset = offsetof(struct vm_page, rb_node),
191 .rbto_context = NULL
192 };
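/*
 * Illustrative sketch (not compiled here): these ops are expected to be
 * attached when a uvm_object's per-object page tree is set up, roughly
 * as below.  The exact init site (e.g. uvm_obj_init()) is an assumption
 * for illustration; it is not defined in this file.
 *
 *	rb_tree_init(&uobj->rb_tree, &uvm_page_tree_ops);
 */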
193
194 /*
195 * inline functions
196 */
197
198 /*
199 * uvm_pageinsert: insert a page in the object.
200 *
201 * => caller must lock object
202 * => caller must lock page queues
203 * => caller should have already set pg's object and offset pointers
204 * and bumped the version counter
205 */
206
207 static inline void
208 uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg,
209 struct vm_page *where)
210 {
211
212 KASSERT(uobj == pg->uobject);
213 KASSERT(mutex_owned(uobj->vmobjlock));
214 KASSERT((pg->flags & PG_TABLED) == 0);
215 KASSERT(where == NULL || (where->flags & PG_TABLED));
216 KASSERT(where == NULL || (where->uobject == uobj));
217
218 if (UVM_OBJ_IS_VNODE(uobj)) {
219 if (uobj->uo_npages == 0) {
220 struct vnode *vp = (struct vnode *)uobj;
221
222 vholdl(vp);
223 }
224 if (UVM_OBJ_IS_VTEXT(uobj)) {
225 atomic_inc_uint(&uvmexp.execpages);
226 } else {
227 atomic_inc_uint(&uvmexp.filepages);
228 }
229 } else if (UVM_OBJ_IS_AOBJ(uobj)) {
230 atomic_inc_uint(&uvmexp.anonpages);
231 }
232
233 if (where)
234 TAILQ_INSERT_AFTER(&uobj->memq, where, pg, listq.queue);
235 else
236 TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
237 pg->flags |= PG_TABLED;
238 uobj->uo_npages++;
239 }
240
241
242 static inline void
243 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
244 {
245 struct vm_page *ret;
246
247 KASSERT(uobj == pg->uobject);
248 ret = rb_tree_insert_node(&uobj->rb_tree, pg);
249 KASSERT(ret == pg);
250 }
251
252 static inline void
253 uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg)
254 {
255
256 KDASSERT(uobj != NULL);
257 uvm_pageinsert_tree(uobj, pg);
258 uvm_pageinsert_list(uobj, pg, NULL);
259 }
260
261 /*
262 * uvm_pageremove: remove page from object.
263 *
264 * => caller must lock object
265 * => caller must lock page queues
266 */
267
268 static inline void
269 uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg)
270 {
271
272 KASSERT(uobj == pg->uobject);
273 KASSERT(mutex_owned(uobj->vmobjlock));
274 KASSERT(pg->flags & PG_TABLED);
275
276 if (UVM_OBJ_IS_VNODE(uobj)) {
277 if (uobj->uo_npages == 1) {
278 struct vnode *vp = (struct vnode *)uobj;
279
280 holdrelel(vp);
281 }
282 if (UVM_OBJ_IS_VTEXT(uobj)) {
283 atomic_dec_uint(&uvmexp.execpages);
284 } else {
285 atomic_dec_uint(&uvmexp.filepages);
286 }
287 } else if (UVM_OBJ_IS_AOBJ(uobj)) {
288 atomic_dec_uint(&uvmexp.anonpages);
289 }
290
291 /* object should be locked */
292 uobj->uo_npages--;
293 TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
294 pg->flags &= ~PG_TABLED;
295 pg->uobject = NULL;
296 }
297
298 static inline void
299 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
300 {
301
302 KASSERT(uobj == pg->uobject);
303 rb_tree_remove_node(&uobj->rb_tree, pg);
304 }
305
306 static inline void
307 uvm_pageremove(struct uvm_object *uobj, struct vm_page *pg)
308 {
309
310 KDASSERT(uobj != NULL);
311 uvm_pageremove_tree(uobj, pg);
312 uvm_pageremove_list(uobj, pg);
313 }
314
315 static void
316 uvm_page_init_buckets(struct pgfreelist *pgfl)
317 {
318 int color, i;
319
320 for (color = 0; color < uvmexp.ncolors; color++) {
321 for (i = 0; i < PGFL_NQUEUES; i++) {
322 LIST_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]);
323 }
324 }
325 }
326
327 /*
328 * uvm_page_init: init the page system. called from uvm_init().
329 *
330 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
331 */
332
333 void
334 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
335 {
336 static struct uvm_cpu boot_cpu;
337 psize_t freepages, pagecount, bucketcount, n;
338 struct pgflbucket *bucketarray, *cpuarray;
339 struct vm_physseg *seg;
340 struct vm_page *pagearray;
341 int lcv;
342 u_int i;
343 paddr_t paddr;
344
345 KASSERT(ncpu <= 1);
346 CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));
347
348 /*
349 * init the page queues and page queue locks, except the free
350 * list; we allocate that later (with the initial vm_page
351 * structures).
352 */
353
354 uvm.cpus[0] = &boot_cpu;
355 curcpu()->ci_data.cpu_uvm = &boot_cpu;
356 uvm_reclaim_init();
357 uvmpdpol_init();
358 mutex_init(&uvm_pageqlock, MUTEX_DRIVER, IPL_NONE);
359 mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM);
360
361 /*
362 * allocate vm_page structures.
363 */
364
365 /*
366 * sanity check:
367 * before calling this function the MD code is expected to register
368 * some free RAM with the uvm_page_physload() function. our job
369 * now is to allocate vm_page structures for this memory.
370 */
371
372 if (vm_nphysmem == 0)
373 panic("uvm_page_bootstrap: no memory pre-allocated");
374
375 /*
376 * first calculate the number of free pages...
377 *
378 * note that we use start/end rather than avail_start/avail_end.
379 * this allows us to allocate extra vm_page structures in case we
380 * want to return some memory to the pool after booting.
381 */
382
383 freepages = 0;
384 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
385 seg = VM_PHYSMEM_PTR(lcv);
386 freepages += (seg->end - seg->start);
387 }
388
389 /*
390 * Let MD code initialize the number of colors, or default
391 * to 1 color if MD code doesn't care.
392 */
393 if (uvmexp.ncolors == 0)
394 uvmexp.ncolors = 1;
395 uvmexp.colormask = uvmexp.ncolors - 1;
396 KASSERT((uvmexp.colormask & uvmexp.ncolors) == 0);
397
398 /*
399 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
400 * use. for each page of memory we use we need a vm_page structure.
401 * thus, the total number of pages we can use is the total size of
402 * the memory divided by the PAGE_SIZE plus the size of the vm_page
403 * structure. we add one to freepages as a fudge factor to avoid
404 * truncation errors (since we can only allocate in terms of whole
405 * pages).
406 */
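/*
 * Worked example (illustrative only; the 120 byte sizeof(struct vm_page)
 * is an assumed figure, not taken from this file): with PAGE_SIZE = 4096
 * and freepages = 262144 (1GB of managed RAM),
 *
 *	pagecount = ((262144 + 1) << 12) / (4096 + 120)
 *	          = 1073745920 / 4216
 *	          ~= 254683
 *
 * so a little under 3% of the raw pages end up holding the vm_page
 * structures that describe the rest.
 */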
407
408 bucketcount = uvmexp.ncolors * VM_NFREELIST;
409 pagecount = ((freepages + 1) << PAGE_SHIFT) /
410 (PAGE_SIZE + sizeof(struct vm_page));
411
412 bucketarray = (void *)uvm_pageboot_alloc((bucketcount *
413 sizeof(struct pgflbucket) * 2) + (pagecount *
414 sizeof(struct vm_page)));
415 cpuarray = bucketarray + bucketcount;
416 pagearray = (struct vm_page *)(bucketarray + bucketcount * 2);
417
418 for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
419 uvm.page_free[lcv].pgfl_buckets =
420 (bucketarray + (lcv * uvmexp.ncolors));
421 uvm_page_init_buckets(&uvm.page_free[lcv]);
422 uvm.cpus[0]->page_free[lcv].pgfl_buckets =
423 (cpuarray + (lcv * uvmexp.ncolors));
424 uvm_page_init_buckets(&uvm.cpus[0]->page_free[lcv]);
425 }
426 memset(pagearray, 0, pagecount * sizeof(struct vm_page));
427
428 /*
429 * init the vm_page structures and put them in the correct place.
430 */
431
432 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
433 seg = VM_PHYSMEM_PTR(lcv);
434 n = seg->end - seg->start;
435
436 /* set up page array pointers */
437 seg->pgs = pagearray;
438 pagearray += n;
439 pagecount -= n;
440 seg->lastpg = seg->pgs + n;
441
442 /* init and free vm_pages (we've already zeroed them) */
443 paddr = ctob(seg->start);
444 for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
445 seg->pgs[i].phys_addr = paddr;
446 #ifdef __HAVE_VM_PAGE_MD
447 VM_MDPAGE_INIT(&seg->pgs[i]);
448 #endif
449 if (atop(paddr) >= seg->avail_start &&
450 atop(paddr) < seg->avail_end) {
451 uvmexp.npages++;
452 /* add page to free pool */
453 uvm_pagefree(&seg->pgs[i]);
454 }
455 }
456 }
457
458 /*
459 * pass up the values of virtual_space_start and
460 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
461 * layers of the VM.
462 */
463
464 *kvm_startp = round_page(virtual_space_start);
465 *kvm_endp = trunc_page(virtual_space_end);
466 #ifdef DEBUG
467 /*
468 * steal kva for uvm_pagezerocheck().
469 */
470 uvm_zerocheckkva = *kvm_startp;
471 *kvm_startp += PAGE_SIZE;
472 #endif /* DEBUG */
473
474 /*
475 * init various thresholds.
476 */
477
478 uvmexp.reserve_pagedaemon = 1;
479 uvmexp.reserve_kernel = vm_page_reserve_kernel;
480
481 /*
482 * determine if we should zero pages in the idle loop.
483 */
484
485 uvm.cpus[0]->page_idle_zero = vm_page_zero_enable;
486
487 /*
488 * done!
489 */
490
491 uvm.page_init_done = true;
492 }
493
494 /*
495 * uvm_setpagesize: set the page size
496 *
497 * => sets page_shift and page_mask from uvmexp.pagesize.
498 */
499
500 void
501 uvm_setpagesize(void)
502 {
503
504 /*
505 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
506 * to be a constant (indicated by being a non-zero value).
507 */
508 if (uvmexp.pagesize == 0) {
509 if (PAGE_SIZE == 0)
510 panic("uvm_setpagesize: uvmexp.pagesize not set");
511 uvmexp.pagesize = PAGE_SIZE;
512 }
513 uvmexp.pagemask = uvmexp.pagesize - 1;
514 if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
515 panic("uvm_setpagesize: page size %u (%#x) not a power of two",
516 uvmexp.pagesize, uvmexp.pagesize);
517 for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
518 if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
519 break;
520 }
521
522 /*
523 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
524 */
525
526 vaddr_t
527 uvm_pageboot_alloc(vsize_t size)
528 {
529 static bool initialized = false;
530 vaddr_t addr;
531 #if !defined(PMAP_STEAL_MEMORY)
532 vaddr_t vaddr;
533 paddr_t paddr;
534 #endif
535
536 /*
537 * on first call to this function, initialize ourselves.
538 */
539 if (initialized == false) {
540 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
541
542 /* round it the way we like it */
543 virtual_space_start = round_page(virtual_space_start);
544 virtual_space_end = trunc_page(virtual_space_end);
545
546 initialized = true;
547 }
548
549 /* round to page size */
550 size = round_page(size);
551
552 #if defined(PMAP_STEAL_MEMORY)
553
554 /*
555 * defer bootstrap allocation to MD code (it may want to allocate
556 * from a direct-mapped segment). pmap_steal_memory should adjust
557 * virtual_space_start/virtual_space_end if necessary.
558 */
559
560 addr = pmap_steal_memory(size, &virtual_space_start,
561 &virtual_space_end);
562
563 return(addr);
564
565 #else /* !PMAP_STEAL_MEMORY */
566
567 /*
568 * allocate virtual memory for this request
569 */
570 if (virtual_space_start == virtual_space_end ||
571 (virtual_space_end - virtual_space_start) < size)
572 panic("uvm_pageboot_alloc: out of virtual space");
573
574 addr = virtual_space_start;
575
576 #ifdef PMAP_GROWKERNEL
577 /*
578 * If the kernel pmap can't map the requested space,
579 * then allocate more resources for it.
580 */
581 if (uvm_maxkaddr < (addr + size)) {
582 uvm_maxkaddr = pmap_growkernel(addr + size);
583 if (uvm_maxkaddr < (addr + size))
584 panic("uvm_pageboot_alloc: pmap_growkernel() failed");
585 }
586 #endif
587
588 virtual_space_start += size;
589
590 /*
591 * allocate and mapin physical pages to back new virtual pages
592 */
593
594 for (vaddr = round_page(addr) ; vaddr < addr + size ;
595 vaddr += PAGE_SIZE) {
596
597 if (!uvm_page_physget(&paddr))
598 panic("uvm_pageboot_alloc: out of memory");
599
600 /*
601 * Note this memory is no longer managed, so using
602 * pmap_kenter is safe.
603 */
604 pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
605 }
606 pmap_update(pmap_kernel());
607 return(addr);
608 #endif /* PMAP_STEAL_MEMORY */
609 }
610
611 #if !defined(PMAP_STEAL_MEMORY)
612 /*
613 * uvm_page_physget: "steal" one page from the vm_physmem structure.
614 *
615 * => attempt to allocate it off the end of a segment in which the "avail"
616 * values match the start/end values. if we can't do that, then we
617 * will advance both values (making them equal, and removing some
618 * vm_page structures from the non-avail area).
619 * => return false if out of memory.
620 */
621
622 /* subroutine: try to allocate from memory chunks on the specified freelist */
623 static bool uvm_page_physget_freelist(paddr_t *, int);
624
625 static bool
626 uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
627 {
628 struct vm_physseg *seg;
629 int lcv, x;
630
631 /* pass 1: try allocating from a matching end */
632 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
633 for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
634 #else
635 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
636 #endif
637 {
638 seg = VM_PHYSMEM_PTR(lcv);
639
640 if (uvm.page_init_done == true)
641 panic("uvm_page_physget: called _after_ bootstrap");
642
643 if (seg->free_list != freelist)
644 continue;
645
646 /* try from front */
647 if (seg->avail_start == seg->start &&
648 seg->avail_start < seg->avail_end) {
649 *paddrp = ctob(seg->avail_start);
650 seg->avail_start++;
651 seg->start++;
652 /* nothing left? nuke it */
653 if (seg->avail_start == seg->end) {
654 if (vm_nphysmem == 1)
655 panic("uvm_page_physget: out of memory!");
656 vm_nphysmem--;
657 for (x = lcv ; x < vm_nphysmem ; x++)
658 /* structure copy */
659 VM_PHYSMEM_PTR_SWAP(x, x + 1);
660 }
661 return (true);
662 }
663
664 /* try from rear */
665 if (seg->avail_end == seg->end &&
666 seg->avail_start < seg->avail_end) {
667 *paddrp = ctob(seg->avail_end - 1);
668 seg->avail_end--;
669 seg->end--;
670 /* nothing left? nuke it */
671 if (seg->avail_end == seg->start) {
672 if (vm_nphysmem == 1)
673 panic("uvm_page_physget: out of memory!");
674 vm_nphysmem--;
675 for (x = lcv ; x < vm_nphysmem ; x++)
676 /* structure copy */
677 VM_PHYSMEM_PTR_SWAP(x, x + 1);
678 }
679 return (true);
680 }
681 }
682
683 /* pass2: forget about matching ends, just allocate something */
684 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
685 for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
686 #else
687 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
688 #endif
689 {
690 seg = VM_PHYSMEM_PTR(lcv);
691
692 /* any room in this bank? */
693 if (seg->avail_start >= seg->avail_end)
694 continue; /* nope */
695
696 *paddrp = ctob(seg->avail_start);
697 seg->avail_start++;
698 /* truncate! */
699 seg->start = seg->avail_start;
700
701 /* nothing left? nuke it */
702 if (seg->avail_start == seg->end) {
703 if (vm_nphysmem == 1)
704 panic("uvm_page_physget: out of memory!");
705 vm_nphysmem--;
706 for (x = lcv ; x < vm_nphysmem ; x++)
707 /* structure copy */
708 VM_PHYSMEM_PTR_SWAP(x, x + 1);
709 }
710 return (true);
711 }
712
713 return (false); /* whoops! */
714 }
715
716 bool
717 uvm_page_physget(paddr_t *paddrp)
718 {
719 int i;
720
721 /* try in the order of freelist preference */
722 for (i = 0; i < VM_NFREELIST; i++)
723 if (uvm_page_physget_freelist(paddrp, i) == true)
724 return (true);
725 return (false);
726 }
727 #endif /* !PMAP_STEAL_MEMORY */
728
729 /*
730 * uvm_page_physload: load physical memory into VM system
731 *
732 * => all args are PFs
733 * => all pages in start/end get vm_page structures
734 * => areas marked by avail_start/avail_end get added to the free page pool
735 * => we are limited to VM_PHYSSEG_MAX physical memory segments
736 */
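/*
 * Illustrative call from MD bootstrap code (the physical addresses are
 * made up for the example; note that every argument is a page frame
 * number, hence the atop() conversions):
 *
 *	uvm_page_physload(atop(0x00100000), atop(0x08000000),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 */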
737
738 void
739 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
740 paddr_t avail_end, int free_list)
741 {
742 int preload, lcv;
743 psize_t npages;
744 struct vm_page *pgs;
745 struct vm_physseg *ps;
746
747 if (uvmexp.pagesize == 0)
748 panic("uvm_page_physload: page size not set!");
749 if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
750 panic("uvm_page_physload: bad free list %d", free_list);
751 if (start >= end)
752 panic("uvm_page_physload: start >= end");
753
754 /*
755 * do we have room?
756 */
757
758 if (vm_nphysmem == VM_PHYSSEG_MAX) {
759 printf("uvm_page_physload: unable to load physical memory "
760 "segment\n");
761 printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
762 VM_PHYSSEG_MAX, (long long)start, (long long)end);
763 printf("\tincrease VM_PHYSSEG_MAX\n");
764 return;
765 }
766
767 /*
768 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
769 * called yet, so malloc is not available).
770 */
771
772 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
773 if (VM_PHYSMEM_PTR(lcv)->pgs)
774 break;
775 }
776 preload = (lcv == vm_nphysmem);
777
778 /*
779 * if VM is already running, attempt to malloc() vm_page structures
780 */
781
782 if (!preload) {
783 panic("uvm_page_physload: tried to add RAM after vm_mem_init");
784 } else {
785 pgs = NULL;
786 npages = 0;
787 }
788
789 /*
790 * now insert us in the proper place in vm_physmem[]
791 */
792
793 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
794 /* random: put it at the end (easy!) */
795 ps = VM_PHYSMEM_PTR(vm_nphysmem);
796 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
797 {
798 int x;
799 /* sort by address for binary search */
800 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
801 if (start < VM_PHYSMEM_PTR(lcv)->start)
802 break;
803 ps = VM_PHYSMEM_PTR(lcv);
804 /* move back other entries, if necessary ... */
805 for (x = vm_nphysmem ; x > lcv ; x--)
806 /* structure copy */
807 VM_PHYSMEM_PTR_SWAP(x, x - 1);
808 }
809 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
810 {
811 int x;
812 /* sort by largest segment first */
813 for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
814 if ((end - start) >
815 (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
816 break;
817 ps = VM_PHYSMEM_PTR(lcv);
818 /* move back other entries, if necessary ... */
819 for (x = vm_nphysmem ; x > lcv ; x--)
820 /* structure copy */
821 VM_PHYSMEM_PTR_SWAP(x, x - 1);
822 }
823 #else
824 panic("uvm_page_physload: unknown physseg strategy selected!");
825 #endif
826
827 ps->start = start;
828 ps->end = end;
829 ps->avail_start = avail_start;
830 ps->avail_end = avail_end;
831 if (preload) {
832 ps->pgs = NULL;
833 } else {
834 ps->pgs = pgs;
835 ps->lastpg = pgs + npages;
836 }
837 ps->free_list = free_list;
838 vm_nphysmem++;
839
840 if (!preload) {
841 uvmpdpol_reinit();
842 }
843 }
844
845 /*
846 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
847 */
848
849 #if VM_PHYSSEG_MAX == 1
850 static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *);
851 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
852 static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *);
853 #else
854 static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *);
855 #endif
856
857 /*
858 * vm_physseg_find: find vm_physseg structure that belongs to a PA
859 */
860 int
861 vm_physseg_find(paddr_t pframe, int *offp)
862 {
863
864 #if VM_PHYSSEG_MAX == 1
865 return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
866 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
867 return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
868 #else
869 return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
870 #endif
871 }
872
873 #if VM_PHYSSEG_MAX == 1
874 static inline int
875 vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
876 {
877
878 /* 'contig' case */
879 if (pframe >= segs[0].start && pframe < segs[0].end) {
880 if (offp)
881 *offp = pframe - segs[0].start;
882 return(0);
883 }
884 return(-1);
885 }
886
887 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
888
889 static inline int
890 vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
891 {
892 /* binary search for it */
893 u_int start, len, try;
894
895 /*
896 * if try is too large (thus target is less than try) we reduce
897 * the length to trunc(len/2) [i.e. everything smaller than "try"]
898 *
899 * if the try is too small (thus target is greater than try) then
900 * we set the new start to be (try + 1). this means we need to
901 * reduce the length to (round(len/2) - 1).
902 *
903 * note "adjust" below which takes advantage of the fact that
904 * (round(len/2) - 1) == trunc((len - 1) / 2)
905 * for any value of len we may have
906 */
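	/*
	 * quick sanity check of that identity (illustrative): for len = 5,
	 * the right half holds 5 - 2 - 1 = 2 entries and (5 - 1) / 2 = 2;
	 * for len = 6, it holds 6 - 3 - 1 = 2 entries and (6 - 1) / 2 = 2.
	 */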
907
908 for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
909 try = start + (len / 2); /* try in the middle */
910
911 /* start past our try? */
912 if (pframe >= segs[try].start) {
913 /* was try correct? */
914 if (pframe < segs[try].end) {
915 if (offp)
916 *offp = pframe - segs[try].start;
917 return(try); /* got it */
918 }
919 start = try + 1; /* next time, start here */
920 len--; /* "adjust" */
921 } else {
922 /*
923 * pframe before try, just reduce length of
924 * region, done in "for" loop
925 */
926 }
927 }
928 return(-1);
929 }
930
931 #else
932
933 static inline int
934 vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
935 {
936 /* linear search for it */
937 int lcv;
938
939 for (lcv = 0; lcv < nsegs; lcv++) {
940 if (pframe >= segs[lcv].start &&
941 pframe < segs[lcv].end) {
942 if (offp)
943 *offp = pframe - segs[lcv].start;
944 return(lcv); /* got it */
945 }
946 }
947 return(-1);
948 }
949 #endif
950
951 /*
952 * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
953 * back from an I/O mapping (ugh!). used in some MD code as well.
954 */
955 struct vm_page *
956 uvm_phys_to_vm_page(paddr_t pa)
957 {
958 paddr_t pf = atop(pa);
959 int off;
960 int psi;
961
962 psi = vm_physseg_find(pf, &off);
963 if (psi != -1)
964 return(&VM_PHYSMEM_PTR(psi)->pgs[off]);
965 return(NULL);
966 }
967
968 paddr_t
969 uvm_vm_page_to_phys(const struct vm_page *pg)
970 {
971
972 return pg->phys_addr;
973 }
974
975 /*
976 * uvm_page_recolor: Recolor the pages if the new bucket count is
977 * larger than the old one.
978 */
979
980 void
981 uvm_page_recolor(int newncolors)
982 {
983 struct pgflbucket *bucketarray, *cpuarray, *oldbucketarray;
984 struct pgfreelist gpgfl, pgfl;
985 struct vm_page *pg;
986 vsize_t bucketcount;
987 int lcv, color, i, ocolors;
988 struct uvm_cpu *ucpu;
989
990 KASSERT(((newncolors - 1) & newncolors) == 0);
991
992 if (newncolors <= uvmexp.ncolors)
993 return;
994
995 if (uvm.page_init_done == false) {
996 uvmexp.ncolors = newncolors;
997 return;
998 }
999
1000 bucketcount = newncolors * VM_NFREELIST;
1001 bucketarray = malloc(bucketcount * sizeof(struct pgflbucket) * 2,
1002 M_VMPAGE, M_NOWAIT);
1003 cpuarray = bucketarray + bucketcount;
1004 if (bucketarray == NULL) {
1005 printf("WARNING: unable to allocate %ld page color buckets\n",
1006 (long) bucketcount);
1007 return;
1008 }
1009
1010 mutex_spin_enter(&uvm_fpageqlock);
1011
1012 /* Make sure we should still do this. */
1013 if (newncolors <= uvmexp.ncolors) {
1014 mutex_spin_exit(&uvm_fpageqlock);
1015 free(bucketarray, M_VMPAGE);
1016 return;
1017 }
1018
1019 oldbucketarray = uvm.page_free[0].pgfl_buckets;
1020 ocolors = uvmexp.ncolors;
1021
1022 uvmexp.ncolors = newncolors;
1023 uvmexp.colormask = uvmexp.ncolors - 1;
1024
1025 ucpu = curcpu()->ci_data.cpu_uvm;
1026 for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
1027 gpgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
1028 pgfl.pgfl_buckets = (cpuarray + (lcv * uvmexp.ncolors));
1029 uvm_page_init_buckets(&gpgfl);
1030 uvm_page_init_buckets(&pgfl);
1031 for (color = 0; color < ocolors; color++) {
1032 for (i = 0; i < PGFL_NQUEUES; i++) {
1033 while ((pg = LIST_FIRST(&uvm.page_free[
1034 lcv].pgfl_buckets[color].pgfl_queues[i]))
1035 != NULL) {
1036 LIST_REMOVE(pg, pageq.list); /* global */
1037 LIST_REMOVE(pg, listq.list); /* cpu */
1038 LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[
1039 VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
1040 i], pg, pageq.list);
1041 LIST_INSERT_HEAD(&pgfl.pgfl_buckets[
1042 VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
1043 i], pg, listq.list);
1044 }
1045 }
1046 }
1047 uvm.page_free[lcv].pgfl_buckets = gpgfl.pgfl_buckets;
1048 ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
1049 }
1050
1051 if (!have_recolored_pages)
1052 oldbucketarray = NULL;
1053
1054 have_recolored_pages = true;
1055 mutex_spin_exit(&uvm_fpageqlock);
1056
1057 if (oldbucketarray)
1058 free(oldbucketarray, M_VMPAGE);
1059
1060 /*
1061 * this calls uvm_km_alloc() which may want to hold
1062 * uvm_fpageqlock.
1063 */
1064 uvm_pager_realloc_emerg();
1065 }
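/*
 * Illustrative use (an assumption about the usual caller, not defined
 * here; the cache geometry variables are hypothetical): MD cache
 * autoconfiguration might request one color per way-sized chunk of the
 * L2 cache,
 *
 *	uvm_page_recolor(l2_cache_size / (PAGE_SIZE * l2_assoc));
 */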
1066
1067 /*
1068 * uvm_cpu_attach: initialize per-CPU data structures.
1069 */
1070
1071 void
1072 uvm_cpu_attach(struct cpu_info *ci)
1073 {
1074 struct pgflbucket *bucketarray;
1075 struct pgfreelist pgfl;
1076 struct uvm_cpu *ucpu;
1077 vsize_t bucketcount;
1078 int lcv;
1079
1080 if (CPU_IS_PRIMARY(ci)) {
1081 /* Already done in uvm_page_init(). */
1082 return;
1083 }
1084
1085 /* Add more reserve pages for this CPU. */
1086 uvmexp.reserve_kernel += vm_page_reserve_kernel;
1087
1088 /* Configure this CPU's free lists. */
1089 bucketcount = uvmexp.ncolors * VM_NFREELIST;
1090 bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
1091 M_VMPAGE, M_WAITOK);
1092 ucpu = kmem_zalloc(sizeof(*ucpu), KM_SLEEP);
1093 uvm.cpus[cpu_index(ci)] = ucpu;
1094 ci->ci_data.cpu_uvm = ucpu;
1095 for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
1096 pgfl.pgfl_buckets = (bucketarray + (lcv * uvmexp.ncolors));
1097 uvm_page_init_buckets(&pgfl);
1098 ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
1099 }
1100 }
1101
1102 /*
1103 * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
1104 */
1105
1106 static struct vm_page *
1107 uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
1108 int *trycolorp)
1109 {
1110 struct pgflist *freeq;
1111 struct vm_page *pg;
1112 int color, trycolor = *trycolorp;
1113 struct pgfreelist *gpgfl, *pgfl;
1114
1115 KASSERT(mutex_owned(&uvm_fpageqlock));
1116
1117 color = trycolor;
1118 pgfl = &ucpu->page_free[flist];
1119 gpgfl = &uvm.page_free[flist];
1120 do {
1121 /* cpu, try1 */
1122 if ((pg = LIST_FIRST((freeq =
1123 &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
1124 VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
1125 uvmexp.cpuhit++;
1126 goto gotit;
1127 }
1128 /* global, try1 */
1129 if ((pg = LIST_FIRST((freeq =
1130 &gpgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
1131 VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
1132 uvmexp.cpumiss++;
1133 goto gotit;
1134 }
1135 /* cpu, try2 */
1136 if ((pg = LIST_FIRST((freeq =
1137 &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
1138 VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
1139 uvmexp.cpuhit++;
1140 goto gotit;
1141 }
1142 /* global, try2 */
1143 if ((pg = LIST_FIRST((freeq =
1144 &gpgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
1145 VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
1146 uvmexp.cpumiss++;
1147 goto gotit;
1148 }
1149 color = (color + 1) & uvmexp.colormask;
1150 } while (color != trycolor);
1151
1152 return (NULL);
1153
1154 gotit:
1155 LIST_REMOVE(pg, pageq.list); /* global list */
1156 LIST_REMOVE(pg, listq.list); /* per-cpu list */
1157 uvmexp.free--;
1158
1159 /* update zero'd page count */
1160 if (pg->flags & PG_ZERO)
1161 uvmexp.zeropages--;
1162
1163 if (color == trycolor)
1164 uvmexp.colorhit++;
1165 else {
1166 uvmexp.colormiss++;
1167 *trycolorp = color;
1168 }
1169
1170 return (pg);
1171 }
1172
1173 /*
1174 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
1175 *
1176 * => return null if no pages free
1177 * => wake up pagedaemon if number of free pages drops below low water mark
1178 * => if obj != NULL, obj must be locked (to put in obj's tree)
1179 * => if anon != NULL, anon must be locked (to put in anon)
1180 * => only one of obj or anon can be non-null
1181 * => caller must activate/deactivate page if it is not wired.
1182 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
1183 * => policy decision: it is more important to pull a page off of the
1184 * appropriate priority free list than it is to get a zero'd or
1185 * unknown contents page. This is because we live with the
1186 * consequences of a bad free list decision for the entire
1187 * lifetime of the page, e.g. if the page comes from memory that
1188 * is slower to access.
1189 */
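/*
 * Typical use (illustrative sketch): most callers go through the
 * uvm_pagealloc() wrapper rather than calling this function directly:
 *
 *	pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
 *	if (pg == NULL) {
 *		uvm_wait("pgalloc");	(the wait message is made up)
 *		goto retry;
 *	}
 */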
1190
1191 struct vm_page *
1192 uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
1193 int flags, int strat, int free_list)
1194 {
1195 int lcv, try1, try2, zeroit = 0, color;
1196 struct uvm_cpu *ucpu;
1197 struct vm_page *pg;
1198 lwp_t *l;
1199
1200 KASSERT(obj == NULL || anon == NULL);
1201 KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0);
1202 KASSERT(off == trunc_page(off));
1203 KASSERT(obj == NULL || mutex_owned(obj->vmobjlock));
1204 KASSERT(anon == NULL || anon->an_lock == NULL ||
1205 mutex_owned(anon->an_lock));
1206
1207 mutex_spin_enter(&uvm_fpageqlock);
1208
1209 /*
1210 * This implements a global round-robin page coloring
1211 * algorithm.
1212 */
1213
1214 ucpu = curcpu()->ci_data.cpu_uvm;
1215 if (flags & UVM_FLAG_COLORMATCH) {
1216 color = atop(off) & uvmexp.colormask;
1217 } else {
1218 color = ucpu->page_free_nextcolor;
1219 }
1220
1221 /*
1222 * check to see if we need to generate some free pages by waking
1223 * the pagedaemon.
1224 */
1225
1226 uvm_kick_pdaemon();
1227
1228 /*
1229 * fail if any of these conditions is true:
1230 * [1] there really are no free pages, or
1231 * [2] only kernel "reserved" pages remain and
1232 * reserved pages have not been requested.
1233 * [3] only pagedaemon "reserved" pages remain and
1234 * the requestor isn't the pagedaemon.
1235 * we make kernel reserve pages available if called by a
1236 * kernel thread or a realtime thread.
1237 */
1238 l = curlwp;
1239 if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
1240 flags |= UVM_PGA_USERESERVE;
1241 }
1242 if ((uvmexp.free <= uvmexp.reserve_kernel &&
1243 (flags & UVM_PGA_USERESERVE) == 0) ||
1244 (uvmexp.free <= uvmexp.reserve_pagedaemon &&
1245 curlwp != uvm.pagedaemon_lwp))
1246 goto fail;
1247
1248 #if PGFL_NQUEUES != 2
1249 #error uvm_pagealloc_strat needs to be updated
1250 #endif
1251
1252 /*
1253 * If we want a zero'd page, try the ZEROS queue first, otherwise
1254 * we try the UNKNOWN queue first.
1255 */
1256 if (flags & UVM_PGA_ZERO) {
1257 try1 = PGFL_ZEROS;
1258 try2 = PGFL_UNKNOWN;
1259 } else {
1260 try1 = PGFL_UNKNOWN;
1261 try2 = PGFL_ZEROS;
1262 }
1263
1264 again:
1265 switch (strat) {
1266 case UVM_PGA_STRAT_NORMAL:
1267 /* Check freelists: descending priority (ascending id) order */
1268 for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
1269 pg = uvm_pagealloc_pgfl(ucpu, lcv,
1270 try1, try2, &color);
1271 if (pg != NULL)
1272 goto gotit;
1273 }
1274
1275 /* No pages free! */
1276 goto fail;
1277
1278 case UVM_PGA_STRAT_ONLY:
1279 case UVM_PGA_STRAT_FALLBACK:
1280 /* Attempt to allocate from the specified free list. */
1281 KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
1282 pg = uvm_pagealloc_pgfl(ucpu, free_list,
1283 try1, try2, &color);
1284 if (pg != NULL)
1285 goto gotit;
1286
1287 /* Fall back, if possible. */
1288 if (strat == UVM_PGA_STRAT_FALLBACK) {
1289 strat = UVM_PGA_STRAT_NORMAL;
1290 goto again;
1291 }
1292
1293 /* No pages free! */
1294 goto fail;
1295
1296 default:
1297 panic("uvm_pagealloc_strat: bad strat %d", strat);
1298 /* NOTREACHED */
1299 }
1300
1301 gotit:
1302 /*
1303 * We now know which color we actually allocated from; set
1304 * the next color accordingly.
1305 */
1306
1307 ucpu->page_free_nextcolor = (color + 1) & uvmexp.colormask;
1308
1309 /*
1310 * update allocation statistics and remember if we have to
1311 * zero the page
1312 */
1313
1314 if (flags & UVM_PGA_ZERO) {
1315 if (pg->flags & PG_ZERO) {
1316 uvmexp.pga_zerohit++;
1317 zeroit = 0;
1318 } else {
1319 uvmexp.pga_zeromiss++;
1320 zeroit = 1;
1321 }
1322 if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
1323 ucpu->page_idle_zero = vm_page_zero_enable;
1324 }
1325 }
1326 KASSERT(pg->pqflags == PQ_FREE);
1327
1328 pg->offset = off;
1329 pg->uobject = obj;
1330 pg->uanon = anon;
1331 pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
1332 if (anon) {
1333 anon->an_page = pg;
1334 pg->pqflags = PQ_ANON;
1335 atomic_inc_uint(&uvmexp.anonpages);
1336 } else {
1337 if (obj) {
1338 uvm_pageinsert(obj, pg);
1339 }
1340 pg->pqflags = 0;
1341 }
1342 mutex_spin_exit(&uvm_fpageqlock);
1343
1344 #if defined(UVM_PAGE_TRKOWN)
1345 pg->owner_tag = NULL;
1346 #endif
1347 UVM_PAGE_OWN(pg, "new alloc");
1348
1349 if (flags & UVM_PGA_ZERO) {
1350 /*
1351 * A zero'd page is not clean. If we got a page not already
1352 * zero'd, then we have to zero it ourselves.
1353 */
1354 pg->flags &= ~PG_CLEAN;
1355 if (zeroit)
1356 pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1357 }
1358
1359 return(pg);
1360
1361 fail:
1362 mutex_spin_exit(&uvm_fpageqlock);
1363 return (NULL);
1364 }
1365
1366 /*
1367 * uvm_pagereplace: replace a page with another
1368 *
1369 * => object must be locked
1370 */
1371
1372 void
1373 uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
1374 {
1375 struct uvm_object *uobj = oldpg->uobject;
1376
1377 KASSERT((oldpg->flags & PG_TABLED) != 0);
1378 KASSERT(uobj != NULL);
1379 KASSERT((newpg->flags & PG_TABLED) == 0);
1380 KASSERT(newpg->uobject == NULL);
1381 KASSERT(mutex_owned(uobj->vmobjlock));
1382
1383 newpg->uobject = uobj;
1384 newpg->offset = oldpg->offset;
1385
1386 uvm_pageremove_tree(uobj, oldpg);
1387 uvm_pageinsert_tree(uobj, newpg);
1388 uvm_pageinsert_list(uobj, newpg, oldpg);
1389 uvm_pageremove_list(uobj, oldpg);
1390 }
1391
1392 /*
1393 * uvm_pagerealloc: reallocate a page from one object to another
1394 *
1395 * => both objects must be locked
1396 */
1397
1398 void
1399 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
1400 {
1401 /*
1402 * remove it from the old object
1403 */
1404
1405 if (pg->uobject) {
1406 uvm_pageremove(pg->uobject, pg);
1407 }
1408
1409 /*
1410 * put it in the new object
1411 */
1412
1413 if (newobj) {
1414 pg->uobject = newobj;
1415 pg->offset = newoff;
1416 uvm_pageinsert(newobj, pg);
1417 }
1418 }
1419
1420 #ifdef DEBUG
1421 /*
1422 * check if page is zero-filled
1423 *
1424 * - called with free page queue lock held.
1425 */
1426 void
1427 uvm_pagezerocheck(struct vm_page *pg)
1428 {
1429 int *p, *ep;
1430
1431 KASSERT(uvm_zerocheckkva != 0);
1432 KASSERT(mutex_owned(&uvm_fpageqlock));
1433
1434 /*
1435 * XXX assuming pmap_kenter_pa and pmap_kremove never call
1436 * uvm page allocator.
1437 *
1438 * it might be better to have a "CPU-local temporary map" pmap interface.
1439 */
1440 pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0);
1441 p = (int *)uvm_zerocheckkva;
1442 ep = (int *)((char *)p + PAGE_SIZE);
1443 pmap_update(pmap_kernel());
1444 while (p < ep) {
1445 if (*p != 0)
1446 panic("PG_ZERO page isn't zero-filled");
1447 p++;
1448 }
1449 pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
1450 /*
1451 * pmap_update() is not necessary here because no one except us
1452 * uses this VA.
1453 */
1454 }
1455 #endif /* DEBUG */
1456
1457 /*
1458 * uvm_pagefree: free page
1459 *
1460 * => erase page's identity (i.e. remove from object)
1461 * => put page on free list
1462 * => caller must lock owning object (either anon or uvm_object)
1463 * => caller must lock page queues
1464 * => assumes all valid mappings of pg are gone
1465 */
1466
1467 void
1468 uvm_pagefree(struct vm_page *pg)
1469 {
1470 struct pgflist *pgfl;
1471 struct uvm_cpu *ucpu;
1472 int index, color, queue;
1473 bool iszero;
1474
1475 #ifdef DEBUG
1476 if (pg->uobject == (void *)0xdeadbeef &&
1477 pg->uanon == (void *)0xdeadbeef) {
1478 panic("uvm_pagefree: freeing free page %p", pg);
1479 }
1480 #endif /* DEBUG */
1481
1482 KASSERT((pg->flags & PG_PAGEOUT) == 0);
1483 KASSERT(!(pg->pqflags & PQ_FREE));
1484 KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg));
1485 KASSERT(pg->uobject == NULL || mutex_owned(pg->uobject->vmobjlock));
1486 KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
1487 mutex_owned(pg->uanon->an_lock));
1488
1489 /*
1490 * if the page is loaned, resolve the loan instead of freeing.
1491 */
1492
1493 if (pg->loan_count) {
1494 KASSERT(pg->wire_count == 0);
1495
1496 /*
1497 * if the page is owned by an anon then we just want to
1498 * drop anon ownership. the kernel will free the page when
1499 * it is done with it. if the page is owned by an object,
1500 * remove it from the object and mark it dirty for the benefit
1501 * of possible anon owners.
1502 *
1503 * regardless of previous ownership, wakeup any waiters,
1504 * unbusy the page, and we're done.
1505 */
1506
1507 if (pg->uobject != NULL) {
1508 uvm_pageremove(pg->uobject, pg);
1509 pg->flags &= ~PG_CLEAN;
1510 } else if (pg->uanon != NULL) {
1511 if ((pg->pqflags & PQ_ANON) == 0) {
1512 pg->loan_count--;
1513 } else {
1514 pg->pqflags &= ~PQ_ANON;
1515 atomic_dec_uint(&uvmexp.anonpages);
1516 }
1517 pg->uanon->an_page = NULL;
1518 pg->uanon = NULL;
1519 }
1520 if (pg->flags & PG_WANTED) {
1521 wakeup(pg);
1522 }
1523 pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1);
1524 #ifdef UVM_PAGE_TRKOWN
1525 pg->owner_tag = NULL;
1526 #endif
1527 if (pg->loan_count) {
1528 KASSERT(pg->uobject == NULL);
1529 if (pg->uanon == NULL) {
1530 uvm_pagedequeue(pg);
1531 }
1532 return;
1533 }
1534 }
1535
1536 /*
1537 * remove page from its object or anon.
1538 */
1539
1540 if (pg->uobject != NULL) {
1541 uvm_pageremove(pg->uobject, pg);
1542 } else if (pg->uanon != NULL) {
1543 pg->uanon->an_page = NULL;
1544 atomic_dec_uint(&uvmexp.anonpages);
1545 }
1546
1547 /*
1548 * now remove the page from the queues.
1549 */
1550
1551 uvm_pagedequeue(pg);
1552
1553 /*
1554 * if the page was wired, unwire it now.
1555 */
1556
1557 if (pg->wire_count) {
1558 pg->wire_count = 0;
1559 uvmexp.wired--;
1560 }
1561
1562 /*
1563 * and put on free queue
1564 */
1565
1566 iszero = (pg->flags & PG_ZERO);
1567 index = uvm_page_lookup_freelist(pg);
1568 color = VM_PGCOLOR_BUCKET(pg);
1569 queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN);
1570
1571 #ifdef DEBUG
1572 pg->uobject = (void *)0xdeadbeef;
1573 pg->uanon = (void *)0xdeadbeef;
1574 #endif
1575
1576 mutex_spin_enter(&uvm_fpageqlock);
1577 pg->pqflags = PQ_FREE;
1578
1579 #ifdef DEBUG
1580 if (iszero)
1581 uvm_pagezerocheck(pg);
1582 #endif /* DEBUG */
1583
1584
1585 /* global list */
1586 pgfl = &uvm.page_free[index].pgfl_buckets[color].pgfl_queues[queue];
1587 LIST_INSERT_HEAD(pgfl, pg, pageq.list);
1588 uvmexp.free++;
1589 if (iszero) {
1590 uvmexp.zeropages++;
1591 }
1592
1593 /* per-cpu list */
1594 ucpu = curcpu()->ci_data.cpu_uvm;
1595 pg->offset = (uintptr_t)ucpu;
1596 pgfl = &ucpu->page_free[index].pgfl_buckets[color].pgfl_queues[queue];
1597 LIST_INSERT_HEAD(pgfl, pg, listq.list);
1598 ucpu->pages[queue]++;
1599 if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
1600 ucpu->page_idle_zero = vm_page_zero_enable;
1601 }
1602
1603 mutex_spin_exit(&uvm_fpageqlock);
1604 }
1605
1606 /*
1607 * uvm_page_unbusy: unbusy an array of pages.
1608 *
1609 * => pages must either all belong to the same object, or all belong to anons.
1610 * => if pages are object-owned, object must be locked.
1611 * => if pages are anon-owned, anons must be locked.
1612 * => caller must lock page queues if pages may be released.
1613 * => caller must make sure that anon-owned pages are not PG_RELEASED.
1614 */
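/*
 * Illustrative caller pattern (a sketch, not taken from this file): a
 * pager completing I/O on a set of object pages would do roughly
 *
 *	mutex_enter(uobj->vmobjlock);
 *	uvm_page_unbusy(pgs, npages);
 *	mutex_exit(uobj->vmobjlock);
 */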
1615
1616 void
1617 uvm_page_unbusy(struct vm_page **pgs, int npgs)
1618 {
1619 struct vm_page *pg;
1620 int i;
1621 UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
1622
1623 for (i = 0; i < npgs; i++) {
1624 pg = pgs[i];
1625 if (pg == NULL || pg == PGO_DONTCARE) {
1626 continue;
1627 }
1628
1629 KASSERT(pg->uobject == NULL ||
1630 mutex_owned(pg->uobject->vmobjlock));
1631 KASSERT(pg->uobject != NULL ||
1632 (pg->uanon != NULL && mutex_owned(pg->uanon->an_lock)));
1633
1634 KASSERT(pg->flags & PG_BUSY);
1635 KASSERT((pg->flags & PG_PAGEOUT) == 0);
1636 if (pg->flags & PG_WANTED) {
1637 wakeup(pg);
1638 }
1639 if (pg->flags & PG_RELEASED) {
1640 UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
1641 KASSERT(pg->uobject != NULL ||
1642 (pg->uanon != NULL && pg->uanon->an_ref > 0));
1643 pg->flags &= ~PG_RELEASED;
1644 uvm_pagefree(pg);
1645 } else {
1646 UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
1647 KASSERT((pg->flags & PG_FAKE) == 0);
1648 pg->flags &= ~(PG_WANTED|PG_BUSY);
1649 UVM_PAGE_OWN(pg, NULL);
1650 }
1651 }
1652 }
1653
1654 #if defined(UVM_PAGE_TRKOWN)
1655 /*
1656 * uvm_page_own: set or release page ownership
1657 *
1658 * => this is a debugging function that keeps track of who sets PG_BUSY
1659 * and where they do it. it can be used to track down problems
1660 * such as a process setting "PG_BUSY" and never releasing it.
1661 * => page's object [if any] must be locked
1662 * => if "tag" is NULL then we are releasing page ownership
1663 */
1664 void
1665 uvm_page_own(struct vm_page *pg, const char *tag)
1666 {
1667 struct uvm_object *uobj;
1668 struct vm_anon *anon;
1669
1670 KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
1671
1672 uobj = pg->uobject;
1673 anon = pg->uanon;
1674 if (uobj != NULL) {
1675 KASSERT(mutex_owned(uobj->vmobjlock));
1676 } else if (anon != NULL) {
1677 KASSERT(mutex_owned(anon->an_lock));
1678 }
1679
1680 KASSERT((pg->flags & PG_WANTED) == 0);
1681
1682 /* gain ownership? */
1683 if (tag) {
1684 KASSERT((pg->flags & PG_BUSY) != 0);
1685 if (pg->owner_tag) {
1686 printf("uvm_page_own: page %p already owned "
1687 "by proc %d [%s]\n", pg,
1688 pg->owner, pg->owner_tag);
1689 panic("uvm_page_own");
1690 }
1691 pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
1692 pg->lowner = (curlwp) ? curlwp->l_lid : (lwpid_t) -1;
1693 pg->owner_tag = tag;
1694 return;
1695 }
1696
1697 /* drop ownership */
1698 KASSERT((pg->flags & PG_BUSY) == 0);
1699 if (pg->owner_tag == NULL) {
1700 printf("uvm_page_own: dropping ownership of an non-owned "
1701 "page (%p)\n", pg);
1702 panic("uvm_page_own");
1703 }
1704 if (!uvmpdpol_pageisqueued_p(pg)) {
1705 KASSERT((pg->uanon == NULL && pg->uobject == NULL) ||
1706 pg->wire_count > 0);
1707 } else {
1708 KASSERT(pg->wire_count == 0);
1709 }
1710 pg->owner_tag = NULL;
1711 }
1712 #endif
1713
1714 /*
1715 * uvm_pageidlezero: zero free pages while the system is idle.
1716 *
1717 * => try to complete one color bucket at a time, to reduce our impact
1718 * on the CPU cache.
1719 * => we loop until we either reach the target or there is a lwp ready
1720 * to run, or MD code detects a reason to break early.
1721 */
1722 void
1723 uvm_pageidlezero(void)
1724 {
1725 struct vm_page *pg;
1726 struct pgfreelist *pgfl, *gpgfl;
1727 struct uvm_cpu *ucpu;
1728 int free_list, firstbucket, nextbucket;
1729 bool lcont = false;
1730
1731 ucpu = curcpu()->ci_data.cpu_uvm;
1732 if (!ucpu->page_idle_zero ||
1733 ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
1734 ucpu->page_idle_zero = false;
1735 return;
1736 }
1737 if (!mutex_tryenter(&uvm_fpageqlock)) {
1738 /* Contention: let other CPUs use the lock. */
1739 return;
1740 }
1741 firstbucket = ucpu->page_free_nextcolor;
1742 nextbucket = firstbucket;
1743 do {
1744 for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
1745 if (sched_curcpu_runnable_p()) {
1746 goto quit;
1747 }
1748 pgfl = &ucpu->page_free[free_list];
1749 gpgfl = &uvm.page_free[free_list];
1750 while ((pg = LIST_FIRST(&pgfl->pgfl_buckets[
1751 nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
1752 if (lcont || sched_curcpu_runnable_p()) {
1753 goto quit;
1754 }
1755 LIST_REMOVE(pg, pageq.list); /* global list */
1756 LIST_REMOVE(pg, listq.list); /* per-cpu list */
1757 ucpu->pages[PGFL_UNKNOWN]--;
1758 uvmexp.free--;
1759 KASSERT(pg->pqflags == PQ_FREE);
1760 pg->pqflags = 0;
1761 mutex_spin_exit(&uvm_fpageqlock);
1762 #ifdef PMAP_PAGEIDLEZERO
1763 if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) {
1764
1765 /*
1766 * The machine-dependent code detected
1767 * some reason for us to abort zeroing
1768 * pages, probably because there is a
1769 * process now ready to run.
1770 */
1771
1772 mutex_spin_enter(&uvm_fpageqlock);
1773 pg->pqflags = PQ_FREE;
1774 LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
1775 nextbucket].pgfl_queues[
1776 PGFL_UNKNOWN], pg, pageq.list);
1777 LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
1778 nextbucket].pgfl_queues[
1779 PGFL_UNKNOWN], pg, listq.list);
1780 ucpu->pages[PGFL_UNKNOWN]++;
1781 uvmexp.free++;
1782 uvmexp.zeroaborts++;
1783 goto quit;
1784 }
1785 #else
1786 pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1787 #endif /* PMAP_PAGEIDLEZERO */
1788 pg->flags |= PG_ZERO;
1789
1790 if (!mutex_tryenter(&uvm_fpageqlock)) {
1791 lcont = true;
1792 mutex_spin_enter(&uvm_fpageqlock);
1793 } else {
1794 lcont = false;
1795 }
1796 pg->pqflags = PQ_FREE;
1797 LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
1798 nextbucket].pgfl_queues[PGFL_ZEROS],
1799 pg, pageq.list);
1800 LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
1801 nextbucket].pgfl_queues[PGFL_ZEROS],
1802 pg, listq.list);
1803 ucpu->pages[PGFL_ZEROS]++;
1804 uvmexp.free++;
1805 uvmexp.zeropages++;
1806 }
1807 }
1808 if (ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
1809 break;
1810 }
1811 nextbucket = (nextbucket + 1) & uvmexp.colormask;
1812 } while (nextbucket != firstbucket);
1813 ucpu->page_idle_zero = false;
1814 quit:
1815 mutex_spin_exit(&uvm_fpageqlock);
1816 }
1817
1818 /*
1819 * uvm_pagelookup: look up a page
1820 *
1821 * => caller should lock object to keep someone from pulling the page
1822 * out from under it
1823 */
1824
1825 struct vm_page *
1826 uvm_pagelookup(struct uvm_object *obj, voff_t off)
1827 {
1828 struct vm_page *pg;
1829
1830 KASSERT(mutex_owned(obj->vmobjlock));
1831
1832 pg = rb_tree_find_node(&obj->rb_tree, &off);
1833
1834 KASSERT(pg == NULL || obj->uo_npages != 0);
1835 KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
1836 (pg->flags & PG_BUSY) != 0);
1837 return pg;
1838 }
1839
1840 /*
1841 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
1842 *
1843 * => caller must lock page queues
1844 */
1845
1846 void
1847 uvm_pagewire(struct vm_page *pg)
1848 {
1849 KASSERT(mutex_owned(&uvm_pageqlock));
1850 #if defined(READAHEAD_STATS)
1851 if ((pg->pqflags & PQ_READAHEAD) != 0) {
1852 uvm_ra_hit.ev_count++;
1853 pg->pqflags &= ~PQ_READAHEAD;
1854 }
1855 #endif /* defined(READAHEAD_STATS) */
1856 if (pg->wire_count == 0) {
1857 uvm_pagedequeue(pg);
1858 uvmexp.wired++;
1859 }
1860 pg->wire_count++;
1861 }
1862
1863 /*
1864 * uvm_pageunwire: unwire the page.
1865 *
1866 * => activate if wire count goes to zero.
1867 * => caller must lock page queues
1868 */
1869
1870 void
1871 uvm_pageunwire(struct vm_page *pg)
1872 {
1873 KASSERT(mutex_owned(&uvm_pageqlock));
1874 pg->wire_count--;
1875 if (pg->wire_count == 0) {
1876 uvm_pageactivate(pg);
1877 uvmexp.wired--;
1878 }
1879 }
1880
1881 /*
1882 * uvm_pagedeactivate: deactivate page
1883 *
1884 * => caller must lock page queues
1885 * => caller must check to make sure page is not wired
1886 * => object that page belongs to must be locked (so we can adjust pg->flags)
1887 * => caller must clear the reference on the page before calling
1888 */
1889
1890 void
1891 uvm_pagedeactivate(struct vm_page *pg)
1892 {
1893
1894 KASSERT(mutex_owned(&uvm_pageqlock));
1895 KASSERT(uvm_page_locked_p(pg));
1896 KASSERT(pg->wire_count != 0 || uvmpdpol_pageisqueued_p(pg));
1897 uvmpdpol_pagedeactivate(pg);
1898 }
1899
1900 /*
1901 * uvm_pageactivate: activate page
1902 *
1903 * => caller must lock page queues
1904 */
1905
1906 void
1907 uvm_pageactivate(struct vm_page *pg)
1908 {
1909
1910 KASSERT(mutex_owned(&uvm_pageqlock));
1911 KASSERT(uvm_page_locked_p(pg));
1912 #if defined(READAHEAD_STATS)
1913 if ((pg->pqflags & PQ_READAHEAD) != 0) {
1914 uvm_ra_hit.ev_count++;
1915 pg->pqflags &= ~PQ_READAHEAD;
1916 }
1917 #endif /* defined(READAHEAD_STATS) */
1918 if (pg->wire_count != 0) {
1919 return;
1920 }
1921 uvmpdpol_pageactivate(pg);
1922 }
1923
1924 /*
1925 * uvm_pagedequeue: remove a page from any paging queue
1926 */
1927
1928 void
1929 uvm_pagedequeue(struct vm_page *pg)
1930 {
1931
1932 if (uvmpdpol_pageisqueued_p(pg)) {
1933 KASSERT(mutex_owned(&uvm_pageqlock));
1934 }
1935
1936 uvmpdpol_pagedequeue(pg);
1937 }
1938
1939 /*
1940 * uvm_pageenqueue: add a page to a paging queue without activating.
1941 * used where a page is not really demanded (yet). e.g. read-ahead
1942 */
1943
1944 void
1945 uvm_pageenqueue(struct vm_page *pg)
1946 {
1947
1948 KASSERT(mutex_owned(&uvm_pageqlock));
1949 if (pg->wire_count != 0) {
1950 return;
1951 }
1952 uvmpdpol_pageenqueue(pg);
1953 }
1954
1955 /*
1956 * uvm_pagezero: zero fill a page
1957 *
1958 * => if page is part of an object then the object should be locked
1959 * to protect pg->flags.
1960 */
1961
1962 void
1963 uvm_pagezero(struct vm_page *pg)
1964 {
1965 pg->flags &= ~PG_CLEAN;
1966 pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1967 }
1968
1969 /*
1970 * uvm_pagecopy: copy a page
1971 *
1972 * => if page is part of an object then the object should be locked
1973 * to protect pg->flags.
1974 */
1975
1976 void
1977 uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
1978 {
1979
1980 dst->flags &= ~PG_CLEAN;
1981 pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
1982 }
1983
1984 /*
1985 * uvm_pageismanaged: test to see whether a page (specified by PA) is managed.
1986 */
1987
1988 bool
1989 uvm_pageismanaged(paddr_t pa)
1990 {
1991
1992 return (vm_physseg_find(atop(pa), NULL) != -1);
1993 }
1994
1995 /*
1996 * uvm_page_lookup_freelist: look up the free list for the specified page
1997 */
1998
1999 int
2000 uvm_page_lookup_freelist(struct vm_page *pg)
2001 {
2002 int lcv;
2003
2004 lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
2005 KASSERT(lcv != -1);
2006 return (VM_PHYSMEM_PTR(lcv)->free_list);
2007 }
2008
2009 /*
2010 * uvm_page_locked_p: return true if object associated with page is
2011 * locked. this is a weak check for runtime assertions only.
2012 */
2013
2014 bool
2015 uvm_page_locked_p(struct vm_page *pg)
2016 {
2017
2018 if (pg->uobject != NULL) {
2019 return mutex_owned(pg->uobject->vmobjlock);
2020 }
2021 if (pg->uanon != NULL) {
2022 return mutex_owned(pg->uanon->an_lock);
2023 }
2024 return true;
2025 }
2026
2027 #if defined(DDB) || defined(DEBUGPRINT)
2028
2029 /*
2030 * uvm_page_printit: actually print the page
2031 */
2032
2033 static const char page_flagbits[] = UVM_PGFLAGBITS;
2034 static const char page_pqflagbits[] = UVM_PQFLAGBITS;
2035
2036 void
2037 uvm_page_printit(struct vm_page *pg, bool full,
2038 void (*pr)(const char *, ...))
2039 {
2040 struct vm_page *tpg;
2041 struct uvm_object *uobj;
2042 struct pgflist *pgl;
2043 char pgbuf[128];
2044 char pqbuf[128];
2045
2046 (*pr)("PAGE %p:\n", pg);
2047 snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
2048 snprintb(pqbuf, sizeof(pqbuf), page_pqflagbits, pg->pqflags);
2049 (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
2050 pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
2051 (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
2052 pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
2053 #if defined(UVM_PAGE_TRKOWN)
2054 if (pg->flags & PG_BUSY)
2055 (*pr)(" owning process = %d, tag=%s\n",
2056 pg->owner, pg->owner_tag);
2057 else
2058 (*pr)(" page not busy, no owner\n");
2059 #else
2060 (*pr)(" [page ownership tracking disabled]\n");
2061 #endif
2062
2063 if (!full)
2064 return;
2065
2066 /* cross-verify object/anon */
2067 if ((pg->pqflags & PQ_FREE) == 0) {
2068 if (pg->pqflags & PQ_ANON) {
2069 if (pg->uanon == NULL || pg->uanon->an_page != pg)
2070 (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
2071 (pg->uanon) ? pg->uanon->an_page : NULL);
2072 else
2073 (*pr)(" anon backpointer is OK\n");
2074 } else {
2075 uobj = pg->uobject;
2076 if (uobj) {
2077 (*pr)(" checking object list\n");
2078 TAILQ_FOREACH(tpg, &uobj->memq, listq.queue) {
2079 if (tpg == pg) {
2080 break;
2081 }
2082 }
2083 if (tpg)
2084 (*pr)(" page found on object list\n");
2085 else
2086 (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
2087 }
2088 }
2089 }
2090
2091 /* cross-verify page queue */
2092 if (pg->pqflags & PQ_FREE) {
2093 int fl = uvm_page_lookup_freelist(pg);
2094 int color = VM_PGCOLOR_BUCKET(pg);
2095 pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
2096 ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
2097 } else {
2098 pgl = NULL;
2099 }
2100
2101 if (pgl) {
2102 (*pr)(" checking pageq list\n");
2103 LIST_FOREACH(tpg, pgl, pageq.list) {
2104 if (tpg == pg) {
2105 break;
2106 }
2107 }
2108 if (tpg)
2109 (*pr)(" page found on pageq list\n");
2110 else
2111 (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
2112 }
2113 }
2114
2115 /*
2116 * uvm_page_printall - print a summary of all managed pages
2117 */
2118
2119 void
2120 uvm_page_printall(void (*pr)(const char *, ...))
2121 {
2122 unsigned i;
2123 struct vm_page *pg;
2124
2125 (*pr)("%18s %4s %4s %18s %18s"
2126 #ifdef UVM_PAGE_TRKOWN
2127 " OWNER"
2128 #endif
2129 "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
2130 for (i = 0; i < vm_nphysmem; i++) {
2131 for (pg = VM_PHYSMEM_PTR(i)->pgs; pg < VM_PHYSMEM_PTR(i)->lastpg; pg++) {
2132 (*pr)("%18p %04x %04x %18p %18p",
2133 pg, pg->flags, pg->pqflags, pg->uobject,
2134 pg->uanon);
2135 #ifdef UVM_PAGE_TRKOWN
2136 if (pg->flags & PG_BUSY)
2137 (*pr)(" %d [%s]", pg->owner, pg->owner_tag);
2138 #endif
2139 (*pr)("\n");
2140 }
2141 }
2142 }
2143
2144 #endif /* DDB || DEBUGPRINT */
2145