/*	$NetBSD: uvm_page.c,v 1.60 2001/05/02 01:22:20 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#define UVM_PAGE		/* pull in uvm_page.h functions */
#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */
/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t virtual_space_start;
static vaddr_t virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we statically allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * we allocate an initial number of page colors in uvm_page_init(),
 * and remember them.  We may re-color pages as cache sizes are
 * discovered during the autoconfiguration phase.  But we can never
 * free the initial set of buckets, since they are allocated using
 * uvm_pageboot_alloc().
 */

static boolean_t have_recolored_pages /* = FALSE */;

/*
 * local prototypes
 */

static void uvm_pageinsert __P((struct vm_page *));
static void uvm_pageremove __P((struct vm_page *));

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

	KASSERT((pg->flags & PG_TABLED) == 0);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	s = splvm();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	pg->flags |= PG_TABLED;
	pg->uobject->uo_npages++;
}
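
/*
 * For reference: the <obj,offset> hash above is computed by the
 * uvm_pagehash() macro in uvm_page.h.  A minimal sketch of the idea
 * (the real macro may differ in detail):
 *
 *	#define uvm_pagehash(obj, off) \
 *		(((unsigned long)(obj) + (unsigned long)atop(off)) & \
 *		    uvm.page_hashmask)
 *
 * i.e. the object pointer plus the page number, masked down to a
 * bucket index.  page_hashmask is always "number of buckets - 1",
 * which works because uvm_page_rehash() keeps the bucket count a
 * power of two.
 */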

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

	KASSERT(pg->flags & PG_TABLED);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
	s = splvm();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

	if (UVM_OBJ_IS_VTEXT(pg->uobject)) {
		uvmexp.vtextpages--;
	} else if (UVM_OBJ_IS_VNODE(pg->uobject)) {
		uvmexp.vnodepages--;
	}

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	pg->flags &= ~PG_TABLED;
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->version++;
}

static void
uvm_page_init_buckets(struct pgfreelist *pgfl)
{
	int color, i;

	for (color = 0; color < uvmexp.ncolors; color++) {
		for (i = 0; i < PGFL_NQUEUES; i++) {
			TAILQ_INIT(&pgfl->pgfl_buckets[
			    color].pgfl_queues[i]);
		}
	}
}
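
/*
 * Note on the layout being initialized above: the free page queues
 * form a three-level structure indexed as
 *
 *	uvm.page_free[freelist].pgfl_buckets[color].pgfl_queues[queue]
 *
 * where "freelist" is the MD allocation priority class, "color" is
 * the page's cache color, and "queue" is PGFL_ZEROS or PGFL_UNKNOWN.
 * For example, 2 free lists and 4 colors give 2 * 4 * PGFL_NQUEUES
 * distinct page queues.
 */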

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	vsize_t freepages, pagecount, bucketcount, n;
	struct pgflbucket *bucketarray;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;

	/*
	 * init the page queues and page queue locks, except the free
	 * list; we allocate that later (with the initial vm_page
	 * structures).
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_init: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * Let MD code initialize the number of colors, or default
	 * to 1 color if MD code doesn't care.
	 */
	if (uvmexp.ncolors == 0)
		uvmexp.ncolors = 1;
	uvmexp.colormask = uvmexp.ncolors - 1;

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.  for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by (PAGE_SIZE plus the size of the vm_page
	 * structure).  we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */
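	/*
	 * Worked example (the numbers are illustrative only, assuming
	 * PAGE_SIZE == 4096 and sizeof(struct vm_page) == 64): with
	 * freepages == 16384,
	 *
	 *	pagecount = ((16384 + 1) << 12) / (4096 + 64) = 16132
	 *
	 * so roughly 252 pages worth of RAM is set aside to hold the
	 * vm_page structures describing the other 16132 pages.
	 */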

	bucketcount = uvmexp.ncolors * VM_NFREELIST;
	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));

	bucketarray = (void *) uvm_pageboot_alloc((bucketcount *
	    sizeof(struct pgflbucket)) + (pagecount *
	    sizeof(struct vm_page)));
	pagearray = (struct vm_page *)(bucketarray + bucketcount);

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		uvm.page_free[lcv].pgfl_buckets =
		    (bucketarray + (lcv * uvmexp.ncolors));
		uvm_page_init_buckets(&uvm.page_free[lcv]);
	}

	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) < vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */

	simple_lock_init(&uvm.pagedaemon_lock);
	simple_lock_init(&uvm.aiodoned_lock);

	/*
	 * init various thresholds.
	 * XXXCDC - values may need adjusting
	 */

	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = 5;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
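
/*
 * For example, a pagesize of 4096 yields pagemask 0xfff and
 * pageshift 12; a pagesize of 8192 yields pagemask 0x1fff and
 * pageshift 13.
 */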

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
	static boolean_t initialized = FALSE;
	vaddr_t addr;
#if !defined(PMAP_STEAL_MEMORY)
	vaddr_t vaddr;
	paddr_t paddr;
#endif

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/* round to page size */
	size = round_page(size);

#if defined(PMAP_STEAL_MEMORY)

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should adjust
	 * virtual_space_start/virtual_space_end if necessary.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update();
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
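
/*
 * A hypothetical bootstrap-time use (the real callers live in MD
 * pmap code and run before uvm_page_init()):
 *
 *	vaddr_t va;
 *
 *	va = uvm_pageboot_alloc(USPACE);	(u-area for proc0, say)
 *	memset((void *)va, 0, USPACE);
 *
 * memory obtained this way is stolen permanently; it can never be
 * returned to the free page pool.
 */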

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist __P((paddr_t *, int));

static boolean_t
uvm_page_physget_freelist(paddrp, freelist)
	paddr_t *paddrp;
	int freelist;
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int i;

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	paddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d\n", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		pgs = malloc(sizeof(struct vm_page) * npages,
		    M_VMPAGE, M_NOWAIT);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%llx -> 0x%llx\n",
			    (long long)start, (long long)end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) < avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}
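
/*
 * A typical boot-time call from MD code looks roughly like the
 * following (illustrative only; note that all arguments are page
 * frame numbers, hence the atop() conversions):
 *
 *	uvm_page_physload(atop(start), atop(end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 */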

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;
	size_t newsize, oldsize;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;
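	/* e.g. freepages == 3000 yields bucketcount == 4096 */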

	/*
	 * compute the size of the current table and new table.
	 */

	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	oldsize = round_page(sizeof(struct pglist) * oldcount);
	newsize = round_page(sizeof(struct pglist) * bucketcount);

	/*
	 * allocate the new buckets
	 */

	newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
	if (newbuckets == NULL) {
		printf("uvm_page_rehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splvm();
	simple_lock(&uvm.hashlock);
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			    &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			    pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if it is not the boot-time table
	 */

	if (oldbuckets != &uvm_bootbucket)
		uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);

	/*
	 * done
	 */
	return;
}

/*
 * uvm_page_recolor: Recolor the pages if the new bucket count is
 * larger than the old one.
 */

void
uvm_page_recolor(int newncolors)
{
	struct pgflbucket *bucketarray, *oldbucketarray;
	struct pgfreelist pgfl;
	vm_page_t pg;
	vsize_t bucketcount;
	int s, lcv, color, i, ocolors;

	if (newncolors <= uvmexp.ncolors)
		return;

	bucketcount = newncolors * VM_NFREELIST;
	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
	    M_VMPAGE, M_NOWAIT);
	if (bucketarray == NULL) {
		printf("WARNING: unable to allocate %ld page color buckets\n",
		    (long) bucketcount);
		return;
	}

	s = uvm_lock_fpageq();

	/* Make sure we should still do this. */
	if (newncolors <= uvmexp.ncolors) {
		uvm_unlock_fpageq(s);
		free(bucketarray, M_VMPAGE);
		return;
	}

	oldbucketarray = uvm.page_free[0].pgfl_buckets;
	ocolors = uvmexp.ncolors;

	uvmexp.ncolors = newncolors;
	uvmexp.colormask = uvmexp.ncolors - 1;

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		pgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
		uvm_page_init_buckets(&pgfl);
		for (color = 0; color < ocolors; color++) {
			for (i = 0; i < PGFL_NQUEUES; i++) {
				while ((pg = TAILQ_FIRST(&uvm.page_free[
				    lcv].pgfl_buckets[color].pgfl_queues[i]))
				    != NULL) {
					TAILQ_REMOVE(&uvm.page_free[
					    lcv].pgfl_buckets[
					    color].pgfl_queues[i], pg, pageq);
					TAILQ_INSERT_TAIL(&pgfl.pgfl_buckets[
					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
					    i], pg, pageq);
				}
			}
		}
		uvm.page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
	}

	if (have_recolored_pages) {
		uvm_unlock_fpageq(s);
		free(oldbucketarray, M_VMPAGE);
		return;
	}

	have_recolored_pages = TRUE;
	uvm_unlock_fpageq(s);
}
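
/*
 * A plausible MD caller (hypothetical sketch): once cache geometry
 * is known, the number of colors is the number of page-sized bins
 * in one way of the outermost physically-indexed cache, e.g.
 *
 *	uvm_page_recolor(atop(cache_size / cache_associativity));
 *
 * where cache_size and cache_associativity stand for whatever the
 * MD probe code discovered.
 */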

#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump __P((void)); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("rehash: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)vm_physmem[lcv].start,
		    (long long)vm_physmem[lcv].end,
		    (long long)vm_physmem[lcv].avail_start,
		    (long long)vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
 */

static __inline struct vm_page *
uvm_pagealloc_pgfl(struct pgfreelist *pgfl, int try1, int try2,
    unsigned int *trycolorp)
{
	struct pglist *freeq;
	struct vm_page *pg;
	int color, trycolor = *trycolorp;

	color = trycolor;
	do {
		if ((pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL)
			goto gotit;
		if ((pg = TAILQ_FIRST((freeq =
		    &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL)
			goto gotit;
		color = (color + 1) & uvmexp.colormask;
	} while (color != trycolor);

	return (NULL);

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	/* update zero'd page count */
	if (pg->flags & PG_ZERO)
		uvmexp.zeropages--;

	if (color == trycolor)
		uvmexp.colorhit++;
	else {
		uvmexp.colormiss++;
		*trycolorp = color;
	}

	return (pg);
}

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *    appropriate priority free list than it is to get a zero'd or
 *    unknown contents page.  This is because we live with the
 *    consequences of a bad free list decision for the entire
 *    lifetime of the page, e.g. if the page comes from memory that
 *    is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
	struct uvm_object *obj;
	voff_t off;
	int flags;
	struct vm_anon *anon;
	int strat, free_list;
{
	int lcv, try1, try2, s, zeroit = 0, color;
	struct vm_page *pg;
	boolean_t use_reserve;

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	LOCK_ASSERT(obj == NULL || simple_lock_held(&obj->vmobjlock));
	LOCK_ASSERT(anon == NULL || simple_lock_held(&anon->an_lock));

	s = uvm_lock_fpageq();

	/*
	 * This implements a global round-robin page coloring
	 * algorithm.
	 *
	 * XXXJRT: Should we make the `nextcolor' per-cpu?
	 * XXXJRT: What about virtually-indexed caches?
	 */
	color = uvm.page_free_nextcolor;

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg)) {
		wakeup(&uvm.pagedaemon);
	}

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !(use_reserve && curproc == uvm.pagedaemon_proc)))
		goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

	/*
	 * If we want a zero'd page, try the ZEROS queue first, otherwise
	 * we try the UNKNOWN queue first.
	 */
	if (flags & UVM_PGA_ZERO) {
		try1 = PGFL_ZEROS;
		try2 = PGFL_UNKNOWN;
	} else {
		try1 = PGFL_UNKNOWN;
		try2 = PGFL_ZEROS;
	}

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pg = uvm_pagealloc_pgfl(&uvm.page_free[lcv],
			    try1, try2, &color);
			if (pg != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
		pg = uvm_pagealloc_pgfl(&uvm.page_free[free_list],
		    try1, try2, &color);
		if (pg != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	/*
	 * We now know which color we actually allocated from; set
	 * the next color accordingly.
	 */
	uvm.page_free_nextcolor = (color + 1) & uvmexp.colormask;

	/*
	 * update allocation statistics and remember if we have to
	 * zero the page
	 */
	if (flags & UVM_PGA_ZERO) {
		if (pg->flags & PG_ZERO) {
			uvmexp.pga_zerohit++;
			zeroit = 0;
		} else {
			uvmexp.pga_zeromiss++;
			zeroit = 1;
		}
	}

	uvm_unlock_fpageq(s);		/* unlock free page queue */

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->version++;
	if (anon) {
		anon->u.an_page = pg;
		pg->pqflags = PQ_ANON;
		uvmexp.anonpages++;
	} else {
		if (obj)
			uvm_pageinsert(pg);
		pg->pqflags = 0;
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	if (flags & UVM_PGA_ZERO) {
		/*
		 * A zero'd page is not clean.  If we got a page not already
		 * zero'd, then we have to zero it ourselves.
		 */
		pg->flags &= ~PG_CLEAN;
		if (zeroit)
			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
	}

	return(pg);

 fail:
	uvm_unlock_fpageq(s);
	return (NULL);
}
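
/*
 * Most callers do not use uvm_pagealloc_strat() directly, but go
 * through the uvm_pagealloc() convenience macro in uvm_page.h,
 * which is essentially (sketch):
 *
 *	#define uvm_pagealloc(obj, off, anon, flags) \
 *	    uvm_pagealloc_strat((obj), (off), (anon), (flags), \
 *		UVM_PGA_STRAT_NORMAL, 0)
 */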

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	voff_t newoff;
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(pg)
	struct vm_page *pg;
{
	int s;
	int saved_loan_count = pg->loan_count;

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p\n", pg);
	}
#endif

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		if (saved_loan_count)
			pg->flags &= ~PG_CLEAN;	/* in case an anon takes over */
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {

		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */

		pg->pqflags &= ~PQ_ANON;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		pg->pqflags &= ~PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pqflags & PQ_INACTIVE) {
		if (pg->pqflags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		pg->pqflags &= ~PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		uvmexp.anonpages--;
	}

	/*
	 * and put on free queue
	 */

	pg->flags &= ~PG_ZERO;

	s = uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[
	    uvm_page_lookup_freelist(pg)].pgfl_buckets[
	    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
	pg->pqflags = PQ_FREE;
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;

	uvm_unlock_fpageq(s);
}
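
/*
 * An illustrative caller sequence for freeing an object-owned page,
 * showing the locking described above (sketch only; "uobj" is a
 * hypothetical locked object owning "pg"):
 *
 *	simple_lock(&uobj->vmobjlock);
 *	uvm_lock_pageq();
 *	pmap_page_protect(pg, VM_PROT_NONE);	(zap all mappings)
 *	uvm_pagefree(pg);
 *	uvm_unlock_pageq();
 *	simple_unlock(&uobj->vmobjlock);
 */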

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(pgs, npgs)
	struct vm_page **pgs;
	int npgs;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->flags & PG_RELEASED) {
			UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
			uobj = pg->uobject;
			if (uobj != NULL) {
				uobj->pgops->pgo_releasepg(pg, NULL);
			} else {
				pg->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
			KASSERT(pg->wire_count ||
			    (pg->pqflags & (PQ_ACTIVE|PQ_INACTIVE)));
			pg->flags &= ~(PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *    and where they do it.   it can be used to track down problems
 *    such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => try to complete one color bucket at a time, to reduce our impact
 *    on the CPU cache.
 * => we loop until we either reach the target or whichqs indicates that
 *    there is a process ready to run.
 */
void
uvm_pageidlezero()
{
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list, s, firstbucket;
	static int nextbucket;

	s = uvm_lock_fpageq();

	firstbucket = nextbucket;
	do {
		if (sched_whichqs != 0) {
			uvm_unlock_fpageq(s);
			return;
		}

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq(s);
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			while ((pg = TAILQ_FIRST(&pgfl->pgfl_buckets[
			    nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
				if (sched_whichqs != 0) {
					uvm_unlock_fpageq(s);
					return;
				}

				TAILQ_REMOVE(&pgfl->pgfl_buckets[
				    nextbucket].pgfl_queues[PGFL_UNKNOWN],
				    pg, pageq);
				uvmexp.free--;
				uvm_unlock_fpageq(s);
#ifdef PMAP_PAGEIDLEZERO
				if (PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg)) ==
				    FALSE) {
					/*
					 * The machine-dependent code detected
					 * some reason for us to abort zeroing
					 * pages, probably because there is a
					 * process now ready to run.
					 */
					s = uvm_lock_fpageq();
					TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
					    nextbucket].pgfl_queues[
					    PGFL_UNKNOWN], pg, pageq);
					uvmexp.free++;
					uvmexp.zeroaborts++;
					uvm_unlock_fpageq(s);
					return;
				}
#else
				pmap_zero_page(VM_PAGE_TO_PHYS(pg));
#endif /* PMAP_PAGEIDLEZERO */
				pg->flags |= PG_ZERO;

				s = uvm_lock_fpageq();
				TAILQ_INSERT_HEAD(&pgfl->pgfl_buckets[
				    nextbucket].pgfl_queues[PGFL_ZEROS],
				    pg, pageq);
				uvmexp.free++;
				uvmexp.zeropages++;
			}
		}

		nextbucket = (nextbucket + 1) & uvmexp.colormask;
	} while (nextbucket != firstbucket);

	uvm_unlock_fpageq(s);
}