/*	$NetBSD: uvm_pager.c,v 1.81.4.3 2007/04/09 22:10:08 ad Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.81.4.3 2007/04/09 22:10:08 ad Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};
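
/*
 * Editorial sketch (not part of the tree): each entry above points at a
 * "struct uvm_pagerops" declared in uvm_pager.h.  A pager fills in only
 * the hooks it needs; pgo_init is the one called from uvm_pager_init()
 * below, while pgo_get/pgo_put bring pages in and flush them out.  The
 * names example_init/example_get/example_put are hypothetical and the
 * argument lists are omitted -- see uvm_pager.h for the real prototypes.
 */
#if 0
static struct uvm_pagerops example_pager = {
	.pgo_init = example_init,	/* one-time setup at boot */
	.pgo_get = example_get,		/* bring pages in (fault/read) */
	.pgo_put = example_put,		/* clean/flush pages out */
};
#endif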

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}
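
/*
 * Editorial note (assuming a typical configuration): with MAXPHYS at 64kB
 * and 4kB pages, round_page(MAXPHYS) above reserves 16 pages of emergency
 * KVA.  That single reserved range is what the pagedaemon falls back to in
 * uvm_pagermapin() when pager_map is full, which is why that path asserts
 * npages <= (MAXPHYS >> PAGE_SHIFT).
 */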

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/* The shift implicitly truncates to PAGE_SIZE */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the mapping (waking
 * up anyone wanting space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, npages << PAGE_SHIFT);
	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
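
/*
 * Editorial sketch (not part of the tree): a typical pager-side caller
 * pairs the two functions above around an i/o transfer.  The pages must
 * already be PG_BUSY, and "pgs", "npages", "vp" and "bp" below are
 * hypothetical placeholders for whatever the real caller (e.g. the genfs
 * i/o code) has on hand.
 */
#if 0
	vaddr_t kva;

	/* a read brings data into the pages, so ask for write access */
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	bp->b_data = (void *)kva;
	bp->b_bufsize = bp->b_bcount = npages << PAGE_SHIFT;
	VOP_STRATEGY(vp, bp);			/* start the transfer */

	/* ... later, once the i/o has completed ... */
	uvm_pagermapout(kva, npages);		/* give the KVA back */
#endif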

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => the buffer is private so need not be locked here
 */

void
uvm_aio_biodone1(struct buf *bp)
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	putiobuf(bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work);
}
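
/*
 * Editorial sketch (an assumption about how callers wire this up, not a
 * quote from the tree): the top-level ("master") buf takes uvm_aio_biodone
 * so final completion is deferred to the aiodone workqueue, while each
 * nested buf takes uvm_aio_biodone1 with b_private pointing back at the
 * master, so its completion is folded into mbp->b_resid above.  "mbp",
 * "bp", "totalbytes" and the chunking loop are hypothetical placeholders.
 */
#if 0
	mbp->b_iodone = uvm_aio_biodone;	/* master: finish in thread context */
	mbp->b_resid = totalbytes;
	while (/* more chunks to issue */ 0) {
		bp = getiobuf();		/* nested buf for one chunk */
		bp->b_iodone = uvm_aio_biodone1; /* fold into the master */
		bp->b_private = mbp;
		/* ... set b_data/b_bcount/b_blkno for this chunk, then issue ... */
	}
#endif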

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	kmutex_t *slock;
	int i, error, swslot;
	bool write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	swslot = 0;
	slock = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					uvmexp.paging--;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
						pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
						swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			uvmexp.paging--;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	putiobuf(bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}