uvm_pager.c revision 1.101
/*	$NetBSD: uvm_pager.c,v 1.101 2011/06/12 03:36:03 rmind Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.101 2011/06/12 03:36:03 rmind Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif

size_t pager_map_size = PAGER_MAP_SIZE;

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
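		/*
		 * pager_map is full.  the pagedaemon never waits for
		 * pager_map space; instead it uses (or waits for) the
		 * single reserved emergency VA range set up at init time.
		 */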
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/* the shift rounds MAXPHYS down to a page boundary */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot, 0);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the map entry (waking
 * up anyone wanting space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, npages << PAGE_SHIFT);
	pmap_update(pmap_kernel());

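	/*
	 * if this was the emergency mapping there is no map entry to
	 * remove; just mark it free again and wake anyone waiting for it.
	 */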
	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

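	/*
	 * defer the rest of the processing to the aiodone workqueue,
	 * since uvm_aio_aiodone() must run in thread context.
	 */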
	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}

void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	kmutex_t *slock;
	int pageout_done;
	int swslot;
	int i;
	bool swap;
	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
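	/*
	 * decide from the first page whether this was swap i/o
	 * (anon or aobj pages) or ordinary object i/o.
	 */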
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = pg->uobject->vmobjlock;
			} else {
				slock = pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
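			/*
			 * record the new slot state in the object or anon:
			 * 0 if the slot will be freed (the transient ENOMEM
			 * case), SWSLOT_BAD if the data is lost for good.
			 */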
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
						pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
						swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
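			/*
			 * if the anon's last reference is gone and the page
			 * has been released, let uvm_anon_release() dispose
			 * of both; otherwise just unbusy the page and drop
			 * the locks.
			 */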
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	uvm_pageout_done(pageout_done);
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;

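	/*
	 * recover the vm_page structures backing the buffer's KVA
	 * and then tear down the pager_map mapping.
	 */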
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}