/*	$NetBSD: uvm_pager.c,v 1.95 2009/03/30 16:36:36 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.95 2009/03/30 16:36:36 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif
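
/*
 * the default can be overridden from the kernel config; a sketch,
 * assuming the usual options(4) mechanism behind opt_pagermap.h:
 *
 *	options PAGER_MAP_SIZE="(32 * 1024 * 1024)"
 */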

size_t pager_map_size = PAGER_MAP_SIZE;

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */
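
/*
 * a minimal usage sketch (hypothetical caller; error handling elided;
 * the pages must already be busied by the caller):
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK);
 *	... do device i/o on [kva, kva + (npages << PAGE_SHIFT)) ...
 *	uvm_pagermapout(kva, npages);
 */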

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			/*
			 * the pagedaemon must not wait here for pager_map
			 * space; fall back to the reserved emergency VA,
			 * sleeping only if another emergency mapping is
			 * already in use.
			 */
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/* The shift implicitly truncates to PAGE_SIZE */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		/* sleep until uvm_pagermapout() frees some pager_map space */
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap(), but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => the buffer is private so need not be locked here
 */
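
/*
 * a sketch of the expected setup (hypothetical; not taken from any one
 * caller): the master buf's b_resid holds the total i/o size and each
 * nested buf points back at it via b_private:
 *
 *	mbp->b_resid = total_size;
 *	bp = getiobuf(vp, true);
 *	bp->b_private = mbp;
 *	bp->b_iodone = uvm_aio_biodone1;
 *	... issue bp; repeat for the remaining chunks ...
 */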

void
uvm_aio_biodone1(struct buf *bp)
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_error != 0) {
		/* propagate the error to the master buf */
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	putiobuf(bp);
	if (mbp->b_resid == 0) {
		/* all nested bufs are done; complete the master buf */
		biodone(mbp);
	}
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 */
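
/*
 * a hedged usage sketch (hypothetical caller): at issue time, point the
 * buf here so that the real completion work is deferred to thread
 * context via the aiodone workqueue:
 *
 *	bp->b_iodone = uvm_aio_biodone;
 *	VOP_STRATEGY(vp, bp);
 */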

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}

/*
 * uvm_aio_aiodone_pages: do per-page cleanup for a completed async i/o:
 * handle i/o errors, finish read-initialized (PG_FAKE) pages, account
 * for pagedaemon-initiated pageouts, and unbusy or release the pages.
 */

void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	kmutex_t *slock;
	int pageout_done;
	int swslot;
	int i;
	bool swap;
	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
						pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
						swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	uvm_pageout_done(pageout_done);
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */
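
/*
 * note: this is reached via uvm.aiodone_queue, to which
 * uvm_aio_biodone() above enqueues the buf from interrupt context.
 */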

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}