/*	$NetBSD: uvm_pager.c,v 1.51 2001/10/15 00:37:51 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#define UVM_PAGER
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
extern struct uvm_pagerops ubc_pager;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};
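
/*
 * Illustrative note (not part of the original file): hooking up an
 * additional pager would mean declaring its uvm_pagerops above and
 * appending a pointer to it here, e.g. (hypothetical name):
 *
 *	extern struct uvm_pagerops example_pagerops;
 *	...
 *	&example_pagerops,
 *
 * uvm_pager_init() below walks this array at boot time and calls each
 * pager's optional pgo_init hook.
 */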

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
struct simplelock pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static boolean_t emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE, 0,
	    FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;
	emergva = uvm_km_valloc(kernel_map, MAXBSIZE);
	emerginuse = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */

	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, flags)
	struct vm_page **pps;
	int npages;
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	      UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != 0) {
		if (curproc == uvm.pagedaemon_proc) {
			simple_lock(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, FALSE,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = TRUE;
			simple_unlock(&pager_map_wanted_lock);
			kva = emergva;
			KASSERT(npages <= MAXBSIZE >> PAGE_SHIFT);
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
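
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pairs uvm_pagermapin() with uvm_pagermapout() around the actual I/O.
 * The helper name and the I/O placeholder below are hypothetical;
 * error handling is reduced to the bare minimum.
 */
#if 0
static int
example_pager_io(struct vm_page **pgs, int npages, boolean_t incoming)
{
	vaddr_t kva;
	int mapinflags;

	/* incoming I/O (device -> memory) needs write access to the pages */
	mapinflags = incoming ? UVMPAGER_MAPIN_READ : 0;
	kva = uvm_pagermapin(pgs, npages, mapinflags);
	if (kva == 0)
		return ENOMEM;	/* pager_map full and we chose not to wait */

	/* ... perform device I/O on [kva, kva + (npages << PAGE_SHIFT)) ... */

	uvm_pagermapout(kva, npages);
	return 0;
}
#endif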

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the map entry (waking
 * up anyone wanting space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
	pmap_update(pmap_kernel());
	if (kva == emergva) {
		simple_lock(&pager_map_wanted_lock);
		emerginuse = FALSE;
		wakeup(&emergva);
		simple_unlock(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);

	/* propagate any error from this nested buf to the master buf. */
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}

	/*
	 * credit this nested buf's byte count against the master buf and
	 * free it.  the master i/o is done once all nested bufs are in.
	 */
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
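
/*
 * Illustrative sketch (not part of the original file): how a caller
 * might split one large i/o into nested bufs that complete through
 * uvm_aio_biodone1().  The helper name is hypothetical and the device
 * setup is elided; only the master/nested-buf bookkeeping is shown.
 */
#if 0
static void
example_nested_io(struct buf *mbp, int nsub)
{
	struct buf *bp;
	int i;

	mbp->b_resid = mbp->b_bcount;	/* bytes still outstanding */
	for (i = 0; i < nsub; i++) {
		bp = pool_get(&bufpool, PR_WAITOK);
		bp->b_bcount = mbp->b_bcount / nsub;
		bp->b_flags = B_BUSY | B_CALL | (mbp->b_flags & B_READ);
		bp->b_iodone = uvm_aio_biodone1;
		bp->b_private = mbp;		/* point back at the master */
		/* ... fill in b_data, b_blkno, b_vp and start the i/o ... */
	}
}
#endif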

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}
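
/*
 * Illustrative sketch (not part of the original file): the queue filled
 * above is drained in thread context by the aiodone daemon, which lives
 * in uvm_pdaemon.c.  Roughly, the consumer side looks like the loop
 * below (simplified; the real daemon also manages splbio and batches
 * the list).
 */
#if 0
	for (;;) {
		simple_lock(&uvm.aiodoned_lock);
		if ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL) {
			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
			continue;
		}
		TAILQ_REMOVE(&uvm.aio_done, bp, b_freelist);
		simple_unlock(&uvm.aiodoned_lock);

		/* runs uvm_aio_aiodone() in thread context */
		(*bp->b_iodone)(bp);
	}
#endif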

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	struct simplelock *slock;
	int s, i, error, swslot;
	boolean_t write, swap, pageout, async;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	swap = (pgs[0]->pqflags & PQ_SWAPBACKED) != 0;
	swslot = 0;
	slock = NULL;
	pageout = (pgs[0]->flags & PG_PAGEOUT) != 0;
	async = (bp->b_flags & B_ASYNC) != 0;
	if (!swap) {
		uobj = pgs[0]->uobject;
		slock = &uobj->vmobjlock;
		simple_lock(slock);
		uvm_lock_pageq();
	} else if (error) {
		pg = pgs[0];
		if (pg->pqflags & PQ_ANON) {
			swslot = pg->uanon->an_swslot;
		} else {
			swslot = uao_find_swslot(pg->uobject, pg->offset);
		}
		KASSERT(swslot);
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		KASSERT(pageout ^ ((pg->flags & PG_PAGEOUT) == 0));
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				slock = &pg->uanon->an_lock;
			} else {
				slock = &pg->uobject->vmobjlock;
			}
			simple_lock(slock);
			uvm_lock_pageq();
		}

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			if (!write) {
				KASSERT(!swap);
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					uvmexp.paging--;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
			}
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
			uvm_pageactivate(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * for async reads, this may be the first time the page
		 * is unlocked after being created, so we need to be sure
		 * the page is on a paging queue.
		 */

		if (!write) {
			uvm_pageactivate(pg);
		} else if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			uvmexp.paging--;
			pg->flags |= PG_RELEASED;
		}

		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			uvm_page_unbusy(&pg, 1);
			uvm_unlock_pageq();
			simple_unlock(slock);
		}
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(slock);
	} else {
		KASSERT(write && pageout);
		if (error) {
			uvm_swap_markbad(swslot, npages);
		}
	}
	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	pool_put(&bufpool, bp);
	splx(s);
}