/*	$NetBSD: uvm_pager.c,v 1.126 2020/06/25 09:58:44 jdolecek Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.126 2020/06/25 09:58:44 jdolecek Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif

size_t pager_map_size = PAGER_MAP_SIZE;

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock __cacheline_aligned;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static int emerg_ncolors;
static bool emerginuse;

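/*
 * uvm_pager_realloc_emerg: allocate (or grow) the emergency VA used by
 * the pagedaemon when pager_map is full.  it is sized for a MAXPHYS
 * transfer plus one page per free-page color, so a color-matched
 * mapping can always be carved out of it.
 */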
void
uvm_pager_realloc_emerg(void)
{
	vaddr_t new_emergva, old_emergva;
	int old_emerg_ncolors;

	if (__predict_true(emergva != 0 && emerg_ncolors >= uvmexp.ncolors))
		return;

	KASSERT(!emerginuse);

	new_emergva = uvm_km_alloc(kernel_map,
	    round_page(MAXPHYS) + ptoa(uvmexp.ncolors), ptoa(uvmexp.ncolors),
	    UVM_KMF_VAONLY);

	KASSERT(new_emergva != 0);

	old_emergva = emergva;
	old_emerg_ncolors = emerg_ncolors;

	/*
	 * re-coloring in late boot isn't supported anyway, so there
	 * is nothing to lock against here.
	 */
	if (0) /* XXX */
		mutex_enter(&pager_map_wanted_lock);

	emergva = new_emergva;
	emerg_ncolors = uvmexp.ncolors;
	wakeup(&old_emergva);

	if (0) /* XXX */
		mutex_exit(&pager_map_wanted_lock);

	if (old_emergva)
		uvm_km_free(kernel_map, old_emergva,
		    round_page(MAXPHYS) + ptoa(old_emerg_ncolors),
		    UVM_KMF_VAONLY);
}

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;

	uvm_pager_realloc_emerg();

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

#ifdef PMAP_DIRECT
/*
 * uvm_pagermapdirect: map a single page via the pmap's direct segment
 *
 * this is an abuse of pmap_direct_process(), since the kva is being grabbed
 * and no processing is taking place, but for now..
 */

static int
uvm_pagermapdirect(void *kva, size_t sz, void *cookie)
{

	KASSERT(sz == PAGE_SIZE);
	*(vaddr_t *)cookie = (vaddr_t)kva;
	return 0;
}
#endif

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = (curlwp == uvm.pagedaemon_lwp);
	const u_int first_color = VM_PGCOLOR(*pps);
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=%#jx, npages=%jd, first_color=%ju)",
		(uintptr_t)pps, npages, first_color, 0);

#ifdef PMAP_DIRECT
	/*
	 * for a single page the direct mapped segment can be used.
	 */

	if (npages == 1) {
		int error __diagused;
		KASSERT((pps[0]->flags & PG_BUSY) != 0);
		error = pmap_direct_process(VM_PAGE_TO_PHYS(pps[0]), 0,
		    PAGE_SIZE, uvm_pagermapdirect, &kva);
		KASSERT(error == 0);
		UVMHIST_LOG(maphist, "<- done, direct (KVA=%#jx)", kva,0,0,0);
		return kva;
	}
#endif

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = ptoa(npages);
	kva = 0;			/* let system choose VA */

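	/*
	 * ask for a va whose low-order page color matches the first
	 * page, so that the va<->pa color correspondence holds for
	 * the whole run of pages.
	 */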
	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET,
	    first_color, UVM_FLAG_COLORMATCH | UVM_FLAG_NOMERGE
	    | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
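		/*
		 * the pagedaemon must not sleep in uvm_map() (hence
		 * UVM_FLAG_NOWAIT above); on failure it falls back to
		 * the dedicated emergency va instead.
		 */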
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva + ptoa(first_color);
			/* The shift rounds MAXPHYS down to whole pages. */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva; npages != 0; npages--, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		// KASSERT(!((VM_PAGE_TO_PHYS(pp) ^ cva) & uvmexp.colormask));
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot, 0);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=%#jx)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = ptoa(npages);
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=%#jx, npages=%jd)", kva, npages,0,0);

#ifdef PMAP_DIRECT
	/*
	 * solitary pages are mapped directly.
	 */

	if (npages == 1) {
		UVMHIST_LOG(maphist,"<- done, direct", 0,0,0,0);
		return;
	}
#endif

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
	pmap_update(pmap_kernel());

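	/*
	 * masking off the per-color offset recovers the emergency va
	 * base; if that is where this mapping came from, just release
	 * it and wake any waiter.
	 */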
	if ((kva & ~ptoa(uvmexp.colormask)) == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		KASSERT(emerginuse);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

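/*
 * uvm_aio_aiodone_pages: finish up pages after an async i/o completes:
 * handle i/o errors, clear PG_FAKE on freshly-read pages, do the
 * pagedaemon accounting for pages that were being paged out, and
 * unbusy or free everything.
 */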
void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	krwlock_t *slock;
	int pageout_done;	/* number of PG_PAGEOUT pages processed */
	int swslot;
	int i;
	bool swap;
	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
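	/* a swap i/o is one on an anon-only page or on an aobj page. */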
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->flags & PG_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = uobj->vmobjlock;
		rw_enter(slock, RW_WRITER);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
#if defined(VMSWAP)
		bool anon_disposed = false; /* XXX gcc */
#endif /* defined(VMSWAP) */

		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %#jx", (uintptr_t)pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = pg->uobject->vmobjlock;
			} else {
				slock = pg->uanon->an_lock;
			}
			rw_enter(slock, RW_WRITER);
			anon_disposed = (pg->flags & PG_RELEASED) != 0;
			KASSERT(!anon_disposed || pg->uobject != NULL ||
			    pg->uanon->an_ref == 0);
		}
#endif /* defined(VMSWAP) */

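		/*
		 * a completed write means the page is no longer under
		 * writeback, so clear its writeback tag.
		 */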
		if (write && uobj != NULL) {
			KASSERT(radix_tree_get_tag(&uobj->uo_pages,
			    pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
			radix_tree_clear_tag(&uobj->uo_pages,
			    pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG);
		}

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
				uvm_pagelock(pg);
				uvm_pageactivate(pg);
				uvm_pageunlock(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot __diagused;
					oldslot = uao_set_swslot(pg->uobject,
						pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
						swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->flags |= PG_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
			uvm_pagelock(pg);
			uvm_pageenqueue(pg);
			uvm_pageunlock(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			atomic_inc_uint(&uvmexp.pdfreed);
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && anon_disposed) {
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				rw_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	if (pageout_done != 0) {
		uvm_pageout_done(pageout_done);
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		rw_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		if (error != ENOMEM) {
			atomic_add_int(&uvmexp.swpgonly, npages);
		}
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		atomic_dec_uint(&uvmexp.pdpending);
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */
void
uvm_aio_aiodone(struct buf *bp)
{
	const int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[howmany(MAXPHYS, MIN_PAGE_SIZE)];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %#jx", (uintptr_t)bp, 0,0,0);

	KASSERT(bp->b_bufsize <= MAXPHYS);
	KASSERT(npages <= __arraycount(pgs));

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;

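	/*
	 * translate the buffer's kva back to vm_page pointers before
	 * the pager_map mapping is torn down.
	 */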
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%jd] = %#jx", i,
		    (uintptr_t)pgs[i], 0, 0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv __diagused;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}