/*	$NetBSD: uvm_bio.c,v 1.11 2001/03/19 00:29:04 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_uvmhist.h"

/*
 * uvm_bio.c: buffered i/o vnode mapping cache
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault __P((struct uvm_faultinfo *, vaddr_t,
			       vm_page_t *, int, int, vm_fault_t, vm_prot_t,
			       int));
static struct ubc_map *ubc_find_mapping __P((struct uvm_object *, voff_t));

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) (((((u_long)(uobj)) >> 8) + \
				 (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)
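/*
 * The hash key mixes the uvm_object pointer with the page index of
 * the offset, so that windows onto different ranges of the same
 * object scatter across the hash buckets.
 */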

#define UBC_QUEUE(offset) (&ubc_object.inactive[((offset) >> ubc_winshift) & \
					       (UBC_NQUEUES - 1)])
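/*
 * An idle window sits on the inactive queue selected by its
 * window-aligned offset.  With PMAP_PREFER there is one queue per
 * cache color, so a recycled window is reused only for offsets that
 * keep the same virtual-cache alignment as its kernel va.
 */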

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	int			refcount;	/* refcount on mapping */
	voff_t			writeoff;	/* overwrite offset */
	vsize_t			writelen;	/* overwrite len */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
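/*
 * Lifecycle note: a ubc_map whose refcount has dropped to zero stays
 * in the hash table while it sits on an inactive queue, so a later
 * ubc_alloc() of the same window can reuse it cheaply; the old
 * mapping is only torn down when ubc_alloc() recycles the ubc_map
 * for a different window.
 */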

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	ubc_fault,	/* fault */
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#ifdef PMAP_PREFER
int ubc_nqueues;
boolean_t ubc_release_unmap = FALSE;
#define UBC_NQUEUES ubc_nqueues
#define UBC_RELEASE_UNMAP ubc_release_unmap
#else
#define UBC_NQUEUES 1
#define UBC_RELEASE_UNMAP FALSE
#endif
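/*
 * UBC_RELEASE_UNMAP can only become true on PMAP_PREFER machines
 * (those with virtually-indexed caches); see ubc_release() for why
 * windows onto executable images are torn down eagerly there.
 */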

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	simple_lock_init(&ubc_object.uobj.vmobjlock);
	ubc_object.uobj.pgops = &ubc_pager;
	TAILQ_INIT(&ubc_object.uobj.memq);
	ubc_object.uobj.uo_npages = 0;
	ubc_object.uobj.uo_refs = UVM_OBJ_KERN;

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
				 M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	bzero(ubc_object.umap, ubc_nwins * sizeof(struct ubc_map));
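	/*
	 * PMAP_PREFER(), where it exists, rounds va up to the pmap's
	 * preferred virtual alignment.  The number of inactive queues
	 * is then the number of window-sized slots in one alignment
	 * period, i.e. one queue per cache color.
	 */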

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va);
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
	if (ubc_nqueues != 1) {
		ubc_release_unmap = TRUE;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
				     sizeof(struct ubc_inactive_head),
				     M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
				   &ubc_object.hashmask);
	if (ubc_object.hash == NULL)
		panic("ubc_init: failed to allocate hash table");
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}


/*
 * ubc_fault: fault routine for ubc mapping
 */
int
ubc_fault(ufi, ign1, ign2, ign3, ign4, fault_type, access_type, flags)
	struct uvm_faultinfo *ufi;
	vaddr_t ign1;
	vm_page_t *ign2;
	int ign3, ign4;
	vm_fault_t fault_type;
	vm_prot_t access_type;
	int flags;
{
	struct uvm_object *uobj;
	struct vnode *vp;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, rv, npages;
	struct vm_page *pgs[(1 << ubc_winshift) >> PAGE_SHIFT], *pg;
	UVMHIST_FUNC("ubc_fault");  UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */
	if (flags & PGO_LOCKED) {
#if 0
		return EBUSY;
#else
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
#endif
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;

	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
		    va, ubc_offset, access_type, 0);

	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	slot_offset = trunc_page(ubc_offset & (ubc_winsize - 1));

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;
	vp = (struct vnode *)uobj;
	KASSERT(uobj != NULL);

	npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
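	/*
	 * npages covers everything from the faulting page through the
	 * end of the window, so a single fault maps in the rest of
	 * the window as well.
	 */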

	/*
	 * XXXUBC
	 * if npages is more than 1 we have to be sure that
	 * we set PGO_OVERWRITE correctly.
	 */
	if (access_type == VM_PROT_WRITE) {
		npages = 1;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x "
		    "u_size 0x%x", slot_offset, umap->writeoff, umap->writelen,
		    vp->v_uvm.u_size);
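
	/*
	 * Set PGO_OVERWRITE if the faulting page's old contents don't
	 * need to be read in: either the page lies entirely within the
	 * pending write, or it extends past the end of the file.
	 */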
	if (access_type & VM_PROT_WRITE &&
	    slot_offset >= umap->writeoff &&
	    (slot_offset + PAGE_SIZE <= umap->writeoff + umap->writelen ||
	     slot_offset + PAGE_SIZE >= vp->v_uvm.u_size - umap->offset)) {
		UVMHIST_LOG(ubchist, "setting PGO_OVERWRITE", 0,0,0,0);
		flags |= PGO_OVERWRITE;
	} else {
		UVMHIST_LOG(ubchist, "NOT setting PGO_OVERWRITE", 0,0,0,0);
	}

	/* XXX be sure to zero any part of the page past EOF */

	/*
	 * XXX
	 * ideally we'd like to pre-fault all of the pages we're overwriting.
	 * so for PGO_OVERWRITE, we should call VOP_GETPAGES() with all of the
	 * pages in [writeoff, writeoff + writelen) instead of just the one.
	 */

	UVMHIST_LOG(ubchist, "getpages vp %p offset 0x%x npages %d",
		    uobj, umap->offset + slot_offset, npages, 0);

	error = VOP_GETPAGES(vp, umap->offset + slot_offset, pgs, &npages, 0,
	    access_type, 0, flags);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages,0,0);
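
	/*
	 * EAGAIN means VOP_GETPAGES() couldn't make progress;
	 * lbolt is awakened once a second, so sleep on it and retry.
	 */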
	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}
	if (npages == 0) {
		return 0;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0,0);
	simple_lock(&uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			rv = uobj->pgops->pgo_releasepg(pg, NULL);
			KASSERT(rv);
			continue;
		}
		KASSERT(access_type == VM_PROT_READ ||
			(pg->flags & PG_RDONLY) == 0);

		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();

		pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
			   VM_PROT_READ | VM_PROT_WRITE, access_type);
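		/*
		 * The mapping is entered with maximal protection;
		 * access_type tells the pmap which access to expect
		 * now, so pmaps that do mod/ref emulation may map the
		 * page read-only until the first write.
		 */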

		pg->flags &= ~(PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
	}
	simple_unlock(&uobj->vmobjlock);
	return 0;
}

/*
 * local functions
 */

struct ubc_map *
ubc_find_mapping(uobj, offset)
	struct uvm_object *uobj;
	voff_t offset;
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a buffer mapping
 */
void *
ubc_alloc(uobj, offset, lenp, flags)
	struct uvm_object *uobj;
	voff_t offset;
	vsize_t *lenp;
	int flags;
{
	int s;
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx filesize 0x%x",
		    uobj, offset, *lenp, ((struct uvm_vnode *)uobj)->u_size);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = min(*lenp, ubc_winsize - slot_offset);
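	/*
	 * umap_offset is the window-aligned base of the request and
	 * slot_offset the remainder within the window; the returned
	 * length is clamped so that a mapping never crosses a window
	 * boundary.
	 */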

	/*
	 * the vnode is always locked here, so we don't need to add a ref.
	 */

	s = splbio();

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any),
		 * add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}

		umap->uobj = uobj;
		umap->offset = umap_offset;

		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
				 umap, hash);

		va = (vaddr_t)(ubc_object.kva +
			       ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) &&
	    (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes vp %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p",
		    umap, umap->refcount,
		    ubc_object.kva + ((umap - ubc_object.umap) << ubc_winshift),
		    0);

	return ubc_object.kva +
		((umap - ubc_object.umap) << ubc_winshift) + slot_offset;
}
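
/*
 * A sketch of the intended calling sequence (illustrative only; the
 * real file system read/write paths differ in detail, and "vp", "uio"
 * and "rw" here are assumed caller state):
 *
 *	vsize_t bytes;
 *	void *win;
 *	int error;
 *
 *	while (uio->uio_resid > 0) {
 *		bytes = uio->uio_resid;
 *		win = ubc_alloc(&vp->v_uvm.u_obj, uio->uio_offset, &bytes,
 *		    rw == UIO_WRITE ? UBC_WRITE : 0);
 *		error = uiomove(win, bytes, uio);
 *		ubc_release(win, 0);
 *		if (error)
 *			break;
 *	}
 *
 * ubc_alloc() may shorten "bytes" to the end of the window, which is
 * why the transfer loops.
 */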


void
ubc_release(va, wlen)
	void *va;
	vsize_t wlen;
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	int s;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va,0,0,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);

	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (UBC_RELEASE_UNMAP &&
		    (((struct vnode *)uobj)->v_flag & VTEXT)) {
			vaddr_t va;

			/*
			 * if this file is the executable image of
			 * some process, that process will likely have
			 * the file mapped at an alignment other than
			 * what PMAP_PREFER() would like.  we'd like
			 * to have process text be able to use the
			 * cache even if someone is also reading the
			 * file, so invalidate mappings of such files
			 * as soon as possible.
			 */

			va = (vaddr_t)(ubc_object.kva +
			    ((umap - ubc_object.umap) << ubc_winshift));
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount,0,0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}


/*
 * ubc_flush: remove a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(uobj, start, end)
	struct uvm_object *uobj;
	voff_t start, end;
{
	struct ubc_map *umap;
	vaddr_t va;
	int s;
	UVMHIST_FUNC("ubc_flush");  UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
		    uobj, start, end,0);

	s = splbio();
	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj ||
		    umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
			       ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	simple_unlock(&ubc_object.uobj.vmobjlock);
	splx(s);
}
    552