/*	$NetBSD: uvm_vnode.c,v 1.97.2.5 2012/02/17 08:18:57 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.5 2012/02/17 08:18:57 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
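
/*
 * Callers normally reach these ops through the generic pager hooks
 * rather than by name.  An illustrative sketch (ours, not code from
 * this file; "uobj" is any vnode-backed uvm_object):
 *
 *	struct uvm_object *uobj = &vp->v_uobj;
 *
 *	(*uobj->pgops->pgo_reference)(uobj);	equivalent to vref(vp)
 *	...
 *	(*uobj->pgops->pgo_detach)(uobj);	equivalent to vrele(vp)
 */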

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
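
/*
 * An illustrative sketch of a pgo_put call as generic code makes it
 * (hedged; "start", "end" and the flag choice are our example, not
 * taken from this file).  Note the locking contract: the object lock
 * is held on entry and VOP_PUTPAGES releases it.
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_put)(uobj, start, end,
 *	    PGO_CLEANIT | PGO_SYNCIO);
 *	(the object lock has been released here)
 */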


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
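
/*
 * An illustrative single-page pgo_get call (our sketch; the variable
 * names and the flag/advice choices are examples only):
 *
 *	struct vm_page *pgs[1] = { NULL };
 *	int npages = 1;
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(off), pgs,
 *	    &npages, 0, VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 *	(unless PGO_LOCKED was passed, the object lock is released)
 */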


/*
 * uvn_findpages:
 * return the pages for the uobj at the requested offsets, allocating
 * them if needed.  returns the number of pages found; on return,
 * *npagesp is set to the number of array slots examined.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		a = &a_store;
		uvm_page_array_init(a);
	}
	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}
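
/*
 * Sketch of typical use, modelled loosely on the genfs I/O code
 * (hedged, not verbatim; MAXPGS and startoff are our placeholders):
 * gather a run of busy pages before starting I/O on them.
 *
 *	unsigned int npages = MAXPGS;
 *	struct vm_page *pgs[MAXPGS];
 *
 *	memset(pgs, 0, sizeof(pgs));
 *	mutex_enter(uobj->vmobjlock);
 *	uvn_findpages(uobj, startoff, &npages, pgs, NULL, UFP_ALL);
 */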
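/*
 * uvn_findpage: find or allocate the single page at "offset", using
 * (and advancing) the page array "a" as a lookup cache.  "nleft" is
 * a hint of how many further pages the caller is interested in.
 * returns 1 with the BUSY page stored in *pgp, or 0 if the offset
 * was skipped.
 * => uobj must be locked; the lock may be dropped and re-taken to
 *	wait for memory or a busy page.
 */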
static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	const unsigned int fillflags =
	    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
	    ((flags & UFP_DIRTYONLY) ?
	    (UVM_PAGE_ARRAY_FILL_DIRTYONLY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 *
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, the fillflags used by the
		 * caller might not match ours strictly.
		 * in particular, the caller might have filled the array
		 * without DIRTYONLY or DENSE but passed us UFP_DIRTYONLY.
		 */
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
		if (pg != NULL && pg->offset != offset) {
			KASSERT(
			    ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			pg = NULL;
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				uvm_page_array_clear(a);
				mutex_enter(uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			uvm_page_array_clear(a);
			mutex_enter(uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) != 0 &&
		    (pg->flags & PG_RDONLY) != 0) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/*
		 * check for PG_PAGER1 requests
		 */
		if ((flags & UFP_NOPAGER1) != 0 &&
		    (pg->flags & PG_PAGER1) != 0) {
			UVMHIST_LOG(ubchist, "nopager1",0,0,0,0);
			goto skip;
		}
		if ((flags & UFP_ONLYPAGER1) != 0 &&
		    (pg->flags & PG_PAGER1) == 0) {
			UVMHIST_LOG(ubchist, "onlypager1",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);

			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p (color %u)",
		    pg, VM_PGCOLOR_BUCKET(pg), 0,0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(uobj->vmobjlock);
}
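
/*
 * An illustrative filesystem-side calling sequence (hedged; "ip" is
 * our example inode, the details vary per filesystem): update the
 * filesystem's own idea of the size first, then inform UVM.
 *
 *	ip->i_size = newsize;
 *	uvm_vnp_setsize(vp, newsize);
 */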

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
}
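
/*
 * v_writesize announces, ahead of time, the size that an in-progress
 * write will extend the file to, so that pages beyond the current
 * v_size may be entered; a later uvm_vnp_setsize() commits both
 * fields.  An illustrative write-path sketch (ours, hedged):
 *
 *	uvm_vnp_setwritesize(vp, newsize);	(before the copy)
 *	error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice, flags);
 *	uvm_vnp_setsize(vp, newsize);		(once the copy succeeds)
 */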

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

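/*
 * uvn_needs_writefault_p: true if write accesses to this vnode's
 * pages must be caught as faults: either the vnode is currently
 * clean (the first write has to put it on the syncer worklist), or
 * it is write-mapped but not yet marked as dirty-mapped (VI_WRMAP
 * set, VI_WRMAPDIRTY clear).
 */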
bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}