/*	$NetBSD: uvm_vnode.c,v 1.93.6.1 2011/06/06 09:10:24 jruoho Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.93.6.1 2011/06/06 09:10:24 jruoho Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
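
/*
 * Example (editorial sketch, not part of the original file): generic UVM
 * code never calls the uvn_* functions by name; it dispatches through a
 * uvm_object's pgops pointer, which for a vnode points at uvm_vnodeops.
 * A hypothetical caller flushing a range [lo, hi) might do roughly:
 *
 *	struct uvm_object *uobj = &vp->v_uobj;
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_put)(uobj, lo, hi,
 *	    PGO_CLEANIT | PGO_SYNCIO);
 *
 * pgo_put consumes the object lock; see uvn_put below.
 */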

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(&vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
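
/*
 * Example (editorial sketch, not in the original source): synchronously
 * flushing and freeing every page of a vnode through uvn_put().  The
 * object must be locked on entry and VOP_PUTPAGES unlocks it; note that
 * this file uses vp->v_interlock and uobj->vmobjlock interchangeably for
 * that lock.
 *
 *	mutex_enter(&vp->v_interlock);
 *	error = uvn_put(&vp->v_uobj, 0, 0,
 *	    PGO_ALLPAGES | PGO_FREE | PGO_SYNCIO);
 *
 * The object lock is no longer held at this point.
 */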


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
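
/*
 * Example (editorial sketch, with hypothetical "uobj" and "offset"):
 * fetching a single page the way a fault handler might, without
 * PGO_LOCKED, so the call may unlock the object and sleep for I/O:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *	int error;
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	error = uvn_get(uobj, offset, &pg, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 */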


/*
 * uvn_findpages:
 * return the pages for the uobj at the offsets requested, allocating
 * them if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 * => returns the number of pages found; *npagesp is updated to the
 *	number of array slots examined.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
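
/*
 * Example (editorial sketch): gathering the pages backing one 16 KiB
 * block in the style of the genfs I/O code, assuming 4 KiB pages.
 * UFP_ALL asks for every page, allocating as needed; the pages come
 * back BUSY and npages reports how many slots were examined:
 *
 *	struct vm_page *pgs[4];
 *	int npages = 4;
 *
 *	memset(pgs, 0, sizeof(pgs));
 *	mutex_enter(&uobj->vmobjlock);
 *	uvn_findpages(uobj, offset, &npages, pgs, UFP_ALL);
 *	mutex_exit(&uobj->vmobjlock);
 */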

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				mutex_enter(&uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			mutex_enter(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
				(pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(&uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(&uobj->vmobjlock);
}
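
/*
 * Worked example (editorial note): with 4 KiB pages, shrinking a vnode
 * from 12288 bytes to 1000 bytes gives pgend = round_page(1000) = 4096,
 * so the uvn_put(..., PGO_FREE | PGO_SYNCIO) call above flushes and
 * frees the pages at offsets 4096 and 8192, while the partially valid
 * page at offset 0 stays cached.
 */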

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(&vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(&vp->v_interlock);
}
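
/*
 * Example (editorial sketch, with hypothetical "oldend"/"newend"): the
 * intended pairing, as used by filesystem write paths that extend a
 * file.  Publish the size the file is about to grow to first, so pages
 * past the current EOF are accounted for, then commit the real size
 * once the data is in place (or roll back on error):
 *
 *	uvm_vnp_setwritesize(vp, newend);
 *	error = ... copy the data in ...
 *	uvm_vnp_setsize(vp, error ? oldend : newend);
 */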

/*
 * uvn_text_p: true if the vnode has (or recently had) executable
 * mappings.
 */
bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

/*
 * uvn_clean_p: true if the vnode has no pages queued for cleaning
 * (i.e. it is not on the syncer worklist).
 */
bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

/*
 * uvn_needs_writefault_p: true if writes to the vnode should take a
 * write fault, either because the vnode is clean (so the first
 * dirtying must be noticed) or because it is write-mapped but not yet
 * marked dirty.
 */
bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}
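
/*
 * Example (editorial sketch): generic code normally reaches these
 * predicates through wrapper macros in uvm_object.h (UVM_OBJ_IS_VTEXT
 * and friends, if memory serves) rather than calling them directly,
 * e.g. a hypothetical scan that skips objects with nothing to clean:
 *
 *	if (UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj))
 *		continue;	(nothing dirty, nothing to flush)
 */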