/*	$NetBSD: uvm_vnode.c,v 1.121 2024/04/05 13:05:41 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.121 2024/04/05 13:05:41 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_alloc_ractx(struct uvm_object *);
static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static void	uvn_markdirty(struct uvm_object *);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *a,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
	.pgo_markdirty = uvn_markdirty,
};
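
/*
 * Example (editorial sketch, not part of the original file): generic
 * UVM code does not call the uvn_* functions directly; it dispatches
 * through the object's pager-ops pointer, which for a vnode's uobj
 * points at uvm_vnodeops above.  Roughly:
 *
 *	struct uvm_object *uobj = &vp->v_uobj;
 *
 *	(*uobj->pgops->pgo_reference)(uobj);	-- lands in uvn_reference
 *	error = (*uobj->pgops->pgo_put)(uobj, lo, hi, PGO_CLEANIT);
 *
 * The member names follow the struct above; the surrounding call
 * sites are illustrative, not quoted from UVM.
 */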

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(rw_write_held(uobj->vmobjlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
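
/*
 * Usage sketch (editorial, hedged): a caller flushing a range of a
 * vnode's pages enters the object lock and calls the pgo_put op;
 * VOP_PUTPAGES consumes the lock, so it is gone by the time the call
 * returns:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	error = (*uobj->pgops->pgo_put)(uobj, lo, hi,
 *	    PGO_CLEANIT | PGO_SYNCIO);
 *	-- uobj->vmobjlock is no longer held here
 *
 * The offset range and flag combination are illustrative only.
 */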

/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)vp, offset,
	    0, 0);

	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0 && vp->v_tag != VT_TMPFS) {
		uvn_alloc_ractx(uobj);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

	if (flags & PGO_LOCKED)
		KASSERT(rw_lock_held(uobj->vmobjlock));
	return error;
}
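
/*
 * Usage sketch (editorial, hedged): fault-path callers conventionally
 * make a first pass with PGO_LOCKED to pick up resident pages without
 * sleeping, and on failure retry without PGO_LOCKED, letting the
 * pager drop the object lock before it starts I/O:
 *
 *	npages = 1;
 *	error = (*uobj->pgops->pgo_get)(uobj, trunc_page(off), &pg,
 *	    &npages, 0, VM_PROT_READ, UVM_ADV_NORMAL, PGO_LOCKED);
 *
 * This calling shape sketches the convention; it is not quoted from
 * any particular caller.
 */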

/*
 * uvn_markdirty: called when the object gains first dirty page
 *
 * => uobj must be write locked.
 */

static void
uvn_markdirty(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	KASSERT(rw_write_held(uobj->vmobjlock));

	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	mutex_exit(vp->v_interlock);
}

/*
 * uvn_findpages:
 * return the page for the uobj and offset requested, allocating if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		/*
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, fillflags used by the caller
		 * might not match strictly with ours.
		 * in particular, the caller might have filled the array
		 * without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
		 */
		const unsigned int fillflags =
		    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
		    ((flags & UFP_DIRTYONLY) ?
		    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
		a = &a_store;
		uvm_page_array_init(a, uobj, fillflags);
	}
	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}
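
/*
 * Usage sketch (editorial, hedged): callers such as the genfs
 * getpages code hand in a zeroed pgs[] array; slots that are already
 * non-NULL are skipped, and *npagesp comes back as the number of
 * slots examined:
 *
 *	struct vm_page *pgs[MAXPAGES];	-- MAXPAGES is hypothetical
 *	unsigned int npages = MAXPAGES;
 *
 *	memset(pgs, 0, sizeof(pgs));
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	nfound = uvn_findpages(uobj, startoff, &npages, pgs, NULL,
 *	    UFP_ALL);
 */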

/*
 * uvn_findpage: find a single page
 *
 * if a suitable page was found, put it in *pgp and return 1.
 * otherwise return 0.
 */

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)uobj, offset,
	    0, 0);

	/*
	 * NOBUSY must come with NOWAIT and NOALLOC.  if NOBUSY is
	 * specified, this may be called with a reader lock.
	 */

	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOWAIT) != 0);
	KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOALLOC) != 0);
	KASSERT((flags & UFP_NOBUSY) != 0 || rw_write_held(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 */
		pg = uvm_page_array_fill_and_peek(a, offset, nleft);
		if (pg != NULL && pg->offset != offset) {
			struct vm_page __diagused *tpg;
			KASSERT(
			    ((a->ar_flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			KASSERT((tpg = uvm_pagelookup(uobj, offset)) == NULL ||
				((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
				 !uvm_obj_page_dirty_p(tpg)));
			pg = NULL;
			if ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
				UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
				return 0;
			}
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				rw_exit(uobj->vmobjlock);
				uvm_wait("uvnfp1");
				uvm_page_array_clear(a);
				rw_enter(uobj->vmobjlock, RW_WRITER);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			uvm_pagewait(pg, uobj->vmobjlock, "uvnfp2");
			uvm_page_array_clear(a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		if ((flags & UFP_NOBUSY) == 0) {
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "uvn_findpage");
		}
		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
		    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

 skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

 skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	UVMHIST_LOG(ubchist, "vp %#jx old %#jx new %#jx",
	    (uintptr_t)vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(newsize >= 0);
	KASSERTMSG(vp->v_size <= vp->v_writesize, "vp=%p"
	    " v_size=0x%llx v_writesize=0x%llx", vp,
	    (unsigned long long)vp->v_size,
	    (unsigned long long)vp->v_writesize);
	KASSERTMSG((vp->v_size == vp->v_writesize ||
		newsize == vp->v_writesize || newsize <= vp->v_size),
	    "vp=%p v_size=0x%llx v_writesize=0x%llx newsize=0x%llx",
	    vp,
	    (unsigned long long)vp->v_size,
	    (unsigned long long)vp->v_writesize,
	    (unsigned long long)newsize);

	oldsize = vp->v_writesize;

	/*
	 * check whether size shrinks
	 * if old size hasn't been set, there are no pages to drop
	 * if there was an integer overflow in pgend, then this is no shrink
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		rw_enter(uobj->vmobjlock, RW_WRITER);
	}
	mutex_enter(vp->v_interlock);
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(uobj->vmobjlock);
}
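
/*
 * Usage sketch (editorial, hedged): a filesystem's truncate path
 * typically commits the new length in its own metadata first and then
 * publishes it here, so pages past the new EOF are flushed and freed
 * before anyone can touch them:
 *
 *	error = shrink_inode(ip, newlen);	-- hypothetical helper
 *	if (error == 0)
 *		uvm_vnp_setsize(vp, newlen);
 */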

/*
 * uvm_vnp_setwritesize: grow v_writesize ahead of an extending write.
 *
 * the final size is committed afterwards via uvm_vnp_setsize().
 */

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(newsize >= 0);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERTMSG(vp->v_size <= vp->v_writesize, "vp=%p"
	    " v_size=0x%llx v_writesize=0x%llx newsize=0x%llx", vp,
	    (unsigned long long)vp->v_size,
	    (unsigned long long)vp->v_writesize,
	    (unsigned long long)newsize);
	KASSERTMSG(vp->v_size <= newsize, "vp=%p"
	    " v_size=0x%llx v_writesize=0x%llx newsize=0x%llx", vp,
	    (unsigned long long)vp->v_size,
	    (unsigned long long)vp->v_writesize,
	    (unsigned long long)newsize);
	mutex_enter(vp->v_interlock);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
}
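
/*
 * Usage sketch (editorial, hedged): an extending write can publish
 * the eventual size up front with uvm_vnp_setwritesize(), then commit
 * it with uvm_vnp_setsize() once the copy succeeds; this keeps
 * v_size <= v_writesize as the asserts above require:
 *
 *	uvm_vnp_setwritesize(vp, vp->v_size + uio->uio_resid);
 *	error = do_the_copy(...);		-- hypothetical step
 *	uvm_vnp_setsize(vp, new_actual_size);
 */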

/*
 * uvn_text_p: report whether the vnode is mapped executable anywhere
 * (VI_EXECMAP), without taking v_interlock.
 */

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	int iflag;

	/*
	 * v_interlock is not held here, but VI_EXECMAP is only ever changed
	 * with the vmobjlock held too.
	 */
	iflag = atomic_load_relaxed(&vp->v_iflag);
	return (iflag & VI_EXECMAP) != 0;
}

static void
uvn_alloc_ractx(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	struct uvm_ractx *ra = NULL;

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}

	/*
	 * drop the object lock to allocate a read-ahead context, then
	 * re-check v_ractx: another thread may have installed one while
	 * the lock was released.  if we lost the race, free ours.
	 */
	rw_exit(uobj->vmobjlock);
	ra = uvm_ra_allocctx();
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (ra != NULL && vp->v_ractx == NULL) {
		vp->v_ractx = ra;
		ra = NULL;
	}
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}