/*	$NetBSD: uvm_vnode.c,v 1.121 2024/04/05 13:05:41 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.121 2024/04/05 13:05:41 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_alloc_ractx(struct uvm_object *);
static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static void	uvn_markdirty(struct uvm_object *);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *a,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
	.pgo_markdirty = uvn_markdirty,
};

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}

/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}
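
/*
 * illustrative sketch (not taken from this file): generic UVM code does
 * not call uvn_reference()/uvn_detach() directly, it dispatches through
 * the pgo_* hooks in uvm_vnodeops, roughly:
 *
 *	struct uvm_object *uobj = &vp->v_uobj;
 *
 *	(*uobj->pgops->pgo_reference)(uobj);	(takes a vnode ref, vref)
 *	...
 *	(*uobj->pgops->pgo_detach)(uobj);	(drops it again, vrele)
 *
 * so holding a uvm_object reference on a vnode pager is simply holding
 * a reference on the vnode itself.
 */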

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(rw_write_held(uobj->vmobjlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}

/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)vp, offset,
	    0, 0);

	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0 && vp->v_tag != VT_TMPFS) {
		uvn_alloc_ractx(uobj);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	if (flags & PGO_LOCKED)
		KASSERT(rw_lock_held(uobj->vmobjlock));
	return error;
}

/*
 * uvn_markdirty: called when the object gains its first dirty page
 *
 * => uobj must be write locked.
 */

static void
uvn_markdirty(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	KASSERT(rw_write_held(uobj->vmobjlock));

	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	mutex_exit(vp->v_interlock);
}
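
/*
 * illustrative sketch (assumed caller, not defined here): a consumer
 * such as the genfs getpages/putpages code typically drives
 * uvn_findpages() along these lines:
 *
 *	struct vm_page *pgs[16];	(array size chosen for illustration)
 *	unsigned int npages = 16;
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	memset(pgs, 0, sizeof(pgs));
 *	uvn_findpages(uobj, startoff, &npages, pgs, NULL, UFP_ALL);
 *
 * on return the non-NULL entries of pgs[] are busy pages owned by the
 * caller, npages is the number of slots examined, and the return value
 * is the number of pages found.
 */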

/*
 * uvn_findpages:
 * return the page for the uobj and offset requested, allocating if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		/*
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, fillflags used by the caller
		 * might not match strictly with ours.
		 * in particular, the caller might have filled the array
		 * without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
		 */
		const unsigned int fillflags =
		    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
		    ((flags & UFP_DIRTYONLY) ?
		    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
		a = &a_store;
		uvm_page_array_init(a, uobj, fillflags);
	}
	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}
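
/*
 * illustrative note: the UFP_DIRTYONLY mode above stops counting at the
 * first clean or missing page, so a writeback-style caller can measure
 * a contiguous dirty run without busying anything, e.g.
 *
 *	npages = maxpages;		(maxpages: caller's own limit)
 *	memset(pgs, 0, sizeof(pgs));
 *	uvn_findpages(uobj, off, &npages, pgs, NULL,
 *	    UFP_NOWAIT | UFP_NOALLOC | UFP_NOBUSY | UFP_DIRTYONLY);
 *
 * after which npages is the length (in pages) of the dirty run starting
 * at off.  UFP_NOBUSY requires UFP_NOWAIT and UFP_NOALLOC, as asserted
 * in uvn_findpage() below.
 */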

/*
 * uvn_findpage: find a single page
 *
 * if a suitable page was found, put it in *pgp and return 1.
 * otherwise return 0.
 */

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)uobj, offset,
	    0, 0);

	/*
	 * NOBUSY must come with NOWAIT and NOALLOC.  if NOBUSY is
	 * specified, this may be called with a reader lock.
	 */

	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOWAIT) != 0);
	KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOALLOC) != 0);
	KASSERT((flags & UFP_NOBUSY) != 0 || rw_write_held(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 */
		pg = uvm_page_array_fill_and_peek(a, offset, nleft);
		if (pg != NULL && pg->offset != offset) {
			struct vm_page __diagused *tpg;
			KASSERT(
			    ((a->ar_flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			KASSERT((tpg = uvm_pagelookup(uobj, offset)) == NULL ||
			    ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
			    !uvm_obj_page_dirty_p(tpg)));
			pg = NULL;
			if ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
				UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
				return 0;
			}
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				rw_exit(uobj->vmobjlock);
				uvm_wait("uvnfp1");
				uvm_page_array_clear(a);
				rw_enter(uobj->vmobjlock, RW_WRITER);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			uvm_pagewait(pg, uobj->vmobjlock, "uvnfp2");
			uvm_page_array_clear(a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		if ((flags & UFP_NOBUSY) == 0) {
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "uvn_findpage");
		}
		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
		    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

 skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

 skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}
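
/*
 * illustrative note: a caller that scans many consecutive offsets may
 * pass its own uvm_page_array so that successive uvn_findpage() calls
 * reuse one batched object-tree lookup instead of starting over each
 * time, in the same way uvn_findpages() manages its local array when
 * handed a NULL one:
 *
 *	struct uvm_page_array a;
 *
 *	uvm_page_array_init(&a, uobj, 0);
 *	... repeated uvn_findpages(uobj, off, &npages, pgs, &a, flags) ...
 *	uvm_page_array_fini(&a);
 *
 * the fill flags passed to uvm_page_array_init() must then be kept
 * consistent with the UFP_* flags, as the XXX comment in uvn_findpages()
 * warns.
 */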

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	UVMHIST_LOG(ubchist, "vp %#jx old %#jx new %#jx",
	    (uintptr_t)vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(newsize >= 0);
	KASSERTMSG(vp->v_size <= vp->v_writesize, "vp=%p"
	    " v_size=0x%llx v_writesize=0x%llx", vp,
	    (unsigned long long)vp->v_size,
	    (unsigned long long)vp->v_writesize);
	KASSERTMSG((vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size),
	    "vp=%p v_size=0x%llx v_writesize=0x%llx newsize=0x%llx",
	    vp,
	    (unsigned long long)vp->v_size,
	    (unsigned long long)vp->v_writesize,
	    (unsigned long long)newsize);

	oldsize = vp->v_writesize;

	/*
	 * check whether size shrinks
	 * if old size hasn't been set, there are no pages to drop
	 * if there was an integer overflow in pgend, then this is no shrink
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		rw_enter(uobj->vmobjlock, RW_WRITER);
	}
	mutex_enter(vp->v_interlock);
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(uobj->vmobjlock);
}

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(newsize >= 0);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERTMSG(vp->v_size <= vp->v_writesize, "vp=%p"
	    " v_size=0x%llx v_writesize=0x%llx newsize=0x%llx", vp,
	    (unsigned long long)vp->v_size,
	    (unsigned long long)vp->v_writesize,
	    (unsigned long long)newsize);
	KASSERTMSG(vp->v_size <= newsize, "vp=%p"
	    " v_size=0x%llx v_writesize=0x%llx newsize=0x%llx", vp,
	    (unsigned long long)vp->v_size,
	    (unsigned long long)vp->v_writesize,
	    (unsigned long long)newsize);
	mutex_enter(vp->v_interlock);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
}
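
/*
 * illustrative sketch (assumed filesystem code, not part of this file):
 * a filesystem's write path typically brackets an extending write with
 * these two calls, roughly:
 *
 *	uvm_vnp_setwritesize(vp, newsize);	(before copying in data)
 *	... allocate blocks, copy data, update the inode ...
 *	uvm_vnp_setsize(vp, newsize);		(once the size is final)
 *
 * so that v_writesize records the size the file is growing towards
 * while v_size still reflects what has actually been written.
 */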

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	int iflag;

	/*
	 * v_interlock is not held here, but VI_EXECMAP is only ever changed
	 * with the vmobjlock held too.
	 */
	iflag = atomic_load_relaxed(&vp->v_iflag);
	return (iflag & VI_EXECMAP) != 0;
}

static void
uvn_alloc_ractx(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	struct uvm_ractx *ra = NULL;

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}
	if (vp->v_ractx == NULL) {
		rw_exit(uobj->vmobjlock);
		ra = uvm_ra_allocctx();
		rw_enter(uobj->vmobjlock, RW_WRITER);
		if (ra != NULL && vp->v_ractx == NULL) {
			vp->v_ractx = ra;
			ra = NULL;
		}
	}
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}