/*	$NetBSD: uvm_vnode.c,v 1.113 2020/05/19 22:22:15 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.113 2020/05/19 22:22:15 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_alloc_ractx(struct uvm_object *);
static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static void	uvn_markdirty(struct uvm_object *);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *a,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
	.pgo_markdirty = uvn_markdirty,
};

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(rw_write_held(uobj->vmobjlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}

/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset,
	    0, 0);

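	/*
	 * trigger read-ahead only for read faults on regular files where
	 * we are allowed to block (not PGO_LOCKED).  tmpfs is excluded:
	 * its pages are already in memory, so there is no backing store
	 * to read ahead from.
	 */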
	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0 && vp->v_tag != VT_TMPFS) {
		uvn_alloc_ractx(uobj);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}

/*
 * uvn_markdirty: called when the object gains its first dirty page
 *
 * => uobj must be write locked.
 */

static void
uvn_markdirty(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	KASSERT(rw_write_held(uobj->vmobjlock));

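	/*
	 * put the vnode on the syncer worklist so that its dirty pages
	 * get flushed to backing store within "filedelay" seconds.
	 */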
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	mutex_exit(vp->v_interlock);
}

/*
 * uvn_findpages:
 * return the pages for the uobj at the offsets requested, allocating
 * them if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY (unless UFP_NOBUSY was specified).
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

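	/*
	 * if the caller didn't supply a page array for batched radix
	 * tree lookups, use a transient one on the stack.
	 */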
	if (a == NULL) {
		a = &a_store;
		uvm_page_array_init(a);
	}
	count = found = 0;
	npages = *npagesp;
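	/*
	 * on return, *npagesp is the number of slots scanned; with
	 * UFP_DIRTYONLY the scan stops early at the first clean page.
	 */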
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}

/*
 * uvn_findpage: find a single page
 *
 * if a suitable page was found, put it in *pgp and return 1.
 * otherwise return 0.
 */

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	const unsigned int fillflags =
	    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
	    ((flags & UFP_DIRTYONLY) ?
	    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
	    0, 0);

	/*
	 * NOBUSY must come with NOWAIT and NOALLOC.  if NOBUSY is
	 * specified, this may be called with a reader lock.
	 */

	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOWAIT) != 0);
	KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOALLOC) != 0);
	KASSERT((flags & UFP_NOBUSY) != 0 || rw_write_held(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 *
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, fillflags used by the caller
		 * might not match strictly with ours.
		 * in particular, the caller might have filled the array
		 * without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
		 */
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
		if (pg != NULL && pg->offset != offset) {
			KASSERT(
			    ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			KASSERT(uvm_pagelookup(uobj, offset) == NULL
			    || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
			    radix_tree_get_tag(&uobj->uo_pages,
			    offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
			pg = NULL;
			if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
				UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
				return 0;
			}
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				rw_exit(uobj->vmobjlock);
				uvm_wait("uvnfp1");
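				/*
				 * the object lock was dropped while
				 * sleeping, so pages cached in the array
				 * may be stale; discard them and refill
				 * on the next iteration.
				 */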
				uvm_page_array_clear(a);
				rw_enter(uobj->vmobjlock, RW_WRITER);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			uvm_pagewait(pg, uobj->vmobjlock, "uvnfp2");
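			/*
			 * uvm_pagewait released the object lock, so the
			 * cached array contents are stale; clear the array
			 * and retry with the lock reacquired.
			 */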
			uvm_page_array_clear(a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		if ((flags & UFP_NOBUSY) == 0) {
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "uvn_findpage");
		}
		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
		    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx",
	    (uintptr_t)vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;

	/*
	 * check whether the size shrinks.
	 * if the old size hasn't been set, there are no pages to drop.
	 * if there was an integer overflow in pgend, this is not a shrink.
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		rw_enter(uobj->vmobjlock, RW_WRITER);
	}
	mutex_enter(vp->v_interlock);
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(uobj->vmobjlock);
}

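/*
 * uvm_vnp_setwritesize: record the size the file will have once pending
 * writes complete (v_writesize only ever grows here); v_size itself is
 * updated afterwards via uvm_vnp_setsize.
 *
 * => we assume the caller holds a reference to the vnode.
 */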
void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	mutex_enter(vp->v_interlock);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
}

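/*
 * uvn_text_p: check whether the vnode is in use as executable text,
 * i.e. whether any executable mappings of it exist (VI_EXECMAP).
 */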
bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	int iflag;

	/*
	 * v_interlock is not held here, but VI_EXECMAP is only ever changed
	 * with the vmobjlock held too.
	 */
	iflag = atomic_load_relaxed(&vp->v_iflag);
	return (iflag & VI_EXECMAP) != 0;
}

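/*
 * uvn_clean_p: check whether the object is clean, i.e. no page in its
 * radix tree carries the dirty tag.
 */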
bool
uvn_clean_p(struct uvm_object *uobj)
{

	return radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
	    UVM_PAGE_DIRTY_TAG);
}

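/*
 * uvn_alloc_ractx: lazily allocate a read-ahead context for the vnode.
 *
 * => object must be write locked; it is temporarily unlocked here while
 *	the context is allocated.
 */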
static void
uvn_alloc_ractx(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	struct uvm_ractx *ra = NULL;

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}
	if (vp->v_ractx == NULL) {
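		/*
		 * drop the object lock to allocate, then re-check: another
		 * thread may have installed a context while we slept, in
		 * which case ours is freed below.
		 */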
		rw_exit(uobj->vmobjlock);
		ra = uvm_ra_allocctx();
		rw_enter(uobj->vmobjlock, RW_WRITER);
		if (ra != NULL && vp->v_ractx == NULL) {
			vp->v_ractx = ra;
			ra = NULL;
		}
	}
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}