/*	$NetBSD: uvm_vnode.c,v 1.109 2020/03/14 20:23:51 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.109 2020/03/14 20:23:51 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_page_array.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_alloc_ractx(struct uvm_object *);
static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     unsigned int, struct uvm_page_array *a,
			     unsigned int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
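
/*
 * How these ops are reached: every vnode embeds a uvm_object (v_uobj)
 * whose pgops pointer is set to &uvm_vnodeops when the vnode is set
 * up, so generic UVM code calls into this file through the ops table
 * without knowing it is dealing with a vnode.  An illustrative sketch
 * (not compiled; assumes the caller already holds a vnode reference):
 *
 *	struct uvm_object *uobj = &vp->v_uobj;
 *	(*uobj->pgops->pgo_reference)(uobj);	-- ends up in uvn_reference
 *	...
 *	(*uobj->pgops->pgo_detach)(uobj);	-- ends up in uvn_detach
 */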

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}
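
/*
 * A uvn has no reference count of its own: the count is simply the
 * vnode's use count, so pgo_reference and pgo_detach map directly
 * onto vref() and vrele() above.
 */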

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(rw_write_held(uobj->vmobjlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
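
/*
 * The asymmetric lock protocol above is easy to trip over: the caller
 * enters with vmobjlock write-held, and VOP_PUTPAGES releases it, so
 * the lock is no longer held on return.  An illustrative sketch (not
 * compiled) of flushing and freeing a whole object, relying on
 * offhi == 0 meaning "to the end of the object", as the
 * uvm_vnp_setsize() call below does:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	error = (*uobj->pgops->pgo_put)(uobj, 0, 0, PGO_FREE | PGO_SYNCIO);
 *	-- vmobjlock is released here; re-take it before touching pages
 */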


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset,
	    0, 0);

	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0) {
		uvn_alloc_ractx(uobj);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
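
/*
 * Read-ahead above is requested only for regular files, only for read
 * accesses, and only outside the PGO_LOCKED (locked fault) path, where
 * dropping the object lock to allocate a read-ahead context is not
 * allowed.  The request length is the page count converted to bytes:
 * with 4 KiB pages, a 16 page get becomes a 64 KiB read-ahead window
 * (16 << PAGE_SHIFT == 65536).
 */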


/*
 * uvn_findpages:
 * return, in pgs, the pages for the uobj starting at the requested
 * offset, allocating them if needed.  stores the number of slots
 * examined in *npagesp and returns the number of pages found.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp,
    struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags)
{
	unsigned int count, found, npages;
	int i, rv;
	struct uvm_page_array a_store;

	if (a == NULL) {
		a = &a_store;
		uvm_page_array_init(a);
	}
	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    i + 1);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags, a,
			    npages - i);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	if (a == &a_store) {
		uvm_page_array_fini(a);
	}
	*npagesp = count;
	return (found);
}
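
#if 0
/*
 * Illustrative usage sketch (not compiled).  Gather up to 8 pages
 * starting at "off"; pgs[] must be pre-zeroed, since a non-NULL slot
 * tells uvn_findpage to skip that offset.  UFP_ALL asks for every
 * page, allocating and waiting as needed.  Found pages come back
 * BUSY and must later be released with uvm_page_unbusy().
 */
	struct vm_page *pgs[8];
	unsigned int npages = 8;

	memset(pgs, 0, sizeof(pgs));
	rw_enter(uobj->vmobjlock, RW_WRITER);
	(void)uvn_findpages(uobj, off, &npages, pgs, NULL, UFP_ALL);
	/* ... use the pages ... */
	uvm_page_unbusy(pgs, npages);
	rw_exit(uobj->vmobjlock);
#endif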

/*
 * uvn_findpage: find a single page
 *
 * if a suitable page was found, put it in *pgp and return 1.
 * otherwise return 0.
 */

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
{
	struct vm_page *pg;
	const unsigned int fillflags =
	    ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) |
	    ((flags & UFP_DIRTYONLY) ?
	    (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0);
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
	    0, 0);

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		goto skip_offset;
	}
	for (;;) {
		/*
		 * look for an existing page.
		 *
		 * XXX fragile API
		 * note that the array can be the one supplied by the caller of
		 * uvn_findpages.  in that case, fillflags used by the caller
		 * might not match strictly with ours.
		 * in particular, the caller might have filled the array
		 * without DENSE but passed us UFP_DIRTYONLY (thus DENSE).
		 */
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
		if (pg != NULL && pg->offset != offset) {
			KASSERT(
			    ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0)
			    == (pg->offset < offset));
			KASSERT(uvm_pagelookup(uobj, offset) == NULL
			    || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 &&
			    radix_tree_get_tag(&uobj->uo_pages,
			    offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0));
			pg = NULL;
			if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) {
				UVMHIST_LOG(ubchist, "dense", 0,0,0,0);
				return 0;
			}
		}

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				rw_exit(uobj->vmobjlock);
				uvm_wait("uvnfp1");
				uvm_page_array_clear(a);
				rw_enter(uobj->vmobjlock, RW_WRITER);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			KASSERTMSG(uvm_pagegetdirty(pg) ==
			    UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			goto skip;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				goto skip;
			}
			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
			uvm_pagewait(pg, uobj->vmobjlock, "uvnfp2");
			uvm_page_array_clear(a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			goto skip;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			const bool dirty = uvm_pagecheckdirty(pg, false);
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
		    (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0);
		uvm_page_array_advance(a);
		break;
	}
	*pgp = pg;
	return 1;

 skip_offset:
	/*
	 * skip this offset
	 */
	pg = uvm_page_array_peek(a);
	if (pg != NULL) {
		if (pg->offset == offset) {
			uvm_page_array_advance(a);
		} else {
			KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0);
		}
	}
	return 0;

 skip:
	/*
	 * skip this page
	 */
	KASSERT(pg != NULL);
	uvm_page_array_advance(a);
	return 0;
}
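
/*
 * Note the retry discipline above: whenever uvn_findpage sleeps
 * (uvm_wait() when the free page pool is exhausted, uvm_pagewait() on
 * a PG_BUSY page) it has dropped vmobjlock, so any pages cached in the
 * uvm_page_array may have been freed or replaced in the meantime.
 * uvm_page_array_clear() throws the stale cache away before the lock
 * is re-taken and the lookup restarts from scratch.
 */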

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx",
	    (uintptr_t)vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;

	/*
	 * check whether size shrinks
	 * if old size hasn't been set, there are no pages to drop
	 * if there was an integer overflow in pgend, then this is no shrink
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		rw_enter(uobj->vmobjlock, RW_WRITER);
	}
	mutex_enter(vp->v_interlock);
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(uobj->vmobjlock);
}
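
/*
 * Worked example for the shrink path, assuming 4 KiB pages: shrinking
 * a 20000 byte vnode to 5000 bytes gives pgend = round_page(5000) =
 * 8192, so the uvn_put() call frees every page at offset 8192 and up
 * (offhi == 0 meaning "to the end of the object"), while the partially
 * valid page covering [4096, 8192) stays resident.  The lock is
 * re-entered afterwards because VOP_PUTPAGES released it.
 */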

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	mutex_enter(vp->v_interlock);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
}

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	/*
	 * v_interlock is not held here, but VI_EXECMAP is only ever changed
	 * with the vmobjlock held too.
	 */
	return (vp->v_iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{

	return radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
	    UVM_PAGE_DIRTY_TAG);
}

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	/*
	 * v_interlock is not held here, but VI_WRMAP and VI_WRMAPDIRTY are
	 * only ever changed with the vmobjlock held too, or when it's known
	 * the uvm_object contains no pages (VI_PAGES clear).
	 */
	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}
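
/*
 * In other words (an interpretation of the predicate above): a write
 * fault must be taken, rather than entering a writable mapping up
 * front, while the object is still clean or while it is write-mapped
 * but not yet flagged dirty.  Trapping that first store is what lets
 * the kernel note that the vnode has become dirty and needs to be
 * flushed later.
 */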

static void
uvn_alloc_ractx(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;
	struct uvm_ractx *ra = NULL;

	KASSERT(rw_write_held(uobj->vmobjlock));

	if (vp->v_type != VREG) {
		return;
	}
	if (vp->v_ractx != NULL) {
		return;
	}

	/*
	 * drop the object lock to allocate, then re-check: another
	 * thread may have installed a context while we slept.
	 */
	rw_exit(uobj->vmobjlock);
	ra = uvm_ra_allocctx();
	rw_enter(uobj->vmobjlock, RW_WRITER);
	if (ra != NULL && vp->v_ractx == NULL) {
		vp->v_ractx = ra;
		ra = NULL;
	}
	if (ra != NULL) {
		uvm_ra_freectx(ra);
	}
}