/*	$NetBSD: uvm_vnode.c,v 1.103 2017/10/28 00:37:13 pgoyette Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.103 2017/10/28 00:37:13 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

#ifdef UVMHIST
UVMHIST_DEFINE(ubchist);
#endif

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
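
/*
 * Generic UVM code never calls the uvn_* functions directly; it
 * dispatches through the uvm_pagerops hooks hung off the object.  A
 * minimal sketch of that dispatch follows (illustrative only, not part
 * of the pager; the helper name is hypothetical):
 */
#if 0	/* example only */
static void
example_ref_and_release(struct uvm_object *uobj)
{

	/* for a vnode-backed object, uobj->pgops == &uvm_vnodeops */
	(*uobj->pgops->pgo_reference)(uobj);	/* -> uvn_reference -> vref */
	/* ... use the object ... */
	(*uobj->pgops->pgo_detach)(uobj);	/* -> uvn_detach -> vrele */
}
#endif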

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
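
/*
 * The locking protocol above is easy to get wrong: the caller enters
 * holding the object lock, and VOP_PUTPAGES() releases it.  A minimal
 * caller sketch, synchronously cleaning a page range (illustrative
 * only; compare the real caller in uvm_vnp_setsize() below):
 */
#if 0	/* example only */
static int
example_flush_range(struct vnode *vp, voff_t lo, voff_t hi)
{
	struct uvm_object *uobj = &vp->v_uobj;
	int error;

	mutex_enter(uobj->vmobjlock);		/* pgo_put wants this held */
	error = (*uobj->pgops->pgo_put)(uobj, lo, hi,
	    PGO_CLEANIT | PGO_SYNCIO);		/* unlocks vmobjlock */
	return error;
}
#endif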


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	/* don't truncate the 64-bit offset to int for the %jx format */
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset,
	    0, 0);

	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
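
/*
 * A sketch of how a caller obtains one page through the pager
 * (illustrative only; the real callers are the fault handler and UBC,
 * and the helper name is hypothetical).  The object lock is taken
 * before the call and, for a !PGO_LOCKED request, is released by
 * VOP_GETPAGES() before any I/O:
 */
#if 0	/* example only */
static int
example_get_one_page(struct vnode *vp, voff_t off, struct vm_page **pgp)
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pgs[1] = { NULL };
	int npages = 1;
	int error;

	mutex_enter(uobj->vmobjlock);
	error = (*uobj->pgops->pgo_get)(uobj, off, pgs, &npages, 0,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);	/* unlocks */
	if (error == 0)
		*pgp = pgs[0];		/* returned page is PG_BUSY */
	return error;
}
#endif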


/*
 * uvn_findpages:
 * return the pages for the uobj and range requested, allocating them
 * if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
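
/*
 * A caller sketch (illustrative only; the helper name is hypothetical).
 * The pgs[] array must be NULL-initialized on entry, since
 * uvn_findpage() skips any slot that already holds a page.  On return,
 * *npagesp is the number of slots examined and each found slot holds a
 * PG_BUSY page:
 */
#if 0	/* example only */
static void
example_findpages(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pgs[4];
	int npages = 4;

	memset(pgs, 0, sizeof(pgs));
	mutex_enter(uobj->vmobjlock);
	(void)uvn_findpages(uobj, off, &npages, pgs, UFP_ALL);
	mutex_exit(uobj->vmobjlock);
	/* ... use the busy pages, then unbusy them ... */
}
#endif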

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
	    0, 0);

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				mutex_enter(uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %#jx (color %ju)",
			    (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "uvn_fp2", 0);
			mutex_enter(uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %#jx (color %ju)",
		    (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx",
	    (uintptr_t)vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;

	/*
	 * check whether size shrinks
	 * if old size hasn't been set, there are no pages to drop
	 * if there was an integer overflow in pgend, then this is no shrink
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(uobj->vmobjlock);
}
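
/*
 * Filesystems call this after the file's size has changed, e.g. at the
 * end of a truncate.  A minimal caller sketch (illustrative only; the
 * helper name is hypothetical):
 */
#if 0	/* example only */
static void
example_truncate_done(struct vnode *vp, off_t newlen)
{

	/*
	 * Update the uobj's notion of EOF; if newlen shrank the file,
	 * pages wholly past the new EOF are flushed and freed above.
	 */
	uvm_vnp_setsize(vp, newlen);
}
#endif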

/*
 * uvm_vnp_setwritesize: grow the vnode's write size in advance of a
 * write that will extend the file, so that pages between the current
 * EOF and the new EOF can be dirtied before v_size itself is updated.
 * v_size is then brought up to date by uvm_vnp_setsize() once the
 * write has completed.
 */

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET && newsize >= 0);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
}
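
/*
 * A sketch of the expected ordering on an extending write (illustrative
 * only; the real sequence lives in the filesystem write paths, and the
 * helper name is hypothetical):
 */
#if 0	/* example only */
static void
example_extending_write(struct vnode *vp, off_t newlen)
{

	uvm_vnp_setwritesize(vp, newlen);	/* before dirtying pages */
	/* ... copy the new data into the page cache, e.g. via UBC ... */
	uvm_vnp_setsize(vp, newlen);		/* after the data is in */
}
#endif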

/*
 * uvn_text_p: does the vnode have executable mappings?
 */
bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

/*
 * uvn_clean_p: is the vnode known to have no dirty pages, i.e. not on
 * the syncer's work list?
 */
bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

/*
 * uvn_needs_writefault_p: must write accesses be mapped read-only, so
 * that the first modification faults and can be noted?
 */
bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}