/*	$NetBSD: uvm_vnode.c,v 1.97.6.1 2012/06/02 11:09:42 mrg Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.6.1 2012/06/02 11:09:42 mrg Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	vref((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}
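
/*
 * Example (an illustrative sketch, not part of the original source and
 * not compiled): generic UVM code does not call uvn_reference() or
 * uvn_detach() directly; it dispatches through the pager ops hung off
 * the object, which for a vnode-backed object is uvm_vnodeops above.
 * The example_* name is hypothetical.
 */
#if 0
static void
example_pgo_refcycle(struct uvm_object *uobj)
{

	/* take an extra reference on the backing vnode... */
	(*uobj->pgops->pgo_reference)(uobj);	/* uvn_reference -> vref() */
	/* ...and drop it again */
	(*uobj->pgops->pgo_detach)(uobj);	/* uvn_detach -> vrele() */
}
#endif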

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
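
/*
 * Example (an illustrative sketch, not compiled; example_* is a
 * hypothetical caller): the interlock handshake described above.  The
 * caller enters the interlock, and VOP_PUTPAGES (and hence uvn_put)
 * releases it before returning.
 */
#if 0
static int
example_flush_vnode(struct vnode *vp)
{
	int error;

	mutex_enter(vp->v_interlock);
	/* write back every page of the vnode, waiting for the I/O */
	error = VOP_PUTPAGES(vp, 0, 0,
	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
	/* the interlock is no longer held here */
	return error;
}
#endif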


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
	    && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
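
/*
 * Example (an illustrative sketch, not compiled; example_* is a
 * hypothetical caller): fetching one page synchronously through the
 * pager op.  The object is locked on entry and, because PGO_LOCKED is
 * not set, uvn_get unlocks it before starting I/O.
 */
#if 0
static int
example_read_one_page(struct uvm_object *uobj, voff_t off,
    struct vm_page **pgp)
{
	struct vm_page *pgs[1];
	int npages = 1;
	int error;

	memset(pgs, 0, sizeof(pgs));
	mutex_enter(uobj->vmobjlock);
	error = (*uobj->pgops->pgo_get)(uobj, off, pgs, &npages, 0,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
	/* the object lock has been dropped by now */
	if (error == 0)
		*pgp = pgs[0];
	return error;
}
#endif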


/*
 * uvn_findpages:
 * return the pages for the uobj and offset requested, allocating if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
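
/*
 * Example (an illustrative sketch, not compiled; example_* is a
 * hypothetical caller): looking up a run of pages with the object
 * locked.  The pages come back PG_BUSY, so the caller must unbusy
 * them when done; uvm_page_unbusy() handles any PG_WANTED wakeups
 * and skips NULL slots.
 */
#if 0
static void
example_findpages(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pgs[4];
	int npages = __arraycount(pgs);

	memset(pgs, 0, sizeof(pgs));
	mutex_enter(uobj->vmobjlock);
	uvn_findpages(uobj, off, &npages, pgs, UFP_ALL);
	/* ... use the busy pages ... */
	uvm_page_unbusy(pgs, npages);
	mutex_exit(uobj->vmobjlock);
}
#endif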

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL,
			    UVM_FLAG_COLORMATCH);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				mutex_enter(uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p (color %u)", pg,
			    VM_PGCOLOR_BUCKET(pg), 0,0);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "uvn_fp2", 0);
			mutex_enter(uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p (color %u)",
		    pg, VM_PGCOLOR_BUCKET(pg), 0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(uobj->vmobjlock);
}

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(vp->v_interlock);
}
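
/*
 * Example (an illustrative sketch, not compiled; example_* is a
 * hypothetical caller): the usual file-system pattern when extending
 * a file.  The eventual size is published first with
 * uvm_vnp_setwritesize() so that pages dirtied beyond the old EOF
 * remain flushable, and the final size is committed with
 * uvm_vnp_setsize() once the write has been carried out.
 */
#if 0
static void
example_extend(struct vnode *vp, voff_t newsize)
{

	uvm_vnp_setwritesize(vp, newsize);
	/* ... allocate backing blocks and copy in the new data ... */
	uvm_vnp_setsize(vp, newsize);
}
#endif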

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}