/*	$NetBSD: uvm_vnode.c,v 1.90 2008/01/02 11:49:21 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.90 2008/01/02 11:49:21 ad Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};
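
/*
 * Illustrative sketch, not part of the original file: generic UVM code
 * reaches these operations through the object's pgops pointer rather
 * than by calling the uvn_* functions directly, so it need not know
 * that the object is backed by a vnode.  "vp" below is assumed to be
 * a held vnode.
 */
#if 0
	struct uvm_object *uobj = &vp->v_uobj;

	(*uobj->pgops->pgo_reference)(uobj);	/* take another reference */
	/* ... use the object ... */
	(*uobj->pgops->pgo_detach)(uobj);	/* and drop it again */
#endif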

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed-in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	VREF((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(&vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
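
/*
 * Illustrative sketch, not from the original file: a typical flush of a
 * page range through this entry point.  The caller enters v_interlock
 * (the object lock) and VOP_PUTPAGES releases it before returning.
 * "off" and "len" are assumptions and taken to be page-aligned here.
 */
#if 0
	mutex_enter(&vp->v_interlock);
	error = VOP_PUTPAGES(vp, off, off + len, PGO_CLEANIT | PGO_SYNCIO);
	/* v_interlock has already been released by VOP_PUTPAGES */
#endif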


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
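
/*
 * Illustrative sketch, not from the original file: fetching one page
 * synchronously through pgo_get.  Without PGO_LOCKED the object is
 * unlocked before the call returns, and on success pgs[0] comes back
 * busy; the caller must unbusy it when done.  "uobj" and "offset" are
 * assumptions.
 */
#if 0
	struct vm_page *pgs[1];
	int npages = 1;
	int error;

	memset(pgs, 0, sizeof(pgs));
	mutex_enter(&uobj->vmobjlock);
	error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
#endif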


/*
 * uvn_findpages:
 * return the pages for the uobj at the offsets requested, allocating if
 * needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
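
/*
 * Illustrative sketch, not from the original file: gathering a run of
 * busy pages with uvn_findpages.  UFP_ALL selects the default behaviour
 * (wait for busy pages, allocate missing ones), so every slot is filled.
 * The array must be zeroed first since non-NULL slots are skipped.
 * "MAXPAGES" and "offset" are placeholders.
 */
#if 0
	struct vm_page *pgs[MAXPAGES];
	int npages = MAXPAGES;

	memset(pgs, 0, sizeof(pgs));
	mutex_enter(&uobj->vmobjlock);
	(void) uvn_findpages(uobj, offset, &npages, pgs, UFP_ALL);
	/* pgs[0 .. npages-1] now hold the pages, each marked PG_BUSY */
#endif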
236 1.8 mrg
237 1.66 thorpej static int
238 1.65 thorpej uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
239 1.65 thorpej int flags)
240 1.37 chs {
241 1.37 chs struct vm_page *pg;
242 1.79 thorpej bool dirty;
243 1.37 chs UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
244 1.37 chs UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);
245 1.8 mrg
246 1.37 chs if (*pgp != NULL) {
247 1.37 chs UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
248 1.37 chs return 0;
249 1.37 chs }
250 1.37 chs for (;;) {
251 1.37 chs /* look for an existing page */
252 1.37 chs pg = uvm_pagelookup(uobj, offset);
253 1.37 chs
254 1.52 chs /* nope? allocate one now */
255 1.37 chs if (pg == NULL) {
256 1.37 chs if (flags & UFP_NOALLOC) {
257 1.37 chs UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
258 1.37 chs return 0;
259 1.37 chs }
260 1.47 chs pg = uvm_pagealloc(uobj, offset, NULL, 0);
261 1.37 chs if (pg == NULL) {
262 1.37 chs if (flags & UFP_NOWAIT) {
263 1.37 chs UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
264 1.37 chs return 0;
265 1.8 mrg }
266 1.90 ad mutex_exit(&uobj->vmobjlock);
267 1.37 chs uvm_wait("uvn_fp1");
268 1.90 ad mutex_enter(&uobj->vmobjlock);
269 1.37 chs continue;
270 1.47 chs }
271 1.52 chs UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
272 1.37 chs break;
273 1.37 chs } else if (flags & UFP_NOCACHE) {
274 1.37 chs UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
275 1.37 chs return 0;
276 1.8 mrg }
277 1.8 mrg
278 1.37 chs /* page is there, see if we need to wait on it */
279 1.52 chs if ((pg->flags & PG_BUSY) != 0) {
280 1.37 chs if (flags & UFP_NOWAIT) {
281 1.37 chs UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
282 1.37 chs return 0;
283 1.37 chs }
284 1.37 chs pg->flags |= PG_WANTED;
285 1.58 enami UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
286 1.37 chs UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
287 1.37 chs "uvn_fp2", 0);
288 1.90 ad mutex_enter(&uobj->vmobjlock);
289 1.37 chs continue;
290 1.8 mrg }
291 1.49 chs
292 1.37 chs /* skip PG_RDONLY pages if requested */
293 1.37 chs if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
294 1.37 chs UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
295 1.37 chs return 0;
296 1.8 mrg }
297 1.8 mrg
298 1.52 chs /* stop on clean pages if requested */
299 1.52 chs if (flags & UFP_DIRTYONLY) {
300 1.52 chs dirty = pmap_clear_modify(pg) ||
301 1.52 chs (pg->flags & PG_CLEAN) == 0;
302 1.52 chs pg->flags |= PG_CLEAN;
303 1.52 chs if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss unneeded pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(&uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(&uobj->vmobjlock);
}
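
/*
 * Illustrative sketch, not from the original file: a filesystem's
 * truncate path typically updates its own notion of the file size and
 * then calls uvm_vnp_setsize() so that pages past the new EOF are
 * flushed and freed.  "ip" and "i_size" stand in for fs-private
 * metadata and are assumptions.
 */
#if 0
	ip->i_size = newsize;
	uvm_vnp_setsize(vp, newsize);
#endif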

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(&vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(&vp->v_interlock);
}
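
/*
 * Illustrative sketch, not from the original file: on an extending
 * write a file system might announce the eventual size first, so that
 * pages between the old and new EOF can be handled while the write is
 * in progress, and commit the real size only once the data is in
 * place.  The middle step is elided.
 */
#if 0
	uvm_vnp_setwritesize(vp, newsize);
	/* ... copy the user's data into the file's pages ... */
	uvm_vnp_setsize(vp, newsize);
#endif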

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	void *win;
	int flags;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
		    UBC_WRITE);
		memset(win, 0, bytelen);
		flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}
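
/*
 * Illustrative sketch, not from the original file: zeroing the part of
 * the last page that lies beyond a new, smaller EOF, as a truncate
 * implementation might do.  "newsize" is an assumption.
 */
#if 0
	if (newsize & PAGE_MASK)
		uvm_vnp_zerorange(vp, newsize,
		    round_page(newsize) - newsize);
#endif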

bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}