/*	$NetBSD: uvm_vnode.c,v 1.81.2.1 2007/03/13 17:51:58 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode_pager.c	8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81.2.1 2007/03/13 17:51:58 ad Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * functions
 */

static void     uvn_detach(struct uvm_object *);
static int      uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
                        int, vm_prot_t, int, int);
static int      uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void     uvn_reference(struct uvm_object *);

static int      uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
                             int);

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
        NULL,
        uvn_reference,
        uvn_detach,
        NULL,
        uvn_get,
        uvn_put,
};
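
/*
 * the initializers above are positional: assuming the field order of
 * struct uvm_pagerops in this era of the code (pgo_init, pgo_reference,
 * pgo_detach, pgo_fault, pgo_get, pgo_put), the two NULL slots are
 * pgo_init and pgo_fault, which the vnode pager does not implement.
 */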

/*
 * the ops!
 */

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object.  if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object.  if not already attached, attach and return the new VM obj.
 * the "accessprot" tells the max access the attaching thread wants to
 * have to our pages.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that uvm_object is first thing in vnode structure, so their
 *    pointers are equiv.
 */

struct uvm_object *
uvn_attach(void *arg, vm_prot_t accessprot)
{
        struct vnode *vp = arg;
        struct uvm_object *uobj = &vp->v_uobj;
        struct vattr vattr;
        const struct bdevsw *bdev;
        int result;
        struct partinfo pi;
        voff_t used_vnode_size;
        UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
        used_vnode_size = (voff_t)0;

        /*
         * first get a lock on the uobj.
         */

        mutex_enter(&uobj->vmobjlock);
        while (vp->v_flag & VXLOCK) {
                vp->v_flag |= VXWANT;
                UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, false,
                    "uvn_attach", 0);
                mutex_enter(&uobj->vmobjlock);
                UVMHIST_LOG(maphist," WOKE UP",0,0,0,0);
        }

        /*
         * if we're mapping a BLK device, make sure it is a disk.
         */
        if (vp->v_type == VBLK) {
                bdev = bdevsw_lookup(vp->v_rdev);
                if (bdev == NULL || bdev->d_type != D_DISK) {
                        mutex_exit(&uobj->vmobjlock);
                        UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)",
                            0,0,0,0);
                        return(NULL);
                }
        }
        KASSERT(vp->v_type == VREG || vp->v_type == VBLK);

        /*
         * set up our idea of the size
         * if this hasn't been done already.
         */
        if (vp->v_size == VSIZENOTSET) {

                vp->v_flag |= VXLOCK;
                mutex_exit(&uobj->vmobjlock); /* drop lock in case we sleep */
                /* XXX: curproc? */
                if (vp->v_type == VBLK) {
                        /*
                         * We could implement this as a specfs getattr call,
                         * but:
                         *
                         *      (1) VOP_GETATTR() would get the file system
                         *          vnode operation, not the specfs operation.
                         *
                         *      (2) All we want is the size, anyhow.
                         */
                        bdev = bdevsw_lookup(vp->v_rdev);
                        if (bdev != NULL) {
                                result = (*bdev->d_ioctl)(vp->v_rdev,
                                    DIOCGPART, (void *)&pi, FREAD, curlwp);
                        } else {
                                result = ENXIO;
                        }
                        if (result == 0) {
                                /* XXX should remember blocksize */
                                used_vnode_size =
                                    (voff_t)pi.disklab->d_secsize *
                                    (voff_t)pi.part->p_size;
                        }
                } else {
                        result = VOP_GETATTR(vp, &vattr, curlwp->l_cred,
                            curlwp);
                        if (result == 0)
                                used_vnode_size = vattr.va_size;
                }

                /* relock object */
                mutex_enter(&uobj->vmobjlock);

                if (vp->v_flag & VXWANT) {
                        wakeup(vp);
                }
                vp->v_flag &= ~(VXLOCK|VXWANT);

                if (result != 0) {
                        mutex_exit(&uobj->vmobjlock);
                        UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)",
                            0,0,0,0);
                        return(NULL);
                }
                vp->v_size = used_vnode_size;
        }

        mutex_exit(&uobj->vmobjlock);
        UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount,
            0, 0, 0);

        return uobj;
}
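
/*
 * illustrative sketch (assumed caller, not code from this file): a
 * mapping path would typically hold a reference on the vnode, attach,
 * and then map the returned object:
 *
 *	uobj = uvn_attach(vp, VM_PROT_READ | VM_PROT_WRITE);
 *	if (uobj == NULL)
 *		... fail: the device was not a disk, or its size
 *		    could not be determined ...
 *
 * a NULL return covers exactly the two error paths above.
 */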

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
        VREF((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
        vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;

        KERNEL_LOCK(1, curlwp);
        KASSERT(mutex_owned(&vp->v_interlock));
        error = VOP_PUTPAGES(vp, offlo, offhi, flags);
        KASSERT(!mutex_owned(&vp->v_interlock));
        KERNEL_UNLOCK_ONE(curlwp);

        return error;
}
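
/*
 * illustrative sketch (not code from this file): a flush of a page
 * range looks like the shrink case in uvm_vnp_setsize() below:
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	error = uvn_put(uobj, lo, hi, PGO_FREE | PGO_SYNCIO);
 *
 * the object lock is held on entry and has been dropped on return.
 */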

/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;

        UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
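
        /*
         * for read accesses outside the locked fault path, hand the
         * range to the read-ahead code before asking the file system
         * for pages.  the object lock is dropped around the request
         * (presumably because setting up the read-ahead context may
         * sleep to allocate).
         */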
        if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
                mutex_exit(&vp->v_interlock);
                vn_ra_allocctx(vp);
                uvm_ra_request(vp->v_ractx, advice, uobj, offset,
                    *npagesp << PAGE_SHIFT);
                mutex_enter(&vp->v_interlock);
        }

        KERNEL_LOCK(1, curlwp);
        error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
            access_type, advice, flags);
        KERNEL_UNLOCK_ONE(curlwp);

        KASSERT(((flags & PGO_LOCKED) != 0 &&
            mutex_owned(&vp->v_interlock)) ||
            ((flags & PGO_LOCKED) == 0 &&
            !mutex_owned(&vp->v_interlock)));
        return error;
}

/*
 * uvn_findpages:
 * return the pages for the uobj at the offsets requested, allocating
 * them if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 * => returns the number of pages found; *npagesp is updated to the
 *    number of slots scanned.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
        int i, count, found, npages, rv;

        count = found = 0;
        npages = *npagesp;
        if (flags & UFP_BACKWARD) {
                for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (rv == 0) {
                                if (flags & UFP_DIRTYONLY)
                                        break;
                        } else
                                found++;
                        count++;
                }
        } else {
                for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (rv == 0) {
                                if (flags & UFP_DIRTYONLY)
                                        break;
                        } else
                                found++;
                        count++;
                }
        }
        *npagesp = count;
        return (found);
}
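
/*
 * illustrative sketch (hypothetical caller; "MAXPGS" is made up here,
 * and UFP_ALL is assumed to be the "no restrictions" flag value):
 *
 *	struct vm_page *pgs[MAXPGS];
 *	int npages = MAXPGS, found;
 *
 *	memset(pgs, 0, sizeof(pgs));
 *	found = uvn_findpages(uobj, offset, &npages, pgs, UFP_ALL);
 *
 * slots that are non-NULL on entry are skipped ("dontcare" below),
 * and on return npages holds the number of slots scanned.
 */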

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
        struct vm_page *pg;
        bool dirty;
        UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
        UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

        if (*pgp != NULL) {
                UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
                return 0;
        }
        for (;;) {
                /* look for an existing page */
                pg = uvm_pagelookup(uobj, offset);

                /* nope?  allocate one now */
                if (pg == NULL) {
                        if (flags & UFP_NOALLOC) {
                                UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
                                return 0;
                        }
                        pg = uvm_pagealloc(uobj, offset, NULL, 0);
                        if (pg == NULL) {
                                if (flags & UFP_NOWAIT) {
                                        UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                        return 0;
                                }
                                mutex_exit(&uobj->vmobjlock);
                                uvm_wait("uvn_fp1");
                                mutex_enter(&uobj->vmobjlock);
                                continue;
                        }
                        UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
                        break;
                } else if (flags & UFP_NOCACHE) {
                        UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
                        return 0;
                }

                /* page is there, see if we need to wait on it */
                if ((pg->flags & PG_BUSY) != 0) {
                        if (flags & UFP_NOWAIT) {
                                UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                return 0;
                        }
                        pg->flags |= PG_WANTED;
                        UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
                        UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
                            "uvn_fp2", 0);
                        mutex_enter(&uobj->vmobjlock);
                        continue;
                }

                /* skip PG_RDONLY pages if requested */
                if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
                        UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
                        return 0;
                }

                /*
                 * stop on clean pages if requested: pmap_clear_modify()
                 * tests and clears the pmap-level modified bit, so once
                 * it has been sampled the page can be marked PG_CLEAN.
                 */
                if (flags & UFP_DIRTYONLY) {
                        dirty = pmap_clear_modify(pg) ||
                            (pg->flags & PG_CLEAN) == 0;
                        pg->flags |= PG_CLEAN;
                        if (!dirty) {
                                UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
                                return 0;
                        }
                }

                /* mark the page BUSY and we're done. */
                pg->flags |= PG_BUSY;
                UVM_PAGE_OWN(pg, "uvn_findpage");
                UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
                break;
        }
        *pgp = pg;
        return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss unneeded pages
 *
 * => we assume that the caller has a reference of some sort to the
 *    vnode in question so that it will not be yanked out from under
 *    us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
        struct uvm_object *uobj = &vp->v_uobj;
        voff_t pgend = round_page(newsize);
        voff_t oldsize;
        UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

        mutex_enter(&uobj->vmobjlock);
        UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
            vp, vp->v_size, newsize, 0);

        /*
         * now check if the size has changed: if we shrink we had better
         * toss some pages...
         */

        oldsize = vp->v_size;
        if (oldsize > pgend && oldsize != VSIZENOTSET) {
                (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
                mutex_enter(&uobj->vmobjlock);
        }
        vp->v_size = newsize;
        mutex_exit(&uobj->vmobjlock);
}
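
/*
 * note (assumption from the putpages convention, not stated in this
 * file): the 0 passed as offhi to uvn_put() above is interpreted by
 * genfs_putpages() as "flush to the end of the object", so the shrink
 * case tosses every page past the new end.  a typical caller is a
 * file system's truncate path, after it has committed the new size:
 *
 *	uvm_vnp_setsize(vp, new_size);
 */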

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
        void *win;
        int flags;

        /*
         * XXXUBC invent kzero() and use it
         */

        while (len) {
                vsize_t bytelen = len;

                win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
                    UBC_WRITE);
                memset(win, 0, bytelen);
                flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
                ubc_release(win, flags);

                off += bytelen;
                len -= bytelen;
        }
}
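
/*
 * illustrative sketch (hypothetical caller): a file system that has
 * just shrunk a file to "newsize" might zero the stale tail of the
 * last block, with "bsize" an assumed power-of-two block size:
 *
 *	if ((newsize & (bsize - 1)) != 0)
 *		uvm_vnp_zerorange(vp, newsize,
 *		    bsize - (newsize & (bsize - 1)));
 *
 * ubc_alloc() may shorten bytelen to the size of its mapping window,
 * which is why the zeroing above runs in a loop.
 */

/*
 * uvn_text_p: return true if the vnode is, or recently was, mapped
 * executable (i.e. VEXECMAP is still set).
 */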
bool
uvn_text_p(struct uvm_object *uobj)
{
        struct vnode *vp = (struct vnode *)uobj;

        return (vp->v_flag & VEXECMAP) != 0;
}
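
/*
 * uvn_clean_p: return true if the vnode has no dirty pages queued,
 * i.e. it is not on the syncer's work list (VONWORKLST clear).
 */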
bool
uvn_clean_p(struct uvm_object *uobj)
{
        struct vnode *vp = (struct vnode *)uobj;

        return (vp->v_flag & VONWORKLST) == 0;
}
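
/*
 * uvn_needs_writefault_p: return true if new mappings should initially
 * be entered read-only so that the first write takes a fault: either
 * the vnode is clean, or it is write-mapped but not yet marked
 * VWRITEMAPDIRTY (presumably so the first modification is observed
 * and the vnode can be queued for syncing).
 */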
bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
        struct vnode *vp = (struct vnode *)uobj;

        return uvn_clean_p(uobj) ||
            (vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP;
}