/*	$NetBSD: uvm_vnode.c,v 1.66 2005/06/27 02:29:32 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.66 2005/06/27 02:29:32 thorpej Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>

/*
 * functions
 */

static void     uvn_detach(struct uvm_object *);
static int      uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
                        int, vm_prot_t, int, int);
static int      uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void     uvn_reference(struct uvm_object *);

static int      uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
                        int);

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
        NULL,                   /* pgo_init */
        uvn_reference,          /* pgo_reference */
        uvn_detach,             /* pgo_detach */
        NULL,                   /* pgo_fault */
        uvn_get,                /* pgo_get */
        uvn_put,                /* pgo_put */
};

/*
 * the ops!
 */

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object.  if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object.  if not already attached, attach and return the new VM obj.
 * the "accessprot" tells the maximum access to our pages that the
 * attaching thread wants.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that the uvm_object is the first thing in the vnode structure,
 *    so their pointers are equivalent.
 */
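/*
 * A minimal usage sketch (hypothetical caller, not from the original
 * source; the error handling shown is illustrative):
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uvn_attach(vp, VM_PROT_READ|VM_PROT_WRITE);
 *	if (uobj == NULL)
 *		return EINVAL;
 *	... use the object's pages ...
 *	(*uobj->pgops->pgo_detach)(uobj);
 */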

struct uvm_object *
uvn_attach(void *arg, vm_prot_t accessprot)
{
        struct vnode *vp = arg;
        struct uvm_object *uobj = &vp->v_uobj;
        struct vattr vattr;
        const struct bdevsw *bdev;
        int result;
        struct partinfo pi;
        voff_t used_vnode_size;
        UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
        used_vnode_size = (voff_t)0;

        /*
         * first get a lock on the uobj.
         */

        simple_lock(&uobj->vmobjlock);
        while (vp->v_flag & VXLOCK) {
                vp->v_flag |= VXWANT;
                UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE,
                    "uvn_attach", 0);
                simple_lock(&uobj->vmobjlock);
                UVMHIST_LOG(maphist," WOKE UP",0,0,0,0);
        }

        /*
         * if we're mapping a BLK device, make sure it is a disk.
         */
        if (vp->v_type == VBLK) {
                bdev = bdevsw_lookup(vp->v_rdev);
                if (bdev == NULL || bdev->d_type != D_DISK) {
                        simple_unlock(&uobj->vmobjlock);
                        UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)",
                            0,0,0,0);
                        return(NULL);
                }
        }
        KASSERT(vp->v_type == VREG || vp->v_type == VBLK);

        /*
         * set up our idea of the size
         * if this hasn't been done already.
         */
        if (vp->v_size == VSIZENOTSET) {

                vp->v_flag |= VXLOCK;
                simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */
                /* XXX: curproc? */
                if (vp->v_type == VBLK) {
                        /*
                         * We could implement this as a specfs getattr call, but:
                         *
                         *      (1) VOP_GETATTR() would get the file system
                         *          vnode operation, not the specfs operation.
                         *
                         *      (2) All we want is the size, anyhow.
                         */
                        bdev = bdevsw_lookup(vp->v_rdev);
                        if (bdev != NULL) {
                                result = (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART,
                                    (caddr_t)&pi, FREAD, curproc);
                        } else {
                                result = ENXIO;
                        }
                        if (result == 0) {
                                /* XXX should remember blocksize */
                                used_vnode_size = (voff_t)pi.disklab->d_secsize *
                                    (voff_t)pi.part->p_size;
                        }
                } else {
                        result = VOP_GETATTR(vp, &vattr, curproc->p_ucred,
                            curproc);
                        if (result == 0)
                                used_vnode_size = vattr.va_size;
                }

                /* relock object */
                simple_lock(&uobj->vmobjlock);

                if (vp->v_flag & VXWANT) {
                        wakeup(vp);
                }
                vp->v_flag &= ~(VXLOCK|VXWANT);

                if (result != 0) {
                        simple_unlock(&uobj->vmobjlock);
                        UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)",
                            0,0,0,0);
                        return(NULL);
                }
                vp->v_size = used_vnode_size;
        }

        simple_unlock(&uobj->vmobjlock);
        UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount,
            0, 0, 0);
        return uobj;
}


/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
        VREF((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
        vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */
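/*
 * A minimal sketch of a synchronous flush-and-free of an entire vnode
 * (hypothetical caller).  An "offhi" of zero means "to the end of the
 * object" in the VOP_PUTPAGES convention, as uvm_vnp_setsize() below
 * relies on; note that v_interlock and v_uobj.vmobjlock name the same
 * lock for a vnode:
 *
 *	simple_lock(&vp->v_interlock);
 *	error = uvn_put(&vp->v_uobj, 0, 0, PGO_FREE | PGO_SYNCIO);
 *	(the object is unlocked on return)
 */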

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;

        LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
        error = VOP_PUTPAGES(vp, offlo, offhi, flags);
        LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
        return error;
}


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
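/*
 * A minimal sketch of a synchronous read of a single page (hypothetical
 * caller; "uobj" and "off" are assumed to be set up already):
 *
 *	struct vm_page *pg = NULL;
 *	int error, npages = 1;
 *
 *	simple_lock(&uobj->vmobjlock);
 *	error = uvn_get(uobj, trunc_page(off), &pg, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, 0);
 *	(the object is unlocked on return since PGO_LOCKED was not passed)
 */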

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED) */,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;
        UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
        error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
            access_type, advice, flags);
        return error;
}


/*
 * uvn_findpages:
 * return the pages for the uobj at the offsets requested, allocating
 * them if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */
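/*
 * A minimal usage sketch (hypothetical caller): the pgs[] array must be
 * zeroed first, because a non-NULL slot tells uvn_findpage() to skip
 * that offset:
 *
 *	struct vm_page *pgs[8];		(the size 8 is illustrative)
 *	int npages = 8;
 *
 *	memset(pgs, 0, sizeof(pgs));
 *	simple_lock(&uobj->vmobjlock);
 *	uvn_findpages(uobj, offset, &npages, pgs, UFP_ALL);
 *
 * on return, npages holds the number of slots examined and every
 * non-NULL entry in pgs[] is a busy page.
 */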

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
        int i, count, found, npages, rv;

        count = found = 0;
        npages = *npagesp;
        if (flags & UFP_BACKWARD) {
                for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (rv == 0) {
                                if (flags & UFP_DIRTYONLY)
                                        break;
                        } else
                                found++;
                        count++;
                }
        } else {
                for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (rv == 0) {
                                if (flags & UFP_DIRTYONLY)
                                        break;
                        } else
                                found++;
                        count++;
                }
        }
        *npagesp = count;
        return (found);
}

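/*
 * uvn_findpage:
 * find or allocate the page at "offset", subject to the UFP_* flags.
 *
 * => uobj must be locked; it stays locked except across sleeps.
 * => returns 1 with a PG_BUSY page stored in *pgp on success, 0 if the
 *    page was skipped (a UFP_* restriction applied) or unobtainable.
 * => a non-NULL *pgp on entry means the caller wants this slot skipped.
 */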
static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
        struct vm_page *pg;
        boolean_t dirty;
        UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
        UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

        if (*pgp != NULL) {
                UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
                return 0;
        }
        for (;;) {
                /* look for an existing page */
                pg = uvm_pagelookup(uobj, offset);

                /* nope?  allocate one now */
                if (pg == NULL) {
                        if (flags & UFP_NOALLOC) {
                                UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
                                return 0;
                        }
                        pg = uvm_pagealloc(uobj, offset, NULL, 0);
                        if (pg == NULL) {
                                if (flags & UFP_NOWAIT) {
                                        UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                        return 0;
                                }
                                simple_unlock(&uobj->vmobjlock);
                                uvm_wait("uvn_fp1");
                                simple_lock(&uobj->vmobjlock);
                                continue;
                        }
                        UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
                        break;
                } else if (flags & UFP_NOCACHE) {
                        UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
                        return 0;
                }

                /* page is there, see if we need to wait on it */
                if ((pg->flags & PG_BUSY) != 0) {
                        if (flags & UFP_NOWAIT) {
                                UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                return 0;
                        }
                        pg->flags |= PG_WANTED;
                        UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
                        UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
                            "uvn_fp2", 0);
                        simple_lock(&uobj->vmobjlock);
                        continue;
                }

                /* skip PG_RDONLY pages if requested */
                if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
                        UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
                        return 0;
                }

                /* stop on clean pages if requested */
                if (flags & UFP_DIRTYONLY) {
                        /*
                         * pmap_clear_modify() both tests and clears the
                         * pmap-level modified bit, so the page is dirty
                         * if the pmap saw a modification or if PG_CLEAN
                         * is already clear.
                         */
                        dirty = pmap_clear_modify(pg) ||
                                (pg->flags & PG_CLEAN) == 0;
                        pg->flags |= PG_CLEAN;
                        if (!dirty) {
                                UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
                                return 0;
                        }
                }

                /* mark the page BUSY and we're done. */
                pg->flags |= PG_BUSY;
                UVM_PAGE_OWN(pg, "uvn_findpage");
                UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
                break;
        }
        *pgp = pg;
        return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */
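/*
 * Typical calling pattern (a sketch; the truncate path shown is
 * hypothetical): a file system calls this whenever the file size
 * changes, e.g. from its truncate or write code:
 *
 *	uvm_vnp_setsize(vp, new_length);
 *
 * growing only records the new size; shrinking also frees the pages
 * past the new (page-rounded) end of file.
 */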

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
        struct uvm_object *uobj = &vp->v_uobj;
        voff_t pgend = round_page(newsize);
        UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

        simple_lock(&uobj->vmobjlock);
        UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
            vp, vp->v_size, newsize, 0);

        /*
         * now check if the size has changed: if we shrink we had better
         * toss some pages...
         */

        if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) {
                /* uvn_put() (via VOP_PUTPAGES) unlocks the object for us */
                (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
        } else {
                simple_unlock(&uobj->vmobjlock);
        }
        vp->v_size = newsize;
}

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */
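/*
 * A typical use (illustrative sketch): when truncating a file to a
 * length that is not page-aligned, a file system can zero the stale
 * tail of the final page:
 *
 *	uvm_vnp_zerorange(vp, newlen, round_page(newlen) - newlen);
 */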

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
        void *win;
        int flags;

        /*
         * XXXUBC invent kzero() and use it
         */

        while (len) {
                vsize_t bytelen = len;

                win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE);
                memset(win, 0, bytelen);
                flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
                ubc_release(win, flags);

                off += bytelen;
                len -= bytelen;
        }
}
