/* $NetBSD: uvm_vnode.c,v 1.46.2.9 2002/06/20 03:50:47 nathanw Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.46.2.9 2002/06/20 03:50:47 nathanw Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lwp.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>

/*
 * functions
 */

void    uvn_detach __P((struct uvm_object *));
int     uvn_get __P((struct uvm_object *, voff_t, struct vm_page **, int *, int,
            vm_prot_t, int, int));
int     uvn_put __P((struct uvm_object *, voff_t, voff_t, int));
void    uvn_reference __P((struct uvm_object *));

int     uvn_findpage __P((struct uvm_object *, voff_t, struct vm_page **, int));

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
        NULL,                   /* pgo_init */
        uvn_reference,          /* pgo_reference */
        uvn_detach,             /* pgo_detach */
        NULL,                   /* pgo_fault */
        uvn_get,                /* pgo_get */
        uvn_put,                /* pgo_put */
};

/*
 * the ops!
 */

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object.  if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object.  if not already attached, attach and return the new VM obj.
 * the "accessprot" tells the maximum access the attaching thread wants
 * to have to our pages.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that uvm_object is the first thing in the vnode structure,
 *    so the two pointers are equivalent.
 */

struct uvm_object *
uvn_attach(arg, accessprot)
        void *arg;
        vm_prot_t accessprot;
{
        struct vnode *vp = arg;
        struct uvm_object *uobj = &vp->v_uobj;
        struct vattr vattr;
        int result;
        struct partinfo pi;
        voff_t used_vnode_size;
        UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
        used_vnode_size = (voff_t)0;

        /*
         * first get a lock on the uobj.
         */

        simple_lock(&uobj->vmobjlock);
        while (vp->v_flag & VXLOCK) {
                vp->v_flag |= VXWANT;
                UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE,
                    "uvn_attach", 0);
                simple_lock(&uobj->vmobjlock);
                UVMHIST_LOG(maphist," WOKE UP",0,0,0,0);
        }

        /*
         * if we're mapping a BLK device, make sure it is a disk.
         */
        if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
                simple_unlock(&uobj->vmobjlock);
                UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0);
                return(NULL);
        }
        KASSERT(vp->v_type == VREG || vp->v_type == VBLK);

        /*
         * set up our idea of the size
         * if this hasn't been done already.
         */
        if (vp->v_size == VSIZENOTSET) {
                vp->v_flag |= VXLOCK;
                simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */
                /* XXX: curproc? */
                if (vp->v_type == VBLK) {
                        /*
                         * We could implement this as a specfs getattr call, but:
                         *
                         *      (1) VOP_GETATTR() would get the file system
                         *          vnode operation, not the specfs operation.
                         *
                         *      (2) All we want is the size, anyhow.
                         */
                        result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
                            DIOCGPART, (caddr_t)&pi, FREAD, curproc->l_proc);
                        if (result == 0) {
                                /* XXX should remember blocksize */
                                used_vnode_size = (voff_t)pi.disklab->d_secsize *
                                    (voff_t)pi.part->p_size;
                        }
                } else {
                        result = VOP_GETATTR(vp, &vattr, curproc->l_proc->p_ucred,
                            curproc->l_proc);
                        if (result == 0)
                                used_vnode_size = vattr.va_size;
                }

                /* relock object */
                simple_lock(&uobj->vmobjlock);

                if (vp->v_flag & VXWANT) {
                        wakeup(vp);
                }
                vp->v_flag &= ~(VXLOCK|VXWANT);

                if (result != 0) {
                        simple_unlock(&uobj->vmobjlock);
                        UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
                        return(NULL);
                }
                vp->v_size = used_vnode_size;
        }

        simple_unlock(&uobj->vmobjlock);
        UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount,
            0, 0, 0);
        return uobj;
}
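
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller (e.g. a file system's mmap path) might attach the vnode's
 * VM object like this ("vp" and the error value chosen here are
 * assumptions for the example):
 *
 *      struct uvm_object *uobj;
 *
 *      uobj = uvn_attach(vp, VM_PROT_READ|VM_PROT_WRITE);
 *      if (uobj == NULL)
 *              return (ENXIO);
 *
 * per the code above, uvn_attach() returns NULL if the vnode's size
 * cannot be determined or if a VBLK vnode does not refer to a disk.
 */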


/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

void
uvn_reference(uobj)
        struct uvm_object *uobj;
{
        VREF((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

void
uvn_detach(uobj)
        struct uvm_object *uobj;
{
        vrele((struct vnode *)uobj);
}
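
/*
 * Illustrative sketch, not part of the original file: uvn references
 * are vnode references, so each uvn_attach() or uvn_reference() must
 * eventually be balanced by a uvn_detach(), which does the vrele():
 *
 *      uvn_reference(uobj);    (e.g. when duplicating a mapping)
 *      ...
 *      uvn_detach(uobj);
 */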

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

int
uvn_put(uobj, offlo, offhi, flags)
        struct uvm_object *uobj;
        voff_t offlo;
        voff_t offhi;
        int flags;
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;

        LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
        error = VOP_PUTPAGES(vp, offlo, offhi, flags);
        LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
        return error;
}
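
/*
 * Illustrative sketch, not part of the original file: synchronously
 * flushing one page's worth of data through the pager ("off" is an
 * assumed page-aligned offset):
 *
 *      simple_lock(&uobj->vmobjlock);
 *      error = uvn_put(uobj, off, off + PAGE_SIZE,
 *          PGO_CLEANIT | PGO_SYNCIO);
 *
 * note that the object lock is released by VOP_PUTPAGES before
 * return, per the comment above.
 */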


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

int
uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
        struct uvm_object *uobj;
        voff_t offset;
        struct vm_page **pps;           /* IN/OUT */
        int *npagesp;                   /* IN (OUT if PGO_LOCKED) */
        int centeridx;
        vm_prot_t access_type;
        int advice, flags;
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;
        UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
        error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
            access_type, advice, flags);
        return error;
}
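
/*
 * Illustrative sketch, not part of the original file: fetching a single
 * page for read access, roughly as the fault code would ("off" is an
 * assumed page-aligned offset):
 *
 *      struct vm_page *pg = NULL;
 *      int npages = 1;
 *
 *      simple_lock(&uobj->vmobjlock);
 *      error = uvn_get(uobj, off, &pg, &npages, 0, VM_PROT_READ,
 *          UVM_ADV_NORMAL, 0);
 *
 * on success the page comes back BUSY; the object lock was dropped
 * before the I/O was started.
 */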


/*
 * uvn_findpages:
 * return the pages for the uobj and offsets requested, allocating them
 * if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(uobj, offset, npagesp, pgs, flags)
        struct uvm_object *uobj;
        voff_t offset;
        int *npagesp;
        struct vm_page **pgs;
        int flags;
{
        int i, count, found, npages, rv;

        count = found = 0;
        npages = *npagesp;
        if (flags & UFP_BACKWARD) {
                for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (rv == 0) {
                                if (flags & UFP_DIRTYONLY)
                                        break;
                        } else
                                found++;
                        count++;
                }
        } else {
                for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (rv == 0) {
                                if (flags & UFP_DIRTYONLY)
                                        break;
                        } else
                                found++;
                        count++;
                }
        }
        *npagesp = count;
        return (found);
}

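/*
 * Illustrative sketch, not part of the original file: gathering a run
 * of busy pages the way the generic page-cache code might, with the
 * array zeroed first so every slot gets filled in:
 *
 *      struct vm_page *pgs[8];
 *      int npages = 8;
 *
 *      memset(pgs, 0, sizeof(pgs));
 *      simple_lock(&uobj->vmobjlock);
 *      found = uvn_findpages(uobj, off, &npages, pgs, UFP_ALL);
 *
 * afterwards npages holds the number of offsets examined and found the
 * number of pages returned BUSY in pgs[].
 */

/*
 * uvn_findpage:
 * find or allocate the single page at "offset" on behalf of
 * uvn_findpages(), honoring the UFP_* flags.
 *
 * => uobj must be locked; it may be dropped and re-taken if we sleep.
 * => returns 1 with a BUSY page in *pgp, or 0 if no page is returned.
 */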
int
uvn_findpage(uobj, offset, pgp, flags)
        struct uvm_object *uobj;
        voff_t offset;
        struct vm_page **pgp;
        int flags;
{
        struct vm_page *pg;
        boolean_t dirty;
        UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
        UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

        if (*pgp != NULL) {
                UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
                return 0;
        }
        for (;;) {
                /* look for an existing page */
                pg = uvm_pagelookup(uobj, offset);

                /* nope?  allocate one now */
                if (pg == NULL) {
                        if (flags & UFP_NOALLOC) {
                                UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
                                return 0;
                        }
                        pg = uvm_pagealloc(uobj, offset, NULL, 0);
                        if (pg == NULL) {
                                if (flags & UFP_NOWAIT) {
                                        UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                        return 0;
                                }
                                simple_unlock(&uobj->vmobjlock);
                                uvm_wait("uvn_fp1");
                                simple_lock(&uobj->vmobjlock);
                                continue;
                        }
                        if (UVM_OBJ_IS_VTEXT(uobj)) {
                                uvmexp.execpages++;
                        } else {
                                uvmexp.filepages++;
                        }
                        UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
                        break;
                } else if (flags & UFP_NOCACHE) {
                        UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
                        return 0;
                }

                /* page is there, see if we need to wait on it */
                if ((pg->flags & PG_BUSY) != 0) {
                        if (flags & UFP_NOWAIT) {
                                UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                return 0;
                        }
                        pg->flags |= PG_WANTED;
                        UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
                        UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
                            "uvn_fp2", 0);
                        simple_lock(&uobj->vmobjlock);
                        continue;
                }

                /* skip PG_RDONLY pages if requested */
                if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
                        UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
                        return 0;
                }

                /* stop on clean pages if requested */
                if (flags & UFP_DIRTYONLY) {
                        dirty = pmap_clear_modify(pg) ||
                            (pg->flags & PG_CLEAN) == 0;
                        pg->flags |= PG_CLEAN;
                        if (!dirty) {
                                UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
                                return 0;
                        }
                }

                /* mark the page BUSY and we're done. */
                pg->flags |= PG_BUSY;
                UVM_PAGE_OWN(pg, "uvn_findpage");
                UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
                break;
        }
        *pgp = pg;
        return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *      vnode in question so that it will not be yanked out from under
 *      us.
 */

void
uvm_vnp_setsize(vp, newsize)
        struct vnode *vp;
        voff_t newsize;
{
        struct uvm_object *uobj = &vp->v_uobj;
        voff_t pgend = round_page(newsize);
        UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

        simple_lock(&uobj->vmobjlock);
        UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
            vp, vp->v_size, newsize, 0);

        /*
         * now check if the size has changed: if we shrink we had better
         * toss some pages...
         */

        if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) {
                /* an offhi of 0 means "to the end of the object" */
                (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
        } else {
                simple_unlock(&uobj->vmobjlock);
        }
        vp->v_size = newsize;
}
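
/*
 * Illustrative sketch, not part of the original file: a file system's
 * truncate path would typically adjust its own notion of the file size
 * and then inform UVM ("newsize" supplied by the caller):
 *
 *      uvm_vnp_setsize(vp, newsize);
 *
 * growing just records the new size; shrinking also frees the pages
 * past the new end of file, per the comment above.
 */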

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(vp, off, len)
        struct vnode *vp;
        off_t off;
        size_t len;
{
        void *win;

        /*
         * XXXUBC invent kzero() and use it
         */

        while (len) {
                vsize_t bytelen = len;

                win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE);
                memset(win, 0, bytelen);
                ubc_release(win, 0);

                off += bytelen;
                len -= bytelen;
        }
}
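
/*
 * Illustrative sketch, not part of the original file: zeroing from
 * "off" to the end of its file system block, assuming a power-of-two
 * block size "bsize" (both names are hypothetical):
 *
 *      uvm_vnp_zerorange(vp, off, bsize - (off & (bsize - 1)));
 */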