/* $NetBSD: uvm_vnode.c,v 1.46.2.6 2001/09/26 19:55:16 nathanw Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

/*
 * uvm_vnode.c: the vnode pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lwp.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>

/*
 * functions
 */

void    uvn_detach __P((struct uvm_object *));
int     uvn_get __P((struct uvm_object *, voff_t, struct vm_page **, int *,
            int, vm_prot_t, int, int));
int     uvn_put __P((struct uvm_object *, voff_t, voff_t, int));
void    uvn_reference __P((struct uvm_object *));

int     uvn_findpage __P((struct uvm_object *, voff_t, struct vm_page **,
            int));

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
        NULL,
        uvn_reference,
        uvn_detach,
        NULL,
        uvn_get,
        uvn_put,
};
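
/*
 * note: the NULL slots above are, positionally, the pgo_init and
 * pgo_fault hooks (this era's struct uvm_pagerops lists pgo_init,
 * pgo_reference, pgo_detach, pgo_fault, pgo_get, pgo_put in that
 * order); the vnode pager needs no pager-wide init and no custom
 * fault routine.
 */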

/*
 * the ops!
 */

/*
 * uvn_attach
 *
 * attach a vnode structure to a VM object.  if the vnode is already
 * attached, then just bump the reference count by one and return the
 * VM object.  if not already attached, attach and return the new VM
 * object.  the "accessprot" tells the max access the attaching thread
 * wants for our pages.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 * => note that uvm_object is first thing in vnode structure, so their
 *    pointers are equiv.
 */

struct uvm_object *
uvn_attach(arg, accessprot)
        void *arg;
        vm_prot_t accessprot;
{
        struct vnode *vp = arg;
        struct uvm_object *uobj = &vp->v_uobj;
        struct vattr vattr;
        int result;
        struct partinfo pi;
        voff_t used_vnode_size;
        UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
        used_vnode_size = (voff_t)0;

        /*
         * first get a lock on the uobj.
         */

        simple_lock(&uobj->vmobjlock);
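        /*
         * if the vnode is being cleaned out (VXLOCK, e.g. by vclean()),
         * wait for that to finish before attaching.
         */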
        while (vp->v_flag & VXLOCK) {
                vp->v_flag |= VXWANT;
                UVMHIST_LOG(maphist, "  SLEEPING on blocked vn",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE,
                    "uvn_attach", 0);
                simple_lock(&uobj->vmobjlock);
                UVMHIST_LOG(maphist,"  WOKE UP",0,0,0,0);
        }

        /*
         * if we're mapping a BLK device, make sure it is a disk.
         */
        if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
                simple_unlock(&uobj->vmobjlock);
                UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0);
                return(NULL);
        }
        KASSERT(vp->v_type == VREG || vp->v_type == VBLK);

        /*
         * set up our idea of the size
         * if this hasn't been done already.
         */
        if (vp->v_size == VSIZENOTSET) {

                vp->v_flag |= VXLOCK;
                simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */
                /* XXX: curproc? */
                if (vp->v_type == VBLK) {
                        /*
                         * We could implement this as a specfs getattr call,
                         * but:
                         *
                         *      (1) VOP_GETATTR() would get the file system
                         *          vnode operation, not the specfs operation.
                         *
                         *      (2) All we want is the size, anyhow.
                         */
                        result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
                            DIOCGPART, (caddr_t)&pi, FREAD, curproc->l_proc);
                        if (result == 0) {
                                /* XXX should remember blocksize */
                                used_vnode_size = (voff_t)pi.disklab->d_secsize *
                                    (voff_t)pi.part->p_size;
                        }
                } else {
                        result = VOP_GETATTR(vp, &vattr, curproc->l_proc->p_ucred,
                            curproc->l_proc);
                        if (result == 0)
                                used_vnode_size = vattr.va_size;
                }

                /* relock object */
                simple_lock(&uobj->vmobjlock);

                if (vp->v_flag & VXWANT) {
                        wakeup(vp);
                }
                vp->v_flag &= ~(VXLOCK|VXWANT);

                if (result != 0) {
                        simple_unlock(&uobj->vmobjlock);
                        UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)",
                            0,0,0,0);
                        return(NULL);
                }
                vp->v_size = used_vnode_size;
        }

        simple_unlock(&uobj->vmobjlock);
        UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount,
            0, 0, 0);
        return uobj;
}
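
/*
 * typical use (a sketch, cf. uvm_mmap() in uvm_mmap.c): attach the
 * vnode before entering it into a map, e.g.
 *
 *      uobj = uvn_attach((void *)vp, maxprot);
 *      if (uobj == NULL)
 *              return (EINVAL);
 */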


/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed-in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

void
uvn_reference(uobj)
        struct uvm_object *uobj;
{
        VREF((struct vnode *)uobj);
}


/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

void
uvn_detach(uobj)
        struct uvm_object *uobj;
{
        vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

int
uvn_put(uobj, offlo, offhi, flags)
        struct uvm_object *uobj;
        voff_t offlo;
        voff_t offhi;
        int flags;
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;

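        /*
         * note: vp->v_interlock and uobj->vmobjlock name the same lock:
         * the uvm_object is embedded in the vnode as v_uobj, and
         * <sys/vnode.h> defines v_interlock in terms of it.
         */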
        LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
        error = VOP_PUTPAGES(vp, offlo, offhi, flags);
        LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
        return error;
}


/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

int
uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
        struct uvm_object *uobj;
        voff_t offset;
        struct vm_page **pps;           /* IN/OUT */
        int *npagesp;                   /* IN (OUT if PGO_LOCKED) */
        int centeridx;
        vm_prot_t access_type;
        int advice, flags;
{
        struct vnode *vp = (struct vnode *)uobj;
        int error;
        UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

        UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
        error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
            access_type, advice, flags);
        return error;
}


/*
 * uvn_findpages:
 * return the pages for the uobj at the offsets requested, allocating
 * them if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 * => on return, *npagesp is the number of slots processed.
 */

void
uvn_findpages(uobj, offset, npagesp, pgs, flags)
        struct uvm_object *uobj;
        voff_t offset;
        int *npagesp;
        struct vm_page **pgs;
        int flags;
{
        int i, count, npages, rv;

        count = 0;
        npages = *npagesp;
        if (flags & UFP_BACKWARD) {
                for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (flags & UFP_DIRTYONLY && rv == 0) {
                                break;
                        }
                        count++;
                }
        } else {
                for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
                        if (flags & UFP_DIRTYONLY && rv == 0) {
                                break;
                        }
                        count++;
                }
        }
        *npagesp = count;
}
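
/*
 * typical use (a sketch; genfs_getpages() works along these lines):
 * find or allocate the busy pages backing a locked-down range:
 *
 *      npages = (endoffset - startoffset) >> PAGE_SHIFT;
 *      memset(pgs, 0, sizeof(pgs));
 *      uvn_findpages(uobj, startoffset, &npages, pgs, UFP_ALL);
 */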

int
uvn_findpage(uobj, offset, pgp, flags)
        struct uvm_object *uobj;
        voff_t offset;
        struct vm_page **pgp;
        int flags;
{
        struct vm_page *pg;
        boolean_t dirty;
        UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
        UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

        if (*pgp != NULL) {
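                /* the caller already has a page in this slot: leave it be */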
                UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
                return 0;
        }
        for (;;) {
                /* look for an existing page */
                pg = uvm_pagelookup(uobj, offset);

                /* nope?  allocate one now */
                if (pg == NULL) {
                        if (flags & UFP_NOALLOC) {
                                UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
                                return 0;
                        }
                        pg = uvm_pagealloc(uobj, offset, NULL, 0);
                        if (pg == NULL) {
                                if (flags & UFP_NOWAIT) {
                                        UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                        return 0;
                                }
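                                /*
                                 * no free pages: unlock, let uvm_wait()
                                 * sleep until the pagedaemon frees some,
                                 * then retry the lookup.
                                 */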
                                simple_unlock(&uobj->vmobjlock);
                                uvm_wait("uvn_fp1");
                                simple_lock(&uobj->vmobjlock);
                                continue;
                        }
                        if (UVM_OBJ_IS_VTEXT(uobj)) {
                                uvmexp.vtextpages++;
                        } else {
                                uvmexp.vnodepages++;
                        }
                        UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
                        break;
                } else if (flags & UFP_NOCACHE) {
                        UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
                        return 0;
                }

                /* page is there, see if we need to wait on it */
                if ((pg->flags & PG_BUSY) != 0) {
                        if (flags & UFP_NOWAIT) {
                                UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                                return 0;
                        }
                        pg->flags |= PG_WANTED;
                        UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
                            "uvn_fp2", 0);
                        simple_lock(&uobj->vmobjlock);
                        continue;
                }

                /* skip PG_RDONLY pages if requested */
                if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
                        UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
                        return 0;
                }

                /* stop on clean pages if requested */
                if (flags & UFP_DIRTYONLY) {
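                        /*
                         * a page counts as dirty if its pmap modify bit
                         * is set or PG_CLEAN is clear; clearing the modify
                         * bit and setting PG_CLEAN here restarts dirty
                         * tracking from this point on.
                         */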
                        dirty = pmap_clear_modify(pg) ||
                            (pg->flags & PG_CLEAN) == 0;
                        pg->flags |= PG_CLEAN;
                        if (!dirty) {
                                return 0;
                        }
                }

                /* mark the page BUSY and we're done. */
                pg->flags |= PG_BUSY;
                UVM_PAGE_OWN(pg, "uvn_findpage");
                UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
                break;
        }
        *pgp = pg;
        return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *      vnode in question so that it will not be yanked out from under
 *      us.
 */

void
uvm_vnp_setsize(vp, newsize)
        struct vnode *vp;
        voff_t newsize;
{
        struct uvm_object *uobj = &vp->v_uobj;
        voff_t pgend = round_page(newsize);
        UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

        simple_lock(&uobj->vmobjlock);
        UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
            vp, vp->v_size, newsize, 0);

        /*
         * now check if the size has changed: if we shrink we had better
         * toss some pages...
         */

        if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) {
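                /* an offhi of 0 means "to the end of the object" */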
                (void) uvn_put(uobj, pgend, 0, PGO_FREE);
        } else {
                simple_unlock(&uobj->vmobjlock);
        }
        vp->v_size = newsize;
}

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(vp, off, len)
        struct vnode *vp;
        off_t off;
        size_t len;
{
        void *win;

        /*
         * XXXUBC invent kzero() and use it
         */

        while (len) {
                vsize_t bytelen = len;
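
                /* ubc_alloc() may shorten bytelen to fit one mapping window */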
                win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE);
                memset(win, 0, bytelen);
                ubc_release(win, 0);

                off += bytelen;
                len -= bytelen;
        }
}