/*	$NetBSD: genfs_vnops.c,v 1.29 2001/02/18 15:03:42 chs Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_nfsserver.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

#ifdef NFSSERVER
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>
#endif
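/*
 * Generic poll: a generic vnode has no special conditions to wait for,
 * so just report the normal read/write events the caller asked about
 * as always ready.
 */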
int
genfs_poll(v)
	void *v;
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

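/*
 * Generic fsync: flush the vnode's dirty buffers, then (unless the
 * caller asked for data only) push the inode metadata out as well,
 * waiting for the update if FSYNC_WAIT was given.
 */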
int
genfs_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int wait;

	wait = (ap->a_flags & FSYNC_WAIT) != 0;
	vflushbuf(vp, wait);
	if ((ap->a_flags & FSYNC_DATAONLY) != 0)
		return (0);
	else
		return (VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0));
}

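/*
 * Generic seek: nothing to do beyond rejecting negative offsets.
 */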
int
genfs_seek(v)
	void *v;
{
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_ucred;
	} */ *ap = v;

	if (ap->a_newoff < 0)
		return (EINVAL);

	return (0);
}

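/*
 * Generic abortop: free the pathname buffer if namei() left it to us
 * (HASBUF set) and the caller doesn't still need it (SAVESTART clear).
 */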
int
genfs_abortop(v)
	void *v;
{
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
	return (0);
}

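/*
 * Generic fcntl: F_SETFL needs no filesystem cooperation, so accept it;
 * anything else is unsupported.
 */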
int
genfs_fcntl(v)
	void *v;
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;

	if (ap->a_command == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

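/*
 * Trivial stubs: filesystems plug these into vnodeops entries that
 * must never be reached (genfs_badop), are no-ops (genfs_nullop),
 * or always fail with a fixed errno.
 */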
/*ARGSUSED*/
int
genfs_badop(v)
	void *v;
{

	panic("genfs: bad op");
}

/*ARGSUSED*/
int
genfs_nullop(v)
	void *v;
{

	return (0);
}

/*ARGSUSED*/
int
genfs_einval(v)
	void *v;
{

	return (EINVAL);
}

/*ARGSUSED*/
int
genfs_eopnotsupp(v)
	void *v;
{

	return (EOPNOTSUPP);
}

/*
 * Called when an fs doesn't support a particular vop but the vop needs to
 * vrele, vput, or vunlock passed in vnodes.
 */
int
genfs_eopnotsupp_rele(v)
	void *v;
{
	struct vop_generic_args /*
		struct vnodeop_desc *a_desc;
		/ * other random data follows, presumably * /
	} */ *ap = v;
	struct vnodeop_desc *desc = ap->a_desc;
	struct vnode *vp;
	int flags, i, j, offset;

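	/*
	 * Walk the vnode arguments described by the vop descriptor.
	 * Each argument's WILLRELE/WILLUNLOCK bits sit one position
	 * above the previous argument's, so shifting flags right once
	 * per iteration keeps the current vnode's bits lined up with
	 * the VDESC_VP0_* masks tested below.
	 */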
	flags = desc->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; flags >>= 1, i++) {
		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
			break;	/* stop at end of list */
		if ((j = flags & VDESC_VP0_WILLPUT)) {
			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
			switch (j) {
			case VDESC_VP0_WILLPUT:
				vput(vp);
				break;
			case VDESC_VP0_WILLUNLOCK:
				VOP_UNLOCK(vp, 0);
				break;
			case VDESC_VP0_WILLRELE:
				vrele(vp);
				break;
			}
		}
	}

	return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_ebadf(v)
	void *v;
{

	return (EBADF);
}

/* ARGSUSED */
int
genfs_enoioctl(v)
	void *v;
{

	return (ENOTTY);
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
genfs_revoke(v)
	void *v;
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp, *vq;
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("genfs_revoke: not revokeall");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP)
				simple_unlock(&spechash_slock);
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
	}
	vgonel(vp, p);
	return (0);
}

/*
 * Lock the node.
 */
int
genfs_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock));
}

/*
 * Unlock the node.
 */
int
genfs_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
	    &vp->v_interlock));
}

/*
 * Return whether or not the node is locked.
 */
int
genfs_islocked(v)
	void *v;
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockstatus(&vp->v_lock));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 */
int
genfs_nolock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
}

int
genfs_nounlock(v)
	void *v;
{

	return (0);
}

int
genfs_noislocked(v)
	void *v;
{

	return (0);
}

/*
 * Local lease check for NFS servers.  Just set up args and let
 * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
 * this is a null operation.
 */
int
genfs_lease_check(v)
	void *v;
{
#ifdef NFSSERVER
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap = v;
	u_int32_t duration = 0;
	int cache;
	u_quad_t frev;

	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
	    NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred);
	return (0);
#else
	return (0);
#endif /* NFSSERVER */
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

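/*
 * The caller holds uobj's vmobjlock on entry.  PGO_LOCKED requests
 * return with it still held; all other requests release it before
 * returning.  Async (non-PGO_SYNCIO) reads return EINPROGRESS once
 * the i/o has been started.
 */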
int
genfs_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		vm_page_t *a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t newsize, eof;
	off_t offset, origoffset, startoffset, endoffset, raoffset;
	daddr_t lbn, blkno;
	int s, i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
	int fs_bshift, fs_bsize, dev_bshift, dev_bsize;
	int flags = ap->a_flags;
	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uvm.u_obj;
	struct vm_page *pgs[16];			/* XXXUBC 16 */
	struct ucred *cred = curproc->p_ucred;		/* XXXUBC curproc */
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	boolean_t sawhole = FALSE;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	/* XXXUBC temp limit */
	if (*ap->a_count > 16) {
		return EINVAL;
	}

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	if (flags & PGO_PASTEOF) {
		newsize = MAX(vp->v_uvm.u_size,
		    origoffset + (orignpages << PAGE_SHIFT));
	} else {
		newsize = vp->v_uvm.u_size;
	}
	error = VOP_SIZE(vp, newsize, &eof);
	if (error) {
		return error;
	}

#ifdef DIAGNOSTIC
	if (ap->a_centeridx < 0 || ap->a_centeridx > *ap->a_count) {
		panic("genfs_getpages: centeridx %d out of range",
		    ap->a_centeridx);
	}
	if (origoffset & (PAGE_SIZE - 1) || origoffset < 0) {
		panic("genfs_getpages: offset 0x%x", (int)ap->a_offset);
	}
	if (*ap->a_count < 0) {
		panic("genfs_getpages: count %d < 0", *ap->a_count);
	}
#endif

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= eof &&
	    (flags & PGO_PASTEOF) == 0) {
		if ((flags & PGO_LOCKED) == 0) {
			simple_unlock(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, eof, 0);
		return EINVAL;
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC|UFP_NORDONLY);

		return ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
	}

	/* vnode is VOP_LOCKed, uobj is locked */

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	fs_bshift = vp->v_mount->mnt_fs_bshift;
	fs_bsize = 1 << fs_bshift;
	dev_bshift = vp->v_mount->mnt_dev_bshift;
	dev_bsize = 1 << dev_bshift;
	KASSERT((eof & (dev_bsize - 1)) == 0);

	if ((flags & PGO_PASTEOF) == 0) {
		orignpages = MIN(orignpages,
		    round_page(eof - origoffset) >> PAGE_SHIFT);
	}
	npages = orignpages;
	startoffset = origoffset & ~(fs_bsize - 1);
	endoffset = round_page((origoffset + (npages << PAGE_SHIFT)
	    + fs_bsize - 1) & ~(fs_bsize - 1));
	if ((flags & PGO_PASTEOF) == 0) {
		endoffset = MIN(endoffset, round_page(eof));
	}
	ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	memset(pgs, 0, sizeof(pgs));
	uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], UFP_ALL);

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 * PGO_OVERWRITE also means that the caller guarantees
	 * that the pages already have backing store allocated.
	 */

	if (flags & PGO_OVERWRITE) {
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE", 0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			if (pg->flags & PG_FAKE) {
				uvm_pagezero(pg);
				pg->flags &= ~(PG_FAKE);
			}
			pg->flags &= ~(PG_RDONLY);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (write && (pg->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		raoffset = origoffset + (orignpages << PAGE_SHIFT);
		npages += ridx;
		goto raout;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	if (startoffset != origoffset) {

		/*
		 * XXXUBC we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  for now, unlock them all and
		 * start over.
		 */

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			if (pg->flags & PG_FAKE) {
				pg->flags |= PG_RELEASED;
			}
		}
		uvm_page_unbusy(&pgs[ridx], npages);
		memset(pgs, 0, sizeof(pgs));

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npages = (endoffset - startoffset) >> PAGE_SHIFT;
		npgs = npages;
		uvn_findpages(uobj, startoffset, &npgs, pgs, UFP_ALL);
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, eof - startoffset);
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK |
	    UVMPAGER_MAPIN_READ);

	s = splbio();
	mbp = pool_get(&bufpool, PR_WAITOK);
	splx(s);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_READ|(async ? B_CALL : 0);
	mbp->b_iodone = uvm_aio_biodone;
	mbp->b_vp = vp;
	LIST_INIT(&mbp->b_dep);

	/*
	 * if EOF is in the middle of the last page, zero the part past EOF.
	 */

	if (tailbytes > 0 && (pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE)) {
		memset((void *)(kva + bytes), 0, tailbytes);
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	if (write) {
		lockmgr(&vp->v_glock, LK_EXCLUSIVE, NULL);
	} else {
		lockmgr(&vp->v_glock, LK_SHARED, NULL);
	}

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn, error, 0,0);
			skipbytes += bytes;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of reading it.
		 * if this is a read access, mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno < 0) {
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn, 0,0,0);

			sawhole = TRUE;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			if (!write) {
				int holepages =
				    (round_page(offset + iobytes) -
				    trunc_page(offset)) >> PAGE_SHIFT;
				for (i = 0; i < holepages; i++) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			bp = pool_get(&bufpool, PR_WAITOK);
			splx(s);
			bp->b_data = (char *)kva + offset - startoffset;
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_READ|B_CALL;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
			LIST_INIT(&bp->b_dep);
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist, "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, iobytes, bp->b_blkno);

		VOP_STRATEGY(bp);
	}

loopdone:
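	/*
	 * credit any skipped bytes (already-resident pages and holes)
	 * against the master buf; if nothing was actually read,
	 * the master buf may already be complete.
	 */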
	if (skipbytes) {
		s = splbio();
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		mbp->b_resid -= skipbytes;
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}

	if (async) {
		UVMHIST_LOG(ubchist, "returning PEND", 0,0,0,0);
		lockmgr(&vp->v_glock, LK_RELEASE, NULL);
		return EINPROGRESS;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}
	s = splbio();
	pool_put(&bufpool, mbp);
	splx(s);
	uvm_pagermapout(kva, npages);
	raoffset = startoffset + totalbytes;

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (error == 0 && sawhole && write) {
		error = VOP_BALLOCN(vp, startoffset, npages << PAGE_SHIFT,
		    cred, 0);
		if (error) {
			UVMHIST_LOG(ubchist, "balloc lbn 0x%x -> %d",
			    lbn, error, 0,0);
			lockmgr(&vp->v_glock, LK_RELEASE, NULL);
			simple_lock(&uobj->vmobjlock);
			goto out;
		}
	}
	lockmgr(&vp->v_glock, LK_RELEASE, NULL);
	simple_lock(&uobj->vmobjlock);

	/*
	 * see if we want to start any readahead.
	 * XXXUBC for now, just read the next 128k on 64k boundaries.
	 * this is pretty nonsensical, but it is 50% faster than reading
	 * just the next 64k.
	 */

raout:
	if (!error && !async && !write && ((int)raoffset & 0xffff) == 0 &&
	    PAGE_SHIFT <= 16) {
		int racount;

		racount = 1 << (16 - PAGE_SHIFT);
		(void) VOP_GETPAGES(vp, raoffset, NULL, &racount, 0,
		    VM_PROT_READ, 0, 0);
		simple_lock(&uobj->vmobjlock);

		racount = 1 << (16 - PAGE_SHIFT);
		(void) VOP_GETPAGES(vp, raoffset + 0x10000, NULL, &racount, 0,
		    VM_PROT_READ, 0, 0);
		simple_lock(&uobj->vmobjlock);
	}

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

out:
	if (error) {
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pgs[i], pgs[i]->flags, 0,0);
			if (pgs[i]->flags & PG_WANTED) {
				wakeup(pgs[i]);
			}
			if (pgs[i]->flags & PG_RELEASED) {
				uvm_unlock_pageq();
				(uobj->pgops->pgo_releasepg)(pgs[i], NULL);
				uvm_lock_pageq();
				continue;
			}
			if (pgs[i]->flags & PG_FAKE) {
				uvm_pagefree(pgs[i]);
				continue;
			}
			uvm_pageactivate(pgs[i]);
			pgs[i]->flags &= ~(PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pgs[i], NULL);
		}
		uvm_unlock_pageq();
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error, 0,0,0);
		return error;
	}

	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages, 0,0,0);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		if (pgs[i] == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pgs[i], pgs[i]->flags, 0,0);
		if (pgs[i]->flags & PG_FAKE) {
			UVMHIST_LOG(ubchist, "unfaking pg %p offset 0x%x",
			    pgs[i], pgs[i]->offset, 0,0);
			pgs[i]->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
			pmap_clear_reference(pgs[i]);
		}
		if (write) {
			pgs[i]->flags &= ~(PG_RDONLY);
		}
		if (i < ridx || i >= ridx + orignpages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pgs[i], pgs[i]->offset, 0,0);
			if (pgs[i]->flags & PG_WANTED) {
				wakeup(pgs[i]);
			}
			if (pgs[i]->flags & PG_RELEASED) {
				uvm_unlock_pageq();
				(uobj->pgops->pgo_releasepg)(pgs[i], NULL);
				uvm_lock_pageq();
				continue;
			}
			uvm_pageactivate(pgs[i]);
			pgs[i]->flags &= ~(PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pgs[i], NULL);
		}
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignpages * sizeof(struct vm_page *));
	}
	return 0;
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 */

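/*
 * The pages are assumed to be PG_BUSY and to cover contiguous offsets
 * starting with a_m[0]; the object's vmobjlock is held on entry and
 * dropped here before any i/o is started.  Async requests return
 * EINPROGRESS once the writes have been issued.
 */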
int
genfs_putpages(v)
	void *v;
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		struct vm_page **a_m;
		int a_count;
		int a_flags;
		int *a_rtvals;
	} */ *ap = v;

	int s, error, npages, run;
	int fs_bshift, dev_bshift, dev_bsize;
	vaddr_t kva;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	daddr_t lbn, blkno;
	struct vm_page *pg;
	struct buf *mbp, *bp;
	struct vnode *vp = ap->a_vp;
	boolean_t async = (ap->a_flags & PGO_SYNCIO) == 0;
	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p offset 0x%x count %d",
	    vp, ap->a_m[0]->offset, ap->a_count, 0);

	simple_unlock(&vp->v_uvm.u_obj.vmobjlock);

	error = VOP_SIZE(vp, vp->v_uvm.u_size, &eof);
	if (error) {
		return error;
	}

	error = 0;
	npages = ap->a_count;
	fs_bshift = vp->v_mount->mnt_fs_bshift;
	dev_bshift = vp->v_mount->mnt_dev_bshift;
	dev_bsize = 1 << dev_bshift;
	KASSERT((eof & (dev_bsize - 1)) == 0);

	pg = ap->a_m[0];
	startoffset = pg->offset;
	bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	kva = uvm_pagermapin(ap->a_m, npages, UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	vp->v_numoutput += 2;
	mbp = pool_get(&bufpool, PR_WAITOK);
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
	    vp, mbp, vp->v_numoutput, bytes);
	splx(s);
	mbp->b_bufsize = npages << PAGE_SHIFT;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_WRITE|B_AGE |
	    (async ? B_CALL : 0) |
	    (curproc == uvm.pagedaemon_proc ? B_PDAEMON : 0);
	mbp->b_iodone = uvm_aio_biodone;
	mbp->b_vp = vp;
	LIST_INIT(&mbp->b_dep);

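	/*
	 * loop over the range, issuing one write per contiguous run of
	 * disk blocks as reported by VOP_BMAP().  unmapped blocks
	 * (holes) are skipped; their bytes are credited back to the
	 * master buf afterwards.
	 */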
	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error, 0,0,0);
			skipbytes += bytes;
			bytes = 0;
			break;
		}

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (blkno == (daddr_t)-1) {
			skipbytes += iobytes;
			continue;
		}

		/* if it's really one i/o, don't make a second buf */
		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			vp->v_numoutput++;
			bp = pool_get(&bufpool, PR_WAITOK);
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			splx(s);
			bp->b_data = (char *)kva +
			    (vaddr_t)(offset - pg->offset);
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_WRITE|B_CALL|B_ASYNC;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
			LIST_INIT(&bp->b_dep);
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
		UVMHIST_LOG(ubchist, "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    vp, offset, bp->b_bcount, bp->b_blkno);
		VOP_STRATEGY(bp);
	}
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
		s = splbio();
		mbp->b_resid -= skipbytes;
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}
	if (async) {
		UVMHIST_LOG(ubchist, "returning PEND", 0,0,0,0);
		return EINPROGRESS;
	}
	if (bp != NULL) {
		UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp, 0,0,0);
		error = biowait(mbp);
	}
	if (bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(mbp);
	}
	s = splbio();
	vwakeup(mbp);
	pool_put(&bufpool, mbp);
	splx(s);
	uvm_pagermapout(kva, npages);
	UVMHIST_LOG(ubchist, "returning, error %d", error, 0,0,0);
	return error;
}

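/*
 * Round a size up to the end of its last filesystem block.
 * getpages and putpages use this (via VOP_SIZE) to find the
 * extent of a file's backing store.
 */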
int
genfs_size(v)
	void *v;
{
	struct vop_size_args /* {
		struct vnode *a_vp;
		off_t a_size;
		off_t *a_eobp;
	} */ *ap = v;
	int bsize;

	bsize = 1 << ap->a_vp->v_mount->mnt_fs_bshift;
	*ap->a_eobp = (ap->a_size + bsize - 1) & ~(bsize - 1);
	return 0;
}