/*	$NetBSD: genfs_vnops.c,v 1.49 2002/02/19 15:49:39 chs Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.49 2002/02/19 15:49:39 chs Exp $");

#include "opt_nfsserver.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/mman.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

#ifdef NFSSERVER
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>
#endif

#define MAX_READ_AHEAD	16	/* XXXUBC 16 */

int
genfs_poll(v)
	void *v;
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
genfs_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int wait;

	wait = (ap->a_flags & FSYNC_WAIT) != 0;
	vflushbuf(vp, wait);
	if ((ap->a_flags & FSYNC_DATAONLY) != 0)
		return (0);
	else
		return (VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0));
}
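
/*
 * Illustrative only (not part of the original file): a synchronous
 * whole-file fsync, roughly as sys_fsync() issues it, looks like
 *
 *	error = VOP_FSYNC(vp, cred, FSYNC_WAIT, 0, 0, p);
 *
 * with the vnode locked; a_offlo == a_offhi == 0 means "the whole file".
 */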

int
genfs_seek(v)
	void *v;
{
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_ucred;
	} */ *ap = v;

	if (ap->a_newoff < 0)
		return (EINVAL);

	return (0);
}

int
genfs_abortop(v)
	void *v;
{
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
	return (0);
}

int
genfs_fcntl(v)
	void *v;
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;

	if (ap->a_command == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_badop(v)
	void *v;
{

	panic("genfs: bad op");
}

/*ARGSUSED*/
int
genfs_nullop(v)
	void *v;
{

	return (0);
}

/*ARGSUSED*/
int
genfs_einval(v)
	void *v;
{

	return (EINVAL);
}

/*ARGSUSED*/
int
genfs_eopnotsupp(v)
	void *v;
{

	return (EOPNOTSUPP);
}

/*
 * Called when an fs doesn't support a particular vop, but the vop still
 * needs to vrele, vput, or vunlock the passed-in vnodes.
 */
int
genfs_eopnotsupp_rele(v)
	void *v;
{
	struct vop_generic_args /*
		struct vnodeop_desc *a_desc;
		/ * other random data follows, presumably * /
	} */ *ap = v;
	struct vnodeop_desc *desc = ap->a_desc;
	struct vnode *vp;
	int flags, i, j, offset;

	flags = desc->vdesc_flags;
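	/*
	 * the loop below relies on the layout of the VDESC flag bits:
	 * the WILLRELE/WILLUNLOCK bits for successive vnode slots are
	 * adjacent, so shifting "flags" right once per slot lines slot
	 * i's bits up with the VDESC_VP0_* masks (VDESC_VP0_WILLPUT is
	 * the rele and unlock bits together).
	 */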
	for (i = 0; i < VDESC_MAX_VPS; flags >>= 1, i++) {
		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
			break;	/* stop at end of list */
		if ((j = flags & VDESC_VP0_WILLPUT)) {
			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
			switch (j) {
			case VDESC_VP0_WILLPUT:
				vput(vp);
				break;
			case VDESC_VP0_WILLUNLOCK:
				VOP_UNLOCK(vp, 0);
				break;
			case VDESC_VP0_WILLRELE:
				vrele(vp);
				break;
			}
		}
	}

	return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_ebadf(v)
	void *v;
{

	return (EBADF);
}

/* ARGSUSED */
int
genfs_enoioctl(v)
	void *v;
{

	return (ENOTTY);
}


/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
genfs_revoke(v)
	void *v;
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp, *vq;
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("genfs_revoke: not revokeall");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP)
				simple_unlock(&spechash_slock);
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
	}
	vgonel(vp, p);
	return (0);
}

/*
 * Lock the node.
 */
int
genfs_lock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock));
}

/*
 * Unlock the node.
 */
int
genfs_unlock(v)
	void *v;
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
	    &vp->v_interlock));
}

/*
 * Return whether or not the node is locked.
 */
int
genfs_islocked(v)
	void *v;
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockstatus(&vp->v_lock));
}
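
/*
 * Illustrative only (not part of the original file): a filesystem that
 * keeps its vnode lock in v_lock can point the locking entries of its
 * vnodeop table straight at the routines above.  The "examplefs" name
 * is hypothetical.
 */
#if 0
const struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vop_lock_desc, genfs_lock },			/* lock */
	{ &vop_unlock_desc, genfs_unlock },		/* unlock */
	{ &vop_islocked_desc, genfs_islocked },		/* islocked */
	{ NULL, NULL }
};
#endif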

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 */
int
genfs_nolock(v)
	void *v;
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
}

int
genfs_nounlock(v)
	void *v;
{
	return (0);
}

int
genfs_noislocked(v)
	void *v;
{
	return (0);
}

/*
 * Local lease check for NFS servers.  Just set up args and let
 * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
 * this is a null operation.
 */
int
genfs_lease_check(v)
	void *v;
{
#ifdef NFSSERVER
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap = v;
	u_int32_t duration = 0;
	int cache;
	u_quad_t frev;

	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
	    NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred);
	return (0);
#else
	return (0);
#endif /* NFSSERVER */
}

int
genfs_mmap(v)
	void *v;
{
	return 0;
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t newsize, diskeof, memeof;
	off_t offset, origoffset, startoffset, endoffset, raoffset;
	daddr_t lbn, blkno;
	int s, i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
	int fs_bshift, fs_bsize, dev_bshift;
	int flags = ap->a_flags;
	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	struct vnode *vp = ap->a_vp;
	struct vnode *devvp;
	struct genfs_node *gp = VTOG(vp);
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, *pgs[MAX_READ_AHEAD];
	struct ucred *cred = curproc->p_ucred;		/* XXXUBC curproc */
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	boolean_t sawhole = FALSE;
	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	/* XXXUBC temp limit */
	if (*ap->a_count > MAX_READ_AHEAD) {
		panic("genfs_getpages: too many pages");
	}

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	GOP_SIZE(vp, vp->v_size, &diskeof);
	if (flags & PGO_PASTEOF) {
		newsize = MAX(vp->v_size,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof);
	} else {
		memeof = diskeof;
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			simple_unlock(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		return EINVAL;
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC|UFP_NORDONLY);

		return ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
	}

	/* vnode is VOP_LOCKed, uobj is locked */

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	if (vp->v_type == VREG) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	fs_bsize = 1 << fs_bshift;

	orignpages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignpages;
	startoffset = origoffset & ~(fs_bsize - 1);
	endoffset = round_page((origoffset + (npages << PAGE_SHIFT)
	    + fs_bsize - 1) & ~(fs_bsize - 1));
	endoffset = MIN(endoffset, round_page(memeof));
	ridx = (origoffset - startoffset) >> PAGE_SHIFT;
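
	/*
	 * Worked example (illustrative, not from the original file):
	 * with 8k blocks (fs_bsize 0x2000) and 4k pages, a request for
	 * the single page at offset 0x3000 expands to startoffset 0x2000
	 * and endoffset 0x4000 (assuming the file extends that far), and
	 * ridx is 1: the requested page is the second page of the
	 * block-aligned range.
	 */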

	memset(pgs, 0, sizeof(pgs));
	uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], UFP_ALL);

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (write && (pg->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		raoffset = origoffset + (orignpages << PAGE_SHIFT);
		npages += ridx;
		goto raout;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (flags & PGO_OVERWRITE) {
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * some pages weren't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignpages) {

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		for (i = 0; i < orignpages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			if (pg->flags & PG_FAKE) {
				pg->flags |= PG_RELEASED;
			}
		}
		uvm_page_unbusy(&pgs[ridx], orignpages);
		memset(pgs, 0, sizeof(pgs));

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		uvn_findpages(uobj, startoffset, &npgs, pgs, UFP_ALL);
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK |
	    UVMPAGER_MAPIN_READ);

	s = splbio();
	mbp = pool_get(&bufpool, PR_WAITOK);
	splx(s);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_READ|(async ? B_CALL : 0);
	mbp->b_iodone = (async ? uvm_aio_biodone : NULL);
	mbp->b_vp = vp;
	LIST_INIT(&mbp->b_dep);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * if the page including EOF is not PG_FAKE, skip over it since
	 * in that case it has valid data that we need to preserve.
	 */

	if (tailbytes > 0) {
		size_t tailstart = bytes;

		if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
			tailstart = round_page(tailstart);
			tailbytes -= tailstart - bytes;
		}
		UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
		    kva, tailstart, tailbytes,0);
		memset((void *)(kva + tailstart), 0, tailbytes);
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	if (write) {
		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
	} else {
		lockmgr(&gp->g_glock, LK_SHARED, NULL);
	}

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & (PG_FAKE|PG_RDONLY)) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn, error,0,0);
			skipbytes += bytes;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of reading it.
		 * if this is a read access, mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno < 0) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = TRUE;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			for (i = 0; i < holepages; i++) {
				if (write) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				} else {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			bp = pool_get(&bufpool, PR_WAITOK);
			splx(s);
			bp->b_data = (char *)kva + offset - startoffset;
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_READ|B_CALL;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
			bp->b_proc = NULL;
			LIST_INIT(&bp->b_dep);
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;
		if (devvp->v_type == VBLK) {
			bp->b_dev = devvp->v_rdev;
		}

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
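
		/*
		 * Worked example of the adjustment above (illustrative,
		 * not from the original file): with fs_bshift 13 (8k
		 * blocks) and dev_bshift 9 (512-byte sectors), an offset
		 * 4k into logical block lbn reads from device block
		 * blkno + (0x1000 >> 9) = blkno + 8.
		 */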

		UVMHIST_LOG(ubchist, "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, iobytes, bp->b_blkno);

		VOP_STRATEGY(bp);
	}

loopdone:
	if (skipbytes) {
		s = splbio();
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		mbp->b_resid -= skipbytes;
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}

	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
		return 0;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}
	s = splbio();
	pool_put(&bufpool, mbp);
	splx(s);
	uvm_pagermapout(kva, npages);
	raoffset = startoffset + totalbytes;

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && write) {
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			pgs[i]->flags &= ~PG_CLEAN;
			UVMHIST_LOG(ubchist, "mark dirty pg %p", pgs[i],0,0,0);
		}
		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
		    cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
	}
	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
	simple_lock(&uobj->vmobjlock);

	/*
	 * see if we want to start any readahead.
	 * XXXUBC for now, just read the next 128k on 64k boundaries.
	 * this is pretty nonsensical, but it is 50% faster than reading
	 * just the next 64k.
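	 * (illustrative numbers, not from the original file: with 4k
	 * pages, racount below is MIN(1 << 4, MAX_READ_AHEAD) = 16 pages,
	 * i.e. 64k per VOP_GETPAGES call, so the two calls together read
	 * the next 128k.)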
	 */

raout:
	if (!error && !async && !write && ((int)raoffset & 0xffff) == 0 &&
	    PAGE_SHIFT <= 16) {
		off_t rasize;
		int racount;

		/* XXXUBC temp limit, from above */
		racount = MIN(1 << (16 - PAGE_SHIFT), MAX_READ_AHEAD);
		rasize = racount << PAGE_SHIFT;
		(void) VOP_GETPAGES(vp, raoffset, NULL, &racount, 0,
		    VM_PROT_READ, 0, 0);
		simple_lock(&uobj->vmobjlock);

		/* XXXUBC temp limit, from above */
		racount = MIN(1 << (16 - PAGE_SHIFT), MAX_READ_AHEAD);
		(void) VOP_GETPAGES(vp, raoffset + rasize, NULL, &racount, 0,
		    VM_PROT_READ, 0, 0);
		simple_lock(&uobj->vmobjlock);
	}

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pgs[i], pgs[i]->flags, 0,0);
			if (pgs[i]->flags & PG_FAKE) {
				pgs[i]->flags |= PG_RELEASED;
			}
		}
		uvm_lock_pageq();
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		return error;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		if (write) {
			pg->flags &= ~(PG_RDONLY);
		}
		if (i < ridx || i >= ridx + orignpages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageactivate(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignpages * sizeof(struct vm_page *));
	}
	return 0;
}
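
/*
 * Illustrative calling convention (not part of the original file):
 * callers reach the routine above through VOP_GETPAGES() with
 * uobj->vmobjlock held, and on success the requested pages come back
 * PG_BUSY.  A synchronous single-page read might look like:
 *
 *	simple_lock(&uobj->vmobjlock);
 *	npages = 1;
 *	error = VOP_GETPAGES(vp, trunc_page(offset), &pg, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_SYNCIO);
 */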

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT), or
 *	if PGO_SYNCIO is set and there are pages busy.
 *	we return with the object locked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
 *	unlock the object nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	page by page, doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */

int
genfs_putpages(v)
	void *v;
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct simplelock *slock = &uobj->vmobjlock;
	off_t startoff = ap->a_offlo;
	off_t endoff = ap->a_offhi;
	off_t off;
	int flags = ap->a_flags;
	int n = MAXBSIZE >> PAGE_SHIFT;
	int i, s, error, npages, nback;
	int freeflag;
	struct vm_page *pgs[n], *pg, *nextpg, *tpg, curmp, endmp;
	boolean_t wasclean, by_list, needs_clean, yield;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);
	if (uobj->uo_npages == 0) {
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
		    (vp->v_flag & VONWORKLST)) {
			vp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(vp, v_synclist);
		}
		simple_unlock(slock);
		return 0;
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	error = 0;
	s = splbio();
	wasclean = (vp->v_numoutput == 0);
	splx(s);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	freeflag = (curproc == uvm.pagedaemon_proc) ? PG_PAGEOUT : PG_RELEASED;
	curmp.uobject = uobj;
	curmp.offset = (voff_t)-1;
	curmp.flags = PG_BUSY;
	endmp.uobject = uobj;
	endmp.offset = (voff_t)-1;
	endmp.flags = PG_BUSY;
	if (by_list) {
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		PHOLD(curproc);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & PG_BUSY) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				pg = TAILQ_NEXT(pg, listq);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yield = curproc->p_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD;
		if (pg->flags & PG_BUSY || yield) {
			KASSERT(curproc != uvm.pagedaemon_proc);
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				UVMHIST_LOG(ubchist, "curmp next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
			}
			if (yield) {
				simple_unlock(slock);
				preempt(NULL);
				simple_lock(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				simple_lock(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		}
		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = FALSE;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			wasclean = FALSE;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * first look backward.
			 */

			npages = MIN(n >> 1, off >> PAGE_SHIFT);
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
				n -= nback;
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = MIN(n, (endoff - off) >> PAGE_SHIFT) - 1;
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_lock_pageq();
		}
		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (flags & PGO_DEACTIVATE &&
			    (tpg->pqflags & PQ_INACTIVE) == 0 &&
			    tpg->wire_count == 0) {
				(void) pmap_clear_reference(tpg);
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (freeflag == PG_PAGEOUT) {
						uvmexp.paging++;
						uvm_pagedequeue(tpg);
					}
				} else {
					nextpg = TAILQ_NEXT(tpg, listq);
					uvm_pagefree(tpg);
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_unlock_pageq();
		}
		if (needs_clean) {

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq);
			}
			simple_unlock(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			simple_lock(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			}
			if (error == ENOMEM) {
				for (i = 0; i < npages; i++) {
					tpg = pgs[i];
					if (tpg->flags & PG_PAGEOUT) {
						tpg->flags &= ~PG_PAGEOUT;
						uvmexp.paging--;
					}
					tpg->flags &= ~PG_CLEAN;
					uvm_pageactivate(tpg);
				}
				uvm_page_unbusy(pgs, npages);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq);
			}
		} else {
			off += npages << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		PRELE(curproc);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	if ((flags & PGO_CLEANIT) && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
	    (vp->v_flag & VONWORKLST)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	if (!wasclean && !async) {
		s = splbio();
		while (vp->v_numoutput != 0) {
			vp->v_flag |= VBWAIT;
			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
			    "genput2", 0);
			simple_lock(slock);
		}
		splx(s);
	}
	simple_unlock(&uobj->vmobjlock);
	return error;
}
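
/*
 * Illustrative only (not part of the original file): a whole-file
 * synchronous flush, roughly as vflushbuf() or the syncer would issue
 * it, enters the routine above via
 *
 *	simple_lock(&vp->v_interlock);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
 *
 * the caller passes in the uobj lock (v_interlock) held, and the
 * routine releases it before returning.
 */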

int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int s, error, run;
	int fs_bshift, dev_bshift;
	vaddr_t kva;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	daddr_t lbn, blkno;
	struct vm_page *pg;
	struct buf *mbp, *bp;
	struct vnode *devvp;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	UVMHIST_FUNC("genfs_gop_write"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	GOP_SIZE(vp, vp->v_size, &eof);
	if (vp->v_type == VREG) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	pg = pgs[0];
	startoffset = pg->offset;
	bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WRITE |
	    UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	vp->v_numoutput += 2;
	mbp = pool_get(&bufpool, PR_WAITOK);
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
	    vp, mbp, vp->v_numoutput, bytes);
	splx(s);
	mbp->b_bufsize = npages << PAGE_SHIFT;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_WRITE|B_AGE|(async ? (B_CALL|B_ASYNC) : 0);
	mbp->b_iodone = uvm_aio_biodone;
	mbp->b_vp = vp;
	LIST_INIT(&mbp->b_dep);

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
			skipbytes += bytes;
			bytes = 0;
			break;
		}

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (blkno == (daddr_t)-1) {
			skipbytes += iobytes;
			continue;
		}

		/* if it's really one i/o, don't make a second buf */
		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			vp->v_numoutput++;
			bp = pool_get(&bufpool, PR_WAITOK);
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			splx(s);
			bp->b_data = (char *)kva +
			    (vaddr_t)(offset - pg->offset);
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_WRITE|B_CALL|B_ASYNC;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
			LIST_INIT(&bp->b_dep);
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;
		if (devvp->v_type == VBLK) {
			bp->b_dev = devvp->v_rdev;
		}

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
		UVMHIST_LOG(ubchist, "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    vp, offset, bp->b_bcount, bp->b_blkno);
		VOP_STRATEGY(bp);
	}
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
		s = splbio();
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		mbp->b_resid -= skipbytes;
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return 0;
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
	error = biowait(mbp);
	uvm_aio_aiodone(mbp);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return error;
}

/*
 * VOP_PUTPAGES() for vnodes which never have pages.
 */

int
genfs_null_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(vp->v_uobj.uo_npages == 0);
	simple_unlock(&vp->v_interlock);
	return (0);
}

void
genfs_node_init(struct vnode *vp, struct genfs_ops *ops)
{
	struct genfs_node *gp = VTOG(vp);

	lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
	gp->g_op = ops;
}
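
/*
 * Illustrative only (not part of the original file): a filesystem
 * adopting these defaults defines a genfs_ops and calls
 * genfs_node_init() when it sets up a vnode.  The "examplefs" names
 * are hypothetical; only the gop_alloc routine is fs-specific here.
 */
#if 0
struct genfs_ops examplefs_genfsops = {
	.gop_size = genfs_size,		/* round size to block boundary */
	.gop_alloc = examplefs_gop_alloc,	/* allocate backing blocks */
	.gop_write = genfs_gop_write,	/* write pages via VOP_BMAP/STRATEGY */
};

	/* in the fs's vget path: */
	genfs_node_init(vp, &examplefs_genfsops);
#endif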

void
genfs_size(struct vnode *vp, off_t size, off_t *eobp)
{
	int bsize;

	bsize = 1 << vp->v_mount->mnt_fs_bshift;
	*eobp = (size + bsize - 1) & ~(bsize - 1);
}

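/*
 * Worked example for genfs_size() above (illustrative, not from the
 * original file): with mnt_fs_bshift == 13 (8k blocks), a size of
 * 0x2345 rounds up to the block boundary 0x4000.
 */

/*
 * Compatibility getpages for filesystems that don't support bmap-based
 * i/o: fill each missing page with VOP_READ() instead of reading
 * blocks directly.
 */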
int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	struct ucred *cred = curproc->p_ucred;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC|UFP_NORDONLY);

		return ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		simple_unlock(&uobj->vmobjlock);
		return EINVAL;
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
	simple_unlock(&uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK |
	    UVMPAGER_MAPIN_READ);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		uio.uio_procp = curproc;
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
	}
	uvm_pagermapout(kva, npages);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			pmap_clear_modify(pg);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	return error;
}

int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	struct ucred *cred = curproc->p_ucred;
	struct buf *bp;
	vaddr_t kva;
	int s, error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WRITE |
	    UVMPAGER_MAPIN_WAITOK);

	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = npages;
	uio.uio_offset = offset;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	uio.uio_procp = curproc;
	error = VOP_WRITE(vp, &uio, 0, cred);

	s = splbio();
	vp->v_numoutput++;
	bp = pool_get(&bufpool, PR_WAITOK);
	splx(s);

	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
	bp->b_vp = vp;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	LIST_INIT(&bp->b_dep);
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	uvm_aio_aiodone(bp);
	return error;
}