1 /* $NetBSD: nfs_vnops.c,v 1.318 2021/06/29 22:34:09 dholland Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)nfs_vnops.c 8.19 (Berkeley) 7/31/95
35 */
36
37 /*
38 * vnode op calls for Sun NFS version 2 and 3
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.318 2021/06/29 22:34:09 dholland Exp $");
43
44 #ifdef _KERNEL_OPT
45 #include "opt_nfs.h"
46 #include "opt_uvmhist.h"
47 #endif
48
49 #include <sys/param.h>
50 #include <sys/proc.h>
51 #include <sys/kernel.h>
52 #include <sys/systm.h>
53 #include <sys/resourcevar.h>
54 #include <sys/mount.h>
55 #include <sys/buf.h>
56 #include <sys/condvar.h>
57 #include <sys/disk.h>
58 #include <sys/malloc.h>
59 #include <sys/kmem.h>
60 #include <sys/mbuf.h>
61 #include <sys/mutex.h>
62 #include <sys/namei.h>
63 #include <sys/vnode.h>
64 #include <sys/dirent.h>
65 #include <sys/fcntl.h>
66 #include <sys/hash.h>
67 #include <sys/lockf.h>
68 #include <sys/stat.h>
69 #include <sys/unistd.h>
70 #include <sys/kauth.h>
71 #include <sys/cprng.h>
72
73 #ifdef UVMHIST
74 #include <uvm/uvm.h>
75 #endif
76 #include <uvm/uvm_extern.h>
77 #include <uvm/uvm_stat.h>
78
79 #include <miscfs/fifofs/fifo.h>
80 #include <miscfs/genfs/genfs.h>
81 #include <miscfs/genfs/genfs_node.h>
82 #include <miscfs/specfs/specdev.h>
83
84 #include <nfs/rpcv2.h>
85 #include <nfs/nfsproto.h>
86 #include <nfs/nfs.h>
87 #include <nfs/nfsnode.h>
88 #include <nfs/nfsmount.h>
89 #include <nfs/xdr_subs.h>
90 #include <nfs/nfsm_subs.h>
91 #include <nfs/nfs_var.h>
92
93 #include <net/if.h>
94 #include <netinet/in.h>
95 #include <netinet/in_var.h>
96
97 /*
98 * Global vfs data structures for nfs
99 */
100 int (**nfsv2_vnodeop_p)(void *);
101 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
102 { &vop_default_desc, vn_default_error },
103 { &vop_parsepath_desc, genfs_parsepath }, /* parsepath */
104 { &vop_lookup_desc, nfs_lookup }, /* lookup */
105 { &vop_create_desc, nfs_create }, /* create */
106 { &vop_mknod_desc, nfs_mknod }, /* mknod */
107 { &vop_open_desc, nfs_open }, /* open */
108 { &vop_close_desc, nfs_close }, /* close */
109 { &vop_access_desc, nfs_access }, /* access */
110 { &vop_accessx_desc, genfs_accessx }, /* accessx */
111 { &vop_getattr_desc, nfs_getattr }, /* getattr */
112 { &vop_setattr_desc, nfs_setattr }, /* setattr */
113 { &vop_read_desc, nfs_read }, /* read */
114 { &vop_write_desc, nfs_write }, /* write */
115 { &vop_fallocate_desc, genfs_eopnotsupp }, /* fallocate */
116 { &vop_fdiscard_desc, genfs_eopnotsupp }, /* fdiscard */
117 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
118 { &vop_ioctl_desc, nfs_ioctl }, /* ioctl */
119 { &vop_poll_desc, nfs_poll }, /* poll */
120 { &vop_kqfilter_desc, nfs_kqfilter }, /* kqfilter */
121 { &vop_revoke_desc, nfs_revoke }, /* revoke */
122 { &vop_mmap_desc, nfs_mmap }, /* mmap */
123 { &vop_fsync_desc, nfs_fsync }, /* fsync */
124 { &vop_seek_desc, nfs_seek }, /* seek */
125 { &vop_remove_desc, nfs_remove }, /* remove */
126 { &vop_link_desc, nfs_link }, /* link */
127 { &vop_rename_desc, nfs_rename }, /* rename */
128 { &vop_mkdir_desc, nfs_mkdir }, /* mkdir */
129 { &vop_rmdir_desc, nfs_rmdir }, /* rmdir */
130 { &vop_symlink_desc, nfs_symlink }, /* symlink */
131 { &vop_readdir_desc, nfs_readdir }, /* readdir */
132 { &vop_readlink_desc, nfs_readlink }, /* readlink */
133 { &vop_abortop_desc, nfs_abortop }, /* abortop */
134 { &vop_inactive_desc, nfs_inactive }, /* inactive */
135 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
136 { &vop_lock_desc, nfs_lock }, /* lock */
137 { &vop_unlock_desc, nfs_unlock }, /* unlock */
138 { &vop_bmap_desc, nfs_bmap }, /* bmap */
139 { &vop_strategy_desc, nfs_strategy }, /* strategy */
140 { &vop_print_desc, nfs_print }, /* print */
141 { &vop_islocked_desc, nfs_islocked }, /* islocked */
142 { &vop_pathconf_desc, nfs_pathconf }, /* pathconf */
143 { &vop_advlock_desc, nfs_advlock }, /* advlock */
144 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
145 { &vop_getpages_desc, nfs_getpages }, /* getpages */
146 { &vop_putpages_desc, genfs_putpages }, /* putpages */
147 { NULL, NULL }
148 };
149 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
150 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
151
152 /*
153 * Special device vnode ops
154 */
155 int (**spec_nfsv2nodeop_p)(void *);
156 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
157 { &vop_default_desc, vn_default_error },
158 { &vop_parsepath_desc, genfs_parsepath }, /* parsepath */
159 { &vop_lookup_desc, spec_lookup }, /* lookup */
160 { &vop_create_desc, spec_create }, /* create */
161 { &vop_mknod_desc, spec_mknod }, /* mknod */
162 { &vop_open_desc, spec_open }, /* open */
163 { &vop_close_desc, nfsspec_close }, /* close */
164 { &vop_access_desc, nfsspec_access }, /* access */
165 { &vop_accessx_desc, genfs_accessx }, /* accessx */
166 { &vop_getattr_desc, nfs_getattr }, /* getattr */
167 { &vop_setattr_desc, nfs_setattr }, /* setattr */
168 { &vop_read_desc, nfsspec_read }, /* read */
169 { &vop_write_desc, nfsspec_write }, /* write */
170 { &vop_fallocate_desc, spec_fallocate }, /* fallocate */
171 { &vop_fdiscard_desc, spec_fdiscard }, /* fdiscard */
172 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
173 { &vop_ioctl_desc, spec_ioctl }, /* ioctl */
174 { &vop_poll_desc, spec_poll }, /* poll */
175 { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */
176 { &vop_revoke_desc, spec_revoke }, /* revoke */
177 { &vop_mmap_desc, spec_mmap }, /* mmap */
178 { &vop_fsync_desc, spec_fsync }, /* fsync */
179 { &vop_seek_desc, spec_seek }, /* seek */
180 { &vop_remove_desc, spec_remove }, /* remove */
181 { &vop_link_desc, spec_link }, /* link */
182 { &vop_rename_desc, spec_rename }, /* rename */
183 { &vop_mkdir_desc, spec_mkdir }, /* mkdir */
184 { &vop_rmdir_desc, spec_rmdir }, /* rmdir */
185 { &vop_symlink_desc, spec_symlink }, /* symlink */
186 { &vop_readdir_desc, spec_readdir }, /* readdir */
187 { &vop_readlink_desc, spec_readlink }, /* readlink */
188 { &vop_abortop_desc, spec_abortop }, /* abortop */
189 { &vop_inactive_desc, nfs_inactive }, /* inactive */
190 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
191 { &vop_lock_desc, nfs_lock }, /* lock */
192 { &vop_unlock_desc, nfs_unlock }, /* unlock */
193 { &vop_bmap_desc, spec_bmap }, /* bmap */
194 { &vop_strategy_desc, spec_strategy }, /* strategy */
195 { &vop_print_desc, nfs_print }, /* print */
196 { &vop_islocked_desc, nfs_islocked }, /* islocked */
197 { &vop_pathconf_desc, spec_pathconf }, /* pathconf */
198 { &vop_advlock_desc, spec_advlock }, /* advlock */
199 { &vop_bwrite_desc, spec_bwrite }, /* bwrite */
200 { &vop_getpages_desc, spec_getpages }, /* getpages */
201 { &vop_putpages_desc, spec_putpages }, /* putpages */
202 { NULL, NULL }
203 };
204 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
205 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
206
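/*
 * FIFO vnode ops
 */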
207 int (**fifo_nfsv2nodeop_p)(void *);
208 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
209 { &vop_default_desc, vn_default_error },
210 { &vop_parsepath_desc, genfs_parsepath }, /* parsepath */
211 { &vop_lookup_desc, vn_fifo_bypass }, /* lookup */
212 { &vop_create_desc, vn_fifo_bypass }, /* create */
213 { &vop_mknod_desc, vn_fifo_bypass }, /* mknod */
214 { &vop_open_desc, vn_fifo_bypass }, /* open */
215 { &vop_close_desc, nfsfifo_close }, /* close */
216 { &vop_access_desc, nfsspec_access }, /* access */
217 { &vop_accessx_desc, genfs_accessx }, /* accessx */
218 { &vop_getattr_desc, nfs_getattr }, /* getattr */
219 { &vop_setattr_desc, nfs_setattr }, /* setattr */
220 { &vop_read_desc, nfsfifo_read }, /* read */
221 { &vop_write_desc, nfsfifo_write }, /* write */
222 { &vop_fallocate_desc, vn_fifo_bypass }, /* fallocate */
223 { &vop_fdiscard_desc, vn_fifo_bypass }, /* fdiscard */
224 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
225 { &vop_ioctl_desc, vn_fifo_bypass }, /* ioctl */
226 { &vop_poll_desc, vn_fifo_bypass }, /* poll */
227 { &vop_kqfilter_desc, vn_fifo_bypass }, /* kqfilter */
228 { &vop_revoke_desc, vn_fifo_bypass }, /* revoke */
229 { &vop_mmap_desc, vn_fifo_bypass }, /* mmap */
230 { &vop_fsync_desc, nfs_fsync }, /* fsync */
231 { &vop_seek_desc, vn_fifo_bypass }, /* seek */
232 { &vop_remove_desc, vn_fifo_bypass }, /* remove */
233 { &vop_link_desc, vn_fifo_bypass }, /* link */
234 { &vop_rename_desc, vn_fifo_bypass }, /* rename */
235 { &vop_mkdir_desc, vn_fifo_bypass }, /* mkdir */
236 { &vop_rmdir_desc, vn_fifo_bypass }, /* rmdir */
237 { &vop_symlink_desc, vn_fifo_bypass }, /* symlink */
238 { &vop_readdir_desc, vn_fifo_bypass }, /* readdir */
239 { &vop_readlink_desc, vn_fifo_bypass }, /* readlink */
240 { &vop_abortop_desc, vn_fifo_bypass }, /* abortop */
241 { &vop_inactive_desc, nfs_inactive }, /* inactive */
242 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
243 { &vop_lock_desc, nfs_lock }, /* lock */
244 { &vop_unlock_desc, nfs_unlock }, /* unlock */
245 { &vop_bmap_desc, vn_fifo_bypass }, /* bmap */
246 { &vop_strategy_desc, genfs_badop }, /* strategy */
247 { &vop_print_desc, nfs_print }, /* print */
248 { &vop_islocked_desc, nfs_islocked }, /* islocked */
249 { &vop_pathconf_desc, vn_fifo_bypass }, /* pathconf */
250 { &vop_advlock_desc, vn_fifo_bypass }, /* advlock */
251 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
252 { &vop_putpages_desc, vn_fifo_bypass }, /* putpages */
253 { NULL, NULL }
254 };
255 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
256 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
257
258 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
259 size_t, kauth_cred_t, struct lwp *);
260 static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);
261
262 /*
263 * Global variables
264 */
265 extern u_int32_t nfs_true, nfs_false;
266 extern u_int32_t nfs_xdrneg1;
267 extern const nfstype nfsv3_type[9];
268
269 int nfs_numasync = 0;
270 #define DIRHDSIZ _DIRENT_NAMEOFF(dp)
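/*
 * UIO_ADVANCE consumes "siz" bytes from the current iovec of a uio:
 * it decrements uio_resid and iov_len and advances iov_base.  It does
 * not step to the next iovec, so callers must not advance past the end
 * of the current one.
 */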
271 #define UIO_ADVANCE(uio, siz) \
272 (void)((uio)->uio_resid -= (siz), \
273 (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
274 (uio)->uio_iov->iov_len -= (siz))
275
276 static void nfs_cache_enter(struct vnode *, struct vnode *,
277 struct componentname *);
278
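/*
 * Enter a lookup result into the name cache.  For a positive entry,
 * remember the child's ctime; for the directory, record its mtime in
 * n_nctime the first time an entry is made, so that nfs_lookup() can
 * later decide whether cached names are still trustworthy.
 */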
279 static void
280 nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
281 struct componentname *cnp)
282 {
283 struct nfsnode *dnp = VTONFS(dvp);
284
285 if ((cnp->cn_flags & MAKEENTRY) == 0) {
286 return;
287 }
288 if (vp != NULL) {
289 struct nfsnode *np = VTONFS(vp);
290
291 np->n_ctime = np->n_vattr->va_ctime.tv_sec;
292 }
293
294 if (!timespecisset(&dnp->n_nctime))
295 dnp->n_nctime = dnp->n_vattr->va_mtime;
296
297 cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_flags);
298 }
299
300 /*
301 * nfs null call from vfs.
302 */
303 int
304 nfs_null(struct vnode *vp, kauth_cred_t cred, struct lwp *l)
305 {
306 char *bpos, *dpos;
307 int error = 0;
308 struct mbuf *mreq, *mrep, *md, *mb __unused;
309 struct nfsnode *np = VTONFS(vp);
310
311 nfsm_reqhead(np, NFSPROC_NULL, 0);
312 nfsm_request(np, NFSPROC_NULL, l, cred);
313 nfsm_reqdone;
314 return (error);
315 }
316
317 /*
318 * nfs access vnode op.
319 * For nfs version 2, just return ok. File accesses may fail later.
320 * For nfs version 3, use the access rpc to check accessibility. If file modes
321 * are changed on the server, accesses might still fail later.
322 */
323 int
324 nfs_access(void *v)
325 {
326 struct vop_access_args /* {
327 struct vnode *a_vp;
328 accmode_t a_accmode;
329 kauth_cred_t a_cred;
330 } */ *ap = v;
331 struct vnode *vp = ap->a_vp;
332 #ifndef NFS_V2_ONLY
333 u_int32_t *tl;
334 char *cp;
335 int32_t t1, t2;
336 char *bpos, *dpos, *cp2;
337 int error = 0, attrflag;
338 struct mbuf *mreq, *mrep, *md, *mb;
339 u_int32_t mode, rmode;
340 const int v3 = NFS_ISV3(vp);
341 #endif
342 int cachevalid;
343 struct nfsnode *np = VTONFS(vp);
344 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
345
346 cachevalid = (np->n_accstamp != -1 &&
347 (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) &&
348 np->n_accuid == kauth_cred_geteuid(ap->a_cred));
349
350 /*
351 * Check access cache first. If this request has been made for this
352 * uid shortly before, use the cached result.
353 */
354 if (cachevalid) {
355 if (!np->n_accerror) {
356 if ((np->n_accmode & ap->a_accmode) == ap->a_accmode)
357 return np->n_accerror;
358 } else if ((np->n_accmode & ap->a_accmode) == np->n_accmode)
359 return np->n_accerror;
360 }
361
362 #ifndef NFS_V2_ONLY
363 /*
364 * For nfs v3, do an access rpc, otherwise you are stuck emulating
365 * ufs_access() locally using the vattr. This may not be correct,
366 * since the server may apply other access criteria such as
367 * client uid-->server uid mapping that we do not know about, but
368 * this is better than just returning anything that is lying about
369 * in the cache.
370 */
371 if (v3) {
372 nfsstats.rpccnt[NFSPROC_ACCESS]++;
373 nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
374 nfsm_fhtom(np, v3);
375 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
376 if (ap->a_accmode & VREAD)
377 mode = NFSV3ACCESS_READ;
378 else
379 mode = 0;
380 if (vp->v_type != VDIR) {
381 if (ap->a_accmode & VWRITE)
382 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
383 if (ap->a_accmode & VEXEC)
384 mode |= NFSV3ACCESS_EXECUTE;
385 } else {
386 if (ap->a_accmode & VWRITE)
387 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
388 NFSV3ACCESS_DELETE);
389 if (ap->a_accmode & VEXEC)
390 mode |= NFSV3ACCESS_LOOKUP;
391 }
392 *tl = txdr_unsigned(mode);
393 nfsm_request(np, NFSPROC_ACCESS, curlwp, ap->a_cred);
394 nfsm_postop_attr(vp, attrflag, 0);
395 if (!error) {
396 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
397 rmode = fxdr_unsigned(u_int32_t, *tl);
398 /*
399 * The NFS V3 spec does not clarify whether or not
400 * the returned access bits can be a superset of
401 * the ones requested, so...
402 */
403 if ((rmode & mode) != mode)
404 error = EACCES;
405 }
406 nfsm_reqdone;
407 } else
408 #endif
409 return (nfsspec_access(ap));
410 #ifndef NFS_V2_ONLY
411 /*
412 * Disallow write attempts on filesystems mounted read-only;
413 * unless the file is a socket, fifo, or a block or character
414 * device resident on the filesystem.
415 */
416 if (!error && (ap->a_accmode & VWRITE) &&
417 (vp->v_mount->mnt_flag & MNT_RDONLY)) {
418 switch (vp->v_type) {
419 case VREG:
420 case VDIR:
421 case VLNK:
422 error = EROFS;
423 default:
424 break;
425 }
426 }
427
428 if (!error || error == EACCES) {
429 /*
430 * If we got the same result as for a previous,
431 * different request, OR it in. Don't update
432 * the timestamp in that case.
433 */
434 if (cachevalid && np->n_accstamp != -1 &&
435 error == np->n_accerror) {
436 if (!error)
437 np->n_accmode |= ap->a_accmode;
438 else if ((np->n_accmode & ap->a_accmode) == ap->a_accmode)
439 np->n_accmode = ap->a_accmode;
440 } else {
441 np->n_accstamp = time_uptime;
442 np->n_accuid = kauth_cred_geteuid(ap->a_cred);
443 np->n_accmode = ap->a_accmode;
444 np->n_accerror = error;
445 }
446 }
447
448 return (error);
449 #endif
450 }
451
452 /*
453 * nfs open vnode op
454 * Check to see if the type is ok
455 * and that deletion is not in progress.
456 * For paged in text files, you will need to flush the page cache
457 * if consistency is lost.
458 */
459 /* ARGSUSED */
460 int
461 nfs_open(void *v)
462 {
463 struct vop_open_args /* {
464 struct vnode *a_vp;
465 int a_mode;
466 kauth_cred_t a_cred;
467 } */ *ap = v;
468 struct vnode *vp = ap->a_vp;
469 struct nfsnode *np = VTONFS(vp);
470 int error;
471
472 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
473 return (EACCES);
474 }
475
476 if (ap->a_mode & FREAD) {
477 if (np->n_rcred != NULL)
478 kauth_cred_free(np->n_rcred);
479 np->n_rcred = ap->a_cred;
480 kauth_cred_hold(np->n_rcred);
481 }
482 if (ap->a_mode & FWRITE) {
483 if (np->n_wcred != NULL)
484 kauth_cred_free(np->n_wcred);
485 np->n_wcred = ap->a_cred;
486 kauth_cred_hold(np->n_wcred);
487 }
488
489 error = nfs_flushstalebuf(vp, ap->a_cred, curlwp, 0);
490 if (error)
491 return error;
492
493 NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */
494
495 return (0);
496 }
497
498 /*
499 * nfs close vnode op
500 * What an NFS client should do upon close after writing is a debatable issue.
501 * Most NFS clients push delayed writes to the server upon close, basically for
502 * two reasons:
503 * 1 - So that any write errors may be reported back to the client process
504 * doing the close system call. By far the two most likely errors are
505 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
506 * 2 - To put a worst case upper bound on cache inconsistency between
507 * multiple clients for the file.
508 * There is also a consistency problem for Version 2 of the protocol w.r.t.
509 * not being able to tell if other clients are writing a file concurrently,
510 * since there is no way of knowing if the changed modify time in the reply
511 * is only due to the write for this client.
512 * (NFS Version 3 provides weak cache consistency data in the reply that
513 * should be sufficient to detect and handle this case.)
514 *
515 * The current code does the following:
516 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
517 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
518 * or commit them (this satisfies 1 and 2 except for the
519 * case where the server crashes after this close but
520 * before the commit RPC, which is felt to be "good
 521  *		       enough").  Changing the last argument to nfs_flush() to
522 * a 1 would force a commit operation, if it is felt a
523 * commit is necessary now.
524 */
525 /* ARGSUSED */
526 int
527 nfs_close(void *v)
528 {
529 struct vop_close_args /* {
530 struct vnodeop_desc *a_desc;
531 struct vnode *a_vp;
532 int a_fflag;
533 kauth_cred_t a_cred;
534 } */ *ap = v;
535 struct vnode *vp = ap->a_vp;
536 struct nfsnode *np = VTONFS(vp);
537 int error = 0;
538 UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);
539
540 if (vp->v_type == VREG) {
541 if (np->n_flag & NMODIFIED) {
542 #ifndef NFS_V2_ONLY
543 if (NFS_ISV3(vp)) {
544 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, curlwp, 0);
545 np->n_flag &= ~NMODIFIED;
546 } else
547 #endif
548 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 1);
549 NFS_INVALIDATE_ATTRCACHE(np);
550 }
551 if (np->n_flag & NWRITEERR) {
552 np->n_flag &= ~NWRITEERR;
553 error = np->n_error;
554 }
555 }
556 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
557 return (error);
558 }
559
560 /*
561 * nfs getattr call from vfs.
562 */
563 int
564 nfs_getattr(void *v)
565 {
566 struct vop_getattr_args /* {
567 struct vnode *a_vp;
568 struct vattr *a_vap;
569 kauth_cred_t a_cred;
570 } */ *ap = v;
571 struct vnode *vp = ap->a_vp;
572 struct nfsnode *np = VTONFS(vp);
573 char *cp;
574 u_int32_t *tl;
575 int32_t t1, t2;
576 char *bpos, *dpos;
577 int error = 0;
578 struct mbuf *mreq, *mrep, *md, *mb;
579 const int v3 = NFS_ISV3(vp);
580
581 /*
582 * Update local times for special files.
583 */
584 if (np->n_flag & (NACC | NUPD))
585 np->n_flag |= NCHG;
586
587 /*
588 * if we have delayed truncation, do it now.
589 */
590 nfs_delayedtruncate(vp);
591
592 /*
593 * First look in the cache.
594 */
595 if (nfs_getattrcache(vp, ap->a_vap) == 0)
596 return (0);
597 nfsstats.rpccnt[NFSPROC_GETATTR]++;
598 nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
599 nfsm_fhtom(np, v3);
600 nfsm_request(np, NFSPROC_GETATTR, curlwp, ap->a_cred);
601 if (!error) {
602 nfsm_loadattr(vp, ap->a_vap, 0);
603 if (vp->v_type == VDIR &&
604 ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
605 ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
606 }
607 nfsm_reqdone;
608 return (error);
609 }
610
611 /*
612 * nfs setattr call.
613 */
614 int
615 nfs_setattr(void *v)
616 {
617 struct vop_setattr_args /* {
618 struct vnodeop_desc *a_desc;
619 struct vnode *a_vp;
620 struct vattr *a_vap;
621 kauth_cred_t a_cred;
622 } */ *ap = v;
623 struct vnode *vp = ap->a_vp;
624 struct nfsnode *np = VTONFS(vp);
625 struct vattr *vap = ap->a_vap;
626 int error = 0;
627 u_quad_t tsize = 0;
628
629 /*
630 * Setting of flags is not supported.
631 */
632 if (vap->va_flags != VNOVAL)
633 return (EOPNOTSUPP);
634
635 /*
636 * Disallow write attempts if the filesystem is mounted read-only.
637 */
638 if ((vap->va_uid != (uid_t)VNOVAL ||
639 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
640 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
641 (vp->v_mount->mnt_flag & MNT_RDONLY))
642 return (EROFS);
643 if (vap->va_size != VNOVAL) {
644 if (vap->va_size > VFSTONFS(vp->v_mount)->nm_maxfilesize) {
645 return EFBIG;
646 }
647 switch (vp->v_type) {
648 case VDIR:
649 return (EISDIR);
650 case VCHR:
651 case VBLK:
652 case VSOCK:
653 case VFIFO:
654 if (vap->va_mtime.tv_sec == VNOVAL &&
655 vap->va_atime.tv_sec == VNOVAL &&
656 vap->va_mode == (mode_t)VNOVAL &&
657 vap->va_uid == (uid_t)VNOVAL &&
658 vap->va_gid == (gid_t)VNOVAL)
659 return (0);
660 vap->va_size = VNOVAL;
661 break;
662 default:
663 /*
664 * Disallow write attempts if the filesystem is
665 * mounted read-only.
666 */
667 if (vp->v_mount->mnt_flag & MNT_RDONLY)
668 return (EROFS);
669 genfs_node_wrlock(vp);
670 uvm_vnp_setsize(vp, vap->va_size);
671 tsize = np->n_size;
672 np->n_size = vap->va_size;
673 if (vap->va_size == 0)
674 error = nfs_vinvalbuf(vp, 0,
675 ap->a_cred, curlwp, 1);
676 else
677 error = nfs_vinvalbuf(vp, V_SAVE,
678 ap->a_cred, curlwp, 1);
679 if (error) {
680 uvm_vnp_setsize(vp, tsize);
681 genfs_node_unlock(vp);
682 return (error);
683 }
684 np->n_vattr->va_size = vap->va_size;
685 }
686 } else {
687 /*
688 * flush files before setattr because a later write of
689 * cached data might change timestamps or reset sugid bits
690 */
691 if ((vap->va_mtime.tv_sec != VNOVAL ||
692 vap->va_atime.tv_sec != VNOVAL ||
693 vap->va_mode != VNOVAL) &&
694 vp->v_type == VREG &&
695 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
696 curlwp, 1)) == EINTR)
697 return (error);
698 }
699 error = nfs_setattrrpc(vp, vap, ap->a_cred, curlwp);
700 if (vap->va_size != VNOVAL) {
701 if (error) {
702 np->n_size = np->n_vattr->va_size = tsize;
703 uvm_vnp_setsize(vp, np->n_size);
704 }
705 genfs_node_unlock(vp);
706 }
707 VN_KNOTE(vp, NOTE_ATTRIB);
708 return (error);
709 }
710
711 /*
712 * Do an nfs setattr rpc.
713 */
714 int
715 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, struct lwp *l)
716 {
717 struct nfsv2_sattr *sp;
718 char *cp;
719 int32_t t1, t2;
720 char *bpos, *dpos;
721 u_int32_t *tl;
722 int error = 0;
723 struct mbuf *mreq, *mrep, *md, *mb;
724 const int v3 = NFS_ISV3(vp);
725 struct nfsnode *np = VTONFS(vp);
726 #ifndef NFS_V2_ONLY
727 int wccflag = NFSV3_WCCRATTR;
728 char *cp2;
729 #endif
730
731 nfsstats.rpccnt[NFSPROC_SETATTR]++;
732 nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
733 nfsm_fhtom(np, v3);
734 #ifndef NFS_V2_ONLY
735 if (v3) {
736 nfsm_v3attrbuild(vap, true);
737 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
738 *tl = nfs_false;
739 } else {
740 #endif
741 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
742 if (vap->va_mode == (mode_t)VNOVAL)
743 sp->sa_mode = nfs_xdrneg1;
744 else
745 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
746 if (vap->va_uid == (uid_t)VNOVAL)
747 sp->sa_uid = nfs_xdrneg1;
748 else
749 sp->sa_uid = txdr_unsigned(vap->va_uid);
750 if (vap->va_gid == (gid_t)VNOVAL)
751 sp->sa_gid = nfs_xdrneg1;
752 else
753 sp->sa_gid = txdr_unsigned(vap->va_gid);
754 sp->sa_size = txdr_unsigned(vap->va_size);
755 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
756 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
757 #ifndef NFS_V2_ONLY
758 }
759 #endif
760 nfsm_request(np, NFSPROC_SETATTR, l, cred);
761 #ifndef NFS_V2_ONLY
762 if (v3) {
763 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
764 } else
765 #endif
766 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
767 nfsm_reqdone;
768 return (error);
769 }
770
771 /*
772 * nfs lookup call, one step at a time...
773 * First look in cache
774 * If not found, do the rpc.
775 */
776 int
777 nfs_lookup(void *v)
778 {
779 struct vop_lookup_v2_args /* {
780 struct vnodeop_desc *a_desc;
781 struct vnode *a_dvp;
782 struct vnode **a_vpp;
783 struct componentname *a_cnp;
784 } */ *ap = v;
785 struct componentname *cnp = ap->a_cnp;
786 struct vnode *dvp = ap->a_dvp;
787 struct vnode **vpp = ap->a_vpp;
788 int flags;
789 struct vnode *newvp;
790 u_int32_t *tl;
791 char *cp;
792 int32_t t1, t2;
793 char *bpos, *dpos, *cp2;
794 struct mbuf *mreq, *mrep, *md, *mb;
795 long len;
796 nfsfh_t *fhp;
797 struct nfsnode *np;
798 int cachefound;
799 int error = 0, attrflag, fhsize;
800 const int v3 = NFS_ISV3(dvp);
801
802 flags = cnp->cn_flags;
803
804 *vpp = NULLVP;
805 newvp = NULLVP;
806 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
807 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
808 return (EROFS);
809 if (dvp->v_type != VDIR)
810 return (ENOTDIR);
811
812 /*
813 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
814 */
815 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
816 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
817 if (error)
818 return error;
819 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
820 return EISDIR;
821 vref(dvp);
822 *vpp = dvp;
823 return 0;
824 }
825
826 np = VTONFS(dvp);
827
828 /*
829 * Before performing an RPC, check the name cache to see if
830 * the directory/name pair we are looking for is known already.
831 * If the directory/name pair is found in the name cache,
832 * we have to ensure the directory has not changed from
833 * the time the cache entry has been created. If it has,
834 * the cache entry has to be ignored.
835 */
836 cachefound = cache_lookup_raw(dvp, cnp->cn_nameptr, cnp->cn_namelen,
837 cnp->cn_flags, NULL, vpp);
838 KASSERT(dvp != *vpp);
839 KASSERT((cnp->cn_flags & ISWHITEOUT) == 0);
840 if (cachefound) {
841 struct vattr vattr;
842
843 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
844 if (error != 0) {
845 if (*vpp != NULLVP)
846 vrele(*vpp);
847 *vpp = NULLVP;
848 return error;
849 }
850
851 if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred)
852 || timespeccmp(&vattr.va_mtime,
853 &VTONFS(dvp)->n_nctime, !=)) {
854 if (*vpp != NULLVP) {
855 vrele(*vpp);
856 *vpp = NULLVP;
857 }
858 cache_purge1(dvp, NULL, 0, PURGE_CHILDREN);
859 timespecclear(&np->n_nctime);
860 goto dorpc;
861 }
862
863 if (*vpp == NULLVP) {
864 /* namecache gave us a negative result */
865 error = ENOENT;
866 goto noentry;
867 }
868
869 /*
870 * investigate the vnode returned by cache_lookup_raw.
871 * if it isn't appropriate, do an rpc.
872 */
873 newvp = *vpp;
874 if ((flags & ISDOTDOT) != 0) {
875 VOP_UNLOCK(dvp);
876 }
877 error = vn_lock(newvp, LK_SHARED);
878 if ((flags & ISDOTDOT) != 0) {
879 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
880 }
881 if (error != 0) {
882 /* newvp has been reclaimed. */
883 vrele(newvp);
884 *vpp = NULLVP;
885 goto dorpc;
886 }
887 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred)
888 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
889 nfsstats.lookupcache_hits++;
890 KASSERT(newvp->v_type != VNON);
891 VOP_UNLOCK(newvp);
892 return (0);
893 }
894 cache_purge1(newvp, NULL, 0, PURGE_PARENTS);
895 vput(newvp);
896 *vpp = NULLVP;
897 }
898 dorpc:
899 #if 0
900 /*
901 * because nfsv3 has the same CREATE semantics as ours,
902 * we don't have to perform LOOKUPs beforehand.
903 *
904 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
905 * XXX although we have no way to know if O_EXCL is requested or not.
906 */
907
908 if (v3 && cnp->cn_nameiop == CREATE &&
909 (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
910 (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
911 return (EJUSTRETURN);
912 }
913 #endif /* 0 */
914
915 error = 0;
916 newvp = NULLVP;
917 nfsstats.lookupcache_misses++;
918 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
919 len = cnp->cn_namelen;
920 nfsm_reqhead(np, NFSPROC_LOOKUP,
921 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
922 nfsm_fhtom(np, v3);
923 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
924 nfsm_request(np, NFSPROC_LOOKUP, curlwp, cnp->cn_cred);
925 if (error) {
926 nfsm_postop_attr(dvp, attrflag, 0);
927 m_freem(mrep);
928 goto nfsmout;
929 }
930 nfsm_getfh(fhp, fhsize, v3);
931
932 /*
933 * Handle RENAME case...
934 */
935 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
936 if (NFS_CMPFH(np, fhp, fhsize)) {
937 m_freem(mrep);
938 return (EISDIR);
939 }
940 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
941 if (error) {
942 m_freem(mrep);
943 return error;
944 }
945 newvp = NFSTOV(np);
946 #ifndef NFS_V2_ONLY
947 if (v3) {
948 nfsm_postop_attr(newvp, attrflag, 0);
949 nfsm_postop_attr(dvp, attrflag, 0);
950 } else
951 #endif
952 nfsm_loadattr(newvp, (struct vattr *)0, 0);
953 *vpp = newvp;
954 m_freem(mrep);
955 goto validate;
956 }
957
958 /*
959 * The postop attr handling is duplicated for each if case,
960 * because it should be done while dvp is locked (unlocking
961 * dvp is different for each case).
962 */
963
964 if (NFS_CMPFH(np, fhp, fhsize)) {
965 /*
966 * As we handle "." lookup locally, this is
967 * a broken server.
968 */
969 m_freem(mrep);
970 return EBADRPC;
971 } else if (flags & ISDOTDOT) {
972 /*
973 * ".." lookup
974 */
975 VOP_UNLOCK(dvp);
976 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
977 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
978 if (error) {
979 m_freem(mrep);
980 return error;
981 }
982 newvp = NFSTOV(np);
983
984 #ifndef NFS_V2_ONLY
985 if (v3) {
986 nfsm_postop_attr(newvp, attrflag, 0);
987 nfsm_postop_attr(dvp, attrflag, 0);
988 } else
989 #endif
990 nfsm_loadattr(newvp, (struct vattr *)0, 0);
991 } else {
992 /*
993 * Other lookups.
994 */
995 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
996 if (error) {
997 m_freem(mrep);
998 return error;
999 }
1000 newvp = NFSTOV(np);
1001 #ifndef NFS_V2_ONLY
1002 if (v3) {
1003 nfsm_postop_attr(newvp, attrflag, 0);
1004 nfsm_postop_attr(dvp, attrflag, 0);
1005 } else
1006 #endif
1007 nfsm_loadattr(newvp, (struct vattr *)0, 0);
1008 }
1009 if (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) {
1010 nfs_cache_enter(dvp, newvp, cnp);
1011 }
1012 *vpp = newvp;
1013 nfsm_reqdone;
1014 if (error) {
1015 /*
1016 * We get here only because of errors returned by
1017 * the RPC. Otherwise we'll have returned above
1018 * (the nfsm_* macros will jump to nfsm_reqdone
1019 * on error).
1020 */
1021 if (error == ENOENT && cnp->cn_nameiop != CREATE) {
1022 nfs_cache_enter(dvp, NULL, cnp);
1023 }
1024 if (newvp != NULLVP) {
1025 if (newvp == dvp) {
1026 vrele(newvp);
1027 } else {
1028 vput(newvp);
1029 }
1030 }
1031 noentry:
1032 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
1033 (flags & ISLASTCN) && error == ENOENT) {
1034 if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
1035 error = EROFS;
1036 } else {
1037 error = EJUSTRETURN;
1038 }
1039 }
1040 *vpp = NULL;
1041 return error;
1042 }
1043
1044 validate:
1045 /*
1046 * make sure we have valid type and size.
1047 */
1048
1049 newvp = *vpp;
1050 if (newvp->v_type == VNON) {
1051 struct vattr vattr; /* dummy */
1052
1053 KASSERT(VTONFS(newvp)->n_attrstamp == 0);
1054 error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred);
1055 if (error) {
1056 vput(newvp);
1057 *vpp = NULL;
1058 }
1059 }
1060 if (error)
1061 return error;
1062 if (newvp != dvp)
1063 VOP_UNLOCK(newvp);
1064 return 0;
1065 }
1066
1067 /*
1068 * nfs read call.
1069 * Just call nfs_bioread() to do the work.
1070 */
1071 int
1072 nfs_read(void *v)
1073 {
1074 struct vop_read_args /* {
1075 struct vnode *a_vp;
1076 struct uio *a_uio;
1077 int a_ioflag;
1078 kauth_cred_t a_cred;
1079 } */ *ap = v;
1080 struct vnode *vp = ap->a_vp;
1081
1082 if (vp->v_type != VREG)
1083 return EISDIR;
1084 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
1085 }
1086
1087 /*
1088 * nfs readlink call
1089 */
1090 int
1091 nfs_readlink(void *v)
1092 {
1093 struct vop_readlink_args /* {
1094 struct vnode *a_vp;
1095 struct uio *a_uio;
1096 kauth_cred_t a_cred;
1097 } */ *ap = v;
1098 struct vnode *vp = ap->a_vp;
1099 struct nfsnode *np = VTONFS(vp);
1100
1101 if (vp->v_type != VLNK)
1102 return (EPERM);
1103
1104 if (np->n_rcred != NULL) {
1105 kauth_cred_free(np->n_rcred);
1106 }
1107 np->n_rcred = ap->a_cred;
1108 kauth_cred_hold(np->n_rcred);
1109
1110 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
1111 }
1112
1113 /*
1114 * Do a readlink rpc.
1115 * Called by nfs_doio() from below the buffer cache.
1116 */
1117 int
1118 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
1119 {
1120 u_int32_t *tl;
1121 char *cp;
1122 int32_t t1, t2;
1123 char *bpos, *dpos, *cp2;
1124 int error = 0;
1125 uint32_t len;
1126 struct mbuf *mreq, *mrep, *md, *mb;
1127 const int v3 = NFS_ISV3(vp);
1128 struct nfsnode *np = VTONFS(vp);
1129 #ifndef NFS_V2_ONLY
1130 int attrflag;
1131 #endif
1132
1133 nfsstats.rpccnt[NFSPROC_READLINK]++;
1134 nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
1135 nfsm_fhtom(np, v3);
1136 nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
1137 #ifndef NFS_V2_ONLY
1138 if (v3)
1139 nfsm_postop_attr(vp, attrflag, 0);
1140 #endif
1141 if (!error) {
1142 #ifndef NFS_V2_ONLY
1143 if (v3) {
1144 nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
1145 len = fxdr_unsigned(uint32_t, *tl);
1146 if (len > NFS_MAXPATHLEN) {
1147 /*
1148 * this pathname is too long for us.
1149 */
1150 m_freem(mrep);
1151 /* Solaris returns EINVAL. should we follow? */
1152 error = ENAMETOOLONG;
1153 goto nfsmout;
1154 }
1155 } else
1156 #endif
1157 {
1158 nfsm_strsiz(len, NFS_MAXPATHLEN);
1159 }
1160 nfsm_mtouio(uiop, len);
1161 }
1162 nfsm_reqdone;
1163 return (error);
1164 }
1165
1166 /*
1167 * nfs read rpc call
1168 * Ditto above
1169 */
1170 int
1171 nfs_readrpc(struct vnode *vp, struct uio *uiop)
1172 {
1173 u_int32_t *tl;
1174 char *cp;
1175 int32_t t1, t2;
1176 char *bpos, *dpos, *cp2;
1177 struct mbuf *mreq, *mrep, *md, *mb;
1178 struct nfsmount *nmp;
1179 int error = 0, len, retlen, tsiz, eof __unused, byte_count;
1180 const int v3 = NFS_ISV3(vp);
1181 struct nfsnode *np = VTONFS(vp);
1182 #ifndef NFS_V2_ONLY
1183 int attrflag;
1184 #endif
1185
1186 #ifndef nolint
1187 eof = 0;
1188 #endif
1189 nmp = VFSTONFS(vp->v_mount);
1190 tsiz = uiop->uio_resid;
1191 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1192 return (EFBIG);
1193 iostat_busy(nmp->nm_stats);
1194 byte_count = 0; /* count bytes actually transferred */
1195 while (tsiz > 0) {
1196 nfsstats.rpccnt[NFSPROC_READ]++;
1197 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1198 nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1199 nfsm_fhtom(np, v3);
1200 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1201 #ifndef NFS_V2_ONLY
1202 if (v3) {
1203 txdr_hyper(uiop->uio_offset, tl);
1204 *(tl + 2) = txdr_unsigned(len);
1205 } else
1206 #endif
1207 {
1208 *tl++ = txdr_unsigned(uiop->uio_offset);
1209 *tl++ = txdr_unsigned(len);
1210 *tl = 0;
1211 }
1212 nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
1213 #ifndef NFS_V2_ONLY
1214 if (v3) {
1215 nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
1216 if (error) {
1217 m_freem(mrep);
1218 goto nfsmout;
1219 }
1220 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1221 eof = fxdr_unsigned(int, *(tl + 1));
1222 } else
1223 #endif
1224 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1225 nfsm_strsiz(retlen, nmp->nm_rsize);
1226 nfsm_mtouio(uiop, retlen);
1227 m_freem(mrep);
1228 tsiz -= retlen;
1229 byte_count += retlen;
1230 #ifndef NFS_V2_ONLY
1231 if (v3) {
1232 if (eof || retlen == 0)
1233 tsiz = 0;
1234 } else
1235 #endif
1236 if (retlen < len)
1237 tsiz = 0;
1238 }
1239 nfsmout:
1240 iostat_unbusy(nmp->nm_stats, byte_count, 1);
1241 return (error);
1242 }
1243
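/*
 * Context shared between nfs_writerpc() and nfs_writerpc_extfree():
 * it counts the external (loaned-page) mbufs still outstanding so that
 * nfs_writerpc() can wait for all of them to be freed before the pages
 * may be modified again.
 */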
1244 struct nfs_writerpc_context {
1245 kmutex_t nwc_lock;
1246 kcondvar_t nwc_cv;
1247 int nwc_mbufcount;
1248 };
1249
1250 /*
 1251  * free the mbuf used to refer to protected pages during a write rpc call.
1252 * called at splvm.
1253 */
1254 static void
1255 nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
1256 {
1257 struct nfs_writerpc_context *ctx = arg;
1258
1259 KASSERT(m != NULL);
1260 KASSERT(ctx != NULL);
1261 pool_cache_put(mb_cache, m);
1262 mutex_enter(&ctx->nwc_lock);
1263 if (--ctx->nwc_mbufcount == 0) {
1264 cv_signal(&ctx->nwc_cv);
1265 }
1266 mutex_exit(&ctx->nwc_lock);
1267 }
1268
1269 /*
1270 * nfs write call
1271 */
1272 int
1273 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, bool pageprotected, bool *stalewriteverfp)
1274 {
1275 u_int32_t *tl;
1276 char *cp;
1277 int32_t t1, t2;
1278 char *bpos, *dpos;
1279 struct mbuf *mreq, *mrep, *md, *mb;
1280 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1281 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
1282 const int v3 = NFS_ISV3(vp);
1283 int committed = NFSV3WRITE_FILESYNC;
1284 struct nfsnode *np = VTONFS(vp);
1285 struct nfs_writerpc_context ctx;
1286 int byte_count;
1287 size_t origresid;
1288 #ifndef NFS_V2_ONLY
1289 char *cp2;
1290 int rlen, commit;
1291 #endif
1292
1293 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1294 panic("writerpc readonly vp %p", vp);
1295 }
1296
1297 #ifdef DIAGNOSTIC
1298 if (uiop->uio_iovcnt != 1)
1299 panic("nfs: writerpc iovcnt > 1");
1300 #endif
1301 tsiz = uiop->uio_resid;
1302 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1303 return EFBIG;
1304
1305 mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
1306 cv_init(&ctx.nwc_cv, "nfsmblk");
1307 ctx.nwc_mbufcount = 1;
1308
1309 retry:
1310 origresid = uiop->uio_resid;
1311 KASSERT(origresid == uiop->uio_iov->iov_len);
1312 iostat_busy(nmp->nm_stats);
1313 byte_count = 0; /* count of bytes actually written */
1314 while (tsiz > 0) {
1315 uint32_t datalen; /* data bytes need to be allocated in mbuf */
1316 size_t backup;
1317 bool stalewriteverf = false;
1318
1319 nfsstats.rpccnt[NFSPROC_WRITE]++;
1320 len = uimin(tsiz, nmp->nm_wsize);
1321 datalen = pageprotected ? 0 : nfsm_rndup(len);
1322 nfsm_reqhead(np, NFSPROC_WRITE,
1323 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
1324 nfsm_fhtom(np, v3);
1325 #ifndef NFS_V2_ONLY
1326 if (v3) {
1327 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1328 txdr_hyper(uiop->uio_offset, tl);
1329 tl += 2;
1330 *tl++ = txdr_unsigned(len);
1331 *tl++ = txdr_unsigned(*iomode);
1332 *tl = txdr_unsigned(len);
1333 } else
1334 #endif
1335 {
1336 u_int32_t x;
1337
1338 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1339 /* Set both "begin" and "current" to non-garbage. */
1340 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1341 *tl++ = x; /* "begin offset" */
1342 *tl++ = x; /* "current offset" */
1343 x = txdr_unsigned(len);
1344 *tl++ = x; /* total to this offset */
1345 *tl = x; /* size of this write */
1346
1347 }
1348 if (pageprotected) {
1349 /*
1350 * since we know pages can't be modified during i/o,
1351 * no need to copy them for us.
1352 */
1353 struct mbuf *m;
1354 struct iovec *iovp = uiop->uio_iov;
1355
1356 m = m_get(M_WAIT, MT_DATA);
1357 MCLAIM(m, &nfs_mowner);
1358 MEXTADD(m, iovp->iov_base, len, M_MBUF,
1359 nfs_writerpc_extfree, &ctx);
1360 m->m_flags |= M_EXT_ROMAP;
1361 m->m_len = len;
1362 mb->m_next = m;
1363 /*
1364 * no need to maintain mb and bpos here
 1365 			 * because no one cares about them later.
1366 */
1367 #if 0
1368 mb = m;
 1369 			mb = m;
1370 #endif
1371 UIO_ADVANCE(uiop, len);
1372 uiop->uio_offset += len;
1373 mutex_enter(&ctx.nwc_lock);
1374 ctx.nwc_mbufcount++;
1375 mutex_exit(&ctx.nwc_lock);
1376 nfs_zeropad(mb, 0, nfsm_padlen(len));
1377 } else {
1378 nfsm_uiotom(uiop, len);
1379 }
1380 nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
1381 #ifndef NFS_V2_ONLY
1382 if (v3) {
1383 wccflag = NFSV3_WCCCHK;
1384 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
1385 if (!error) {
1386 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1387 + NFSX_V3WRITEVERF);
1388 rlen = fxdr_unsigned(int, *tl++);
1389 if (rlen == 0) {
1390 error = NFSERR_IO;
1391 m_freem(mrep);
1392 break;
1393 } else if (rlen < len) {
1394 backup = len - rlen;
1395 UIO_ADVANCE(uiop, -backup);
1396 uiop->uio_offset -= backup;
1397 len = rlen;
1398 }
1399 commit = fxdr_unsigned(int, *tl++);
1400
1401 /*
 1402 				 * Return the lowest commitment level
1403 * obtained by any of the RPCs.
1404 */
1405 if (committed == NFSV3WRITE_FILESYNC)
1406 committed = commit;
1407 else if (committed == NFSV3WRITE_DATASYNC &&
1408 commit == NFSV3WRITE_UNSTABLE)
1409 committed = commit;
1410 mutex_enter(&nmp->nm_lock);
1411 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
1412 memcpy(nmp->nm_writeverf, tl,
1413 NFSX_V3WRITEVERF);
1414 nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
1415 } else if ((nmp->nm_iflag &
1416 NFSMNT_STALEWRITEVERF) ||
1417 memcmp(tl, nmp->nm_writeverf,
1418 NFSX_V3WRITEVERF)) {
1419 memcpy(nmp->nm_writeverf, tl,
1420 NFSX_V3WRITEVERF);
1421 /*
1422 * note NFSMNT_STALEWRITEVERF
1423 * if we're the first thread to
1424 * notice it.
1425 */
1426 if ((nmp->nm_iflag &
1427 NFSMNT_STALEWRITEVERF) == 0) {
1428 stalewriteverf = true;
1429 nmp->nm_iflag |=
1430 NFSMNT_STALEWRITEVERF;
1431 }
1432 }
1433 mutex_exit(&nmp->nm_lock);
1434 }
1435 } else
1436 #endif
1437 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1438 if (wccflag)
1439 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
1440 m_freem(mrep);
1441 if (error)
1442 break;
1443 tsiz -= len;
1444 byte_count += len;
1445 if (stalewriteverf) {
1446 *stalewriteverfp = true;
1447 stalewriteverf = false;
1448 if (committed == NFSV3WRITE_UNSTABLE &&
1449 len != origresid) {
1450 /*
1451 * if our write requests weren't atomic but
 1452 				 * unstable, data written in previous iterations
 1453 				 * may already have been lost by now.
 1454 				 * in that case, we should resend it to nfsd.
1455 */
1456 backup = origresid - tsiz;
1457 UIO_ADVANCE(uiop, -backup);
1458 uiop->uio_offset -= backup;
1459 tsiz = origresid;
1460 goto retry;
1461 }
1462 }
1463 }
1464 nfsmout:
1465 iostat_unbusy(nmp->nm_stats, byte_count, 0);
1466 if (pageprotected) {
1467 /*
1468 * wait until mbufs go away.
1469 * retransmitted mbufs can survive longer than rpc requests
1470 * themselves.
1471 */
1472 mutex_enter(&ctx.nwc_lock);
1473 ctx.nwc_mbufcount--;
1474 while (ctx.nwc_mbufcount > 0) {
1475 cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
1476 }
1477 mutex_exit(&ctx.nwc_lock);
1478 }
1479 mutex_destroy(&ctx.nwc_lock);
1480 cv_destroy(&ctx.nwc_cv);
1481 *iomode = committed;
1482 if (error)
1483 uiop->uio_resid = tsiz;
1484 return (error);
1485 }
1486
1487 /*
1488 * nfs mknod rpc
1489 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1490 * mode set to specify the file type and the size field for rdev.
1491 */
1492 int
1493 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap)
1494 {
1495 struct nfsv2_sattr *sp;
1496 u_int32_t *tl;
1497 char *cp;
1498 int32_t t1, t2;
1499 struct vnode *newvp = (struct vnode *)0;
1500 struct nfsnode *dnp, *np;
1501 char *cp2;
1502 char *bpos, *dpos;
1503 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1504 struct mbuf *mreq, *mrep, *md, *mb;
1505 u_int32_t rdev;
1506 const int v3 = NFS_ISV3(dvp);
1507
1508 if (vap->va_type == VCHR || vap->va_type == VBLK)
1509 rdev = txdr_unsigned(vap->va_rdev);
1510 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1511 rdev = nfs_xdrneg1;
1512 else {
1513 VOP_ABORTOP(dvp, cnp);
1514 return (EOPNOTSUPP);
1515 }
1516 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1517 dnp = VTONFS(dvp);
1518 nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
 1519 		nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1520 nfsm_fhtom(dnp, v3);
1521 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1522 #ifndef NFS_V2_ONLY
1523 if (v3) {
1524 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1525 *tl++ = vtonfsv3_type(vap->va_type);
1526 nfsm_v3attrbuild(vap, false);
1527 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1528 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1529 *tl++ = txdr_unsigned(major(vap->va_rdev));
1530 *tl = txdr_unsigned(minor(vap->va_rdev));
1531 }
1532 } else
1533 #endif
1534 {
1535 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1536 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1537 sp->sa_uid = nfs_xdrneg1;
1538 sp->sa_gid = nfs_xdrneg1;
1539 sp->sa_size = rdev;
1540 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1541 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1542 }
1543 nfsm_request(dnp, NFSPROC_MKNOD, curlwp, cnp->cn_cred);
1544 if (!error) {
1545 nfsm_mtofh(dvp, newvp, v3, gotvp);
1546 if (!gotvp) {
1547 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1548 cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1549 if (!error)
1550 newvp = NFSTOV(np);
1551 }
1552 }
1553 #ifndef NFS_V2_ONLY
1554 if (v3)
1555 nfsm_wcc_data(dvp, wccflag, 0, !error);
1556 #endif
1557 nfsm_reqdone;
1558 if (error) {
1559 if (newvp)
1560 vput(newvp);
1561 } else {
1562 nfs_cache_enter(dvp, newvp, cnp);
1563 *vpp = newvp;
1564 VOP_UNLOCK(newvp);
1565 }
1566 VTONFS(dvp)->n_flag |= NMODIFIED;
1567 if (!wccflag)
1568 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1569 return (error);
1570 }
1571
1572 /*
1573 * nfs mknod vop
1574 * just call nfs_mknodrpc() to do the work.
1575 */
1576 /* ARGSUSED */
1577 int
1578 nfs_mknod(void *v)
1579 {
1580 struct vop_mknod_v3_args /* {
1581 struct vnode *a_dvp;
1582 struct vnode **a_vpp;
1583 struct componentname *a_cnp;
1584 struct vattr *a_vap;
1585 } */ *ap = v;
1586 struct vnode *dvp = ap->a_dvp;
1587 struct componentname *cnp = ap->a_cnp;
1588 int error;
1589
1590 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
1591 VN_KNOTE(dvp, NOTE_WRITE);
1592 if (error == 0 || error == EEXIST)
1593 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1594 return (error);
1595 }
1596
1597 /*
1598 * nfs file create call
1599 */
1600 int
1601 nfs_create(void *v)
1602 {
1603 struct vop_create_v3_args /* {
1604 struct vnode *a_dvp;
1605 struct vnode **a_vpp;
1606 struct componentname *a_cnp;
1607 struct vattr *a_vap;
1608 } */ *ap = v;
1609 struct vnode *dvp = ap->a_dvp;
1610 struct vattr *vap = ap->a_vap;
1611 struct componentname *cnp = ap->a_cnp;
1612 struct nfsv2_sattr *sp;
1613 u_int32_t *tl;
1614 char *cp;
1615 int32_t t1, t2;
1616 struct nfsnode *dnp, *np = (struct nfsnode *)0;
1617 struct vnode *newvp = (struct vnode *)0;
1618 char *bpos, *dpos, *cp2;
1619 int error, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1620 struct mbuf *mreq, *mrep, *md, *mb;
1621 const int v3 = NFS_ISV3(dvp);
1622 u_int32_t excl_mode = NFSV3CREATE_UNCHECKED;
1623
1624 /*
1625 * Oops, not for me..
1626 */
1627 if (vap->va_type == VSOCK)
1628 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1629
1630 KASSERT(vap->va_type == VREG);
1631
1632 #ifdef VA_EXCLUSIVE
1633 if (vap->va_vaflags & VA_EXCLUSIVE) {
1634 excl_mode = NFSV3CREATE_EXCLUSIVE;
1635 }
1636 #endif
1637 again:
1638 error = 0;
1639 nfsstats.rpccnt[NFSPROC_CREATE]++;
1640 dnp = VTONFS(dvp);
1641 nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1642 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1643 nfsm_fhtom(dnp, v3);
1644 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1645 #ifndef NFS_V2_ONLY
1646 if (v3) {
1647 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1648 if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1649 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1650 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1651 *tl++ = cprng_fast32();
1652 *tl = cprng_fast32();
1653 } else {
1654 *tl = txdr_unsigned(excl_mode);
1655 nfsm_v3attrbuild(vap, false);
1656 }
1657 } else
1658 #endif
1659 {
1660 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1661 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1662 sp->sa_uid = nfs_xdrneg1;
1663 sp->sa_gid = nfs_xdrneg1;
1664 sp->sa_size = 0;
1665 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1666 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1667 }
1668 nfsm_request(dnp, NFSPROC_CREATE, curlwp, cnp->cn_cred);
1669 if (!error) {
1670 nfsm_mtofh(dvp, newvp, v3, gotvp);
1671 if (!gotvp) {
1672 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1673 cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1674 if (!error)
1675 newvp = NFSTOV(np);
1676 }
1677 }
1678 #ifndef NFS_V2_ONLY
1679 if (v3)
1680 nfsm_wcc_data(dvp, wccflag, 0, !error);
1681 #endif
1682 nfsm_reqdone;
1683 if (error) {
1684 /*
1685 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
1686 */
1687 if (v3 && error == ENOTSUP) {
1688 if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1689 excl_mode = NFSV3CREATE_GUARDED;
1690 goto again;
1691 } else if (excl_mode == NFSV3CREATE_GUARDED) {
1692 excl_mode = NFSV3CREATE_UNCHECKED;
1693 goto again;
1694 }
1695 }
1696 } else if (v3 && (excl_mode == NFSV3CREATE_EXCLUSIVE)) {
1697 struct timespec ts;
1698
1699 getnanotime(&ts);
1700
1701 /*
1702 * make sure that we'll update timestamps as
1703 * most server implementations use them to store
1704 * the create verifier.
1705 *
1706 * XXX it's better to use TOSERVER always.
1707 */
1708
1709 if (vap->va_atime.tv_sec == VNOVAL)
1710 vap->va_atime = ts;
1711 if (vap->va_mtime.tv_sec == VNOVAL)
1712 vap->va_mtime = ts;
1713
1714 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, curlwp);
1715 }
1716 if (error == 0) {
1717 if (cnp->cn_flags & MAKEENTRY)
1718 nfs_cache_enter(dvp, newvp, cnp);
1719 else
1720 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1721 *ap->a_vpp = newvp;
1722 VOP_UNLOCK(newvp);
1723 } else {
1724 if (newvp)
1725 vput(newvp);
1726 if (error == EEXIST)
1727 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1728 }
1729 VTONFS(dvp)->n_flag |= NMODIFIED;
1730 if (!wccflag)
1731 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1732 VN_KNOTE(ap->a_dvp, NOTE_WRITE);
1733 return (error);
1734 }
1735
1736 /*
1737 * nfs file remove call
1738 * To try and make nfs semantics closer to ufs semantics, a file that has
1739 * other processes using the vnode is renamed instead of removed and then
1740 * removed later on the last close.
1741 * - If vrefcnt(vp) > 1
1742 * If a rename is not already in the works
1743 * call nfs_sillyrename() to set it up
1744 * else
1745 * do the remove rpc
1746 */
1747 int
1748 nfs_remove(void *v)
1749 {
1750 struct vop_remove_v2_args /* {
1751 struct vnodeop_desc *a_desc;
1752 struct vnode * a_dvp;
1753 struct vnode * a_vp;
1754 struct componentname * a_cnp;
1755 } */ *ap = v;
1756 struct vnode *vp = ap->a_vp;
1757 struct vnode *dvp = ap->a_dvp;
1758 struct componentname *cnp = ap->a_cnp;
1759 struct nfsnode *np = VTONFS(vp);
1760 int error = 0;
1761 struct vattr vattr;
1762
1763 #ifndef DIAGNOSTIC
1764 if (vrefcnt(vp) < 1)
1765 panic("nfs_remove: bad vrefcnt(vp)");
1766 #endif
1767 if (vp->v_type == VDIR)
1768 error = EPERM;
1769 else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
1770 VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
1771 vattr.va_nlink > 1)) {
1772 /*
1773 * Purge the name cache so that the chance of a lookup for
1774 * the name succeeding while the remove is in progress is
1775 * minimized. Without node locking it can still happen, such
1776 * that an I/O op returns ESTALE, but since you get this if
1777 * another host removes the file..
1778 */
1779 cache_purge(vp);
1780 /*
1781 * throw away biocache buffers, mainly to avoid
1782 * unnecessary delayed writes later.
1783 */
1784 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, curlwp, 1);
1785 /* Do the rpc */
1786 if (error != EINTR)
1787 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1788 cnp->cn_namelen, cnp->cn_cred, curlwp);
1789 } else if (!np->n_sillyrename)
1790 error = nfs_sillyrename(dvp, vp, cnp, false);
1791 if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
1792 vattr.va_nlink == 1) {
1793 np->n_flag |= NREMOVED;
1794 }
1795 NFS_INVALIDATE_ATTRCACHE(np);
1796 VN_KNOTE(vp, NOTE_DELETE);
1797 VN_KNOTE(dvp, NOTE_WRITE);
1798 if (dvp == vp)
1799 vrele(vp);
1800 else
1801 vput(vp);
1802 return (error);
1803 }
1804
1805 /*
1806 * nfs file remove rpc called from nfs_inactive
1807 */
1808 int
1809 nfs_removeit(struct sillyrename *sp)
1810 {
1811
1812 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1813 (struct lwp *)0));
1814 }
1815
1816 /*
1817 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1818 */
1819 int
1820 nfs_removerpc(struct vnode *dvp, const char *name, int namelen, kauth_cred_t cred, struct lwp *l)
1821 {
1822 u_int32_t *tl;
1823 char *cp;
1824 #ifndef NFS_V2_ONLY
1825 int32_t t1;
1826 char *cp2;
1827 #endif
1828 int32_t t2;
1829 char *bpos, *dpos;
1830 int error = 0, wccflag = NFSV3_WCCRATTR;
1831 struct mbuf *mreq, *mrep, *md, *mb;
1832 const int v3 = NFS_ISV3(dvp);
1833 int rexmit = 0;
1834 struct nfsnode *dnp = VTONFS(dvp);
1835
1836 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1837 nfsm_reqhead(dnp, NFSPROC_REMOVE,
1838 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1839 nfsm_fhtom(dnp, v3);
1840 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1841 nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
1842 #ifndef NFS_V2_ONLY
1843 if (v3)
1844 nfsm_wcc_data(dvp, wccflag, 0, !error);
1845 #endif
1846 nfsm_reqdone;
1847 VTONFS(dvp)->n_flag |= NMODIFIED;
1848 if (!wccflag)
1849 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1850 /*
 1851 	 * Kludge City: If the first reply to the remove rpc is lost,
 1852 	 *   the reply to the retransmitted request will be ENOENT
 1853 	 *   since the file was in fact removed.
1854 * Therefore, we cheat and return success.
1855 */
1856 if (rexmit && error == ENOENT)
1857 error = 0;
1858 return (error);
1859 }
1860
1861 /*
1862 * nfs file rename call
1863 */
1864 int
1865 nfs_rename(void *v)
1866 {
1867 struct vop_rename_args /* {
1868 struct vnode *a_fdvp;
1869 struct vnode *a_fvp;
1870 struct componentname *a_fcnp;
1871 struct vnode *a_tdvp;
1872 struct vnode *a_tvp;
1873 struct componentname *a_tcnp;
1874 } */ *ap = v;
1875 struct vnode *fvp = ap->a_fvp;
1876 struct vnode *tvp = ap->a_tvp;
1877 struct vnode *fdvp = ap->a_fdvp;
1878 struct vnode *tdvp = ap->a_tdvp;
1879 struct componentname *tcnp = ap->a_tcnp;
1880 struct componentname *fcnp = ap->a_fcnp;
1881 int error;
1882
1883 /* Check for cross-device rename */
1884 if ((fvp->v_mount != tdvp->v_mount) ||
1885 (tvp && (fvp->v_mount != tvp->v_mount))) {
1886 error = EXDEV;
1887 goto out;
1888 }
1889
1890 /*
1891 * If the tvp exists and is in use, sillyrename it before doing the
1892 * rename of the new file over it.
1893 *
1894 * Have sillyrename use link instead of rename if possible,
1895 * so that we don't lose the file if the rename fails, and so
1896 * that there's no window when the "to" file doesn't exist.
1897 */
1898 if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
1899 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
1900 VN_KNOTE(tvp, NOTE_DELETE);
1901 vput(tvp);
1902 tvp = NULL;
1903 }
1904
1905 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1906 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1907 curlwp);
1908
1909 VN_KNOTE(fdvp, NOTE_WRITE);
1910 VN_KNOTE(tdvp, NOTE_WRITE);
1911 if (error == 0 || error == EEXIST) {
1912 if (fvp->v_type == VDIR)
1913 cache_purge(fvp);
1914 else
1915 cache_purge1(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1916 0);
1917 if (tvp != NULL && tvp->v_type == VDIR)
1918 cache_purge(tvp);
1919 else
1920 cache_purge1(tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
1921 0);
1922 }
1923 out:
1924 if (tdvp == tvp)
1925 vrele(tdvp);
1926 else
1927 vput(tdvp);
1928 if (tvp)
1929 vput(tvp);
1930 vrele(fdvp);
1931 vrele(fvp);
1932 return (error);
1933 }
1934
1935 /*
1936  * nfs file rename rpc called from nfs_sillyrename() below
1937 */
1938 int
1939 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, struct sillyrename *sp)
1940 {
1941 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1942 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curlwp));
1943 }
1944
1945 /*
1946 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1947 */
1948 int
1949 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen, struct vnode *tdvp, const char *tnameptr, int tnamelen, kauth_cred_t cred, struct lwp *l)
1950 {
1951 u_int32_t *tl;
1952 char *cp;
1953 #ifndef NFS_V2_ONLY
1954 int32_t t1;
1955 char *cp2;
1956 #endif
1957 int32_t t2;
1958 char *bpos, *dpos;
1959 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1960 struct mbuf *mreq, *mrep, *md, *mb;
1961 const int v3 = NFS_ISV3(fdvp);
1962 int rexmit = 0;
1963 struct nfsnode *fdnp = VTONFS(fdvp);
1964
1965 nfsstats.rpccnt[NFSPROC_RENAME]++;
1966 nfsm_reqhead(fdnp, NFSPROC_RENAME,
1967 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1968 nfsm_rndup(tnamelen));
1969 nfsm_fhtom(fdnp, v3);
1970 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1971 nfsm_fhtom(VTONFS(tdvp), v3);
1972 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1973 nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
1974 #ifndef NFS_V2_ONLY
1975 if (v3) {
1976 nfsm_wcc_data(fdvp, fwccflag, 0, !error);
1977 nfsm_wcc_data(tdvp, twccflag, 0, !error);
1978 }
1979 #endif
1980 nfsm_reqdone;
1981 VTONFS(fdvp)->n_flag |= NMODIFIED;
1982 VTONFS(tdvp)->n_flag |= NMODIFIED;
1983 if (!fwccflag)
1984 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
1985 if (!twccflag)
1986 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
1987 /*
1988 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1989 */
1990 if (rexmit && error == ENOENT)
1991 error = 0;
1992 return (error);
1993 }
1994
1995 /*
1996 * NFS link RPC, called from nfs_link.
1997 * Assumes dvp and vp locked, and leaves them that way.
1998 */
1999
2000 static int
2001 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name,
2002 size_t namelen, kauth_cred_t cred, struct lwp *l)
2003 {
2004 u_int32_t *tl;
2005 char *cp;
2006 #ifndef NFS_V2_ONLY
2007 int32_t t1;
2008 char *cp2;
2009 #endif
2010 int32_t t2;
2011 char *bpos, *dpos;
2012 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
2013 struct mbuf *mreq, *mrep, *md, *mb;
2014 const int v3 = NFS_ISV3(dvp);
2015 int rexmit = 0;
2016 struct nfsnode *np = VTONFS(vp);
2017
2018 nfsstats.rpccnt[NFSPROC_LINK]++;
2019 nfsm_reqhead(np, NFSPROC_LINK,
2020 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen));
2021 nfsm_fhtom(np, v3);
2022 nfsm_fhtom(VTONFS(dvp), v3);
2023 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
2024 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit);
2025 #ifndef NFS_V2_ONLY
2026 if (v3) {
2027 nfsm_postop_attr(vp, attrflag, 0);
2028 nfsm_wcc_data(dvp, wccflag, 0, !error);
2029 }
2030 #endif
2031 nfsm_reqdone;
2032
2033 VTONFS(dvp)->n_flag |= NMODIFIED;
2034 if (!attrflag)
2035 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
2036 if (!wccflag)
2037 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2038
2039 /*
2040 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2041 */
2042 if (rexmit && error == EEXIST)
2043 error = 0;
2044
2045 return error;
2046 }
2047
2048 /*
2049 * nfs hard link create call
2050 */
2051 int
2052 nfs_link(void *v)
2053 {
2054 struct vop_link_v2_args /* {
2055 struct vnode *a_dvp;
2056 struct vnode *a_vp;
2057 struct componentname *a_cnp;
2058 } */ *ap = v;
2059 struct vnode *vp = ap->a_vp;
2060 struct vnode *dvp = ap->a_dvp;
2061 struct componentname *cnp = ap->a_cnp;
2062 int error = 0;
2063
2064 error = vn_lock(vp, LK_EXCLUSIVE);
2065 if (error != 0) {
2066 VOP_ABORTOP(dvp, cnp);
2067 return error;
2068 }
2069
2070 /*
2071 * Push all writes to the server, so that the attribute cache
2072 * doesn't get "out of sync" with the server.
2073 * XXX There should be a better way!
2074 */
2075 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0);
2076
2077 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2078 cnp->cn_cred, curlwp);
2079
2080 if (error == 0) {
2081 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
2082 }
2083 VOP_UNLOCK(vp);
2084 VN_KNOTE(vp, NOTE_LINK);
2085 VN_KNOTE(dvp, NOTE_WRITE);
2086 return (error);
2087 }
2088
2089 /*
2090 * nfs symbolic link create call
2091 */
2092 int
2093 nfs_symlink(void *v)
2094 {
2095 struct vop_symlink_v3_args /* {
2096 struct vnode *a_dvp;
2097 struct vnode **a_vpp;
2098 struct componentname *a_cnp;
2099 struct vattr *a_vap;
2100 char *a_target;
2101 } */ *ap = v;
2102 struct vnode *dvp = ap->a_dvp;
2103 struct vattr *vap = ap->a_vap;
2104 struct componentname *cnp = ap->a_cnp;
2105 struct nfsv2_sattr *sp;
2106 u_int32_t *tl;
2107 char *cp;
2108 int32_t t1, t2;
2109 char *bpos, *dpos, *cp2;
2110 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2111 struct mbuf *mreq, *mrep, *md, *mb;
2112 struct vnode *newvp = (struct vnode *)0;
2113 const int v3 = NFS_ISV3(dvp);
2114 int rexmit = 0;
2115 struct nfsnode *dnp = VTONFS(dvp);
2116
2117 *ap->a_vpp = NULL;
2118 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2119 slen = strlen(ap->a_target);
2120 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2121 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2122 nfsm_fhtom(dnp, v3);
2123 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2124 #ifndef NFS_V2_ONLY
2125 if (v3)
2126 nfsm_v3attrbuild(vap, false);
2127 #endif
2128 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2130 if (!v3) {
2131 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2132 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2133 sp->sa_uid = nfs_xdrneg1;
2134 sp->sa_gid = nfs_xdrneg1;
2135 sp->sa_size = nfs_xdrneg1;
2136 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2137 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2138 }
2140 nfsm_request1(dnp, NFSPROC_SYMLINK, curlwp, cnp->cn_cred,
2141 &rexmit);
2142 #ifndef NFS_V2_ONLY
2143 if (v3) {
2144 if (!error)
2145 nfsm_mtofh(dvp, newvp, v3, gotvp);
2146 nfsm_wcc_data(dvp, wccflag, 0, !error);
2147 }
2148 #endif
2149 nfsm_reqdone;
2150 /*
2151 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2152 */
2153 if (rexmit && error == EEXIST)
2154 error = 0;
2155 if (error == 0 || error == EEXIST)
2156 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
2157 if (error == 0 && newvp == NULL) {
2158 struct nfsnode *np = NULL;
2159
2160 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2161 cnp->cn_cred, curlwp, &np);
2162 if (error == 0)
2163 newvp = NFSTOV(np);
2164 }
2165 if (error) {
2166 if (newvp != NULL)
2167 vput(newvp);
2168 } else {
2169 *ap->a_vpp = newvp;
2170 VOP_UNLOCK(newvp);
2171 }
2172 VTONFS(dvp)->n_flag |= NMODIFIED;
2173 if (!wccflag)
2174 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2175 VN_KNOTE(dvp, NOTE_WRITE);
2176 return (error);
2177 }
2178
2179 /*
2180 * nfs make dir call
2181 */
2182 int
2183 nfs_mkdir(void *v)
2184 {
2185 struct vop_mkdir_v3_args /* {
2186 struct vnode *a_dvp;
2187 struct vnode **a_vpp;
2188 struct componentname *a_cnp;
2189 struct vattr *a_vap;
2190 } */ *ap = v;
2191 struct vnode *dvp = ap->a_dvp;
2192 struct vattr *vap = ap->a_vap;
2193 struct componentname *cnp = ap->a_cnp;
2194 struct nfsv2_sattr *sp;
2195 u_int32_t *tl;
2196 char *cp;
2197 int32_t t1, t2;
2198 int len;
2199 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0;
2200 struct vnode *newvp = (struct vnode *)0;
2201 char *bpos, *dpos, *cp2;
2202 int error = 0, wccflag = NFSV3_WCCRATTR;
2203 int gotvp = 0;
2204 int rexmit = 0;
2205 struct mbuf *mreq, *mrep, *md, *mb;
2206 const int v3 = NFS_ISV3(dvp);
2207
2208 len = cnp->cn_namelen;
2209 nfsstats.rpccnt[NFSPROC_MKDIR]++;
2210 nfsm_reqhead(dnp, NFSPROC_MKDIR,
2211 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2212 nfsm_fhtom(dnp, v3);
2213 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2214 #ifndef NFS_V2_ONLY
2215 if (v3) {
2216 nfsm_v3attrbuild(vap, false);
2217 } else
2218 #endif
2219 {
2220 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2221 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2222 sp->sa_uid = nfs_xdrneg1;
2223 sp->sa_gid = nfs_xdrneg1;
2224 sp->sa_size = nfs_xdrneg1;
2225 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2226 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2227 }
2228 nfsm_request1(dnp, NFSPROC_MKDIR, curlwp, cnp->cn_cred, &rexmit);
2229 if (!error)
2230 nfsm_mtofh(dvp, newvp, v3, gotvp);
2231 if (v3)
2232 nfsm_wcc_data(dvp, wccflag, 0, !error);
2233 nfsm_reqdone;
2234 VTONFS(dvp)->n_flag |= NMODIFIED;
2235 if (!wccflag)
2236 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2237 /*
2238 	 * Kludge: Map EEXIST => 0, assuming that it is a reply to a retry,
2239 	 * provided we can succeed in looking up the directory.
2240 */
2241 if ((rexmit && error == EEXIST) || (!error && !gotvp)) {
2242 if (newvp) {
2243 vput(newvp);
2244 newvp = (struct vnode *)0;
2245 }
2246 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
2247 curlwp, &np);
2248 if (!error) {
2249 newvp = NFSTOV(np);
2250 if (newvp->v_type != VDIR || newvp == dvp)
2251 error = EEXIST;
2252 }
2253 }
2254 if (error) {
2255 if (newvp) {
2256 if (dvp != newvp)
2257 vput(newvp);
2258 else
2259 vrele(newvp);
2260 }
2261 } else {
2262 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2263 nfs_cache_enter(dvp, newvp, cnp);
2264 *ap->a_vpp = newvp;
2265 VOP_UNLOCK(newvp);
2266 }
2267 return (error);
2268 }
2269
2270 /*
2271 * nfs remove directory call
2272 */
2273 int
2274 nfs_rmdir(void *v)
2275 {
2276 struct vop_rmdir_v2_args /* {
2277 struct vnode *a_dvp;
2278 struct vnode *a_vp;
2279 struct componentname *a_cnp;
2280 } */ *ap = v;
2281 struct vnode *vp = ap->a_vp;
2282 struct vnode *dvp = ap->a_dvp;
2283 struct componentname *cnp = ap->a_cnp;
2284 u_int32_t *tl;
2285 char *cp;
2286 #ifndef NFS_V2_ONLY
2287 int32_t t1;
2288 char *cp2;
2289 #endif
2290 int32_t t2;
2291 char *bpos, *dpos;
2292 int error = 0, wccflag = NFSV3_WCCRATTR;
2293 int rexmit = 0;
2294 struct mbuf *mreq, *mrep, *md, *mb;
2295 const int v3 = NFS_ISV3(dvp);
2296 struct nfsnode *dnp;
2297
2298 if (dvp == vp) {
2299 vrele(vp);
2300 return (EINVAL);
2301 }
2302 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2303 dnp = VTONFS(dvp);
2304 nfsm_reqhead(dnp, NFSPROC_RMDIR,
2305 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2306 nfsm_fhtom(dnp, v3);
2307 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2308 nfsm_request1(dnp, NFSPROC_RMDIR, curlwp, cnp->cn_cred, &rexmit);
2309 #ifndef NFS_V2_ONLY
2310 if (v3)
2311 nfsm_wcc_data(dvp, wccflag, 0, !error);
2312 #endif
2313 nfsm_reqdone;
2314 VTONFS(dvp)->n_flag |= NMODIFIED;
2315 if (!wccflag)
2316 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2317 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2318 VN_KNOTE(vp, NOTE_DELETE);
2319 cache_purge(vp);
2320 vput(vp);
2321 /*
2322 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
2323 */
2324 if (rexmit && error == ENOENT)
2325 error = 0;
2326 return (error);
2327 }
2328
2329 /*
2330 * nfs readdir call
2331 */
2332 int
2333 nfs_readdir(void *v)
2334 {
2335 struct vop_readdir_args /* {
2336 struct vnode *a_vp;
2337 struct uio *a_uio;
2338 kauth_cred_t a_cred;
2339 int *a_eofflag;
2340 off_t **a_cookies;
2341 int *a_ncookies;
2342 } */ *ap = v;
2343 struct vnode *vp = ap->a_vp;
2344 struct uio *uio = ap->a_uio;
2345 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2346 char *base = uio->uio_iov->iov_base;
2347 int tresid, error;
2348 size_t count, lost;
2349 struct dirent *dp;
2350 off_t *cookies = NULL;
2351 int ncookies = 0, nc;
2352
2353 if (vp->v_type != VDIR)
2354 return (EPERM);
2355
2356 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1);
2357 count = uio->uio_resid - lost;
2358 if (count <= 0)
2359 return (EINVAL);
2360
2361 /*
2362 * Call nfs_bioread() to do the real work.
2363 */
2364 tresid = uio->uio_resid = count;
2365 error = nfs_bioread(vp, uio, 0, ap->a_cred,
2366 ap->a_cookies ? NFSBIO_CACHECOOKIES : 0);
2367
2368 if (!error && ap->a_cookies) {
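		/*
		 * count / 16 bounds the number of entries (and hence
		 * cookies) that can fit in the buffer, assuming a dirent
		 * record is never smaller than 16 bytes.
		 */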
2369 ncookies = count / 16;
2370 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
2371 *ap->a_cookies = cookies;
2372 }
2373
2374 if (!error && uio->uio_resid == tresid) {
2375 uio->uio_resid += lost;
2376 nfsstats.direofcache_misses++;
2377 if (ap->a_cookies)
2378 *ap->a_ncookies = 0;
2379 *ap->a_eofflag = 1;
2380 return (0);
2381 }
2382
2383 if (!error && ap->a_cookies) {
2384 /*
2385 * Only the NFS server and emulations use cookies, and they
2386 * load the directory block into system space, so we can
2387 * just look at it directly.
2388 */
2389 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
2390 uio->uio_iovcnt != 1)
2391 panic("nfs_readdir: lost in space");
2392 for (nc = 0; ncookies-- &&
2393 base < (char *)uio->uio_iov->iov_base; nc++){
2394 dp = (struct dirent *) base;
2395 if (dp->d_reclen == 0)
2396 break;
2397 if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
2398 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp);
2399 else
2400 *(cookies++) = NFS_GETCOOKIE(dp);
2401 base += dp->d_reclen;
2402 }
2403 uio->uio_resid +=
2404 ((char *)uio->uio_iov->iov_base - base);
2405 uio->uio_iov->iov_len +=
2406 ((char *)uio->uio_iov->iov_base - base);
2407 uio->uio_iov->iov_base = base;
2408 *ap->a_ncookies = nc;
2409 }
2410
2411 uio->uio_resid += lost;
2412 *ap->a_eofflag = 0;
2413 return (error);
2414 }
2415
2416 /*
2417 * Readdir rpc call.
2418 * Called from below the buffer cache by nfs_doio().
2419 */
2420 int
2421 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2422 {
2423 int len, left;
2424 struct dirent *dp = NULL;
2425 u_int32_t *tl;
2426 char *cp;
2427 int32_t t1, t2;
2428 char *bpos, *dpos, *cp2;
2429 struct mbuf *mreq, *mrep, *md, *mb;
2430 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2431 struct nfsnode *dnp = VTONFS(vp);
2432 u_quad_t fileno;
2433 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1;
2434 #ifndef NFS_V2_ONLY
2435 int attrflag;
2436 #endif
2437 int nrpcs = 0, reclen;
2438 const int v3 = NFS_ISV3(vp);
2439
2440 #ifdef DIAGNOSTIC
2441 /*
2442 	 * Should be called from the buffer cache, so only NFS_DIRBLKSIZ
2443 	 * worth of data will be requested.
2444 */
2445 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2446 panic("nfs readdirrpc bad uio");
2447 #endif
2448
2449 /*
2450 * Loop around doing readdir rpc's of size nm_readdirsize
2451 * truncated to a multiple of NFS_DIRFRAGSIZ.
2452 * The stopping criteria is EOF or buffer full.
2453 */
2454 while (more_dirs && bigenough) {
2455 /*
2456 		 * Heuristic: don't bother to do another RPC to further
2457 		 * fill up this block if there is not much room left
2458 		 * (less than 50% of the readdir RPC size).  This wastes some
2459 		 * buffer space but can save up to 50% in RPC calls.
2460 */
2461 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2462 bigenough = 0;
2463 break;
2464 }
2465 nfsstats.rpccnt[NFSPROC_READDIR]++;
2466 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) +
2467 NFSX_READDIR(v3));
2468 nfsm_fhtom(dnp, v3);
2469 #ifndef NFS_V2_ONLY
2470 if (v3) {
2471 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2472 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2473 txdr_swapcookie3(uiop->uio_offset, tl);
2474 } else {
2475 txdr_cookie3(uiop->uio_offset, tl);
2476 }
2477 tl += 2;
2478 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2479 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2480 } else
2481 #endif
2482 {
2483 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2484 *tl++ = txdr_unsigned(uiop->uio_offset);
2485 }
2486 *tl = txdr_unsigned(nmp->nm_readdirsize);
2487 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred);
2488 nrpcs++;
2489 #ifndef NFS_V2_ONLY
2490 if (v3) {
2491 nfsm_postop_attr(vp, attrflag, 0);
2492 if (!error) {
2493 nfsm_dissect(tl, u_int32_t *,
2494 2 * NFSX_UNSIGNED);
2495 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2496 dnp->n_cookieverf.nfsuquad[1] = *tl;
2497 } else {
2498 m_freem(mrep);
2499 goto nfsmout;
2500 }
2501 }
2502 #endif
2503 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2504 more_dirs = fxdr_unsigned(int, *tl);
2505
2506 /* loop thru the dir entries, doctoring them to 4bsd form */
2507 while (more_dirs && bigenough) {
2508 #ifndef NFS_V2_ONLY
2509 if (v3) {
2510 nfsm_dissect(tl, u_int32_t *,
2511 3 * NFSX_UNSIGNED);
2512 fileno = fxdr_hyper(tl);
2513 len = fxdr_unsigned(int, *(tl + 2));
2514 } else
2515 #endif
2516 {
2517 nfsm_dissect(tl, u_int32_t *,
2518 2 * NFSX_UNSIGNED);
2519 fileno = fxdr_unsigned(u_quad_t, *tl++);
2520 len = fxdr_unsigned(int, *tl);
2521 }
2522 if (len <= 0 || len > NFS_MAXNAMLEN) {
2523 error = EBADRPC;
2524 m_freem(mrep);
2525 goto nfsmout;
2526 }
2527 /* for cookie stashing */
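			/*
			 * The extra 2 * sizeof(off_t) leaves room in each
			 * record for NFS_STASHCOOKIE() to store the server's
			 * directory cookie, later read back with
			 * NFS_GETCOOKIE() (see nfs_readdir() above).
			 */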
2528 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2529 left = NFS_DIRFRAGSIZ - blksiz;
2530 if (reclen > left) {
2531 memset(uiop->uio_iov->iov_base, 0, left);
2532 dp->d_reclen += left;
2533 UIO_ADVANCE(uiop, left);
2534 blksiz = 0;
2535 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2536 }
2537 if (reclen > uiop->uio_resid)
2538 bigenough = 0;
2539 if (bigenough) {
2540 int tlen;
2541
2542 dp = (struct dirent *)uiop->uio_iov->iov_base;
2543 dp->d_fileno = fileno;
2544 dp->d_namlen = len;
2545 dp->d_reclen = reclen;
2546 dp->d_type = DT_UNKNOWN;
2547 blksiz += reclen;
2548 if (blksiz == NFS_DIRFRAGSIZ)
2549 blksiz = 0;
2550 UIO_ADVANCE(uiop, DIRHDSIZ);
2551 nfsm_mtouio(uiop, len);
2552 tlen = reclen - (DIRHDSIZ + len);
2553 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2554 UIO_ADVANCE(uiop, tlen);
2555 } else
2556 nfsm_adv(nfsm_rndup(len));
2557 #ifndef NFS_V2_ONLY
2558 if (v3) {
2559 nfsm_dissect(tl, u_int32_t *,
2560 3 * NFSX_UNSIGNED);
2561 } else
2562 #endif
2563 {
2564 nfsm_dissect(tl, u_int32_t *,
2565 2 * NFSX_UNSIGNED);
2566 }
2567 if (bigenough) {
2568 #ifndef NFS_V2_ONLY
2569 if (v3) {
2570 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2571 uiop->uio_offset =
2572 fxdr_swapcookie3(tl);
2573 else
2574 uiop->uio_offset =
2575 fxdr_cookie3(tl);
2576 }
2577 else
2578 #endif
2579 {
2580 uiop->uio_offset =
2581 fxdr_unsigned(off_t, *tl);
2582 }
2583 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2584 }
2585 if (v3)
2586 tl += 2;
2587 else
2588 tl++;
2589 more_dirs = fxdr_unsigned(int, *tl);
2590 }
2591 /*
2592 * If at end of rpc data, get the eof boolean
2593 */
2594 if (!more_dirs) {
2595 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2596 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2597
2598 /*
2599 			 * Kludge: if we got no entries, treat it as EOF.
2600 			 * Some servers occasionally send a reply without any
2601 			 * entries or the EOF flag set.
2602 			 * Although this might mean the server has an entry
2603 			 * with a very long name, we can't handle such entries anyway.
2604 */
2605
2606 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2607 more_dirs = 0;
2608 }
2609 m_freem(mrep);
2610 }
2611 /*
2612 	 * Fill the last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2613 * by increasing d_reclen for the last record.
2614 */
2615 if (blksiz > 0) {
2616 left = NFS_DIRFRAGSIZ - blksiz;
2617 memset(uiop->uio_iov->iov_base, 0, left);
2618 dp->d_reclen += left;
2619 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2620 UIO_ADVANCE(uiop, left);
2621 }
2622
2623 /*
2624 * We are now either at the end of the directory or have filled the
2625 * block.
2626 */
2627 if (bigenough) {
2628 dnp->n_direofoffset = uiop->uio_offset;
2629 dnp->n_flag |= NEOFVALID;
2630 }
2631 nfsmout:
2632 return (error);
2633 }
2634
2635 #ifndef NFS_V2_ONLY
2636 /*
2637 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2638 */
2639 int
2640 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2641 {
2642 int len, left;
2643 struct dirent *dp = NULL;
2644 u_int32_t *tl;
2645 char *cp;
2646 int32_t t1, t2;
2647 struct vnode *newvp;
2648 char *bpos, *dpos, *cp2;
2649 struct mbuf *mreq, *mrep, *md, *mb;
2650 struct nameidata nami, *ndp = &nami;
2651 struct componentname *cnp = &ndp->ni_cnd;
2652 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2653 struct nfsnode *dnp = VTONFS(vp), *np;
2654 nfsfh_t *fhp;
2655 u_quad_t fileno;
2656 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2657 int attrflag, fhsize, nrpcs = 0, reclen;
2658 struct nfs_fattr fattr, *fp;
2659
2660 #ifdef DIAGNOSTIC
2661 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2662 panic("nfs readdirplusrpc bad uio");
2663 #endif
2664 ndp->ni_dvp = vp;
2665 newvp = NULLVP;
2666
2667 /*
2668 * Loop around doing readdir rpc's of size nm_readdirsize
2669 * truncated to a multiple of NFS_DIRFRAGSIZ.
2670 * The stopping criteria is EOF or buffer full.
2671 */
2672 while (more_dirs && bigenough) {
2673 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2674 bigenough = 0;
2675 break;
2676 }
2677 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2678 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS,
2679 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2680 nfsm_fhtom(dnp, 1);
2681 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2682 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2683 txdr_swapcookie3(uiop->uio_offset, tl);
2684 } else {
2685 txdr_cookie3(uiop->uio_offset, tl);
2686 }
2687 tl += 2;
2688 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2689 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2690 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2691 *tl = txdr_unsigned(nmp->nm_rsize);
2692 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred);
2693 nfsm_postop_attr(vp, attrflag, 0);
2694 if (error) {
2695 m_freem(mrep);
2696 goto nfsmout;
2697 }
2698 nrpcs++;
2699 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2700 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2701 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2702 more_dirs = fxdr_unsigned(int, *tl);
2703
2704 /* loop thru the dir entries, doctoring them to 4bsd form */
2705 while (more_dirs && bigenough) {
2706 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2707 fileno = fxdr_hyper(tl);
2708 len = fxdr_unsigned(int, *(tl + 2));
2709 if (len <= 0 || len > NFS_MAXNAMLEN) {
2710 error = EBADRPC;
2711 m_freem(mrep);
2712 goto nfsmout;
2713 }
2714 /* for cookie stashing */
2715 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2716 left = NFS_DIRFRAGSIZ - blksiz;
2717 if (reclen > left) {
2718 /*
2719 * DIRFRAGSIZ is aligned, no need to align
2720 * again here.
2721 */
2722 memset(uiop->uio_iov->iov_base, 0, left);
2723 dp->d_reclen += left;
2724 UIO_ADVANCE(uiop, left);
2725 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2726 blksiz = 0;
2727 }
2728 if (reclen > uiop->uio_resid)
2729 bigenough = 0;
2730 if (bigenough) {
2731 int tlen;
2732
2733 dp = (struct dirent *)uiop->uio_iov->iov_base;
2734 dp->d_fileno = fileno;
2735 dp->d_namlen = len;
2736 dp->d_reclen = reclen;
2737 dp->d_type = DT_UNKNOWN;
2738 blksiz += reclen;
2739 if (blksiz == NFS_DIRFRAGSIZ)
2740 blksiz = 0;
2741 UIO_ADVANCE(uiop, DIRHDSIZ);
2742 nfsm_mtouio(uiop, len);
2743 tlen = reclen - (DIRHDSIZ + len);
2744 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2745 UIO_ADVANCE(uiop, tlen);
2746 cnp->cn_nameptr = dp->d_name;
2747 cnp->cn_namelen = dp->d_namlen;
2748 } else
2749 nfsm_adv(nfsm_rndup(len));
2750 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2751 if (bigenough) {
2752 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2753 uiop->uio_offset =
2754 fxdr_swapcookie3(tl);
2755 else
2756 uiop->uio_offset =
2757 fxdr_cookie3(tl);
2758 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2759 }
2760 tl += 2;
2761
2762 /*
2763 * Since the attributes are before the file handle
2764 * (sigh), we must skip over the attributes and then
2765 * come back and get them.
2766 */
2767 attrflag = fxdr_unsigned(int, *tl);
2768 if (attrflag) {
2769 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR);
2770 memcpy(&fattr, fp, NFSX_V3FATTR);
2771 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2772 doit = fxdr_unsigned(int, *tl);
2773 if (doit) {
2774 nfsm_getfh(fhp, fhsize, 1);
2775 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2776 vref(vp);
2777 newvp = vp;
2778 np = dnp;
2779 } else {
2780 error = nfs_nget1(vp->v_mount, fhp,
2781 fhsize, &np, LK_NOWAIT);
2782 if (!error)
2783 newvp = NFSTOV(np);
2784 }
2785 if (!error) {
2786 nfs_loadattrcache(&newvp, &fattr, 0, 0);
2787 if (bigenough) {
2788 dp->d_type =
2789 IFTODT(VTTOIF(np->n_vattr->va_type));
2790 ndp->ni_vp = newvp;
2791 nfs_cache_enter(ndp->ni_dvp,
2792 ndp->ni_vp, cnp);
2793 }
2794 }
2795 error = 0;
2796 }
2797 } else {
2798 /* Just skip over the file handle */
2799 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2800 i = fxdr_unsigned(int, *tl);
2801 nfsm_adv(nfsm_rndup(i));
2802 }
2803 if (newvp != NULLVP) {
2804 if (newvp == vp)
2805 vrele(newvp);
2806 else
2807 vput(newvp);
2808 newvp = NULLVP;
2809 }
2810 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2811 more_dirs = fxdr_unsigned(int, *tl);
2812 }
2813 /*
2814 * If at end of rpc data, get the eof boolean
2815 */
2816 if (!more_dirs) {
2817 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2818 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2819
2820 /*
2821 * kludge: see a comment in nfs_readdirrpc.
2822 */
2823
2824 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2825 more_dirs = 0;
2826 }
2827 m_freem(mrep);
2828 }
2829 /*
2830 	 * Fill the last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2831 * by increasing d_reclen for the last record.
2832 */
2833 if (blksiz > 0) {
2834 left = NFS_DIRFRAGSIZ - blksiz;
2835 memset(uiop->uio_iov->iov_base, 0, left);
2836 dp->d_reclen += left;
2837 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2838 UIO_ADVANCE(uiop, left);
2839 }
2840
2841 /*
2842 * We are now either at the end of the directory or have filled the
2843 * block.
2844 */
2845 if (bigenough) {
2846 dnp->n_direofoffset = uiop->uio_offset;
2847 dnp->n_flag |= NEOFVALID;
2848 }
2849 nfsmout:
2850 if (newvp != NULLVP) {
2851 if(newvp == vp)
2852 vrele(newvp);
2853 else
2854 vput(newvp);
2855 }
2856 return (error);
2857 }
2858 #endif
2859
2860 /*
2861  * Silly rename. To make the stateless NFS filesystem look a little more
2862  * like "ufs", a remove of an active vnode is translated into a rename to
2863  * a funny-looking filename that is removed by nfs_inactive() on the
2864  * nfsnode. There is the potential for another process on a different
2865  * client to create the same funny name between the time nfs_lookitup()
2866  * fails and nfs_rename() completes, but...
2867 */
2868 int
2869 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, bool dolink)
2870 {
2871 struct sillyrename *sp;
2872 struct nfsnode *np;
2873 int error;
2874 pid_t pid;
2875
2876 cache_purge(dvp);
2877 np = VTONFS(vp);
2878 #ifdef DIAGNOSTIC
2879 if (vp->v_type == VDIR)
2880 panic("nfs: sillyrename dir");
2881 #endif
2882 sp = kmem_alloc(sizeof(*sp), KM_SLEEP);
2883 sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2884 sp->s_dvp = dvp;
2885 vref(dvp);
2886
2887 /* Fudge together a funny name */
2888 pid = curlwp->l_proc->p_pid;
2889 memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
2890 sp->s_namlen = 12;
2891 sp->s_name[8] = hexdigits[pid & 0xf];
2892 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
2893 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
2894 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
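	/*
	 * The name is now ".nfsAxxxx4.4" with the low 16 bits of the pid
	 * spelled out as hex digits in s_name[5..8]; s_name[4] ('A') is
	 * the character bumped below if the name is already in use.
	 */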
2895
2896 /* Try lookitups until we get one that isn't there */
2897 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2898 curlwp, (struct nfsnode **)0) == 0) {
2899 sp->s_name[4]++;
2900 if (sp->s_name[4] > 'z') {
2901 error = EINVAL;
2902 goto bad;
2903 }
2904 }
2905 if (dolink) {
2906 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
2907 sp->s_cred, curlwp);
2908 /*
2909 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
2910 */
2911 if (error == ENOTSUP) {
2912 error = nfs_renameit(dvp, cnp, sp);
2913 }
2914 } else {
2915 error = nfs_renameit(dvp, cnp, sp);
2916 }
2917 if (error)
2918 goto bad;
2919 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2920 curlwp, &np);
2921 np->n_sillyrename = sp;
2922 return (0);
2923 bad:
2924 vrele(sp->s_dvp);
2925 kauth_cred_free(sp->s_cred);
2926 kmem_free(sp, sizeof(*sp));
2927 return (error);
2928 }
2929
2930 /*
2931 * Look up a file name and optionally either update the file handle or
2932 * allocate an nfsnode, depending on the value of npp.
2933 * npp == NULL --> just do the lookup
2934 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2935 * handled too
2936 * *npp != NULL --> update the file handle in the vnode
2937 */
2938 int
2939 nfs_lookitup(struct vnode *dvp, const char *name, int len, kauth_cred_t cred, struct lwp *l, struct nfsnode **npp)
2940 {
2941 u_int32_t *tl;
2942 char *cp;
2943 int32_t t1, t2;
2944 struct vnode *newvp = (struct vnode *)0;
2945 struct nfsnode *np, *dnp = VTONFS(dvp);
2946 char *bpos, *dpos, *cp2;
2947 int error = 0, ofhlen, fhlen;
2948 #ifndef NFS_V2_ONLY
2949 int attrflag;
2950 #endif
2951 struct mbuf *mreq, *mrep, *md, *mb;
2952 nfsfh_t *ofhp, *nfhp;
2953 const int v3 = NFS_ISV3(dvp);
2954
2955 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2956 nfsm_reqhead(dnp, NFSPROC_LOOKUP,
2957 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2958 nfsm_fhtom(dnp, v3);
2959 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2960 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred);
2961 if (npp && !error) {
2962 nfsm_getfh(nfhp, fhlen, v3);
2963 if (*npp) {
2964 np = *npp;
2965 newvp = NFSTOV(np);
2966 ofhlen = np->n_fhsize;
2967 ofhp = kmem_alloc(ofhlen, KM_SLEEP);
2968 memcpy(ofhp, np->n_fhp, ofhlen);
2969 error = vcache_rekey_enter(newvp->v_mount, newvp,
2970 ofhp, ofhlen, nfhp, fhlen);
2971 if (error) {
2972 kmem_free(ofhp, ofhlen);
2973 m_freem(mrep);
2974 return error;
2975 }
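			/*
			 * Resize the file handle storage: handles that fit
			 * in NFS_SMALLFH live in the nfsnode's embedded
			 * n_fh buffer, larger ones in a separate kmem
			 * allocation.
			 */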
2976 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2977 kmem_free(np->n_fhp, np->n_fhsize);
2978 np->n_fhp = &np->n_fh;
2979 }
2980 #if NFS_SMALLFH < NFSX_V3FHMAX
2981 else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
2982 np->n_fhp = kmem_alloc(fhlen, KM_SLEEP);
2983 #endif
2984 memcpy(np->n_fhp, nfhp, fhlen);
2985 np->n_fhsize = fhlen;
2986 vcache_rekey_exit(newvp->v_mount, newvp,
2987 ofhp, ofhlen, np->n_fhp, fhlen);
2988 kmem_free(ofhp, ofhlen);
2989 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2990 vref(dvp);
2991 newvp = dvp;
2992 np = dnp;
2993 } else {
2994 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2995 if (error) {
2996 m_freem(mrep);
2997 return (error);
2998 }
2999 newvp = NFSTOV(np);
3000 }
3001 #ifndef NFS_V2_ONLY
3002 if (v3) {
3003 nfsm_postop_attr(newvp, attrflag, 0);
3004 if (!attrflag && *npp == NULL) {
3005 m_freem(mrep);
3006 vput(newvp);
3007 return (ENOENT);
3008 }
3009 } else
3010 #endif
3011 nfsm_loadattr(newvp, (struct vattr *)0, 0);
3012 }
3013 nfsm_reqdone;
3014 if (npp && *npp == NULL) {
3015 if (error) {
3016 if (newvp)
3017 vput(newvp);
3018 } else
3019 *npp = np;
3020 }
3021 return (error);
3022 }
3023
3024 #ifndef NFS_V2_ONLY
3025 /*
3026 * Nfs Version 3 commit rpc
3027 */
3028 int
3029 nfs_commit(struct vnode *vp, off_t offset, uint32_t cnt, struct lwp *l)
3030 {
3031 char *cp;
3032 u_int32_t *tl;
3033 int32_t t1, t2;
3034 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
3035 char *bpos, *dpos, *cp2;
3036 int error = 0, wccflag = NFSV3_WCCRATTR;
3037 struct mbuf *mreq, *mrep, *md, *mb;
3038 struct nfsnode *np;
3039
3040 KASSERT(NFS_ISV3(vp));
3041
3042 #ifdef NFS_DEBUG_COMMIT
3043 printf("commit %lu - %lu\n", (unsigned long)offset,
3044 (unsigned long)(offset + cnt));
3045 #endif
3046
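	/*
	 * No write verifier yet means no v3 writes have been issued on
	 * this mount, so there is nothing the server could have left
	 * uncommitted and the commit can be skipped.
	 */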
3047 mutex_enter(&nmp->nm_lock);
3048 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) {
3049 mutex_exit(&nmp->nm_lock);
3050 return (0);
3051 }
3052 mutex_exit(&nmp->nm_lock);
3053 nfsstats.rpccnt[NFSPROC_COMMIT]++;
3054 np = VTONFS(vp);
3055 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1));
3056 nfsm_fhtom(np, 1);
3057 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3058 txdr_hyper(offset, tl);
3059 tl += 2;
3060 *tl = txdr_unsigned(cnt);
3061 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3062 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
3063 if (!error) {
3064 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
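		/*
		 * A write verifier different from the one previously seen
		 * suggests the server rebooted and may have lost unstable
		 * writes; flag the mount so those writes get redone.
		 */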
3065 mutex_enter(&nmp->nm_lock);
3066 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3067 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3068 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3069 error = NFSERR_STALEWRITEVERF;
3070 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3071 }
3072 mutex_exit(&nmp->nm_lock);
3073 }
3074 nfsm_reqdone;
3075 return (error);
3076 }
3077 #endif
3078
3079 /*
3080  * Kludge City:
3081  * - make nfs_bmap() essentially a no-op that does no translation
3082  * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3083  * (Maybe I could use the process's page mapping, but I was concerned that
3084  * Kernel Write might not be enabled, figured copyout() would do a lot more
3085  * work than memcpy(), and it currently happens in the context of the
3086  * swapper process (2).)
3087 */
3088 int
3089 nfs_bmap(void *v)
3090 {
3091 struct vop_bmap_args /* {
3092 struct vnode *a_vp;
3093 daddr_t a_bn;
3094 struct vnode **a_vpp;
3095 daddr_t *a_bnp;
3096 int *a_runp;
3097 } */ *ap = v;
3098 struct vnode *vp = ap->a_vp;
3099 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3100
3101 if (ap->a_vpp != NULL)
3102 *ap->a_vpp = vp;
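	/*
	 * No real translation is done: a_bn is in mnt_fs_bshift-sized
	 * file system blocks and the result is expected in
	 * mnt_dev_bshift-sized (device) blocks, hence the shift below.
	 */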
3103 if (ap->a_bnp != NULL)
3104 *ap->a_bnp = ap->a_bn << bshift;
3105 if (ap->a_runp != NULL)
3106 *ap->a_runp = 1024 * 1024; /* XXX */
3107 return (0);
3108 }
3109
3110 /*
3111 * Strategy routine.
3112 * For async requests when nfsiod(s) are running, queue the request by
3113  * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
3114 * request.
3115 */
3116 int
3117 nfs_strategy(void *v)
3118 {
3119 struct vop_strategy_args *ap = v;
3120 struct buf *bp = ap->a_bp;
3121 int error = 0;
3122
3123 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3124 panic("nfs physio/async");
3125
3126 /*
3127 	 * If the op is asynchronous and an i/o daemon is waiting,
3128 	 * queue the request, wake it up and wait for completion;
3129 	 * otherwise just do it ourselves.
3130 */
3131 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3132 error = nfs_doio(bp);
3133 return (error);
3134 }
3135
3136 /*
3137 * fsync vnode op. Just call nfs_flush() with commit == 1.
3138 */
3139 /* ARGSUSED */
3140 int
3141 nfs_fsync(void *v)
3142 {
3143 struct vop_fsync_args /* {
3144 struct vnodeop_desc *a_desc;
3145 struct vnode * a_vp;
3146 kauth_cred_t a_cred;
3147 int a_flags;
3148 off_t offlo;
3149 off_t offhi;
3150 struct lwp * a_l;
3151 } */ *ap = v;
3152
3153 struct vnode *vp = ap->a_vp;
3154
3155 if (vp->v_type != VREG)
3156 return 0;
3157
3158 return (nfs_flush(vp, ap->a_cred,
3159 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, curlwp, 1));
3160 }
3161
3162 /*
3163 * Flush all the data associated with a vnode.
3164 */
3165 int
3166 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3167 int commit)
3168 {
3169 struct nfsnode *np = VTONFS(vp);
3170 int error;
3171 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3172 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3173
3174 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
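	/* VOP_PUTPAGES() releases vmobjlock before returning. */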
3175 error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3176 if (np->n_flag & NWRITEERR) {
3177 error = np->n_error;
3178 np->n_flag &= ~NWRITEERR;
3179 }
3180 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3181 return (error);
3182 }
3183
3184 /*
3185 * Return POSIX pathconf information applicable to nfs.
3186 *
3187 * N.B. The NFS V2 protocol doesn't support this RPC.
3188 */
3189 /* ARGSUSED */
3190 int
3191 nfs_pathconf(void *v)
3192 {
3193 struct vop_pathconf_args /* {
3194 struct vnode *a_vp;
3195 int a_name;
3196 register_t *a_retval;
3197 } */ *ap = v;
3198 struct nfsv3_pathconf *pcp;
3199 struct vnode *vp = ap->a_vp;
3200 struct mbuf *mreq, *mrep, *md, *mb;
3201 int32_t t1, t2;
3202 u_int32_t *tl;
3203 char *bpos, *dpos, *cp, *cp2;
3204 int error = 0, attrflag;
3205 #ifndef NFS_V2_ONLY
3206 struct nfsmount *nmp;
3207 unsigned int l;
3208 u_int64_t maxsize;
3209 #endif
3210 const int v3 = NFS_ISV3(vp);
3211 struct nfsnode *np = VTONFS(vp);
3212
3213 switch (ap->a_name) {
3214 /* Names that can be resolved locally. */
3215 case _PC_PIPE_BUF:
3216 *ap->a_retval = PIPE_BUF;
3217 break;
3218 case _PC_SYNC_IO:
3219 *ap->a_retval = 1;
3220 break;
3221 /* Names that cannot be resolved locally; do an RPC, if possible. */
3222 case _PC_LINK_MAX:
3223 case _PC_NAME_MAX:
3224 case _PC_CHOWN_RESTRICTED:
3225 case _PC_NO_TRUNC:
3226 if (!v3) {
3227 error = EINVAL;
3228 break;
3229 }
3230 nfsstats.rpccnt[NFSPROC_PATHCONF]++;
3231 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1));
3232 nfsm_fhtom(np, 1);
3233 nfsm_request(np, NFSPROC_PATHCONF,
3234 curlwp, curlwp->l_cred); /* XXX */
3235 nfsm_postop_attr(vp, attrflag, 0);
3236 if (!error) {
3237 nfsm_dissect(pcp, struct nfsv3_pathconf *,
3238 NFSX_V3PATHCONF);
3239 switch (ap->a_name) {
3240 case _PC_LINK_MAX:
3241 *ap->a_retval =
3242 fxdr_unsigned(register_t, pcp->pc_linkmax);
3243 break;
3244 case _PC_NAME_MAX:
3245 *ap->a_retval =
3246 fxdr_unsigned(register_t, pcp->pc_namemax);
3247 break;
3248 case _PC_CHOWN_RESTRICTED:
3249 *ap->a_retval =
3250 (pcp->pc_chownrestricted == nfs_true);
3251 break;
3252 case _PC_NO_TRUNC:
3253 *ap->a_retval =
3254 (pcp->pc_notrunc == nfs_true);
3255 break;
3256 }
3257 }
3258 nfsm_reqdone;
3259 break;
3260 case _PC_FILESIZEBITS:
3261 #ifndef NFS_V2_ONLY
3262 if (v3) {
3263 nmp = VFSTONFS(vp->v_mount);
3264 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
3265 if ((error = nfs_fsinfo(nmp, vp,
3266 curlwp->l_cred, curlwp)) != 0) /* XXX */
3267 break;
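			/*
			 * Count the bits needed to represent the maximum
			 * file size; the extra 1 presumably covers the sign
			 * bit, since FILESIZEBITS describes a signed value.
			 */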
3268 for (l = 0, maxsize = nmp->nm_maxfilesize;
3269 (maxsize >> l) > 0; l++)
3270 ;
3271 *ap->a_retval = l + 1;
3272 } else
3273 #endif
3274 {
3275 *ap->a_retval = 32; /* NFS V2 limitation */
3276 }
3277 break;
3278 default:
3279 error = genfs_pathconf(ap);
3280 break;
3281 }
3282
3283 return (error);
3284 }
3285
3286 /*
3287 * NFS advisory byte-level locks.
3288 */
3289 int
3290 nfs_advlock(void *v)
3291 {
3292 struct vop_advlock_args /* {
3293 struct vnode *a_vp;
3294 void *a_id;
3295 int a_op;
3296 struct flock *a_fl;
3297 int a_flags;
3298 } */ *ap = v;
3299 struct nfsnode *np = VTONFS(ap->a_vp);
3300
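	/*
	 * Locking is handled entirely locally by lf_advlock(); no request
	 * is sent to the server, so these locks are not visible to other
	 * clients.
	 */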
3301 return lf_advlock(ap, &np->n_lockf, np->n_size);
3302 }
3303
3304 /*
3305 * Print out the contents of an nfsnode.
3306 */
3307 int
3308 nfs_print(void *v)
3309 {
3310 struct vop_print_args /* {
3311 struct vnode *a_vp;
3312 } */ *ap = v;
3313 struct vnode *vp = ap->a_vp;
3314 struct nfsnode *np = VTONFS(vp);
3315
3316 printf("tag VT_NFS, fileid %lld fsid 0x%llx",
3317 (unsigned long long)np->n_vattr->va_fileid,
3318 (unsigned long long)np->n_vattr->va_fsid);
3319 if (vp->v_type == VFIFO)
3320 VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v);
3321 printf("\n");
3322 return (0);
3323 }
3324
3325 /*
3326 * nfs unlock wrapper.
3327 */
3328 int
3329 nfs_unlock(void *v)
3330 {
3331 struct vop_unlock_args /* {
3332 struct vnode *a_vp;
3333 int a_flags;
3334 } */ *ap = v;
3335 struct vnode *vp = ap->a_vp;
3336
3337 /*
3338 * VOP_UNLOCK can be called by nfs_loadattrcache
3339 * with v_data == 0.
3340 */
3341 if (VTONFS(vp)) {
3342 nfs_delayedtruncate(vp);
3343 }
3344
3345 return genfs_unlock(v);
3346 }
3347
3348 /*
3349 * nfs special file access vnode op.
3350 * Essentially just get vattr and then imitate iaccess() since the device is
3351 * local to the client.
3352 */
3353 int
3354 nfsspec_access(void *v)
3355 {
3356 struct vop_access_args /* {
3357 struct vnode *a_vp;
3358 accmode_t a_accmode;
3359 kauth_cred_t a_cred;
3360 struct lwp *a_l;
3361 } */ *ap = v;
3362 struct vattr va;
3363 struct vnode *vp = ap->a_vp;
3364 int error;
3365
3366 error = VOP_GETATTR(vp, &va, ap->a_cred);
3367 if (error)
3368 return (error);
3369
3370 /*
3371 * Disallow write attempts on filesystems mounted read-only;
3372 * unless the file is a socket, fifo, or a block or character
3373 * device resident on the filesystem.
3374 */
3375 if ((ap->a_accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3376 switch (vp->v_type) {
3377 case VREG:
3378 case VDIR:
3379 case VLNK:
3380 return (EROFS);
3381 default:
3382 break;
3383 }
3384 }
3385
3386 return kauth_authorize_vnode(ap->a_cred, KAUTH_ACCESS_ACTION(
3387 ap->a_accmode, va.va_type, va.va_mode), vp, NULL, genfs_can_access(
3388 vp, ap->a_cred, va.va_uid, va.va_gid, va.va_mode, NULL,
3389 ap->a_accmode));
3390 }
3391
3392 /*
3393 * Read wrapper for special devices.
3394 */
3395 int
3396 nfsspec_read(void *v)
3397 {
3398 struct vop_read_args /* {
3399 struct vnode *a_vp;
3400 struct uio *a_uio;
3401 int a_ioflag;
3402 kauth_cred_t a_cred;
3403 } */ *ap = v;
3404 struct nfsnode *np = VTONFS(ap->a_vp);
3405
3406 /*
3407 * Set access flag.
3408 */
3409 np->n_flag |= NACC;
3410 getnanotime(&np->n_atim);
3411 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3412 }
3413
3414 /*
3415 * Write wrapper for special devices.
3416 */
3417 int
3418 nfsspec_write(void *v)
3419 {
3420 struct vop_write_args /* {
3421 struct vnode *a_vp;
3422 struct uio *a_uio;
3423 int a_ioflag;
3424 kauth_cred_t a_cred;
3425 } */ *ap = v;
3426 struct nfsnode *np = VTONFS(ap->a_vp);
3427
3428 /*
3429 * Set update flag.
3430 */
3431 np->n_flag |= NUPD;
3432 getnanotime(&np->n_mtim);
3433 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3434 }
3435
3436 /*
3437 * Close wrapper for special devices.
3438 *
3439 * Update the times on the nfsnode then do device close.
3440 */
3441 int
3442 nfsspec_close(void *v)
3443 {
3444 struct vop_close_args /* {
3445 struct vnode *a_vp;
3446 int a_fflag;
3447 kauth_cred_t a_cred;
3448 struct lwp *a_l;
3449 } */ *ap = v;
3450 struct vnode *vp = ap->a_vp;
3451 struct nfsnode *np = VTONFS(vp);
3452 struct vattr vattr;
3453
3454 if (np->n_flag & (NACC | NUPD)) {
3455 np->n_flag |= NCHG;
3456 if (vrefcnt(vp) == 1 &&
3457 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3458 vattr_null(&vattr);
3459 if (np->n_flag & NACC)
3460 vattr.va_atime = np->n_atim;
3461 if (np->n_flag & NUPD)
3462 vattr.va_mtime = np->n_mtim;
3463 (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3464 }
3465 }
3466 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3467 }
3468
3469 /*
3470 * Read wrapper for fifos.
3471 */
3472 int
3473 nfsfifo_read(void *v)
3474 {
3475 struct vop_read_args /* {
3476 struct vnode *a_vp;
3477 struct uio *a_uio;
3478 int a_ioflag;
3479 kauth_cred_t a_cred;
3480 } */ *ap = v;
3481 struct nfsnode *np = VTONFS(ap->a_vp);
3482
3483 /*
3484 * Set access flag.
3485 */
3486 np->n_flag |= NACC;
3487 getnanotime(&np->n_atim);
3488 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3489 }
3490
3491 /*
3492 * Write wrapper for fifos.
3493 */
3494 int
3495 nfsfifo_write(void *v)
3496 {
3497 struct vop_write_args /* {
3498 struct vnode *a_vp;
3499 struct uio *a_uio;
3500 int a_ioflag;
3501 kauth_cred_t a_cred;
3502 } */ *ap = v;
3503 struct nfsnode *np = VTONFS(ap->a_vp);
3504
3505 /*
3506 * Set update flag.
3507 */
3508 np->n_flag |= NUPD;
3509 getnanotime(&np->n_mtim);
3510 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3511 }
3512
3513 /*
3514 * Close wrapper for fifos.
3515 *
3516 * Update the times on the nfsnode then do fifo close.
3517 */
3518 int
3519 nfsfifo_close(void *v)
3520 {
3521 struct vop_close_args /* {
3522 struct vnode *a_vp;
3523 int a_fflag;
3524 kauth_cred_t a_cred;
3525 struct lwp *a_l;
3526 } */ *ap = v;
3527 struct vnode *vp = ap->a_vp;
3528 struct nfsnode *np = VTONFS(vp);
3529 struct vattr vattr;
3530
3531 if (np->n_flag & (NACC | NUPD)) {
3532 struct timespec ts;
3533
3534 getnanotime(&ts);
3535 if (np->n_flag & NACC)
3536 np->n_atim = ts;
3537 if (np->n_flag & NUPD)
3538 np->n_mtim = ts;
3539 np->n_flag |= NCHG;
3540 if (vrefcnt(vp) == 1 &&
3541 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3542 vattr_null(&vattr);
3543 if (np->n_flag & NACC)
3544 vattr.va_atime = np->n_atim;
3545 if (np->n_flag & NUPD)
3546 vattr.va_mtime = np->n_mtim;
3547 (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3548 }
3549 }
3550 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3551 }
3552