1 /*	$NetBSD: nfs_vnops.c,v 1.317 2020/09/05 16:30:12 riastradh Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)nfs_vnops.c 8.19 (Berkeley) 7/31/95
35 */
36
37 /*
38 * vnode op calls for Sun NFS version 2 and 3
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.317 2020/09/05 16:30:12 riastradh Exp $");
43
44 #ifdef _KERNEL_OPT
45 #include "opt_nfs.h"
46 #include "opt_uvmhist.h"
47 #endif
48
49 #include <sys/param.h>
50 #include <sys/proc.h>
51 #include <sys/kernel.h>
52 #include <sys/systm.h>
53 #include <sys/resourcevar.h>
54 #include <sys/mount.h>
55 #include <sys/buf.h>
56 #include <sys/condvar.h>
57 #include <sys/disk.h>
58 #include <sys/malloc.h>
59 #include <sys/kmem.h>
60 #include <sys/mbuf.h>
61 #include <sys/mutex.h>
62 #include <sys/namei.h>
63 #include <sys/vnode.h>
64 #include <sys/dirent.h>
65 #include <sys/fcntl.h>
66 #include <sys/hash.h>
67 #include <sys/lockf.h>
68 #include <sys/stat.h>
69 #include <sys/unistd.h>
70 #include <sys/kauth.h>
71 #include <sys/cprng.h>
72
73 #ifdef UVMHIST
74 #include <uvm/uvm.h>
75 #endif
76 #include <uvm/uvm_extern.h>
77 #include <uvm/uvm_stat.h>
78
79 #include <miscfs/fifofs/fifo.h>
80 #include <miscfs/genfs/genfs.h>
81 #include <miscfs/genfs/genfs_node.h>
82 #include <miscfs/specfs/specdev.h>
83
84 #include <nfs/rpcv2.h>
85 #include <nfs/nfsproto.h>
86 #include <nfs/nfs.h>
87 #include <nfs/nfsnode.h>
88 #include <nfs/nfsmount.h>
89 #include <nfs/xdr_subs.h>
90 #include <nfs/nfsm_subs.h>
91 #include <nfs/nfs_var.h>
92
93 #include <net/if.h>
94 #include <netinet/in.h>
95 #include <netinet/in_var.h>
96
97 /*
98 * Global vfs data structures for nfs
99 */
100 int (**nfsv2_vnodeop_p)(void *);
101 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
102 { &vop_default_desc, vn_default_error },
103 { &vop_lookup_desc, nfs_lookup }, /* lookup */
104 { &vop_create_desc, nfs_create }, /* create */
105 { &vop_mknod_desc, nfs_mknod }, /* mknod */
106 { &vop_open_desc, nfs_open }, /* open */
107 { &vop_close_desc, nfs_close }, /* close */
108 { &vop_access_desc, nfs_access }, /* access */
109 { &vop_accessx_desc, genfs_accessx }, /* accessx */
110 { &vop_getattr_desc, nfs_getattr }, /* getattr */
111 { &vop_setattr_desc, nfs_setattr }, /* setattr */
112 { &vop_read_desc, nfs_read }, /* read */
113 { &vop_write_desc, nfs_write }, /* write */
114 { &vop_fallocate_desc, genfs_eopnotsupp }, /* fallocate */
115 { &vop_fdiscard_desc, genfs_eopnotsupp }, /* fdiscard */
116 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
117 { &vop_ioctl_desc, nfs_ioctl }, /* ioctl */
118 { &vop_poll_desc, nfs_poll }, /* poll */
119 { &vop_kqfilter_desc, nfs_kqfilter }, /* kqfilter */
120 { &vop_revoke_desc, nfs_revoke }, /* revoke */
121 { &vop_mmap_desc, nfs_mmap }, /* mmap */
122 { &vop_fsync_desc, nfs_fsync }, /* fsync */
123 { &vop_seek_desc, nfs_seek }, /* seek */
124 { &vop_remove_desc, nfs_remove }, /* remove */
125 { &vop_link_desc, nfs_link }, /* link */
126 { &vop_rename_desc, nfs_rename }, /* rename */
127 { &vop_mkdir_desc, nfs_mkdir }, /* mkdir */
128 { &vop_rmdir_desc, nfs_rmdir }, /* rmdir */
129 { &vop_symlink_desc, nfs_symlink }, /* symlink */
130 { &vop_readdir_desc, nfs_readdir }, /* readdir */
131 { &vop_readlink_desc, nfs_readlink }, /* readlink */
132 { &vop_abortop_desc, nfs_abortop }, /* abortop */
133 { &vop_inactive_desc, nfs_inactive }, /* inactive */
134 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
135 { &vop_lock_desc, nfs_lock }, /* lock */
136 { &vop_unlock_desc, nfs_unlock }, /* unlock */
137 { &vop_bmap_desc, nfs_bmap }, /* bmap */
138 { &vop_strategy_desc, nfs_strategy }, /* strategy */
139 { &vop_print_desc, nfs_print }, /* print */
140 { &vop_islocked_desc, nfs_islocked }, /* islocked */
141 { &vop_pathconf_desc, nfs_pathconf }, /* pathconf */
142 { &vop_advlock_desc, nfs_advlock }, /* advlock */
143 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
144 { &vop_getpages_desc, nfs_getpages }, /* getpages */
145 { &vop_putpages_desc, genfs_putpages }, /* putpages */
146 { NULL, NULL }
147 };
148 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
149 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
150
151 /*
152 * Special device vnode ops
153 */
154 int (**spec_nfsv2nodeop_p)(void *);
155 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
156 { &vop_default_desc, vn_default_error },
157 { &vop_lookup_desc, spec_lookup }, /* lookup */
158 { &vop_create_desc, spec_create }, /* create */
159 { &vop_mknod_desc, spec_mknod }, /* mknod */
160 { &vop_open_desc, spec_open }, /* open */
161 { &vop_close_desc, nfsspec_close }, /* close */
162 { &vop_access_desc, nfsspec_access }, /* access */
163 { &vop_accessx_desc, genfs_accessx }, /* accessx */
164 { &vop_getattr_desc, nfs_getattr }, /* getattr */
165 { &vop_setattr_desc, nfs_setattr }, /* setattr */
166 { &vop_read_desc, nfsspec_read }, /* read */
167 { &vop_write_desc, nfsspec_write }, /* write */
168 { &vop_fallocate_desc, spec_fallocate }, /* fallocate */
169 { &vop_fdiscard_desc, spec_fdiscard }, /* fdiscard */
170 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
171 { &vop_ioctl_desc, spec_ioctl }, /* ioctl */
172 { &vop_poll_desc, spec_poll }, /* poll */
173 { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */
174 { &vop_revoke_desc, spec_revoke }, /* revoke */
175 { &vop_mmap_desc, spec_mmap }, /* mmap */
176 { &vop_fsync_desc, spec_fsync }, /* fsync */
177 { &vop_seek_desc, spec_seek }, /* seek */
178 { &vop_remove_desc, spec_remove }, /* remove */
179 { &vop_link_desc, spec_link }, /* link */
180 { &vop_rename_desc, spec_rename }, /* rename */
181 { &vop_mkdir_desc, spec_mkdir }, /* mkdir */
182 { &vop_rmdir_desc, spec_rmdir }, /* rmdir */
183 { &vop_symlink_desc, spec_symlink }, /* symlink */
184 { &vop_readdir_desc, spec_readdir }, /* readdir */
185 { &vop_readlink_desc, spec_readlink }, /* readlink */
186 { &vop_abortop_desc, spec_abortop }, /* abortop */
187 { &vop_inactive_desc, nfs_inactive }, /* inactive */
188 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
189 { &vop_lock_desc, nfs_lock }, /* lock */
190 { &vop_unlock_desc, nfs_unlock }, /* unlock */
191 { &vop_bmap_desc, spec_bmap }, /* bmap */
192 { &vop_strategy_desc, spec_strategy }, /* strategy */
193 { &vop_print_desc, nfs_print }, /* print */
194 { &vop_islocked_desc, nfs_islocked }, /* islocked */
195 { &vop_pathconf_desc, spec_pathconf }, /* pathconf */
196 { &vop_advlock_desc, spec_advlock }, /* advlock */
197 { &vop_bwrite_desc, spec_bwrite }, /* bwrite */
198 { &vop_getpages_desc, spec_getpages }, /* getpages */
199 { &vop_putpages_desc, spec_putpages }, /* putpages */
200 { NULL, NULL }
201 };
202 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
203 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
204
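/*
 * FIFO vnode ops
 */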
205 int (**fifo_nfsv2nodeop_p)(void *);
206 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
207 { &vop_default_desc, vn_default_error },
208 { &vop_lookup_desc, vn_fifo_bypass }, /* lookup */
209 { &vop_create_desc, vn_fifo_bypass }, /* create */
210 { &vop_mknod_desc, vn_fifo_bypass }, /* mknod */
211 { &vop_open_desc, vn_fifo_bypass }, /* open */
212 { &vop_close_desc, nfsfifo_close }, /* close */
213 { &vop_access_desc, nfsspec_access }, /* access */
214 { &vop_accessx_desc, genfs_accessx }, /* accessx */
215 { &vop_getattr_desc, nfs_getattr }, /* getattr */
216 { &vop_setattr_desc, nfs_setattr }, /* setattr */
217 { &vop_read_desc, nfsfifo_read }, /* read */
218 { &vop_write_desc, nfsfifo_write }, /* write */
219 { &vop_fallocate_desc, vn_fifo_bypass }, /* fallocate */
220 { &vop_fdiscard_desc, vn_fifo_bypass }, /* fdiscard */
221 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
222 { &vop_ioctl_desc, vn_fifo_bypass }, /* ioctl */
223 { &vop_poll_desc, vn_fifo_bypass }, /* poll */
224 { &vop_kqfilter_desc, vn_fifo_bypass }, /* kqfilter */
225 { &vop_revoke_desc, vn_fifo_bypass }, /* revoke */
226 { &vop_mmap_desc, vn_fifo_bypass }, /* mmap */
227 { &vop_fsync_desc, nfs_fsync }, /* fsync */
228 { &vop_seek_desc, vn_fifo_bypass }, /* seek */
229 { &vop_remove_desc, vn_fifo_bypass }, /* remove */
230 { &vop_link_desc, vn_fifo_bypass }, /* link */
231 { &vop_rename_desc, vn_fifo_bypass }, /* rename */
232 { &vop_mkdir_desc, vn_fifo_bypass }, /* mkdir */
233 { &vop_rmdir_desc, vn_fifo_bypass }, /* rmdir */
234 { &vop_symlink_desc, vn_fifo_bypass }, /* symlink */
235 { &vop_readdir_desc, vn_fifo_bypass }, /* readdir */
236 { &vop_readlink_desc, vn_fifo_bypass }, /* readlink */
237 { &vop_abortop_desc, vn_fifo_bypass }, /* abortop */
238 { &vop_inactive_desc, nfs_inactive }, /* inactive */
239 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
240 { &vop_lock_desc, nfs_lock }, /* lock */
241 { &vop_unlock_desc, nfs_unlock }, /* unlock */
242 { &vop_bmap_desc, vn_fifo_bypass }, /* bmap */
243 { &vop_strategy_desc, genfs_badop }, /* strategy */
244 { &vop_print_desc, nfs_print }, /* print */
245 { &vop_islocked_desc, nfs_islocked }, /* islocked */
246 { &vop_pathconf_desc, vn_fifo_bypass }, /* pathconf */
247 { &vop_advlock_desc, vn_fifo_bypass }, /* advlock */
248 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
249 { &vop_putpages_desc, vn_fifo_bypass }, /* putpages */
250 { NULL, NULL }
251 };
252 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
253 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
254
255 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
256 size_t, kauth_cred_t, struct lwp *);
257 static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);
258
259 /*
260 * Global variables
261 */
262 extern u_int32_t nfs_true, nfs_false;
263 extern u_int32_t nfs_xdrneg1;
264 extern const nfstype nfsv3_type[9];
265
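/*
 * Number of nfsiod kernel threads currently available to service
 * asynchronous i/o requests.
 */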
266 int nfs_numasync = 0;
267 #define DIRHDSIZ _DIRENT_NAMEOFF(dp)
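/*
 * UIO_ADVANCE consumes "siz" bytes from a uio: it shrinks uio_resid and
 * advances the current iovec in place.  It never steps to the next
 * iovec, so it may only consume what the current iovec still holds
 * (nfs_writerpc, for instance, insists on single-iovec uios); note that
 * uio_offset is updated separately by the caller.
 */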
268 #define UIO_ADVANCE(uio, siz) \
269 (void)((uio)->uio_resid -= (siz), \
270 (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
271 (uio)->uio_iov->iov_len -= (siz))
272
273 static void nfs_cache_enter(struct vnode *, struct vnode *,
274 struct componentname *);
275
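/*
 * nfs_cache_enter: enter a name into the namecache, recording the ctime
 * of the looked-up vnode and the mtime of the directory, so that
 * nfs_lookup() can later decide whether a cached entry is still valid.
 */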
276 static void
277 nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
278 struct componentname *cnp)
279 {
280 struct nfsnode *dnp = VTONFS(dvp);
281
282 if ((cnp->cn_flags & MAKEENTRY) == 0) {
283 return;
284 }
285 if (vp != NULL) {
286 struct nfsnode *np = VTONFS(vp);
287
288 np->n_ctime = np->n_vattr->va_ctime.tv_sec;
289 }
290
291 if (!timespecisset(&dnp->n_nctime))
292 dnp->n_nctime = dnp->n_vattr->va_mtime;
293
294 cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_flags);
295 }
296
297 /*
298 * nfs null call from vfs.
299 */
300 int
301 nfs_null(struct vnode *vp, kauth_cred_t cred, struct lwp *l)
302 {
303 char *bpos, *dpos;
304 int error = 0;
305 struct mbuf *mreq, *mrep, *md, *mb __unused;
306 struct nfsnode *np = VTONFS(vp);
307
308 nfsm_reqhead(np, NFSPROC_NULL, 0);
309 nfsm_request(np, NFSPROC_NULL, l, cred);
310 nfsm_reqdone;
311 return (error);
312 }
313
314 /*
315 * nfs access vnode op.
316 * For nfs version 2, just return ok. File accesses may fail later.
317 * For nfs version 3, use the access rpc to check accessibility. If file modes
318 * are changed on the server, accesses might still fail later.
319 */
320 int
321 nfs_access(void *v)
322 {
323 struct vop_access_args /* {
324 struct vnode *a_vp;
325 accmode_t a_accmode;
326 kauth_cred_t a_cred;
327 } */ *ap = v;
328 struct vnode *vp = ap->a_vp;
329 #ifndef NFS_V2_ONLY
330 u_int32_t *tl;
331 char *cp;
332 int32_t t1, t2;
333 char *bpos, *dpos, *cp2;
334 int error = 0, attrflag;
335 struct mbuf *mreq, *mrep, *md, *mb;
336 u_int32_t mode, rmode;
337 const int v3 = NFS_ISV3(vp);
338 #endif
339 int cachevalid;
340 struct nfsnode *np = VTONFS(vp);
341 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
342
343 cachevalid = (np->n_accstamp != -1 &&
344 (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) &&
345 np->n_accuid == kauth_cred_geteuid(ap->a_cred));
346
347 /*
348 * Check access cache first. If this request has been made for this
349 * uid shortly before, use the cached result.
350 */
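/*
 * A cached success applies only if it covers every bit being asked
 * for; a cached failure applies only if the bits being asked for
 * cover every bit that previously failed.
 */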
351 if (cachevalid) {
352 if (!np->n_accerror) {
353 if ((np->n_accmode & ap->a_accmode) == ap->a_accmode)
354 return np->n_accerror;
355 } else if ((np->n_accmode & ap->a_accmode) == np->n_accmode)
356 return np->n_accerror;
357 }
358
359 #ifndef NFS_V2_ONLY
360 /*
361 * For nfs v3, do an access rpc, otherwise you are stuck emulating
362 * ufs_access() locally using the vattr. This may not be correct,
363 * since the server may apply other access criteria such as
364 * client uid-->server uid mapping that we do not know about, but
365 * this is better than just returning anything that is lying about
366 * in the cache.
367 */
368 if (v3) {
369 nfsstats.rpccnt[NFSPROC_ACCESS]++;
370 nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
371 nfsm_fhtom(np, v3);
372 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
373 if (ap->a_accmode & VREAD)
374 mode = NFSV3ACCESS_READ;
375 else
376 mode = 0;
377 if (vp->v_type != VDIR) {
378 if (ap->a_accmode & VWRITE)
379 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
380 if (ap->a_accmode & VEXEC)
381 mode |= NFSV3ACCESS_EXECUTE;
382 } else {
383 if (ap->a_accmode & VWRITE)
384 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
385 NFSV3ACCESS_DELETE);
386 if (ap->a_accmode & VEXEC)
387 mode |= NFSV3ACCESS_LOOKUP;
388 }
389 *tl = txdr_unsigned(mode);
390 nfsm_request(np, NFSPROC_ACCESS, curlwp, ap->a_cred);
391 nfsm_postop_attr(vp, attrflag, 0);
392 if (!error) {
393 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
394 rmode = fxdr_unsigned(u_int32_t, *tl);
395 /*
396 * The NFS V3 spec does not clarify whether or not
397 * the returned access bits can be a superset of
398 * the ones requested, so...
399 */
400 if ((rmode & mode) != mode)
401 error = EACCES;
402 }
403 nfsm_reqdone;
404 } else
405 #endif
406 return (nfsspec_access(ap));
407 #ifndef NFS_V2_ONLY
408 /*
409 	 * Disallow write attempts on filesystems mounted read-only,
410 * unless the file is a socket, fifo, or a block or character
411 * device resident on the filesystem.
412 */
413 if (!error && (ap->a_accmode & VWRITE) &&
414 (vp->v_mount->mnt_flag & MNT_RDONLY)) {
415 switch (vp->v_type) {
416 case VREG:
417 case VDIR:
418 case VLNK:
419 error = EROFS;
420 default:
421 break;
422 }
423 }
424
425 if (!error || error == EACCES) {
426 /*
427 * If we got the same result as for a previous,
428 * different request, OR it in. Don't update
429 * the timestamp in that case.
430 */
431 if (cachevalid && np->n_accstamp != -1 &&
432 error == np->n_accerror) {
433 if (!error)
434 np->n_accmode |= ap->a_accmode;
435 else if ((np->n_accmode & ap->a_accmode) == ap->a_accmode)
436 np->n_accmode = ap->a_accmode;
437 } else {
438 np->n_accstamp = time_uptime;
439 np->n_accuid = kauth_cred_geteuid(ap->a_cred);
440 np->n_accmode = ap->a_accmode;
441 np->n_accerror = error;
442 }
443 }
444
445 return (error);
446 #endif
447 }
448
449 /*
450 * nfs open vnode op
451 * Check to see if the type is ok
452 * and that deletion is not in progress.
453 * For paged in text files, you will need to flush the page cache
454 * if consistency is lost.
455 */
456 /* ARGSUSED */
457 int
458 nfs_open(void *v)
459 {
460 struct vop_open_args /* {
461 struct vnode *a_vp;
462 int a_mode;
463 kauth_cred_t a_cred;
464 } */ *ap = v;
465 struct vnode *vp = ap->a_vp;
466 struct nfsnode *np = VTONFS(vp);
467 int error;
468
469 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
470 return (EACCES);
471 }
472
473 if (ap->a_mode & FREAD) {
474 if (np->n_rcred != NULL)
475 kauth_cred_free(np->n_rcred);
476 np->n_rcred = ap->a_cred;
477 kauth_cred_hold(np->n_rcred);
478 }
479 if (ap->a_mode & FWRITE) {
480 if (np->n_wcred != NULL)
481 kauth_cred_free(np->n_wcred);
482 np->n_wcred = ap->a_cred;
483 kauth_cred_hold(np->n_wcred);
484 }
485
486 error = nfs_flushstalebuf(vp, ap->a_cred, curlwp, 0);
487 if (error)
488 return error;
489
490 NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */
491
492 return (0);
493 }
494
495 /*
496 * nfs close vnode op
497 * What an NFS client should do upon close after writing is a debatable issue.
498 * Most NFS clients push delayed writes to the server upon close, basically for
499 * two reasons:
500 * 1 - So that any write errors may be reported back to the client process
501 * doing the close system call. By far the two most likely errors are
502 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
503 * 2 - To put a worst case upper bound on cache inconsistency between
504 * multiple clients for the file.
505 * There is also a consistency problem for Version 2 of the protocol w.r.t.
506 * not being able to tell if other clients are writing a file concurrently,
507 * since there is no way of knowing if the changed modify time in the reply
508 * is only due to the write for this client.
509 * (NFS Version 3 provides weak cache consistency data in the reply that
510 * should be sufficient to detect and handle this case.)
511 *
512 * The current code does the following:
513 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
514 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
515 * or commit them (this satisfies 1 and 2 except for the
516 * case where the server crashes after this close but
517 * before the commit RPC, which is felt to be "good
518 *			 enough"). Changing the last argument to nfs_flush() to
519 * a 1 would force a commit operation, if it is felt a
520 * commit is necessary now.
521 */
522 /* ARGSUSED */
523 int
524 nfs_close(void *v)
525 {
526 struct vop_close_args /* {
527 struct vnodeop_desc *a_desc;
528 struct vnode *a_vp;
529 int a_fflag;
530 kauth_cred_t a_cred;
531 } */ *ap = v;
532 struct vnode *vp = ap->a_vp;
533 struct nfsnode *np = VTONFS(vp);
534 int error = 0;
535 UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);
536
537 if (vp->v_type == VREG) {
538 if (np->n_flag & NMODIFIED) {
539 #ifndef NFS_V2_ONLY
540 if (NFS_ISV3(vp)) {
541 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, curlwp, 0);
542 np->n_flag &= ~NMODIFIED;
543 } else
544 #endif
545 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 1);
546 NFS_INVALIDATE_ATTRCACHE(np);
547 }
548 if (np->n_flag & NWRITEERR) {
549 np->n_flag &= ~NWRITEERR;
550 error = np->n_error;
551 }
552 }
553 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
554 return (error);
555 }
556
557 /*
558 * nfs getattr call from vfs.
559 */
560 int
561 nfs_getattr(void *v)
562 {
563 struct vop_getattr_args /* {
564 struct vnode *a_vp;
565 struct vattr *a_vap;
566 kauth_cred_t a_cred;
567 } */ *ap = v;
568 struct vnode *vp = ap->a_vp;
569 struct nfsnode *np = VTONFS(vp);
570 char *cp;
571 u_int32_t *tl;
572 int32_t t1, t2;
573 char *bpos, *dpos;
574 int error = 0;
575 struct mbuf *mreq, *mrep, *md, *mb;
576 const int v3 = NFS_ISV3(vp);
577
578 /*
579 * Update local times for special files.
580 */
581 if (np->n_flag & (NACC | NUPD))
582 np->n_flag |= NCHG;
583
584 /*
585 * if we have delayed truncation, do it now.
586 */
587 nfs_delayedtruncate(vp);
588
589 /*
590 * First look in the cache.
591 */
592 if (nfs_getattrcache(vp, ap->a_vap) == 0)
593 return (0);
594 nfsstats.rpccnt[NFSPROC_GETATTR]++;
595 nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
596 nfsm_fhtom(np, v3);
597 nfsm_request(np, NFSPROC_GETATTR, curlwp, ap->a_cred);
598 if (!error) {
599 nfsm_loadattr(vp, ap->a_vap, 0);
600 if (vp->v_type == VDIR &&
601 ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
602 ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
603 }
604 nfsm_reqdone;
605 return (error);
606 }
607
608 /*
609 * nfs setattr call.
610 */
611 int
612 nfs_setattr(void *v)
613 {
614 struct vop_setattr_args /* {
615 struct vnodeop_desc *a_desc;
616 struct vnode *a_vp;
617 struct vattr *a_vap;
618 kauth_cred_t a_cred;
619 } */ *ap = v;
620 struct vnode *vp = ap->a_vp;
621 struct nfsnode *np = VTONFS(vp);
622 struct vattr *vap = ap->a_vap;
623 int error = 0;
624 u_quad_t tsize = 0;
625
626 /*
627 * Setting of flags is not supported.
628 */
629 if (vap->va_flags != VNOVAL)
630 return (EOPNOTSUPP);
631
632 /*
633 * Disallow write attempts if the filesystem is mounted read-only.
634 */
635 if ((vap->va_uid != (uid_t)VNOVAL ||
636 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
637 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
638 (vp->v_mount->mnt_flag & MNT_RDONLY))
639 return (EROFS);
640 if (vap->va_size != VNOVAL) {
641 if (vap->va_size > VFSTONFS(vp->v_mount)->nm_maxfilesize) {
642 return EFBIG;
643 }
644 switch (vp->v_type) {
645 case VDIR:
646 return (EISDIR);
647 case VCHR:
648 case VBLK:
649 case VSOCK:
650 case VFIFO:
651 if (vap->va_mtime.tv_sec == VNOVAL &&
652 vap->va_atime.tv_sec == VNOVAL &&
653 vap->va_mode == (mode_t)VNOVAL &&
654 vap->va_uid == (uid_t)VNOVAL &&
655 vap->va_gid == (gid_t)VNOVAL)
656 return (0);
657 vap->va_size = VNOVAL;
658 break;
659 default:
660 /*
661 * Disallow write attempts if the filesystem is
662 * mounted read-only.
663 */
664 if (vp->v_mount->mnt_flag & MNT_RDONLY)
665 return (EROFS);
666 genfs_node_wrlock(vp);
667 uvm_vnp_setsize(vp, vap->va_size);
668 tsize = np->n_size;
669 np->n_size = vap->va_size;
670 if (vap->va_size == 0)
671 error = nfs_vinvalbuf(vp, 0,
672 ap->a_cred, curlwp, 1);
673 else
674 error = nfs_vinvalbuf(vp, V_SAVE,
675 ap->a_cred, curlwp, 1);
676 if (error) {
677 uvm_vnp_setsize(vp, tsize);
678 genfs_node_unlock(vp);
679 return (error);
680 }
681 np->n_vattr->va_size = vap->va_size;
682 }
683 } else {
684 /*
685 * flush files before setattr because a later write of
686 * cached data might change timestamps or reset sugid bits
687 */
688 if ((vap->va_mtime.tv_sec != VNOVAL ||
689 vap->va_atime.tv_sec != VNOVAL ||
690 vap->va_mode != VNOVAL) &&
691 vp->v_type == VREG &&
692 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
693 curlwp, 1)) == EINTR)
694 return (error);
695 }
696 error = nfs_setattrrpc(vp, vap, ap->a_cred, curlwp);
697 if (vap->va_size != VNOVAL) {
698 if (error) {
699 np->n_size = np->n_vattr->va_size = tsize;
700 uvm_vnp_setsize(vp, np->n_size);
701 }
702 genfs_node_unlock(vp);
703 }
704 VN_KNOTE(vp, NOTE_ATTRIB);
705 return (error);
706 }
707
708 /*
709 * Do an nfs setattr rpc.
710 */
711 int
712 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, struct lwp *l)
713 {
714 struct nfsv2_sattr *sp;
715 char *cp;
716 int32_t t1, t2;
717 char *bpos, *dpos;
718 u_int32_t *tl;
719 int error = 0;
720 struct mbuf *mreq, *mrep, *md, *mb;
721 const int v3 = NFS_ISV3(vp);
722 struct nfsnode *np = VTONFS(vp);
723 #ifndef NFS_V2_ONLY
724 int wccflag = NFSV3_WCCRATTR;
725 char *cp2;
726 #endif
727
728 nfsstats.rpccnt[NFSPROC_SETATTR]++;
729 nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
730 nfsm_fhtom(np, v3);
731 #ifndef NFS_V2_ONLY
732 if (v3) {
733 nfsm_v3attrbuild(vap, true);
734 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
735 *tl = nfs_false;
736 } else {
737 #endif
738 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
739 if (vap->va_mode == (mode_t)VNOVAL)
740 sp->sa_mode = nfs_xdrneg1;
741 else
742 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
743 if (vap->va_uid == (uid_t)VNOVAL)
744 sp->sa_uid = nfs_xdrneg1;
745 else
746 sp->sa_uid = txdr_unsigned(vap->va_uid);
747 if (vap->va_gid == (gid_t)VNOVAL)
748 sp->sa_gid = nfs_xdrneg1;
749 else
750 sp->sa_gid = txdr_unsigned(vap->va_gid);
751 sp->sa_size = txdr_unsigned(vap->va_size);
752 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
753 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
754 #ifndef NFS_V2_ONLY
755 }
756 #endif
757 nfsm_request(np, NFSPROC_SETATTR, l, cred);
758 #ifndef NFS_V2_ONLY
759 if (v3) {
760 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
761 } else
762 #endif
763 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
764 nfsm_reqdone;
765 return (error);
766 }
767
768 /*
769 * nfs lookup call, one step at a time...
770 * First look in cache
771 * If not found, do the rpc.
772 */
773 int
774 nfs_lookup(void *v)
775 {
776 struct vop_lookup_v2_args /* {
777 struct vnodeop_desc *a_desc;
778 struct vnode *a_dvp;
779 struct vnode **a_vpp;
780 struct componentname *a_cnp;
781 } */ *ap = v;
782 struct componentname *cnp = ap->a_cnp;
783 struct vnode *dvp = ap->a_dvp;
784 struct vnode **vpp = ap->a_vpp;
785 int flags;
786 struct vnode *newvp;
787 u_int32_t *tl;
788 char *cp;
789 int32_t t1, t2;
790 char *bpos, *dpos, *cp2;
791 struct mbuf *mreq, *mrep, *md, *mb;
792 long len;
793 nfsfh_t *fhp;
794 struct nfsnode *np;
795 int cachefound;
796 int error = 0, attrflag, fhsize;
797 const int v3 = NFS_ISV3(dvp);
798
799 flags = cnp->cn_flags;
800
801 *vpp = NULLVP;
802 newvp = NULLVP;
803 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
804 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
805 return (EROFS);
806 if (dvp->v_type != VDIR)
807 return (ENOTDIR);
808
809 /*
810 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
811 */
812 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
813 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
814 if (error)
815 return error;
816 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
817 return EISDIR;
818 vref(dvp);
819 *vpp = dvp;
820 return 0;
821 }
822
823 np = VTONFS(dvp);
824
825 /*
826 * Before performing an RPC, check the name cache to see if
827 * the directory/name pair we are looking for is known already.
828 * If the directory/name pair is found in the name cache,
829 * we have to ensure the directory has not changed from
830 * the time the cache entry has been created. If it has,
831 * the cache entry has to be ignored.
832 */
833 cachefound = cache_lookup_raw(dvp, cnp->cn_nameptr, cnp->cn_namelen,
834 cnp->cn_flags, NULL, vpp);
835 KASSERT(dvp != *vpp);
836 KASSERT((cnp->cn_flags & ISWHITEOUT) == 0);
837 if (cachefound) {
838 struct vattr vattr;
839
840 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
841 if (error != 0) {
842 if (*vpp != NULLVP)
843 vrele(*vpp);
844 *vpp = NULLVP;
845 return error;
846 }
847
848 if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred)
849 || timespeccmp(&vattr.va_mtime,
850 &VTONFS(dvp)->n_nctime, !=)) {
851 if (*vpp != NULLVP) {
852 vrele(*vpp);
853 *vpp = NULLVP;
854 }
855 cache_purge1(dvp, NULL, 0, PURGE_CHILDREN);
856 timespecclear(&np->n_nctime);
857 goto dorpc;
858 }
859
860 if (*vpp == NULLVP) {
861 /* namecache gave us a negative result */
862 error = ENOENT;
863 goto noentry;
864 }
865
866 /*
867 * investigate the vnode returned by cache_lookup_raw.
868 * if it isn't appropriate, do an rpc.
869 */
870 newvp = *vpp;
871 if ((flags & ISDOTDOT) != 0) {
872 VOP_UNLOCK(dvp);
873 }
874 error = vn_lock(newvp, LK_SHARED);
875 if ((flags & ISDOTDOT) != 0) {
876 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
877 }
878 if (error != 0) {
879 /* newvp has been reclaimed. */
880 vrele(newvp);
881 *vpp = NULLVP;
882 goto dorpc;
883 }
884 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred)
885 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
886 nfsstats.lookupcache_hits++;
887 KASSERT(newvp->v_type != VNON);
888 VOP_UNLOCK(newvp);
889 return (0);
890 }
891 cache_purge1(newvp, NULL, 0, PURGE_PARENTS);
892 vput(newvp);
893 *vpp = NULLVP;
894 }
895 dorpc:
896 #if 0
897 /*
898 * because nfsv3 has the same CREATE semantics as ours,
899 * we don't have to perform LOOKUPs beforehand.
900 *
901 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
902 * XXX although we have no way to know if O_EXCL is requested or not.
903 */
904
905 if (v3 && cnp->cn_nameiop == CREATE &&
906 (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
907 (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
908 return (EJUSTRETURN);
909 }
910 #endif /* 0 */
911
912 error = 0;
913 newvp = NULLVP;
914 nfsstats.lookupcache_misses++;
915 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
916 len = cnp->cn_namelen;
917 nfsm_reqhead(np, NFSPROC_LOOKUP,
918 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
919 nfsm_fhtom(np, v3);
920 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
921 nfsm_request(np, NFSPROC_LOOKUP, curlwp, cnp->cn_cred);
922 if (error) {
923 nfsm_postop_attr(dvp, attrflag, 0);
924 m_freem(mrep);
925 goto nfsmout;
926 }
927 nfsm_getfh(fhp, fhsize, v3);
928
929 /*
930 * Handle RENAME case...
931 */
932 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
933 if (NFS_CMPFH(np, fhp, fhsize)) {
934 m_freem(mrep);
935 return (EISDIR);
936 }
937 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
938 if (error) {
939 m_freem(mrep);
940 return error;
941 }
942 newvp = NFSTOV(np);
943 #ifndef NFS_V2_ONLY
944 if (v3) {
945 nfsm_postop_attr(newvp, attrflag, 0);
946 nfsm_postop_attr(dvp, attrflag, 0);
947 } else
948 #endif
949 nfsm_loadattr(newvp, (struct vattr *)0, 0);
950 *vpp = newvp;
951 m_freem(mrep);
952 goto validate;
953 }
954
955 /*
956 * The postop attr handling is duplicated for each if case,
957 * because it should be done while dvp is locked (unlocking
958 * dvp is different for each case).
959 */
960
961 if (NFS_CMPFH(np, fhp, fhsize)) {
962 /*
963 * As we handle "." lookup locally, this is
964 * a broken server.
965 */
966 m_freem(mrep);
967 return EBADRPC;
968 } else if (flags & ISDOTDOT) {
969 /*
970 * ".." lookup
971 */
972 VOP_UNLOCK(dvp);
973 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
974 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
975 if (error) {
976 m_freem(mrep);
977 return error;
978 }
979 newvp = NFSTOV(np);
980
981 #ifndef NFS_V2_ONLY
982 if (v3) {
983 nfsm_postop_attr(newvp, attrflag, 0);
984 nfsm_postop_attr(dvp, attrflag, 0);
985 } else
986 #endif
987 nfsm_loadattr(newvp, (struct vattr *)0, 0);
988 } else {
989 /*
990 * Other lookups.
991 */
992 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
993 if (error) {
994 m_freem(mrep);
995 return error;
996 }
997 newvp = NFSTOV(np);
998 #ifndef NFS_V2_ONLY
999 if (v3) {
1000 nfsm_postop_attr(newvp, attrflag, 0);
1001 nfsm_postop_attr(dvp, attrflag, 0);
1002 } else
1003 #endif
1004 nfsm_loadattr(newvp, (struct vattr *)0, 0);
1005 }
1006 if (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) {
1007 nfs_cache_enter(dvp, newvp, cnp);
1008 }
1009 *vpp = newvp;
1010 nfsm_reqdone;
1011 if (error) {
1012 /*
1013 * We get here only because of errors returned by
1014 * the RPC. Otherwise we'll have returned above
1015 * (the nfsm_* macros will jump to nfsm_reqdone
1016 * on error).
1017 */
1018 if (error == ENOENT && cnp->cn_nameiop != CREATE) {
1019 nfs_cache_enter(dvp, NULL, cnp);
1020 }
1021 if (newvp != NULLVP) {
1022 if (newvp == dvp) {
1023 vrele(newvp);
1024 } else {
1025 vput(newvp);
1026 }
1027 }
1028 noentry:
1029 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
1030 (flags & ISLASTCN) && error == ENOENT) {
1031 if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
1032 error = EROFS;
1033 } else {
1034 error = EJUSTRETURN;
1035 }
1036 }
1037 *vpp = NULL;
1038 return error;
1039 }
1040
1041 validate:
1042 /*
1043 * make sure we have valid type and size.
1044 */
1045
1046 newvp = *vpp;
1047 if (newvp->v_type == VNON) {
1048 struct vattr vattr; /* dummy */
1049
1050 KASSERT(VTONFS(newvp)->n_attrstamp == 0);
1051 error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred);
1052 if (error) {
1053 vput(newvp);
1054 *vpp = NULL;
1055 }
1056 }
1057 if (error)
1058 return error;
1059 if (newvp != dvp)
1060 VOP_UNLOCK(newvp);
1061 return 0;
1062 }
1063
1064 /*
1065 * nfs read call.
1066 * Just call nfs_bioread() to do the work.
1067 */
1068 int
1069 nfs_read(void *v)
1070 {
1071 struct vop_read_args /* {
1072 struct vnode *a_vp;
1073 struct uio *a_uio;
1074 int a_ioflag;
1075 kauth_cred_t a_cred;
1076 } */ *ap = v;
1077 struct vnode *vp = ap->a_vp;
1078
1079 if (vp->v_type != VREG)
1080 return EISDIR;
1081 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
1082 }
1083
1084 /*
1085 * nfs readlink call
1086 */
1087 int
1088 nfs_readlink(void *v)
1089 {
1090 struct vop_readlink_args /* {
1091 struct vnode *a_vp;
1092 struct uio *a_uio;
1093 kauth_cred_t a_cred;
1094 } */ *ap = v;
1095 struct vnode *vp = ap->a_vp;
1096 struct nfsnode *np = VTONFS(vp);
1097
1098 if (vp->v_type != VLNK)
1099 return (EPERM);
1100
1101 if (np->n_rcred != NULL) {
1102 kauth_cred_free(np->n_rcred);
1103 }
1104 np->n_rcred = ap->a_cred;
1105 kauth_cred_hold(np->n_rcred);
1106
1107 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
1108 }
1109
1110 /*
1111 * Do a readlink rpc.
1112 * Called by nfs_doio() from below the buffer cache.
1113 */
1114 int
1115 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
1116 {
1117 u_int32_t *tl;
1118 char *cp;
1119 int32_t t1, t2;
1120 char *bpos, *dpos, *cp2;
1121 int error = 0;
1122 uint32_t len;
1123 struct mbuf *mreq, *mrep, *md, *mb;
1124 const int v3 = NFS_ISV3(vp);
1125 struct nfsnode *np = VTONFS(vp);
1126 #ifndef NFS_V2_ONLY
1127 int attrflag;
1128 #endif
1129
1130 nfsstats.rpccnt[NFSPROC_READLINK]++;
1131 nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
1132 nfsm_fhtom(np, v3);
1133 nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
1134 #ifndef NFS_V2_ONLY
1135 if (v3)
1136 nfsm_postop_attr(vp, attrflag, 0);
1137 #endif
1138 if (!error) {
1139 #ifndef NFS_V2_ONLY
1140 if (v3) {
1141 nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
1142 len = fxdr_unsigned(uint32_t, *tl);
1143 if (len > NFS_MAXPATHLEN) {
1144 /*
1145 * this pathname is too long for us.
1146 */
1147 m_freem(mrep);
1148 /* Solaris returns EINVAL. should we follow? */
1149 error = ENAMETOOLONG;
1150 goto nfsmout;
1151 }
1152 } else
1153 #endif
1154 {
1155 nfsm_strsiz(len, NFS_MAXPATHLEN);
1156 }
1157 nfsm_mtouio(uiop, len);
1158 }
1159 nfsm_reqdone;
1160 return (error);
1161 }
1162
1163 /*
1164 * nfs read rpc call
1165 * Ditto above
1166 */
1167 int
1168 nfs_readrpc(struct vnode *vp, struct uio *uiop)
1169 {
1170 u_int32_t *tl;
1171 char *cp;
1172 int32_t t1, t2;
1173 char *bpos, *dpos, *cp2;
1174 struct mbuf *mreq, *mrep, *md, *mb;
1175 struct nfsmount *nmp;
1176 int error = 0, len, retlen, tsiz, eof __unused, byte_count;
1177 const int v3 = NFS_ISV3(vp);
1178 struct nfsnode *np = VTONFS(vp);
1179 #ifndef NFS_V2_ONLY
1180 int attrflag;
1181 #endif
1182
1183 #ifndef nolint
1184 eof = 0;
1185 #endif
1186 nmp = VFSTONFS(vp->v_mount);
1187 tsiz = uiop->uio_resid;
1188 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1189 return (EFBIG);
1190 iostat_busy(nmp->nm_stats);
1191 byte_count = 0; /* count bytes actually transferred */
1192 while (tsiz > 0) {
1193 nfsstats.rpccnt[NFSPROC_READ]++;
1194 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1195 nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1196 nfsm_fhtom(np, v3);
1197 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1198 #ifndef NFS_V2_ONLY
1199 if (v3) {
1200 txdr_hyper(uiop->uio_offset, tl);
1201 *(tl + 2) = txdr_unsigned(len);
1202 } else
1203 #endif
1204 {
1205 *tl++ = txdr_unsigned(uiop->uio_offset);
1206 *tl++ = txdr_unsigned(len);
1207 *tl = 0;
1208 }
1209 nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
1210 #ifndef NFS_V2_ONLY
1211 if (v3) {
1212 nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
1213 if (error) {
1214 m_freem(mrep);
1215 goto nfsmout;
1216 }
1217 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1218 eof = fxdr_unsigned(int, *(tl + 1));
1219 } else
1220 #endif
1221 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1222 nfsm_strsiz(retlen, nmp->nm_rsize);
1223 nfsm_mtouio(uiop, retlen);
1224 m_freem(mrep);
1225 tsiz -= retlen;
1226 byte_count += retlen;
1227 #ifndef NFS_V2_ONLY
1228 if (v3) {
1229 if (eof || retlen == 0)
1230 tsiz = 0;
1231 } else
1232 #endif
1233 if (retlen < len)
1234 tsiz = 0;
1235 }
1236 nfsmout:
1237 iostat_unbusy(nmp->nm_stats, byte_count, 1);
1238 return (error);
1239 }
1240
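/*
 * Context shared between nfs_writerpc() and nfs_writerpc_extfree():
 * nwc_mbufcount counts mbufs that still reference the caller's
 * (page-protected) data, so that nfs_writerpc() can wait for all of
 * them to be freed before it returns.
 */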
1241 struct nfs_writerpc_context {
1242 kmutex_t nwc_lock;
1243 kcondvar_t nwc_cv;
1244 int nwc_mbufcount;
1245 };
1246
1247 /*
1248 * free an mbuf that referenced protected pages during a write rpc call.
1249 * called at splvm.
1250 */
1251 static void
1252 nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
1253 {
1254 struct nfs_writerpc_context *ctx = arg;
1255
1256 KASSERT(m != NULL);
1257 KASSERT(ctx != NULL);
1258 pool_cache_put(mb_cache, m);
1259 mutex_enter(&ctx->nwc_lock);
1260 if (--ctx->nwc_mbufcount == 0) {
1261 cv_signal(&ctx->nwc_cv);
1262 }
1263 mutex_exit(&ctx->nwc_lock);
1264 }
1265
1266 /*
1267 * nfs write call
1268 */
1269 int
1270 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, bool pageprotected, bool *stalewriteverfp)
1271 {
1272 u_int32_t *tl;
1273 char *cp;
1274 int32_t t1, t2;
1275 char *bpos, *dpos;
1276 struct mbuf *mreq, *mrep, *md, *mb;
1277 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1278 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
1279 const int v3 = NFS_ISV3(vp);
1280 int committed = NFSV3WRITE_FILESYNC;
1281 struct nfsnode *np = VTONFS(vp);
1282 struct nfs_writerpc_context ctx;
1283 int byte_count;
1284 size_t origresid;
1285 #ifndef NFS_V2_ONLY
1286 char *cp2;
1287 int rlen, commit;
1288 #endif
1289
1290 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1291 panic("writerpc readonly vp %p", vp);
1292 }
1293
1294 #ifdef DIAGNOSTIC
1295 if (uiop->uio_iovcnt != 1)
1296 panic("nfs: writerpc iovcnt > 1");
1297 #endif
1298 tsiz = uiop->uio_resid;
1299 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1300 return EFBIG;
1301
1302 mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
1303 cv_init(&ctx.nwc_cv, "nfsmblk");
1304 ctx.nwc_mbufcount = 1;
1305
1306 retry:
1307 origresid = uiop->uio_resid;
1308 KASSERT(origresid == uiop->uio_iov->iov_len);
1309 iostat_busy(nmp->nm_stats);
1310 byte_count = 0; /* count of bytes actually written */
1311 while (tsiz > 0) {
1312 uint32_t datalen; /* data bytes that need to be allocated in the mbuf */
1313 size_t backup;
1314 bool stalewriteverf = false;
1315
1316 nfsstats.rpccnt[NFSPROC_WRITE]++;
1317 len = uimin(tsiz, nmp->nm_wsize);
1318 datalen = pageprotected ? 0 : nfsm_rndup(len);
1319 nfsm_reqhead(np, NFSPROC_WRITE,
1320 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
1321 nfsm_fhtom(np, v3);
1322 #ifndef NFS_V2_ONLY
1323 if (v3) {
1324 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1325 txdr_hyper(uiop->uio_offset, tl);
1326 tl += 2;
1327 *tl++ = txdr_unsigned(len);
1328 *tl++ = txdr_unsigned(*iomode);
1329 *tl = txdr_unsigned(len);
1330 } else
1331 #endif
1332 {
1333 u_int32_t x;
1334
1335 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1336 /* Set both "begin" and "current" to non-garbage. */
1337 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1338 *tl++ = x; /* "begin offset" */
1339 *tl++ = x; /* "current offset" */
1340 x = txdr_unsigned(len);
1341 *tl++ = x; /* total to this offset */
1342 *tl = x; /* size of this write */
1343
1344 }
1345 if (pageprotected) {
1346 /*
1347 * since we know pages can't be modified during i/o,
1348 * no need to copy them for us.
1349 */
1350 struct mbuf *m;
1351 struct iovec *iovp = uiop->uio_iov;
1352
1353 m = m_get(M_WAIT, MT_DATA);
1354 MCLAIM(m, &nfs_mowner);
1355 MEXTADD(m, iovp->iov_base, len, M_MBUF,
1356 nfs_writerpc_extfree, &ctx);
1357 m->m_flags |= M_EXT_ROMAP;
1358 m->m_len = len;
1359 mb->m_next = m;
1360 /*
1361 * no need to maintain mb and bpos here
1362 * because no one cares about them later.
1363 */
1364 #if 0
1365 mb = m;
1366 bpos = mtod(void *, mb) + mb->m_len;
1367 #endif
1368 UIO_ADVANCE(uiop, len);
1369 uiop->uio_offset += len;
1370 mutex_enter(&ctx.nwc_lock);
1371 ctx.nwc_mbufcount++;
1372 mutex_exit(&ctx.nwc_lock);
1373 nfs_zeropad(mb, 0, nfsm_padlen(len));
1374 } else {
1375 nfsm_uiotom(uiop, len);
1376 }
1377 nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
1378 #ifndef NFS_V2_ONLY
1379 if (v3) {
1380 wccflag = NFSV3_WCCCHK;
1381 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
1382 if (!error) {
1383 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1384 + NFSX_V3WRITEVERF);
1385 rlen = fxdr_unsigned(int, *tl++);
1386 if (rlen == 0) {
1387 error = NFSERR_IO;
1388 m_freem(mrep);
1389 break;
1390 } else if (rlen < len) {
1391 backup = len - rlen;
1392 UIO_ADVANCE(uiop, -backup);
1393 uiop->uio_offset -= backup;
1394 len = rlen;
1395 }
1396 commit = fxdr_unsigned(int, *tl++);
1397
1398 /*
1399 * Return the lowest commitment level
1400 * obtained by any of the RPCs.
1401 */
1402 if (committed == NFSV3WRITE_FILESYNC)
1403 committed = commit;
1404 else if (committed == NFSV3WRITE_DATASYNC &&
1405 commit == NFSV3WRITE_UNSTABLE)
1406 committed = commit;
1407 mutex_enter(&nmp->nm_lock);
1408 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
1409 memcpy(nmp->nm_writeverf, tl,
1410 NFSX_V3WRITEVERF);
1411 nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
1412 } else if ((nmp->nm_iflag &
1413 NFSMNT_STALEWRITEVERF) ||
1414 memcmp(tl, nmp->nm_writeverf,
1415 NFSX_V3WRITEVERF)) {
1416 memcpy(nmp->nm_writeverf, tl,
1417 NFSX_V3WRITEVERF);
1418 /*
1419 * note NFSMNT_STALEWRITEVERF
1420 * if we're the first thread to
1421 * notice it.
1422 */
1423 if ((nmp->nm_iflag &
1424 NFSMNT_STALEWRITEVERF) == 0) {
1425 stalewriteverf = true;
1426 nmp->nm_iflag |=
1427 NFSMNT_STALEWRITEVERF;
1428 }
1429 }
1430 mutex_exit(&nmp->nm_lock);
1431 }
1432 } else
1433 #endif
1434 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1435 if (wccflag)
1436 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
1437 m_freem(mrep);
1438 if (error)
1439 break;
1440 tsiz -= len;
1441 byte_count += len;
1442 if (stalewriteverf) {
1443 *stalewriteverfp = true;
1444 stalewriteverf = false;
1445 if (committed == NFSV3WRITE_UNSTABLE &&
1446 len != origresid) {
1447 /*
1448 * if our write requests weren't atomic but
1449 * unstable, data from previous iterations
1450 * might already have been lost by now,
1451 * so we should resend it to the nfsd.
1452 */
1453 backup = origresid - tsiz;
1454 UIO_ADVANCE(uiop, -backup);
1455 uiop->uio_offset -= backup;
1456 tsiz = origresid;
1457 goto retry;
1458 }
1459 }
1460 }
1461 nfsmout:
1462 iostat_unbusy(nmp->nm_stats, byte_count, 0);
1463 if (pageprotected) {
1464 /*
1465 * wait until mbufs go away.
1466 * retransmitted mbufs can survive longer than rpc requests
1467 * themselves.
1468 */
1469 mutex_enter(&ctx.nwc_lock);
1470 ctx.nwc_mbufcount--;
1471 while (ctx.nwc_mbufcount > 0) {
1472 cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
1473 }
1474 mutex_exit(&ctx.nwc_lock);
1475 }
1476 mutex_destroy(&ctx.nwc_lock);
1477 cv_destroy(&ctx.nwc_cv);
1478 *iomode = committed;
1479 if (error)
1480 uiop->uio_resid = tsiz;
1481 return (error);
1482 }
1483
1484 /*
1485 * nfs mknod rpc
1486 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1487 * mode set to specify the file type and the size field for rdev.
1488 */
1489 int
1490 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap)
1491 {
1492 struct nfsv2_sattr *sp;
1493 u_int32_t *tl;
1494 char *cp;
1495 int32_t t1, t2;
1496 struct vnode *newvp = (struct vnode *)0;
1497 struct nfsnode *dnp, *np;
1498 char *cp2;
1499 char *bpos, *dpos;
1500 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1501 struct mbuf *mreq, *mrep, *md, *mb;
1502 u_int32_t rdev;
1503 const int v3 = NFS_ISV3(dvp);
1504
1505 if (vap->va_type == VCHR || vap->va_type == VBLK)
1506 rdev = txdr_unsigned(vap->va_rdev);
1507 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1508 rdev = nfs_xdrneg1;
1509 else {
1510 VOP_ABORTOP(dvp, cnp);
1511 return (EOPNOTSUPP);
1512 }
1513 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1514 dnp = VTONFS(dvp);
1515 nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1516 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1517 nfsm_fhtom(dnp, v3);
1518 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1519 #ifndef NFS_V2_ONLY
1520 if (v3) {
1521 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1522 *tl++ = vtonfsv3_type(vap->va_type);
1523 nfsm_v3attrbuild(vap, false);
1524 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1525 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1526 *tl++ = txdr_unsigned(major(vap->va_rdev));
1527 *tl = txdr_unsigned(minor(vap->va_rdev));
1528 }
1529 } else
1530 #endif
1531 {
1532 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1533 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1534 sp->sa_uid = nfs_xdrneg1;
1535 sp->sa_gid = nfs_xdrneg1;
1536 sp->sa_size = rdev;
1537 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1538 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1539 }
1540 nfsm_request(dnp, NFSPROC_MKNOD, curlwp, cnp->cn_cred);
1541 if (!error) {
1542 nfsm_mtofh(dvp, newvp, v3, gotvp);
1543 if (!gotvp) {
1544 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1545 cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1546 if (!error)
1547 newvp = NFSTOV(np);
1548 }
1549 }
1550 #ifndef NFS_V2_ONLY
1551 if (v3)
1552 nfsm_wcc_data(dvp, wccflag, 0, !error);
1553 #endif
1554 nfsm_reqdone;
1555 if (error) {
1556 if (newvp)
1557 vput(newvp);
1558 } else {
1559 nfs_cache_enter(dvp, newvp, cnp);
1560 *vpp = newvp;
1561 VOP_UNLOCK(newvp);
1562 }
1563 VTONFS(dvp)->n_flag |= NMODIFIED;
1564 if (!wccflag)
1565 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1566 return (error);
1567 }
1568
1569 /*
1570 * nfs mknod vop
1571 * just call nfs_mknodrpc() to do the work.
1572 */
1573 /* ARGSUSED */
1574 int
1575 nfs_mknod(void *v)
1576 {
1577 struct vop_mknod_v3_args /* {
1578 struct vnode *a_dvp;
1579 struct vnode **a_vpp;
1580 struct componentname *a_cnp;
1581 struct vattr *a_vap;
1582 } */ *ap = v;
1583 struct vnode *dvp = ap->a_dvp;
1584 struct componentname *cnp = ap->a_cnp;
1585 int error;
1586
1587 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
1588 VN_KNOTE(dvp, NOTE_WRITE);
1589 if (error == 0 || error == EEXIST)
1590 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1591 return (error);
1592 }
1593
1594 /*
1595 * nfs file create call
1596 */
1597 int
1598 nfs_create(void *v)
1599 {
1600 struct vop_create_v3_args /* {
1601 struct vnode *a_dvp;
1602 struct vnode **a_vpp;
1603 struct componentname *a_cnp;
1604 struct vattr *a_vap;
1605 } */ *ap = v;
1606 struct vnode *dvp = ap->a_dvp;
1607 struct vattr *vap = ap->a_vap;
1608 struct componentname *cnp = ap->a_cnp;
1609 struct nfsv2_sattr *sp;
1610 u_int32_t *tl;
1611 char *cp;
1612 int32_t t1, t2;
1613 struct nfsnode *dnp, *np = (struct nfsnode *)0;
1614 struct vnode *newvp = (struct vnode *)0;
1615 char *bpos, *dpos, *cp2;
1616 int error, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1617 struct mbuf *mreq, *mrep, *md, *mb;
1618 const int v3 = NFS_ISV3(dvp);
1619 u_int32_t excl_mode = NFSV3CREATE_UNCHECKED;
1620
1621 /*
1622 * Oops, not for me..
1623 */
1624 if (vap->va_type == VSOCK)
1625 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1626
1627 KASSERT(vap->va_type == VREG);
1628
1629 #ifdef VA_EXCLUSIVE
1630 if (vap->va_vaflags & VA_EXCLUSIVE) {
1631 excl_mode = NFSV3CREATE_EXCLUSIVE;
1632 }
1633 #endif
1634 again:
1635 error = 0;
1636 nfsstats.rpccnt[NFSPROC_CREATE]++;
1637 dnp = VTONFS(dvp);
1638 nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1639 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1640 nfsm_fhtom(dnp, v3);
1641 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1642 #ifndef NFS_V2_ONLY
1643 if (v3) {
1644 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1645 if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1646 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1647 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1648 *tl++ = cprng_fast32();
1649 *tl = cprng_fast32();
1650 } else {
1651 *tl = txdr_unsigned(excl_mode);
1652 nfsm_v3attrbuild(vap, false);
1653 }
1654 } else
1655 #endif
1656 {
1657 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1658 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1659 sp->sa_uid = nfs_xdrneg1;
1660 sp->sa_gid = nfs_xdrneg1;
1661 sp->sa_size = 0;
1662 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1663 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1664 }
1665 nfsm_request(dnp, NFSPROC_CREATE, curlwp, cnp->cn_cred);
1666 if (!error) {
1667 nfsm_mtofh(dvp, newvp, v3, gotvp);
1668 if (!gotvp) {
1669 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1670 cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1671 if (!error)
1672 newvp = NFSTOV(np);
1673 }
1674 }
1675 #ifndef NFS_V2_ONLY
1676 if (v3)
1677 nfsm_wcc_data(dvp, wccflag, 0, !error);
1678 #endif
1679 nfsm_reqdone;
1680 if (error) {
1681 /*
1682 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
1683 */
1684 if (v3 && error == ENOTSUP) {
1685 if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1686 excl_mode = NFSV3CREATE_GUARDED;
1687 goto again;
1688 } else if (excl_mode == NFSV3CREATE_GUARDED) {
1689 excl_mode = NFSV3CREATE_UNCHECKED;
1690 goto again;
1691 }
1692 }
1693 } else if (v3 && (excl_mode == NFSV3CREATE_EXCLUSIVE)) {
1694 struct timespec ts;
1695
1696 getnanotime(&ts);
1697
1698 /*
1699 * make sure that we'll update timestamps as
1700 * most server implementations use them to store
1701 * the create verifier.
1702 *
1703 * XXX it's better to use TOSERVER always.
1704 */
1705
1706 if (vap->va_atime.tv_sec == VNOVAL)
1707 vap->va_atime = ts;
1708 if (vap->va_mtime.tv_sec == VNOVAL)
1709 vap->va_mtime = ts;
1710
1711 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, curlwp);
1712 }
1713 if (error == 0) {
1714 if (cnp->cn_flags & MAKEENTRY)
1715 nfs_cache_enter(dvp, newvp, cnp);
1716 else
1717 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1718 *ap->a_vpp = newvp;
1719 VOP_UNLOCK(newvp);
1720 } else {
1721 if (newvp)
1722 vput(newvp);
1723 if (error == EEXIST)
1724 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1725 }
1726 VTONFS(dvp)->n_flag |= NMODIFIED;
1727 if (!wccflag)
1728 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1729 VN_KNOTE(ap->a_dvp, NOTE_WRITE);
1730 return (error);
1731 }
1732
1733 /*
1734 * nfs file remove call
1735 * To try and make nfs semantics closer to ufs semantics, a file that has
1736 * other processes using the vnode is renamed instead of removed and then
1737 * removed later on the last close.
1738 * - If vrefcnt(vp) > 1
1739 * If a rename is not already in the works
1740 * call nfs_sillyrename() to set it up
1741 * else
1742 * do the remove rpc
1743 */
1744 int
1745 nfs_remove(void *v)
1746 {
1747 struct vop_remove_v2_args /* {
1748 struct vnodeop_desc *a_desc;
1749 struct vnode * a_dvp;
1750 struct vnode * a_vp;
1751 struct componentname * a_cnp;
1752 } */ *ap = v;
1753 struct vnode *vp = ap->a_vp;
1754 struct vnode *dvp = ap->a_dvp;
1755 struct componentname *cnp = ap->a_cnp;
1756 struct nfsnode *np = VTONFS(vp);
1757 int error = 0;
1758 struct vattr vattr;
1759
1760 #ifndef DIAGNOSTIC
1761 if (vrefcnt(vp) < 1)
1762 panic("nfs_remove: bad vrefcnt(vp)");
1763 #endif
1764 if (vp->v_type == VDIR)
1765 error = EPERM;
1766 else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
1767 VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
1768 vattr.va_nlink > 1)) {
1769 /*
1770 * Purge the name cache so that the chance of a lookup for
1771 * the name succeeding while the remove is in progress is
1772 * minimized. Without node locking it can still happen, such
1773 * that an I/O op returns ESTALE, but since you can get that
1774 * anyway if another host removes the file, it is not worth worrying about.
1775 */
1776 cache_purge(vp);
1777 /*
1778 * throw away biocache buffers, mainly to avoid
1779 * unnecessary delayed writes later.
1780 */
1781 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, curlwp, 1);
1782 /* Do the rpc */
1783 if (error != EINTR)
1784 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1785 cnp->cn_namelen, cnp->cn_cred, curlwp);
1786 } else if (!np->n_sillyrename)
1787 error = nfs_sillyrename(dvp, vp, cnp, false);
1788 if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
1789 vattr.va_nlink == 1) {
1790 np->n_flag |= NREMOVED;
1791 }
1792 NFS_INVALIDATE_ATTRCACHE(np);
1793 VN_KNOTE(vp, NOTE_DELETE);
1794 VN_KNOTE(dvp, NOTE_WRITE);
1795 if (dvp == vp)
1796 vrele(vp);
1797 else
1798 vput(vp);
1799 return (error);
1800 }
1801
1802 /*
1803 * nfs file remove rpc called from nfs_inactive
1804 */
1805 int
1806 nfs_removeit(struct sillyrename *sp)
1807 {
1808
1809 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1810 (struct lwp *)0));
1811 }
1812
1813 /*
1814 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1815 */
1816 int
1817 nfs_removerpc(struct vnode *dvp, const char *name, int namelen, kauth_cred_t cred, struct lwp *l)
1818 {
1819 u_int32_t *tl;
1820 char *cp;
1821 #ifndef NFS_V2_ONLY
1822 int32_t t1;
1823 char *cp2;
1824 #endif
1825 int32_t t2;
1826 char *bpos, *dpos;
1827 int error = 0, wccflag = NFSV3_WCCRATTR;
1828 struct mbuf *mreq, *mrep, *md, *mb;
1829 const int v3 = NFS_ISV3(dvp);
1830 int rexmit = 0;
1831 struct nfsnode *dnp = VTONFS(dvp);
1832
1833 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1834 nfsm_reqhead(dnp, NFSPROC_REMOVE,
1835 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1836 nfsm_fhtom(dnp, v3);
1837 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1838 nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
1839 #ifndef NFS_V2_ONLY
1840 if (v3)
1841 nfsm_wcc_data(dvp, wccflag, 0, !error);
1842 #endif
1843 nfsm_reqdone;
1844 VTONFS(dvp)->n_flag |= NMODIFIED;
1845 if (!wccflag)
1846 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1847 /*
1848 * Kludge City: If the first reply to the remove rpc is lost..
1849 * the reply to the retransmitted request will be ENOENT
1850 * since the file was in fact removed.
1851 * Therefore, we cheat and return success.
1852 */
1853 if (rexmit && error == ENOENT)
1854 error = 0;
1855 return (error);
1856 }
1857
1858 /*
1859 * nfs file rename call
1860 */
1861 int
1862 nfs_rename(void *v)
1863 {
1864 struct vop_rename_args /* {
1865 struct vnode *a_fdvp;
1866 struct vnode *a_fvp;
1867 struct componentname *a_fcnp;
1868 struct vnode *a_tdvp;
1869 struct vnode *a_tvp;
1870 struct componentname *a_tcnp;
1871 } */ *ap = v;
1872 struct vnode *fvp = ap->a_fvp;
1873 struct vnode *tvp = ap->a_tvp;
1874 struct vnode *fdvp = ap->a_fdvp;
1875 struct vnode *tdvp = ap->a_tdvp;
1876 struct componentname *tcnp = ap->a_tcnp;
1877 struct componentname *fcnp = ap->a_fcnp;
1878 int error;
1879
1880 /* Check for cross-device rename */
1881 if ((fvp->v_mount != tdvp->v_mount) ||
1882 (tvp && (fvp->v_mount != tvp->v_mount))) {
1883 error = EXDEV;
1884 goto out;
1885 }
1886
1887 /*
1888 * If the tvp exists and is in use, sillyrename it before doing the
1889 * rename of the new file over it.
1890 *
1891 * Have sillyrename use link instead of rename if possible,
1892 * so that we don't lose the file if the rename fails, and so
1893 * that there's no window when the "to" file doesn't exist.
1894 */
1895 if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
1896 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
1897 VN_KNOTE(tvp, NOTE_DELETE);
1898 vput(tvp);
1899 tvp = NULL;
1900 }
1901
1902 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1903 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1904 curlwp);
1905
1906 VN_KNOTE(fdvp, NOTE_WRITE);
1907 VN_KNOTE(tdvp, NOTE_WRITE);
1908 if (error == 0 || error == EEXIST) {
1909 if (fvp->v_type == VDIR)
1910 cache_purge(fvp);
1911 else
1912 cache_purge1(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1913 0);
1914 if (tvp != NULL && tvp->v_type == VDIR)
1915 cache_purge(tvp);
1916 else
1917 cache_purge1(tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
1918 0);
1919 }
1920 out:
1921 if (tdvp == tvp)
1922 vrele(tdvp);
1923 else
1924 vput(tdvp);
1925 if (tvp)
1926 vput(tvp);
1927 vrele(fdvp);
1928 vrele(fvp);
1929 return (error);
1930 }
1931
1932 /*
1933 * nfs file rename rpc called from nfs_sillyrename() below
1934 */
1935 int
1936 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, struct sillyrename *sp)
1937 {
1938 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1939 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curlwp));
1940 }
1941
1942 /*
1943 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1944 */
1945 int
1946 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen, struct vnode *tdvp, const char *tnameptr, int tnamelen, kauth_cred_t cred, struct lwp *l)
1947 {
1948 u_int32_t *tl;
1949 char *cp;
1950 #ifndef NFS_V2_ONLY
1951 int32_t t1;
1952 char *cp2;
1953 #endif
1954 int32_t t2;
1955 char *bpos, *dpos;
1956 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1957 struct mbuf *mreq, *mrep, *md, *mb;
1958 const int v3 = NFS_ISV3(fdvp);
1959 int rexmit = 0;
1960 struct nfsnode *fdnp = VTONFS(fdvp);
1961
1962 nfsstats.rpccnt[NFSPROC_RENAME]++;
1963 nfsm_reqhead(fdnp, NFSPROC_RENAME,
1964 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1965 nfsm_rndup(tnamelen));
1966 nfsm_fhtom(fdnp, v3);
1967 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1968 nfsm_fhtom(VTONFS(tdvp), v3);
1969 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1970 nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
1971 #ifndef NFS_V2_ONLY
1972 if (v3) {
1973 nfsm_wcc_data(fdvp, fwccflag, 0, !error);
1974 nfsm_wcc_data(tdvp, twccflag, 0, !error);
1975 }
1976 #endif
1977 nfsm_reqdone;
1978 VTONFS(fdvp)->n_flag |= NMODIFIED;
1979 VTONFS(tdvp)->n_flag |= NMODIFIED;
1980 if (!fwccflag)
1981 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
1982 if (!twccflag)
1983 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
1984 /*
1985 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1986 */
1987 if (rexmit && error == ENOENT)
1988 error = 0;
1989 return (error);
1990 }
1991
1992 /*
1993 * NFS link RPC, called from nfs_link.
1994 * Assumes dvp and vp locked, and leaves them that way.
1995 */
1996
1997 static int
1998 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name,
1999 size_t namelen, kauth_cred_t cred, struct lwp *l)
2000 {
2001 u_int32_t *tl;
2002 char *cp;
2003 #ifndef NFS_V2_ONLY
2004 int32_t t1;
2005 char *cp2;
2006 #endif
2007 int32_t t2;
2008 char *bpos, *dpos;
2009 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
2010 struct mbuf *mreq, *mrep, *md, *mb;
2011 const int v3 = NFS_ISV3(dvp);
2012 int rexmit = 0;
2013 struct nfsnode *np = VTONFS(vp);
2014
2015 nfsstats.rpccnt[NFSPROC_LINK]++;
2016 nfsm_reqhead(np, NFSPROC_LINK,
2017 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen));
2018 nfsm_fhtom(np, v3);
2019 nfsm_fhtom(VTONFS(dvp), v3);
2020 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
2021 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit);
2022 #ifndef NFS_V2_ONLY
2023 if (v3) {
2024 nfsm_postop_attr(vp, attrflag, 0);
2025 nfsm_wcc_data(dvp, wccflag, 0, !error);
2026 }
2027 #endif
2028 nfsm_reqdone;
2029
2030 VTONFS(dvp)->n_flag |= NMODIFIED;
2031 if (!attrflag)
2032 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
2033 if (!wccflag)
2034 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2035
2036 /*
2037 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2038 */
2039 if (rexmit && error == EEXIST)
2040 error = 0;
2041
2042 return error;
2043 }
2044
2045 /*
2046 * nfs hard link create call
2047 */
2048 int
2049 nfs_link(void *v)
2050 {
2051 struct vop_link_v2_args /* {
2052 struct vnode *a_dvp;
2053 struct vnode *a_vp;
2054 struct componentname *a_cnp;
2055 } */ *ap = v;
2056 struct vnode *vp = ap->a_vp;
2057 struct vnode *dvp = ap->a_dvp;
2058 struct componentname *cnp = ap->a_cnp;
2059 int error = 0;
2060
2061 error = vn_lock(vp, LK_EXCLUSIVE);
2062 if (error != 0) {
2063 VOP_ABORTOP(dvp, cnp);
2064 return error;
2065 }
2066
2067 /*
2068 * Push all writes to the server, so that the attribute cache
2069 * doesn't get "out of sync" with the server.
2070 * XXX There should be a better way!
2071 */
2072 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0);
2073
2074 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2075 cnp->cn_cred, curlwp);
2076
2077 if (error == 0) {
2078 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
2079 }
2080 VOP_UNLOCK(vp);
2081 VN_KNOTE(vp, NOTE_LINK);
2082 VN_KNOTE(dvp, NOTE_WRITE);
2083 return (error);
2084 }
2085
2086 /*
2087 * nfs symbolic link create call
2088 */
2089 int
2090 nfs_symlink(void *v)
2091 {
2092 struct vop_symlink_v3_args /* {
2093 struct vnode *a_dvp;
2094 struct vnode **a_vpp;
2095 struct componentname *a_cnp;
2096 struct vattr *a_vap;
2097 char *a_target;
2098 } */ *ap = v;
2099 struct vnode *dvp = ap->a_dvp;
2100 struct vattr *vap = ap->a_vap;
2101 struct componentname *cnp = ap->a_cnp;
2102 struct nfsv2_sattr *sp;
2103 u_int32_t *tl;
2104 char *cp;
2105 int32_t t1, t2;
2106 char *bpos, *dpos, *cp2;
2107 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2108 struct mbuf *mreq, *mrep, *md, *mb;
2109 struct vnode *newvp = (struct vnode *)0;
2110 const int v3 = NFS_ISV3(dvp);
2111 int rexmit = 0;
2112 struct nfsnode *dnp = VTONFS(dvp);
2113
2114 *ap->a_vpp = NULL;
2115 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2116 slen = strlen(ap->a_target);
2117 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2118 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2119 nfsm_fhtom(dnp, v3);
2120 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2121 #ifndef NFS_V2_ONLY
2122 if (v3)
2123 nfsm_v3attrbuild(vap, false);
2124 #endif
2125 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2127 if (!v3) {
2128 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2129 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2130 sp->sa_uid = nfs_xdrneg1;
2131 sp->sa_gid = nfs_xdrneg1;
2132 sp->sa_size = nfs_xdrneg1;
2133 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2134 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2135 }
2137 nfsm_request1(dnp, NFSPROC_SYMLINK, curlwp, cnp->cn_cred,
2138 &rexmit);
2139 #ifndef NFS_V2_ONLY
2140 if (v3) {
2141 if (!error)
2142 nfsm_mtofh(dvp, newvp, v3, gotvp);
2143 nfsm_wcc_data(dvp, wccflag, 0, !error);
2144 }
2145 #endif
2146 nfsm_reqdone;
2147 /*
2148 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2149 */
2150 if (rexmit && error == EEXIST)
2151 error = 0;
2152 if (error == 0 || error == EEXIST)
2153 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
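	/*
	 * NFSv2 SYMLINK does not return the new file's handle, and the
	 * NFSv3 post-op file handle is optional, so if the reply gave us
	 * no vnode, look the new link up explicitly.
	 */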
2154 if (error == 0 && newvp == NULL) {
2155 struct nfsnode *np = NULL;
2156
2157 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2158 cnp->cn_cred, curlwp, &np);
2159 if (error == 0)
2160 newvp = NFSTOV(np);
2161 }
2162 if (error) {
2163 if (newvp != NULL)
2164 vput(newvp);
2165 } else {
2166 *ap->a_vpp = newvp;
2167 VOP_UNLOCK(newvp);
2168 }
2169 VTONFS(dvp)->n_flag |= NMODIFIED;
2170 if (!wccflag)
2171 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2172 VN_KNOTE(dvp, NOTE_WRITE);
2173 return (error);
2174 }
2175
2176 /*
2177 * nfs make dir call
2178 */
2179 int
2180 nfs_mkdir(void *v)
2181 {
2182 struct vop_mkdir_v3_args /* {
2183 struct vnode *a_dvp;
2184 struct vnode **a_vpp;
2185 struct componentname *a_cnp;
2186 struct vattr *a_vap;
2187 } */ *ap = v;
2188 struct vnode *dvp = ap->a_dvp;
2189 struct vattr *vap = ap->a_vap;
2190 struct componentname *cnp = ap->a_cnp;
2191 struct nfsv2_sattr *sp;
2192 u_int32_t *tl;
2193 char *cp;
2194 int32_t t1, t2;
2195 int len;
2196 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0;
2197 struct vnode *newvp = (struct vnode *)0;
2198 char *bpos, *dpos, *cp2;
2199 int error = 0, wccflag = NFSV3_WCCRATTR;
2200 int gotvp = 0;
2201 int rexmit = 0;
2202 struct mbuf *mreq, *mrep, *md, *mb;
2203 const int v3 = NFS_ISV3(dvp);
2204
2205 len = cnp->cn_namelen;
2206 nfsstats.rpccnt[NFSPROC_MKDIR]++;
2207 nfsm_reqhead(dnp, NFSPROC_MKDIR,
2208 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2209 nfsm_fhtom(dnp, v3);
2210 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2211 #ifndef NFS_V2_ONLY
2212 if (v3) {
2213 nfsm_v3attrbuild(vap, false);
2214 } else
2215 #endif
2216 {
2217 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2218 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2219 sp->sa_uid = nfs_xdrneg1;
2220 sp->sa_gid = nfs_xdrneg1;
2221 sp->sa_size = nfs_xdrneg1;
2222 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2223 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2224 }
2225 nfsm_request1(dnp, NFSPROC_MKDIR, curlwp, cnp->cn_cred, &rexmit);
2226 if (!error)
2227 nfsm_mtofh(dvp, newvp, v3, gotvp);
2228 if (v3)
2229 nfsm_wcc_data(dvp, wccflag, 0, !error);
2230 nfsm_reqdone;
2231 VTONFS(dvp)->n_flag |= NMODIFIED;
2232 if (!wccflag)
2233 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2234 /*
2235 * Kludge: Map EEXIST => 0, assuming that it is a reply to a retry,
2236 * provided we can succeed in looking up the directory.
2237 */
2238 if ((rexmit && error == EEXIST) || (!error && !gotvp)) {
2239 if (newvp) {
2240 vput(newvp);
2241 newvp = (struct vnode *)0;
2242 }
2243 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
2244 curlwp, &np);
2245 if (!error) {
2246 newvp = NFSTOV(np);
2247 if (newvp->v_type != VDIR || newvp == dvp)
2248 error = EEXIST;
2249 }
2250 }
2251 if (error) {
2252 if (newvp) {
2253 if (dvp != newvp)
2254 vput(newvp);
2255 else
2256 vrele(newvp);
2257 }
2258 } else {
2259 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2260 nfs_cache_enter(dvp, newvp, cnp);
2261 *ap->a_vpp = newvp;
2262 VOP_UNLOCK(newvp);
2263 }
2264 return (error);
2265 }
2266
2267 /*
2268 * nfs remove directory call
2269 */
2270 int
2271 nfs_rmdir(void *v)
2272 {
2273 struct vop_rmdir_v2_args /* {
2274 struct vnode *a_dvp;
2275 struct vnode *a_vp;
2276 struct componentname *a_cnp;
2277 } */ *ap = v;
2278 struct vnode *vp = ap->a_vp;
2279 struct vnode *dvp = ap->a_dvp;
2280 struct componentname *cnp = ap->a_cnp;
2281 u_int32_t *tl;
2282 char *cp;
2283 #ifndef NFS_V2_ONLY
2284 int32_t t1;
2285 char *cp2;
2286 #endif
2287 int32_t t2;
2288 char *bpos, *dpos;
2289 int error = 0, wccflag = NFSV3_WCCRATTR;
2290 int rexmit = 0;
2291 struct mbuf *mreq, *mrep, *md, *mb;
2292 const int v3 = NFS_ISV3(dvp);
2293 struct nfsnode *dnp;
2294
2295 if (dvp == vp) {
2296 vrele(vp);
2297 return (EINVAL);
2298 }
2299 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2300 dnp = VTONFS(dvp);
2301 nfsm_reqhead(dnp, NFSPROC_RMDIR,
2302 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2303 nfsm_fhtom(dnp, v3);
2304 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2305 nfsm_request1(dnp, NFSPROC_RMDIR, curlwp, cnp->cn_cred, &rexmit);
2306 #ifndef NFS_V2_ONLY
2307 if (v3)
2308 nfsm_wcc_data(dvp, wccflag, 0, !error);
2309 #endif
2310 nfsm_reqdone;
2311 VTONFS(dvp)->n_flag |= NMODIFIED;
2312 if (!wccflag)
2313 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2314 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2315 VN_KNOTE(vp, NOTE_DELETE);
2316 cache_purge(vp);
2317 vput(vp);
2318 /*
2319 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
2320 */
2321 if (rexmit && error == ENOENT)
2322 error = 0;
2323 return (error);
2324 }
2325
2326 /*
2327 * nfs readdir call
2328 */
2329 int
2330 nfs_readdir(void *v)
2331 {
2332 struct vop_readdir_args /* {
2333 struct vnode *a_vp;
2334 struct uio *a_uio;
2335 kauth_cred_t a_cred;
2336 int *a_eofflag;
2337 off_t **a_cookies;
2338 int *a_ncookies;
2339 } */ *ap = v;
2340 struct vnode *vp = ap->a_vp;
2341 struct uio *uio = ap->a_uio;
2342 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2343 char *base = uio->uio_iov->iov_base;
2344 int tresid, error;
2345 size_t count, lost;
2346 struct dirent *dp;
2347 off_t *cookies = NULL;
2348 int ncookies = 0, nc;
2349
2350 if (vp->v_type != VDIR)
2351 return (EPERM);
2352
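	/*
	 * Only whole NFS_DIRFRAGSIZ-sized chunks are handed to
	 * nfs_bioread(); remember the leftover byte count and add it back
	 * to uio_resid before returning.
	 */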
2353 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1);
2354 count = uio->uio_resid - lost;
2355 if (count <= 0)
2356 return (EINVAL);
2357
2358 /*
2359 * Call nfs_bioread() to do the real work.
2360 */
2361 tresid = uio->uio_resid = count;
2362 error = nfs_bioread(vp, uio, 0, ap->a_cred,
2363 ap->a_cookies ? NFSBIO_CACHECOOKIES : 0);
2364
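	/*
	 * Size the cookie array at one slot per 16 bytes of directory
	 * data.  Every record nfs_readdirrpc() places in the buffer is
	 * larger than that (a dirent plus two stashed cookies), so this is
	 * a safe upper bound on the number of entries walked below.
	 */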
2365 if (!error && ap->a_cookies) {
2366 ncookies = count / 16;
2367 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
2368 *ap->a_cookies = cookies;
2369 }
2370
2371 if (!error && uio->uio_resid == tresid) {
2372 uio->uio_resid += lost;
2373 nfsstats.direofcache_misses++;
2374 if (ap->a_cookies)
2375 *ap->a_ncookies = 0;
2376 *ap->a_eofflag = 1;
2377 return (0);
2378 }
2379
2380 if (!error && ap->a_cookies) {
2381 /*
2382 * Only the NFS server and emulations use cookies, and they
2383 * load the directory block into system space, so we can
2384 * just look at it directly.
2385 */
2386 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
2387 uio->uio_iovcnt != 1)
2388 panic("nfs_readdir: lost in space");
2389 for (nc = 0; ncookies-- &&
2390 base < (char *)uio->uio_iov->iov_base; nc++) {
2391 dp = (struct dirent *) base;
2392 if (dp->d_reclen == 0)
2393 break;
2394 if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
2395 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp);
2396 else
2397 *(cookies++) = NFS_GETCOOKIE(dp);
2398 base += dp->d_reclen;
2399 }
2400 uio->uio_resid +=
2401 ((char *)uio->uio_iov->iov_base - base);
2402 uio->uio_iov->iov_len +=
2403 ((char *)uio->uio_iov->iov_base - base);
2404 uio->uio_iov->iov_base = base;
2405 *ap->a_ncookies = nc;
2406 }
2407
2408 uio->uio_resid += lost;
2409 *ap->a_eofflag = 0;
2410 return (error);
2411 }
2412
2413 /*
2414 * Readdir rpc call.
2415 * Called from below the buffer cache by nfs_doio().
2416 */
2417 int
2418 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2419 {
2420 int len, left;
2421 struct dirent *dp = NULL;
2422 u_int32_t *tl;
2423 char *cp;
2424 int32_t t1, t2;
2425 char *bpos, *dpos, *cp2;
2426 struct mbuf *mreq, *mrep, *md, *mb;
2427 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2428 struct nfsnode *dnp = VTONFS(vp);
2429 u_quad_t fileno;
2430 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1;
2431 #ifndef NFS_V2_ONLY
2432 int attrflag;
2433 #endif
2434 int nrpcs = 0, reclen;
2435 const int v3 = NFS_ISV3(vp);
2436
2437 #ifdef DIAGNOSTIC
2438 /*
2439 * Should be called from buffer cache, so only amount of
2440 * NFS_DIRBLKSIZ will be requested.
2441 */
2442 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2443 panic("nfs readdirrpc bad uio");
2444 #endif
2445
2446 /*
2447 * Loop around doing readdir rpc's of size nm_readdirsize
2448 * truncated to a multiple of NFS_DIRFRAGSIZ.
2449 * The stopping criterion is EOF or a full buffer.
2450 */
2451 while (more_dirs && bigenough) {
2452 /*
2453 * Heuristic: don't bother to do another RPC to further
2454 * fill up this block if there is not much room left. (< 50%
2455 * of the readdir RPC size). This wastes some buffer space
2456 * but can save up to 50% in RPC calls.
2457 */
2458 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2459 bigenough = 0;
2460 break;
2461 }
2462 nfsstats.rpccnt[NFSPROC_READDIR]++;
2463 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) +
2464 NFSX_READDIR(v3));
2465 nfsm_fhtom(dnp, v3);
2466 #ifndef NFS_V2_ONLY
2467 if (v3) {
2468 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2469 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2470 txdr_swapcookie3(uiop->uio_offset, tl);
2471 } else {
2472 txdr_cookie3(uiop->uio_offset, tl);
2473 }
2474 tl += 2;
2475 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2476 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2477 } else
2478 #endif
2479 {
2480 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2481 *tl++ = txdr_unsigned(uiop->uio_offset);
2482 }
2483 *tl = txdr_unsigned(nmp->nm_readdirsize);
2484 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred);
2485 nrpcs++;
2486 #ifndef NFS_V2_ONLY
2487 if (v3) {
2488 nfsm_postop_attr(vp, attrflag, 0);
2489 if (!error) {
2490 nfsm_dissect(tl, u_int32_t *,
2491 2 * NFSX_UNSIGNED);
2492 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2493 dnp->n_cookieverf.nfsuquad[1] = *tl;
2494 } else {
2495 m_freem(mrep);
2496 goto nfsmout;
2497 }
2498 }
2499 #endif
2500 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2501 more_dirs = fxdr_unsigned(int, *tl);
2502
2503 /* loop thru the dir entries, doctoring them to 4bsd form */
2504 while (more_dirs && bigenough) {
2505 #ifndef NFS_V2_ONLY
2506 if (v3) {
2507 nfsm_dissect(tl, u_int32_t *,
2508 3 * NFSX_UNSIGNED);
2509 fileno = fxdr_hyper(tl);
2510 len = fxdr_unsigned(int, *(tl + 2));
2511 } else
2512 #endif
2513 {
2514 nfsm_dissect(tl, u_int32_t *,
2515 2 * NFSX_UNSIGNED);
2516 fileno = fxdr_unsigned(u_quad_t, *tl++);
2517 len = fxdr_unsigned(int, *tl);
2518 }
2519 if (len <= 0 || len > NFS_MAXNAMLEN) {
2520 error = EBADRPC;
2521 m_freem(mrep);
2522 goto nfsmout;
2523 }
2524 /* for cookie stashing */
2525 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2526 left = NFS_DIRFRAGSIZ - blksiz;
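			/*
			 * If this entry would straddle an NFS_DIRFRAGSIZ
			 * boundary, pad the previous entry's d_reclen out to
			 * the boundary and stash the current cookie there, so
			 * each directory fragment starts with a fresh entry.
			 */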
2527 if (reclen > left) {
2528 memset(uiop->uio_iov->iov_base, 0, left);
2529 dp->d_reclen += left;
2530 UIO_ADVANCE(uiop, left);
2531 blksiz = 0;
2532 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2533 }
2534 if (reclen > uiop->uio_resid)
2535 bigenough = 0;
2536 if (bigenough) {
2537 int tlen;
2538
2539 dp = (struct dirent *)uiop->uio_iov->iov_base;
2540 dp->d_fileno = fileno;
2541 dp->d_namlen = len;
2542 dp->d_reclen = reclen;
2543 dp->d_type = DT_UNKNOWN;
2544 blksiz += reclen;
2545 if (blksiz == NFS_DIRFRAGSIZ)
2546 blksiz = 0;
2547 UIO_ADVANCE(uiop, DIRHDSIZ);
2548 nfsm_mtouio(uiop, len);
2549 tlen = reclen - (DIRHDSIZ + len);
2550 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2551 UIO_ADVANCE(uiop, tlen);
2552 } else
2553 nfsm_adv(nfsm_rndup(len));
2554 #ifndef NFS_V2_ONLY
2555 if (v3) {
2556 nfsm_dissect(tl, u_int32_t *,
2557 3 * NFSX_UNSIGNED);
2558 } else
2559 #endif
2560 {
2561 nfsm_dissect(tl, u_int32_t *,
2562 2 * NFSX_UNSIGNED);
2563 }
2564 if (bigenough) {
2565 #ifndef NFS_V2_ONLY
2566 if (v3) {
2567 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2568 uiop->uio_offset =
2569 fxdr_swapcookie3(tl);
2570 else
2571 uiop->uio_offset =
2572 fxdr_cookie3(tl);
2573 }
2574 else
2575 #endif
2576 {
2577 uiop->uio_offset =
2578 fxdr_unsigned(off_t, *tl);
2579 }
2580 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2581 }
2582 if (v3)
2583 tl += 2;
2584 else
2585 tl++;
2586 more_dirs = fxdr_unsigned(int, *tl);
2587 }
2588 /*
2589 * If at end of rpc data, get the eof boolean
2590 */
2591 if (!more_dirs) {
2592 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2593 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2594
2595 /*
2596 * kludge: if we got no entries, treat it as EOF.
2597 * some servers sometimes send a reply without any
2598 * entries or an EOF indication.
2599 * although it might mean the server has a very long name,
2600 * we can't handle such entries anyway.
2601 */
2602
2603 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2604 more_dirs = 0;
2605 }
2606 m_freem(mrep);
2607 }
2608 /*
2609 * Fill last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2610 * by increasing d_reclen for the last record.
2611 */
2612 if (blksiz > 0) {
2613 left = NFS_DIRFRAGSIZ - blksiz;
2614 memset(uiop->uio_iov->iov_base, 0, left);
2615 dp->d_reclen += left;
2616 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2617 UIO_ADVANCE(uiop, left);
2618 }
2619
2620 /*
2621 * We are now either at the end of the directory or have filled the
2622 * block.
2623 */
2624 if (bigenough) {
2625 dnp->n_direofoffset = uiop->uio_offset;
2626 dnp->n_flag |= NEOFVALID;
2627 }
2628 nfsmout:
2629 return (error);
2630 }
2631
2632 #ifndef NFS_V2_ONLY
2633 /*
2634 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2635 */
2636 int
2637 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2638 {
2639 int len, left;
2640 struct dirent *dp = NULL;
2641 u_int32_t *tl;
2642 char *cp;
2643 int32_t t1, t2;
2644 struct vnode *newvp;
2645 char *bpos, *dpos, *cp2;
2646 struct mbuf *mreq, *mrep, *md, *mb;
2647 struct nameidata nami, *ndp = &nami;
2648 struct componentname *cnp = &ndp->ni_cnd;
2649 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2650 struct nfsnode *dnp = VTONFS(vp), *np;
2651 nfsfh_t *fhp;
2652 u_quad_t fileno;
2653 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2654 int attrflag, fhsize, nrpcs = 0, reclen;
2655 struct nfs_fattr fattr, *fp;
2656
2657 #ifdef DIAGNOSTIC
2658 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2659 panic("nfs readdirplusrpc bad uio");
2660 #endif
2661 ndp->ni_dvp = vp;
2662 newvp = NULLVP;
2663
2664 /*
2665 * Loop around doing readdir rpc's of size nm_readdirsize
2666 * truncated to a multiple of NFS_DIRFRAGSIZ.
2667 * The stopping criterion is EOF or a full buffer.
2668 */
2669 while (more_dirs && bigenough) {
2670 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2671 bigenough = 0;
2672 break;
2673 }
2674 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2675 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS,
2676 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2677 nfsm_fhtom(dnp, 1);
2678 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2679 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2680 txdr_swapcookie3(uiop->uio_offset, tl);
2681 } else {
2682 txdr_cookie3(uiop->uio_offset, tl);
2683 }
2684 tl += 2;
2685 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2686 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2687 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2688 *tl = txdr_unsigned(nmp->nm_rsize);
2689 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred);
2690 nfsm_postop_attr(vp, attrflag, 0);
2691 if (error) {
2692 m_freem(mrep);
2693 goto nfsmout;
2694 }
2695 nrpcs++;
2696 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2697 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2698 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2699 more_dirs = fxdr_unsigned(int, *tl);
2700
2701 /* loop thru the dir entries, doctoring them to 4bsd form */
2702 while (more_dirs && bigenough) {
2703 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2704 fileno = fxdr_hyper(tl);
2705 len = fxdr_unsigned(int, *(tl + 2));
2706 if (len <= 0 || len > NFS_MAXNAMLEN) {
2707 error = EBADRPC;
2708 m_freem(mrep);
2709 goto nfsmout;
2710 }
2711 /* for cookie stashing */
2712 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2713 left = NFS_DIRFRAGSIZ - blksiz;
2714 if (reclen > left) {
2715 /*
2716 * DIRFRAGSIZ is aligned, no need to align
2717 * again here.
2718 */
2719 memset(uiop->uio_iov->iov_base, 0, left);
2720 dp->d_reclen += left;
2721 UIO_ADVANCE(uiop, left);
2722 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2723 blksiz = 0;
2724 }
2725 if (reclen > uiop->uio_resid)
2726 bigenough = 0;
2727 if (bigenough) {
2728 int tlen;
2729
2730 dp = (struct dirent *)uiop->uio_iov->iov_base;
2731 dp->d_fileno = fileno;
2732 dp->d_namlen = len;
2733 dp->d_reclen = reclen;
2734 dp->d_type = DT_UNKNOWN;
2735 blksiz += reclen;
2736 if (blksiz == NFS_DIRFRAGSIZ)
2737 blksiz = 0;
2738 UIO_ADVANCE(uiop, DIRHDSIZ);
2739 nfsm_mtouio(uiop, len);
2740 tlen = reclen - (DIRHDSIZ + len);
2741 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2742 UIO_ADVANCE(uiop, tlen);
2743 cnp->cn_nameptr = dp->d_name;
2744 cnp->cn_namelen = dp->d_namlen;
2745 } else
2746 nfsm_adv(nfsm_rndup(len));
2747 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2748 if (bigenough) {
2749 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2750 uiop->uio_offset =
2751 fxdr_swapcookie3(tl);
2752 else
2753 uiop->uio_offset =
2754 fxdr_cookie3(tl);
2755 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2756 }
2757 tl += 2;
2758
2759 /*
2760 * Since the attributes are before the file handle
2761 * (sigh), we must skip over the attributes and then
2762 * come back and get them.
2763 */
2764 attrflag = fxdr_unsigned(int, *tl);
2765 if (attrflag) {
2766 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR);
2767 memcpy(&fattr, fp, NFSX_V3FATTR);
2768 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2769 doit = fxdr_unsigned(int, *tl);
2770 if (doit) {
2771 nfsm_getfh(fhp, fhsize, 1);
2772 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2773 vref(vp);
2774 newvp = vp;
2775 np = dnp;
2776 } else {
2777 error = nfs_nget1(vp->v_mount, fhp,
2778 fhsize, &np, LK_NOWAIT);
2779 if (!error)
2780 newvp = NFSTOV(np);
2781 }
2782 if (!error) {
2783 nfs_loadattrcache(&newvp, &fattr, 0, 0);
2784 if (bigenough) {
2785 dp->d_type =
2786 IFTODT(VTTOIF(np->n_vattr->va_type));
2787 ndp->ni_vp = newvp;
2788 nfs_cache_enter(ndp->ni_dvp,
2789 ndp->ni_vp, cnp);
2790 }
2791 }
2792 error = 0;
2793 }
2794 } else {
2795 /* Just skip over the file handle */
2796 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2797 i = fxdr_unsigned(int, *tl);
2798 nfsm_adv(nfsm_rndup(i));
2799 }
2800 if (newvp != NULLVP) {
2801 if (newvp == vp)
2802 vrele(newvp);
2803 else
2804 vput(newvp);
2805 newvp = NULLVP;
2806 }
2807 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2808 more_dirs = fxdr_unsigned(int, *tl);
2809 }
2810 /*
2811 * If at end of rpc data, get the eof boolean
2812 */
2813 if (!more_dirs) {
2814 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2815 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2816
2817 /*
2818 * kludge: see a comment in nfs_readdirrpc.
2819 */
2820
2821 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2822 more_dirs = 0;
2823 }
2824 m_freem(mrep);
2825 }
2826 /*
2827 * Fill last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2828 * by increasing d_reclen for the last record.
2829 */
2830 if (blksiz > 0) {
2831 left = NFS_DIRFRAGSIZ - blksiz;
2832 memset(uiop->uio_iov->iov_base, 0, left);
2833 dp->d_reclen += left;
2834 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2835 UIO_ADVANCE(uiop, left);
2836 }
2837
2838 /*
2839 * We are now either at the end of the directory or have filled the
2840 * block.
2841 */
2842 if (bigenough) {
2843 dnp->n_direofoffset = uiop->uio_offset;
2844 dnp->n_flag |= NEOFVALID;
2845 }
2846 nfsmout:
2847 if (newvp != NULLVP) {
2848 if (newvp == vp)
2849 vrele(newvp);
2850 else
2851 vput(newvp);
2852 }
2853 return (error);
2854 }
2855 #endif
2856
2857 /*
2858 * Silly rename. To make the stateless NFS filesystem look a little more
2859 * like "ufs", a remove of an active vnode is translated into a rename to
2860 * a funny looking filename that is removed by nfs_inactive on the
2861 * nfsnode. There is the potential for another process on a different client
2862 * to create the same funny name between the time the nfs_lookitup() fails
2863 * and the nfs_rename() completes, but...
2864 */
2865 int
2866 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, bool dolink)
2867 {
2868 struct sillyrename *sp;
2869 struct nfsnode *np;
2870 int error;
2871 pid_t pid;
2872
2873 cache_purge(dvp);
2874 np = VTONFS(vp);
2875 #ifdef DIAGNOSTIC
2876 if (vp->v_type == VDIR)
2877 panic("nfs: sillyrename dir");
2878 #endif
2879 sp = kmem_alloc(sizeof(*sp), KM_SLEEP);
2880 sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2881 sp->s_dvp = dvp;
2882 vref(dvp);
2883
2884 /* Fudge together a funny name */
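	/*
	 * The name is ".nfsA<pppp>4.4": s_name[5..8] hold the low 16 bits
	 * of the process id in hex (least significant nibble last), and
	 * s_name[4], initially 'A', is bumped below until a name that is
	 * not already present on the server is found.
	 */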
2885 pid = curlwp->l_proc->p_pid;
2886 memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
2887 sp->s_namlen = 12;
2888 sp->s_name[8] = hexdigits[pid & 0xf];
2889 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
2890 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
2891 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
2892
2893 /* Try lookitups until we get one that isn't there */
2894 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2895 curlwp, (struct nfsnode **)0) == 0) {
2896 sp->s_name[4]++;
2897 if (sp->s_name[4] > 'z') {
2898 error = EINVAL;
2899 goto bad;
2900 }
2901 }
2902 if (dolink) {
2903 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
2904 sp->s_cred, curlwp);
2905 /*
2906 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
2907 */
2908 if (error == ENOTSUP) {
2909 error = nfs_renameit(dvp, cnp, sp);
2910 }
2911 } else {
2912 error = nfs_renameit(dvp, cnp, sp);
2913 }
2914 if (error)
2915 goto bad;
2916 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2917 curlwp, &np);
2918 np->n_sillyrename = sp;
2919 return (0);
2920 bad:
2921 vrele(sp->s_dvp);
2922 kauth_cred_free(sp->s_cred);
2923 kmem_free(sp, sizeof(*sp));
2924 return (error);
2925 }
2926
2927 /*
2928 * Look up a file name and optionally either update the file handle or
2929 * allocate an nfsnode, depending on the value of npp.
2930 * npp == NULL --> just do the lookup
2931 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2932 * handled too
2933 * *npp != NULL --> update the file handle in the vnode
2934 */
2935 int
2936 nfs_lookitup(struct vnode *dvp, const char *name, int len, kauth_cred_t cred, struct lwp *l, struct nfsnode **npp)
2937 {
2938 u_int32_t *tl;
2939 char *cp;
2940 int32_t t1, t2;
2941 struct vnode *newvp = (struct vnode *)0;
2942 struct nfsnode *np, *dnp = VTONFS(dvp);
2943 char *bpos, *dpos, *cp2;
2944 int error = 0, ofhlen, fhlen;
2945 #ifndef NFS_V2_ONLY
2946 int attrflag;
2947 #endif
2948 struct mbuf *mreq, *mrep, *md, *mb;
2949 nfsfh_t *ofhp, *nfhp;
2950 const int v3 = NFS_ISV3(dvp);
2951
2952 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2953 nfsm_reqhead(dnp, NFSPROC_LOOKUP,
2954 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2955 nfsm_fhtom(dnp, v3);
2956 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2957 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred);
2958 if (npp && !error) {
2959 nfsm_getfh(nfhp, fhlen, v3);
2960 if (*npp) {
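			/*
			 * The caller wants the existing nfsnode rekeyed to
			 * the file handle we just looked up.  The handle is
			 * the vnode cache key, so the change has to go
			 * through the vcache rekey protocol.
			 */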
2961 np = *npp;
2962 newvp = NFSTOV(np);
2963 ofhlen = np->n_fhsize;
2964 ofhp = kmem_alloc(ofhlen, KM_SLEEP);
2965 memcpy(ofhp, np->n_fhp, ofhlen);
2966 error = vcache_rekey_enter(newvp->v_mount, newvp,
2967 ofhp, ofhlen, nfhp, fhlen);
2968 if (error) {
2969 kmem_free(ofhp, ofhlen);
2970 m_freem(mrep);
2971 return error;
2972 }
2973 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2974 kmem_free(np->n_fhp, np->n_fhsize);
2975 np->n_fhp = &np->n_fh;
2976 }
2977 #if NFS_SMALLFH < NFSX_V3FHMAX
2978 else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
2979 np->n_fhp = kmem_alloc(fhlen, KM_SLEEP);
2980 #endif
2981 memcpy(np->n_fhp, nfhp, fhlen);
2982 np->n_fhsize = fhlen;
2983 vcache_rekey_exit(newvp->v_mount, newvp,
2984 ofhp, ofhlen, np->n_fhp, fhlen);
2985 kmem_free(ofhp, ofhlen);
2986 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2987 vref(dvp);
2988 newvp = dvp;
2989 np = dnp;
2990 } else {
2991 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2992 if (error) {
2993 m_freem(mrep);
2994 return (error);
2995 }
2996 newvp = NFSTOV(np);
2997 }
2998 #ifndef NFS_V2_ONLY
2999 if (v3) {
3000 nfsm_postop_attr(newvp, attrflag, 0);
3001 if (!attrflag && *npp == NULL) {
3002 m_freem(mrep);
3003 vput(newvp);
3004 return (ENOENT);
3005 }
3006 } else
3007 #endif
3008 nfsm_loadattr(newvp, (struct vattr *)0, 0);
3009 }
3010 nfsm_reqdone;
3011 if (npp && *npp == NULL) {
3012 if (error) {
3013 if (newvp)
3014 vput(newvp);
3015 } else
3016 *npp = np;
3017 }
3018 return (error);
3019 }
3020
3021 #ifndef NFS_V2_ONLY
3022 /*
3023 * Nfs Version 3 commit rpc
3024 */
3025 int
3026 nfs_commit(struct vnode *vp, off_t offset, uint32_t cnt, struct lwp *l)
3027 {
3028 char *cp;
3029 u_int32_t *tl;
3030 int32_t t1, t2;
3031 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
3032 char *bpos, *dpos, *cp2;
3033 int error = 0, wccflag = NFSV3_WCCRATTR;
3034 struct mbuf *mreq, *mrep, *md, *mb;
3035 struct nfsnode *np;
3036
3037 KASSERT(NFS_ISV3(vp));
3038
3039 #ifdef NFS_DEBUG_COMMIT
3040 printf("commit %lu - %lu\n", (unsigned long)offset,
3041 (unsigned long)(offset + cnt));
3042 #endif
3043
3044 mutex_enter(&nmp->nm_lock);
3045 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) {
3046 mutex_exit(&nmp->nm_lock);
3047 return (0);
3048 }
3049 mutex_exit(&nmp->nm_lock);
3050 nfsstats.rpccnt[NFSPROC_COMMIT]++;
3051 np = VTONFS(vp);
3052 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1));
3053 nfsm_fhtom(np, 1);
3054 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3055 txdr_hyper(offset, tl);
3056 tl += 2;
3057 *tl = txdr_unsigned(cnt);
3058 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3059 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
3060 if (!error) {
3061 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
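		/*
		 * Compare the verifier in the COMMIT reply with the one
		 * recorded from WRITE replies.  A mismatch means the server
		 * may have lost our uncommitted data (e.g. it rebooted), so
		 * save the new verifier and return NFSERR_STALEWRITEVERF to
		 * force the dirty pages to be written again.
		 */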
3062 mutex_enter(&nmp->nm_lock);
3063 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3064 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3065 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3066 error = NFSERR_STALEWRITEVERF;
3067 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3068 }
3069 mutex_exit(&nmp->nm_lock);
3070 }
3071 nfsm_reqdone;
3072 return (error);
3073 }
3074 #endif
3075
3076 /*
3077 * Kludge City..
3078 * - make nfs_bmap() essentially a no-op that does no translation
3079 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3080 * (Maybe I could use the process's page mapping, but I was concerned that
3081 * Kernel Write might not be enabled and also figured copyout() would do
3082 * a lot more work than memcpy() and also it currently happens in the
3083 * context of the swapper process (2).)
3084 */
3085 int
3086 nfs_bmap(void *v)
3087 {
3088 struct vop_bmap_args /* {
3089 struct vnode *a_vp;
3090 daddr_t a_bn;
3091 struct vnode **a_vpp;
3092 daddr_t *a_bnp;
3093 int *a_runp;
3094 } */ *ap = v;
3095 struct vnode *vp = ap->a_vp;
3096 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3097
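	/*
	 * There is no real block translation for NFS: report the vnode
	 * itself as the backing object and convert the file system block
	 * number into units of the mount's device block size.
	 */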
3098 if (ap->a_vpp != NULL)
3099 *ap->a_vpp = vp;
3100 if (ap->a_bnp != NULL)
3101 *ap->a_bnp = ap->a_bn << bshift;
3102 if (ap->a_runp != NULL)
3103 *ap->a_runp = 1024 * 1024; /* XXX */
3104 return (0);
3105 }
3106
3107 /*
3108 * Strategy routine.
3109 * For async requests when nfsiod(s) are running, queue the request by
3110 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
3111 * request.
3112 */
3113 int
3114 nfs_strategy(void *v)
3115 {
3116 struct vop_strategy_args *ap = v;
3117 struct buf *bp = ap->a_bp;
3118 int error = 0;
3119
3120 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3121 panic("nfs physio/async");
3122
3123 /*
3124 * If the op is asynchronous and an i/o daemon is waiting,
3125 * queue the request, wake it up and wait for completion;
3126 * otherwise just do it ourselves.
3127 */
3128 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3129 error = nfs_doio(bp);
3130 return (error);
3131 }
3132
3133 /*
3134 * fsync vnode op. Just call nfs_flush() with commit == 1.
3135 */
3136 /* ARGSUSED */
3137 int
3138 nfs_fsync(void *v)
3139 {
3140 struct vop_fsync_args /* {
3141 struct vnodeop_desc *a_desc;
3142 struct vnode * a_vp;
3143 kauth_cred_t a_cred;
3144 int a_flags;
3145 off_t offlo;
3146 off_t offhi;
3147 struct lwp * a_l;
3148 } */ *ap = v;
3149
3150 struct vnode *vp = ap->a_vp;
3151
3152 if (vp->v_type != VREG)
3153 return 0;
3154
3155 return (nfs_flush(vp, ap->a_cred,
3156 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, curlwp, 1));
3157 }
3158
3159 /*
3160 * Flush all the data associated with a vnode.
3161 */
3162 int
3163 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3164 int commit)
3165 {
3166 struct nfsnode *np = VTONFS(vp);
3167 int error;
3168 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3169 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3170
3171 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
3172 error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3173 if (np->n_flag & NWRITEERR) {
3174 error = np->n_error;
3175 np->n_flag &= ~NWRITEERR;
3176 }
3177 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3178 return (error);
3179 }
3180
3181 /*
3182 * Return POSIX pathconf information applicable to nfs.
3183 *
3184 * N.B. The NFS V2 protocol doesn't support this RPC.
3185 */
3186 /* ARGSUSED */
3187 int
3188 nfs_pathconf(void *v)
3189 {
3190 struct vop_pathconf_args /* {
3191 struct vnode *a_vp;
3192 int a_name;
3193 register_t *a_retval;
3194 } */ *ap = v;
3195 struct nfsv3_pathconf *pcp;
3196 struct vnode *vp = ap->a_vp;
3197 struct mbuf *mreq, *mrep, *md, *mb;
3198 int32_t t1, t2;
3199 u_int32_t *tl;
3200 char *bpos, *dpos, *cp, *cp2;
3201 int error = 0, attrflag;
3202 #ifndef NFS_V2_ONLY
3203 struct nfsmount *nmp;
3204 unsigned int l;
3205 u_int64_t maxsize;
3206 #endif
3207 const int v3 = NFS_ISV3(vp);
3208 struct nfsnode *np = VTONFS(vp);
3209
3210 switch (ap->a_name) {
3211 /* Names that can be resolved locally. */
3212 case _PC_PIPE_BUF:
3213 *ap->a_retval = PIPE_BUF;
3214 break;
3215 case _PC_SYNC_IO:
3216 *ap->a_retval = 1;
3217 break;
3218 /* Names that cannot be resolved locally; do an RPC, if possible. */
3219 case _PC_LINK_MAX:
3220 case _PC_NAME_MAX:
3221 case _PC_CHOWN_RESTRICTED:
3222 case _PC_NO_TRUNC:
3223 if (!v3) {
3224 error = EINVAL;
3225 break;
3226 }
3227 nfsstats.rpccnt[NFSPROC_PATHCONF]++;
3228 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1));
3229 nfsm_fhtom(np, 1);
3230 nfsm_request(np, NFSPROC_PATHCONF,
3231 curlwp, curlwp->l_cred); /* XXX */
3232 nfsm_postop_attr(vp, attrflag, 0);
3233 if (!error) {
3234 nfsm_dissect(pcp, struct nfsv3_pathconf *,
3235 NFSX_V3PATHCONF);
3236 switch (ap->a_name) {
3237 case _PC_LINK_MAX:
3238 *ap->a_retval =
3239 fxdr_unsigned(register_t, pcp->pc_linkmax);
3240 break;
3241 case _PC_NAME_MAX:
3242 *ap->a_retval =
3243 fxdr_unsigned(register_t, pcp->pc_namemax);
3244 break;
3245 case _PC_CHOWN_RESTRICTED:
3246 *ap->a_retval =
3247 (pcp->pc_chownrestricted == nfs_true);
3248 break;
3249 case _PC_NO_TRUNC:
3250 *ap->a_retval =
3251 (pcp->pc_notrunc == nfs_true);
3252 break;
3253 }
3254 }
3255 nfsm_reqdone;
3256 break;
3257 case _PC_FILESIZEBITS:
3258 #ifndef NFS_V2_ONLY
3259 if (v3) {
3260 nmp = VFSTONFS(vp->v_mount);
3261 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
3262 if ((error = nfs_fsinfo(nmp, vp,
3263 curlwp->l_cred, curlwp)) != 0) /* XXX */
3264 break;
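			/*
			 * Count the bits needed to represent the maximum
			 * file size the server reports, plus one more since
			 * FILESIZEBITS is specified in terms of a signed
			 * value.
			 */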
3265 for (l = 0, maxsize = nmp->nm_maxfilesize;
3266 (maxsize >> l) > 0; l++)
3267 ;
3268 *ap->a_retval = l + 1;
3269 } else
3270 #endif
3271 {
3272 *ap->a_retval = 32; /* NFS V2 limitation */
3273 }
3274 break;
3275 default:
3276 error = genfs_pathconf(ap);
3277 break;
3278 }
3279
3280 return (error);
3281 }
3282
3283 /*
3284 * NFS advisory byte-level locks.
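 * These are handled entirely on the client by lf_advlock(); nothing is
 * sent to the server, so the locks are not visible to other NFS clients.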
3285 */
3286 int
3287 nfs_advlock(void *v)
3288 {
3289 struct vop_advlock_args /* {
3290 struct vnode *a_vp;
3291 void *a_id;
3292 int a_op;
3293 struct flock *a_fl;
3294 int a_flags;
3295 } */ *ap = v;
3296 struct nfsnode *np = VTONFS(ap->a_vp);
3297
3298 return lf_advlock(ap, &np->n_lockf, np->n_size);
3299 }
3300
3301 /*
3302 * Print out the contents of an nfsnode.
3303 */
3304 int
3305 nfs_print(void *v)
3306 {
3307 struct vop_print_args /* {
3308 struct vnode *a_vp;
3309 } */ *ap = v;
3310 struct vnode *vp = ap->a_vp;
3311 struct nfsnode *np = VTONFS(vp);
3312
3313 printf("tag VT_NFS, fileid %lld fsid 0x%llx",
3314 (unsigned long long)np->n_vattr->va_fileid,
3315 (unsigned long long)np->n_vattr->va_fsid);
3316 if (vp->v_type == VFIFO)
3317 VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v);
3318 printf("\n");
3319 return (0);
3320 }
3321
3322 /*
3323 * nfs unlock wrapper.
3324 */
3325 int
3326 nfs_unlock(void *v)
3327 {
3328 struct vop_unlock_args /* {
3329 struct vnode *a_vp;
3330 int a_flags;
3331 } */ *ap = v;
3332 struct vnode *vp = ap->a_vp;
3333
3334 /*
3335 * VOP_UNLOCK can be called by nfs_loadattrcache
3336 * with v_data == 0.
3337 */
3338 if (VTONFS(vp)) {
3339 nfs_delayedtruncate(vp);
3340 }
3341
3342 return genfs_unlock(v);
3343 }
3344
3345 /*
3346 * nfs special file access vnode op.
3347 * Essentially just get vattr and then imitate iaccess() since the device is
3348 * local to the client.
3349 */
3350 int
3351 nfsspec_access(void *v)
3352 {
3353 struct vop_access_args /* {
3354 struct vnode *a_vp;
3355 accmode_t a_accmode;
3356 kauth_cred_t a_cred;
3357 struct lwp *a_l;
3358 } */ *ap = v;
3359 struct vattr va;
3360 struct vnode *vp = ap->a_vp;
3361 int error;
3362
3363 error = VOP_GETATTR(vp, &va, ap->a_cred);
3364 if (error)
3365 return (error);
3366
3367 /*
3368 * Disallow write attempts on filesystems mounted read-only,
3369 * unless the file is a socket, fifo, or a block or character
3370 * device resident on the filesystem.
3371 */
3372 if ((ap->a_accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3373 switch (vp->v_type) {
3374 case VREG:
3375 case VDIR:
3376 case VLNK:
3377 return (EROFS);
3378 default:
3379 break;
3380 }
3381 }
3382
3383 return kauth_authorize_vnode(ap->a_cred, KAUTH_ACCESS_ACTION(
3384 ap->a_accmode, va.va_type, va.va_mode), vp, NULL, genfs_can_access(
3385 vp, ap->a_cred, va.va_uid, va.va_gid, va.va_mode, NULL,
3386 ap->a_accmode));
3387 }
3388
3389 /*
3390 * Read wrapper for special devices.
3391 */
3392 int
3393 nfsspec_read(void *v)
3394 {
3395 struct vop_read_args /* {
3396 struct vnode *a_vp;
3397 struct uio *a_uio;
3398 int a_ioflag;
3399 kauth_cred_t a_cred;
3400 } */ *ap = v;
3401 struct nfsnode *np = VTONFS(ap->a_vp);
3402
3403 /*
3404 * Set access flag.
3405 */
3406 np->n_flag |= NACC;
3407 getnanotime(&np->n_atim);
3408 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3409 }
3410
3411 /*
3412 * Write wrapper for special devices.
3413 */
3414 int
3415 nfsspec_write(void *v)
3416 {
3417 struct vop_write_args /* {
3418 struct vnode *a_vp;
3419 struct uio *a_uio;
3420 int a_ioflag;
3421 kauth_cred_t a_cred;
3422 } */ *ap = v;
3423 struct nfsnode *np = VTONFS(ap->a_vp);
3424
3425 /*
3426 * Set update flag.
3427 */
3428 np->n_flag |= NUPD;
3429 getnanotime(&np->n_mtim);
3430 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3431 }
3432
3433 /*
3434 * Close wrapper for special devices.
3435 *
3436 * Update the times on the nfsnode then do device close.
3437 */
3438 int
3439 nfsspec_close(void *v)
3440 {
3441 struct vop_close_args /* {
3442 struct vnode *a_vp;
3443 int a_fflag;
3444 kauth_cred_t a_cred;
3445 struct lwp *a_l;
3446 } */ *ap = v;
3447 struct vnode *vp = ap->a_vp;
3448 struct nfsnode *np = VTONFS(vp);
3449 struct vattr vattr;
3450
3451 if (np->n_flag & (NACC | NUPD)) {
3452 np->n_flag |= NCHG;
3453 if (vrefcnt(vp) == 1 &&
3454 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3455 vattr_null(&vattr);
3456 if (np->n_flag & NACC)
3457 vattr.va_atime = np->n_atim;
3458 if (np->n_flag & NUPD)
3459 vattr.va_mtime = np->n_mtim;
3460 (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3461 }
3462 }
3463 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3464 }
3465
3466 /*
3467 * Read wrapper for fifos.
3468 */
3469 int
3470 nfsfifo_read(void *v)
3471 {
3472 struct vop_read_args /* {
3473 struct vnode *a_vp;
3474 struct uio *a_uio;
3475 int a_ioflag;
3476 kauth_cred_t a_cred;
3477 } */ *ap = v;
3478 struct nfsnode *np = VTONFS(ap->a_vp);
3479
3480 /*
3481 * Set access flag.
3482 */
3483 np->n_flag |= NACC;
3484 getnanotime(&np->n_atim);
3485 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3486 }
3487
3488 /*
3489 * Write wrapper for fifos.
3490 */
3491 int
3492 nfsfifo_write(void *v)
3493 {
3494 struct vop_write_args /* {
3495 struct vnode *a_vp;
3496 struct uio *a_uio;
3497 int a_ioflag;
3498 kauth_cred_t a_cred;
3499 } */ *ap = v;
3500 struct nfsnode *np = VTONFS(ap->a_vp);
3501
3502 /*
3503 * Set update flag.
3504 */
3505 np->n_flag |= NUPD;
3506 getnanotime(&np->n_mtim);
3507 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3508 }
3509
3510 /*
3511 * Close wrapper for fifos.
3512 *
3513 * Update the times on the nfsnode then do fifo close.
3514 */
3515 int
3516 nfsfifo_close(void *v)
3517 {
3518 struct vop_close_args /* {
3519 struct vnode *a_vp;
3520 int a_fflag;
3521 kauth_cred_t a_cred;
3522 struct lwp *a_l;
3523 } */ *ap = v;
3524 struct vnode *vp = ap->a_vp;
3525 struct nfsnode *np = VTONFS(vp);
3526 struct vattr vattr;
3527
3528 if (np->n_flag & (NACC | NUPD)) {
3529 struct timespec ts;
3530
3531 getnanotime(&ts);
3532 if (np->n_flag & NACC)
3533 np->n_atim = ts;
3534 if (np->n_flag & NUPD)
3535 np->n_mtim = ts;
3536 np->n_flag |= NCHG;
3537 if (vrefcnt(vp) == 1 &&
3538 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3539 vattr_null(&vattr);
3540 if (np->n_flag & NACC)
3541 vattr.va_atime = np->n_atim;
3542 if (np->n_flag & NUPD)
3543 vattr.va_mtime = np->n_mtim;
3544 (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3545 }
3546 }
3547 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3548 }
3549