nfs_vnops.c revision 1.321

      1 /*	$NetBSD: nfs_vnops.c,v 1.321 2021/10/20 03:08:18 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)nfs_vnops.c 8.19 (Berkeley) 7/31/95
35 */
36
37 /*
38 * vnode op calls for Sun NFS version 2 and 3
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.321 2021/10/20 03:08:18 thorpej Exp $");
43
44 #ifdef _KERNEL_OPT
45 #include "opt_nfs.h"
46 #include "opt_uvmhist.h"
47 #endif
48
49 #include <sys/param.h>
50 #include <sys/proc.h>
51 #include <sys/kernel.h>
52 #include <sys/systm.h>
53 #include <sys/resourcevar.h>
54 #include <sys/mount.h>
55 #include <sys/buf.h>
56 #include <sys/condvar.h>
57 #include <sys/disk.h>
58 #include <sys/malloc.h>
59 #include <sys/kmem.h>
60 #include <sys/mbuf.h>
61 #include <sys/mutex.h>
62 #include <sys/namei.h>
63 #include <sys/vnode.h>
64 #include <sys/dirent.h>
65 #include <sys/fcntl.h>
66 #include <sys/hash.h>
67 #include <sys/lockf.h>
68 #include <sys/stat.h>
69 #include <sys/unistd.h>
70 #include <sys/kauth.h>
71 #include <sys/cprng.h>
72
73 #ifdef UVMHIST
74 #include <uvm/uvm.h>
75 #endif
76 #include <uvm/uvm_extern.h>
77 #include <uvm/uvm_stat.h>
78
79 #include <miscfs/fifofs/fifo.h>
80 #include <miscfs/genfs/genfs.h>
81 #include <miscfs/genfs/genfs_node.h>
82 #include <miscfs/specfs/specdev.h>
83
84 #include <nfs/rpcv2.h>
85 #include <nfs/nfsproto.h>
86 #include <nfs/nfs.h>
87 #include <nfs/nfsnode.h>
88 #include <nfs/nfsmount.h>
89 #include <nfs/xdr_subs.h>
90 #include <nfs/nfsm_subs.h>
91 #include <nfs/nfs_var.h>
92
93 #include <net/if.h>
94 #include <netinet/in.h>
95 #include <netinet/in_var.h>
96
97 /*
98 * Global vfs data structures for nfs
99 */
100 int (**nfsv2_vnodeop_p)(void *);
101 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
102 { &vop_default_desc, vn_default_error },
103 { &vop_parsepath_desc, genfs_parsepath }, /* parsepath */
104 { &vop_lookup_desc, nfs_lookup }, /* lookup */
105 { &vop_create_desc, nfs_create }, /* create */
106 { &vop_mknod_desc, nfs_mknod }, /* mknod */
107 { &vop_open_desc, nfs_open }, /* open */
108 { &vop_close_desc, nfs_close }, /* close */
109 { &vop_access_desc, nfs_access }, /* access */
110 { &vop_accessx_desc, genfs_accessx }, /* accessx */
111 { &vop_getattr_desc, nfs_getattr }, /* getattr */
112 { &vop_setattr_desc, nfs_setattr }, /* setattr */
113 { &vop_read_desc, nfs_read }, /* read */
114 { &vop_write_desc, nfs_write }, /* write */
115 { &vop_fallocate_desc, genfs_eopnotsupp }, /* fallocate */
116 { &vop_fdiscard_desc, genfs_eopnotsupp }, /* fdiscard */
117 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
118 { &vop_ioctl_desc, genfs_enoioctl }, /* ioctl */
119 { &vop_poll_desc, genfs_poll }, /* poll */
120 { &vop_kqfilter_desc, nfs_kqfilter }, /* kqfilter */
121 { &vop_revoke_desc, genfs_revoke }, /* revoke */
122 { &vop_mmap_desc, genfs_mmap }, /* mmap */
123 { &vop_fsync_desc, nfs_fsync }, /* fsync */
124 { &vop_seek_desc, genfs_seek }, /* seek */
125 { &vop_remove_desc, nfs_remove }, /* remove */
126 { &vop_link_desc, nfs_link }, /* link */
127 { &vop_rename_desc, nfs_rename }, /* rename */
128 { &vop_mkdir_desc, nfs_mkdir }, /* mkdir */
129 { &vop_rmdir_desc, nfs_rmdir }, /* rmdir */
130 { &vop_symlink_desc, nfs_symlink }, /* symlink */
131 { &vop_readdir_desc, nfs_readdir }, /* readdir */
132 { &vop_readlink_desc, nfs_readlink }, /* readlink */
133 { &vop_abortop_desc, genfs_abortop }, /* abortop */
134 { &vop_inactive_desc, nfs_inactive }, /* inactive */
135 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
136 { &vop_lock_desc, genfs_lock }, /* lock */
137 { &vop_unlock_desc, nfs_unlock }, /* unlock */
138 { &vop_bmap_desc, nfs_bmap }, /* bmap */
139 { &vop_strategy_desc, nfs_strategy }, /* strategy */
140 { &vop_print_desc, nfs_print }, /* print */
141 { &vop_islocked_desc, genfs_islocked }, /* islocked */
142 { &vop_pathconf_desc, nfs_pathconf }, /* pathconf */
143 { &vop_advlock_desc, nfs_advlock }, /* advlock */
144 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
145 { &vop_getpages_desc, nfs_getpages }, /* getpages */
146 { &vop_putpages_desc, genfs_putpages }, /* putpages */
147 { NULL, NULL }
148 };
149 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
150 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
151
152 /*
153 * Special device vnode ops
154 */
155 int (**spec_nfsv2nodeop_p)(void *);
156 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
157 { &vop_default_desc, vn_default_error },
158 GENFS_SPECOP_ENTRIES,
159 { &vop_close_desc, nfsspec_close }, /* close */
160 { &vop_access_desc, nfsspec_access }, /* access */
161 { &vop_accessx_desc, genfs_accessx }, /* accessx */
162 { &vop_getattr_desc, nfs_getattr }, /* getattr */
163 { &vop_setattr_desc, nfs_setattr }, /* setattr */
164 { &vop_read_desc, nfsspec_read }, /* read */
165 { &vop_write_desc, nfsspec_write }, /* write */
166 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
167 { &vop_fsync_desc, spec_fsync }, /* fsync */
168 { &vop_inactive_desc, nfs_inactive }, /* inactive */
169 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
170 { &vop_lock_desc, genfs_lock }, /* lock */
171 { &vop_unlock_desc, nfs_unlock }, /* unlock */
172 { &vop_print_desc, nfs_print }, /* print */
173 { &vop_islocked_desc, genfs_islocked }, /* islocked */
174 { &vop_bwrite_desc, vn_bwrite }, /* bwrite */
175 { NULL, NULL }
176 };
177 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
178 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
179
180 int (**fifo_nfsv2nodeop_p)(void *);
181 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
182 { &vop_default_desc, vn_default_error },
183 GENFS_FIFOOP_ENTRIES,
184 { &vop_close_desc, nfsfifo_close }, /* close */
185 { &vop_access_desc, nfsspec_access }, /* access */
186 { &vop_accessx_desc, genfs_accessx }, /* accessx */
187 { &vop_getattr_desc, nfs_getattr }, /* getattr */
188 { &vop_setattr_desc, nfs_setattr }, /* setattr */
189 { &vop_read_desc, nfsfifo_read }, /* read */
190 { &vop_write_desc, nfsfifo_write }, /* write */
191 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
192 { &vop_fsync_desc, nfs_fsync }, /* fsync */
193 { &vop_inactive_desc, nfs_inactive }, /* inactive */
194 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
195 { &vop_lock_desc, genfs_lock }, /* lock */
196 { &vop_unlock_desc, nfs_unlock }, /* unlock */
197 { &vop_strategy_desc, vn_fifo_bypass }, /* strategy */
198 { &vop_print_desc, nfs_print }, /* print */
199 { &vop_islocked_desc, genfs_islocked }, /* islocked */
200 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
201 { NULL, NULL }
202 };
203 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
204 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
205
206 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
207 size_t, kauth_cred_t, struct lwp *);
208 static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);
209
210 /*
211 * Global variables
212 */
213 extern u_int32_t nfs_true, nfs_false;
214 extern u_int32_t nfs_xdrneg1;
215 extern const nfstype nfsv3_type[9];
216
217 int nfs_numasync = 0;
218 #define DIRHDSIZ _DIRENT_NAMEOFF(dp)
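/*
 * Advance (or, for a negative siz, back up) a uio within its current
 * iovec: adjust uio_resid and the iovec base/length by siz bytes.
 * No iovec boundary is crossed, so the caller must stay within the
 * current iovec.
 */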
219 #define UIO_ADVANCE(uio, siz) \
220 (void)((uio)->uio_resid -= (siz), \
221 (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
222 (uio)->uio_iov->iov_len -= (siz))
223
224 static void nfs_cache_enter(struct vnode *, struct vnode *,
225 struct componentname *);
226
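/*
 * Enter a (directory, name) -> vnode translation into the namecache,
 * recording the ctime of the result and the mtime of the directory so
 * that nfs_lookup() can later decide whether the cached entry is still
 * trustworthy.
 */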
227 static void
228 nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
229 struct componentname *cnp)
230 {
231 struct nfsnode *dnp = VTONFS(dvp);
232
233 if ((cnp->cn_flags & MAKEENTRY) == 0) {
234 return;
235 }
236 if (vp != NULL) {
237 struct nfsnode *np = VTONFS(vp);
238
239 np->n_ctime = np->n_vattr->va_ctime.tv_sec;
240 }
241
242 if (!timespecisset(&dnp->n_nctime))
243 dnp->n_nctime = dnp->n_vattr->va_mtime;
244
245 cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_flags);
246 }
247
248 /*
249 * nfs null call from vfs.
250 */
251 int
252 nfs_null(struct vnode *vp, kauth_cred_t cred, struct lwp *l)
253 {
254 char *bpos, *dpos;
255 int error = 0;
256 struct mbuf *mreq, *mrep, *md, *mb __unused;
257 struct nfsnode *np = VTONFS(vp);
258
259 nfsm_reqhead(np, NFSPROC_NULL, 0);
260 nfsm_request(np, NFSPROC_NULL, l, cred);
261 nfsm_reqdone;
262 return (error);
263 }
264
265 /*
266 * nfs access vnode op.
267 * For nfs version 2, just return ok. File accesses may fail later.
268 * For nfs version 3, use the access rpc to check accessibility. If file modes
269 * are changed on the server, accesses might still fail later.
270 */
271 int
272 nfs_access(void *v)
273 {
274 struct vop_access_args /* {
275 struct vnode *a_vp;
276 accmode_t a_accmode;
277 kauth_cred_t a_cred;
278 } */ *ap = v;
279 struct vnode *vp = ap->a_vp;
280 #ifndef NFS_V2_ONLY
281 u_int32_t *tl;
282 char *cp;
283 int32_t t1, t2;
284 char *bpos, *dpos, *cp2;
285 int error = 0, attrflag;
286 struct mbuf *mreq, *mrep, *md, *mb;
287 u_int32_t mode, rmode;
288 const int v3 = NFS_ISV3(vp);
289 #endif
290 int cachevalid;
291 struct nfsnode *np = VTONFS(vp);
292 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
293
294 cachevalid = (np->n_accstamp != -1 &&
295 (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) &&
296 np->n_accuid == kauth_cred_geteuid(ap->a_cred));
297
298 /*
299 * Check access cache first. If this request has been made for this
300 * uid shortly before, use the cached result.
301 */
302 if (cachevalid) {
303 if (!np->n_accerror) {
304 if ((np->n_accmode & ap->a_accmode) == ap->a_accmode)
305 return np->n_accerror;
306 } else if ((np->n_accmode & ap->a_accmode) == np->n_accmode)
307 return np->n_accerror;
308 }
309
310 #ifndef NFS_V2_ONLY
311 /*
312 * For nfs v3, do an access rpc, otherwise you are stuck emulating
313 * ufs_access() locally using the vattr. This may not be correct,
314 * since the server may apply other access criteria such as
315 * client uid-->server uid mapping that we do not know about, but
316 * this is better than just returning anything that is lying about
317 * in the cache.
318 */
319 if (v3) {
320 nfsstats.rpccnt[NFSPROC_ACCESS]++;
321 nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
322 nfsm_fhtom(np, v3);
323 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
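		/*
		 * Translate VREAD/VWRITE/VEXEC into NFSv3 ACCESS bits; for
		 * directories, write access implies DELETE and execute means
		 * LOOKUP rather than EXECUTE.
		 */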
324 if (ap->a_accmode & VREAD)
325 mode = NFSV3ACCESS_READ;
326 else
327 mode = 0;
328 if (vp->v_type != VDIR) {
329 if (ap->a_accmode & VWRITE)
330 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
331 if (ap->a_accmode & VEXEC)
332 mode |= NFSV3ACCESS_EXECUTE;
333 } else {
334 if (ap->a_accmode & VWRITE)
335 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
336 NFSV3ACCESS_DELETE);
337 if (ap->a_accmode & VEXEC)
338 mode |= NFSV3ACCESS_LOOKUP;
339 }
340 *tl = txdr_unsigned(mode);
341 nfsm_request(np, NFSPROC_ACCESS, curlwp, ap->a_cred);
342 nfsm_postop_attr(vp, attrflag, 0);
343 if (!error) {
344 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
345 rmode = fxdr_unsigned(u_int32_t, *tl);
346 /*
347 * The NFS V3 spec does not clarify whether or not
348 * the returned access bits can be a superset of
349 * the ones requested, so...
350 */
351 if ((rmode & mode) != mode)
352 error = EACCES;
353 }
354 nfsm_reqdone;
355 } else
356 #endif
357 return (nfsspec_access(ap));
358 #ifndef NFS_V2_ONLY
359 /*
360 * Disallow write attempts on filesystems mounted read-only;
361 * unless the file is a socket, fifo, or a block or character
362 * device resident on the filesystem.
363 */
364 if (!error && (ap->a_accmode & VWRITE) &&
365 (vp->v_mount->mnt_flag & MNT_RDONLY)) {
366 switch (vp->v_type) {
367 case VREG:
368 case VDIR:
369 case VLNK:
370 error = EROFS;
371 default:
372 break;
373 }
374 }
375
376 if (!error || error == EACCES) {
377 /*
378 * If we got the same result as for a previous,
379 * different request, OR it in. Don't update
380 * the timestamp in that case.
381 */
382 if (cachevalid && np->n_accstamp != -1 &&
383 error == np->n_accerror) {
384 if (!error)
385 np->n_accmode |= ap->a_accmode;
386 else if ((np->n_accmode & ap->a_accmode) == ap->a_accmode)
387 np->n_accmode = ap->a_accmode;
388 } else {
389 np->n_accstamp = time_uptime;
390 np->n_accuid = kauth_cred_geteuid(ap->a_cred);
391 np->n_accmode = ap->a_accmode;
392 np->n_accerror = error;
393 }
394 }
395
396 return (error);
397 #endif
398 }
399
400 /*
401 * nfs open vnode op
402 * Check to see if the type is ok
403 * and that deletion is not in progress.
404 * For paged in text files, you will need to flush the page cache
405 * if consistency is lost.
406 */
407 /* ARGSUSED */
408 int
409 nfs_open(void *v)
410 {
411 struct vop_open_args /* {
412 struct vnode *a_vp;
413 int a_mode;
414 kauth_cred_t a_cred;
415 } */ *ap = v;
416 struct vnode *vp = ap->a_vp;
417 struct nfsnode *np = VTONFS(vp);
418 int error;
419
420 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
421 return (EACCES);
422 }
423
424 if (ap->a_mode & FREAD) {
425 if (np->n_rcred != NULL)
426 kauth_cred_free(np->n_rcred);
427 np->n_rcred = ap->a_cred;
428 kauth_cred_hold(np->n_rcred);
429 }
430 if (ap->a_mode & FWRITE) {
431 if (np->n_wcred != NULL)
432 kauth_cred_free(np->n_wcred);
433 np->n_wcred = ap->a_cred;
434 kauth_cred_hold(np->n_wcred);
435 }
436
437 error = nfs_flushstalebuf(vp, ap->a_cred, curlwp, 0);
438 if (error)
439 return error;
440
441 NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */
442
443 return (0);
444 }
445
446 /*
447 * nfs close vnode op
448 * What an NFS client should do upon close after writing is a debatable issue.
449 * Most NFS clients push delayed writes to the server upon close, basically for
450 * two reasons:
451 * 1 - So that any write errors may be reported back to the client process
452 * doing the close system call. By far the two most likely errors are
453 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
454 * 2 - To put a worst case upper bound on cache inconsistency between
455 * multiple clients for the file.
456 * There is also a consistency problem for Version 2 of the protocol w.r.t.
457 * not being able to tell if other clients are writing a file concurrently,
458 * since there is no way of knowing if the changed modify time in the reply
459 * is only due to the write for this client.
460 * (NFS Version 3 provides weak cache consistency data in the reply that
461 * should be sufficient to detect and handle this case.)
462 *
463 * The current code does the following:
464 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
465 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
466 * or commit them (this satisfies 1 and 2 except for the
467 * case where the server crashes after this close but
468 * before the commit RPC, which is felt to be "good
469 * enough". Changing the last argument to nfs_flush() to
470 * a 1 would force a commit operation, if it is felt a
  471  *		       commit is necessary now.)
472 */
473 /* ARGSUSED */
474 int
475 nfs_close(void *v)
476 {
477 struct vop_close_args /* {
478 struct vnodeop_desc *a_desc;
479 struct vnode *a_vp;
480 int a_fflag;
481 kauth_cred_t a_cred;
482 } */ *ap = v;
483 struct vnode *vp = ap->a_vp;
484 struct nfsnode *np = VTONFS(vp);
485 int error = 0;
486 UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);
487
488 if (vp->v_type == VREG) {
489 if (np->n_flag & NMODIFIED) {
490 #ifndef NFS_V2_ONLY
491 if (NFS_ISV3(vp)) {
492 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, curlwp, 0);
493 np->n_flag &= ~NMODIFIED;
494 } else
495 #endif
496 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 1);
497 NFS_INVALIDATE_ATTRCACHE(np);
498 }
499 if (np->n_flag & NWRITEERR) {
500 np->n_flag &= ~NWRITEERR;
501 error = np->n_error;
502 }
503 }
504 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
505 return (error);
506 }
507
508 /*
509 * nfs getattr call from vfs.
510 */
511 int
512 nfs_getattr(void *v)
513 {
514 struct vop_getattr_args /* {
515 struct vnode *a_vp;
516 struct vattr *a_vap;
517 kauth_cred_t a_cred;
518 } */ *ap = v;
519 struct vnode *vp = ap->a_vp;
520 struct nfsnode *np = VTONFS(vp);
521 char *cp;
522 u_int32_t *tl;
523 int32_t t1, t2;
524 char *bpos, *dpos;
525 int error = 0;
526 struct mbuf *mreq, *mrep, *md, *mb;
527 const int v3 = NFS_ISV3(vp);
528
529 /*
530 * Update local times for special files.
531 */
532 if (np->n_flag & (NACC | NUPD))
533 np->n_flag |= NCHG;
534
535 /*
536 * if we have delayed truncation, do it now.
537 */
538 nfs_delayedtruncate(vp);
539
540 /*
541 * First look in the cache.
542 */
543 if (nfs_getattrcache(vp, ap->a_vap) == 0)
544 return (0);
545 nfsstats.rpccnt[NFSPROC_GETATTR]++;
546 nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
547 nfsm_fhtom(np, v3);
548 nfsm_request(np, NFSPROC_GETATTR, curlwp, ap->a_cred);
549 if (!error) {
550 nfsm_loadattr(vp, ap->a_vap, 0);
551 if (vp->v_type == VDIR &&
552 ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
553 ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
554 }
555 nfsm_reqdone;
556 return (error);
557 }
558
559 /*
560 * nfs setattr call.
561 */
562 int
563 nfs_setattr(void *v)
564 {
565 struct vop_setattr_args /* {
566 struct vnodeop_desc *a_desc;
567 struct vnode *a_vp;
568 struct vattr *a_vap;
569 kauth_cred_t a_cred;
570 } */ *ap = v;
571 struct vnode *vp = ap->a_vp;
572 struct nfsnode *np = VTONFS(vp);
573 struct vattr *vap = ap->a_vap;
574 int error = 0;
575 u_quad_t tsize = 0;
576
577 /*
578 * Setting of flags is not supported.
579 */
580 if (vap->va_flags != VNOVAL)
581 return (EOPNOTSUPP);
582
583 /*
584 * Disallow write attempts if the filesystem is mounted read-only.
585 */
586 if ((vap->va_uid != (uid_t)VNOVAL ||
587 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
588 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
589 (vp->v_mount->mnt_flag & MNT_RDONLY))
590 return (EROFS);
591 if (vap->va_size != VNOVAL) {
592 if (vap->va_size > VFSTONFS(vp->v_mount)->nm_maxfilesize) {
593 return EFBIG;
594 }
595 switch (vp->v_type) {
596 case VDIR:
597 return (EISDIR);
598 case VCHR:
599 case VBLK:
600 case VSOCK:
601 case VFIFO:
602 if (vap->va_mtime.tv_sec == VNOVAL &&
603 vap->va_atime.tv_sec == VNOVAL &&
604 vap->va_mode == (mode_t)VNOVAL &&
605 vap->va_uid == (uid_t)VNOVAL &&
606 vap->va_gid == (gid_t)VNOVAL)
607 return (0);
608 vap->va_size = VNOVAL;
609 break;
610 default:
611 /*
612 * Disallow write attempts if the filesystem is
613 * mounted read-only.
614 */
615 if (vp->v_mount->mnt_flag & MNT_RDONLY)
616 return (EROFS);
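			/*
			 * Resize the page cache view of the file right away,
			 * remembering the old size so the change can be
			 * rolled back if flushing the buffers fails.
			 */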
617 genfs_node_wrlock(vp);
618 uvm_vnp_setsize(vp, vap->va_size);
619 tsize = np->n_size;
620 np->n_size = vap->va_size;
621 if (vap->va_size == 0)
622 error = nfs_vinvalbuf(vp, 0,
623 ap->a_cred, curlwp, 1);
624 else
625 error = nfs_vinvalbuf(vp, V_SAVE,
626 ap->a_cred, curlwp, 1);
627 if (error) {
628 uvm_vnp_setsize(vp, tsize);
629 genfs_node_unlock(vp);
630 return (error);
631 }
632 np->n_vattr->va_size = vap->va_size;
633 }
634 } else {
635 /*
636 * flush files before setattr because a later write of
637 * cached data might change timestamps or reset sugid bits
638 */
639 if ((vap->va_mtime.tv_sec != VNOVAL ||
640 vap->va_atime.tv_sec != VNOVAL ||
641 vap->va_mode != VNOVAL) &&
642 vp->v_type == VREG &&
643 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
644 curlwp, 1)) == EINTR)
645 return (error);
646 }
647 error = nfs_setattrrpc(vp, vap, ap->a_cred, curlwp);
648 if (vap->va_size != VNOVAL) {
649 if (error) {
650 np->n_size = np->n_vattr->va_size = tsize;
651 uvm_vnp_setsize(vp, np->n_size);
652 }
653 genfs_node_unlock(vp);
654 }
655 return (error);
656 }
657
658 /*
659 * Do an nfs setattr rpc.
660 */
661 int
662 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, struct lwp *l)
663 {
664 struct nfsv2_sattr *sp;
665 char *cp;
666 int32_t t1, t2;
667 char *bpos, *dpos;
668 u_int32_t *tl;
669 int error = 0;
670 struct mbuf *mreq, *mrep, *md, *mb;
671 const int v3 = NFS_ISV3(vp);
672 struct nfsnode *np = VTONFS(vp);
673 #ifndef NFS_V2_ONLY
674 int wccflag = NFSV3_WCCRATTR;
675 char *cp2;
676 #endif
677
678 nfsstats.rpccnt[NFSPROC_SETATTR]++;
679 nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
680 nfsm_fhtom(np, v3);
681 #ifndef NFS_V2_ONLY
682 if (v3) {
683 nfsm_v3attrbuild(vap, true);
684 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
685 *tl = nfs_false;
686 } else {
687 #endif
688 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
689 if (vap->va_mode == (mode_t)VNOVAL)
690 sp->sa_mode = nfs_xdrneg1;
691 else
692 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
693 if (vap->va_uid == (uid_t)VNOVAL)
694 sp->sa_uid = nfs_xdrneg1;
695 else
696 sp->sa_uid = txdr_unsigned(vap->va_uid);
697 if (vap->va_gid == (gid_t)VNOVAL)
698 sp->sa_gid = nfs_xdrneg1;
699 else
700 sp->sa_gid = txdr_unsigned(vap->va_gid);
701 sp->sa_size = txdr_unsigned(vap->va_size);
702 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
703 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
704 #ifndef NFS_V2_ONLY
705 }
706 #endif
707 nfsm_request(np, NFSPROC_SETATTR, l, cred);
708 #ifndef NFS_V2_ONLY
709 if (v3) {
710 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
711 } else
712 #endif
713 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
714 nfsm_reqdone;
715 return (error);
716 }
717
718 /*
719 * nfs lookup call, one step at a time...
720 * First look in cache
721 * If not found, do the rpc.
722 */
723 int
724 nfs_lookup(void *v)
725 {
726 struct vop_lookup_v2_args /* {
727 struct vnodeop_desc *a_desc;
728 struct vnode *a_dvp;
729 struct vnode **a_vpp;
730 struct componentname *a_cnp;
731 } */ *ap = v;
732 struct componentname *cnp = ap->a_cnp;
733 struct vnode *dvp = ap->a_dvp;
734 struct vnode **vpp = ap->a_vpp;
735 int flags;
736 struct vnode *newvp;
737 u_int32_t *tl;
738 char *cp;
739 int32_t t1, t2;
740 char *bpos, *dpos, *cp2;
741 struct mbuf *mreq, *mrep, *md, *mb;
742 long len;
743 nfsfh_t *fhp;
744 struct nfsnode *np;
745 int cachefound;
746 int error = 0, attrflag, fhsize;
747 const int v3 = NFS_ISV3(dvp);
748
749 flags = cnp->cn_flags;
750
751 *vpp = NULLVP;
752 newvp = NULLVP;
753 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
754 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
755 return (EROFS);
756 if (dvp->v_type != VDIR)
757 return (ENOTDIR);
758
759 /*
760 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
761 */
762 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
763 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
764 if (error)
765 return error;
766 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
767 return EISDIR;
768 vref(dvp);
769 *vpp = dvp;
770 return 0;
771 }
772
773 np = VTONFS(dvp);
774
775 /*
776 * Before performing an RPC, check the name cache to see if
777 * the directory/name pair we are looking for is known already.
778 * If the directory/name pair is found in the name cache,
779 * we have to ensure the directory has not changed from
780 * the time the cache entry has been created. If it has,
781 * the cache entry has to be ignored.
782 */
783 cachefound = cache_lookup_raw(dvp, cnp->cn_nameptr, cnp->cn_namelen,
784 cnp->cn_flags, NULL, vpp);
785 KASSERT(dvp != *vpp);
786 KASSERT((cnp->cn_flags & ISWHITEOUT) == 0);
787 if (cachefound) {
788 struct vattr vattr;
789
790 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
791 if (error != 0) {
792 if (*vpp != NULLVP)
793 vrele(*vpp);
794 *vpp = NULLVP;
795 return error;
796 }
797
798 if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred)
799 || timespeccmp(&vattr.va_mtime,
800 &VTONFS(dvp)->n_nctime, !=)) {
801 if (*vpp != NULLVP) {
802 vrele(*vpp);
803 *vpp = NULLVP;
804 }
805 cache_purge1(dvp, NULL, 0, PURGE_CHILDREN);
806 timespecclear(&np->n_nctime);
807 goto dorpc;
808 }
809
810 if (*vpp == NULLVP) {
811 /* namecache gave us a negative result */
812 error = ENOENT;
813 goto noentry;
814 }
815
816 /*
817 * investigate the vnode returned by cache_lookup_raw.
818 * if it isn't appropriate, do an rpc.
819 */
820 newvp = *vpp;
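		/*
		 * For "..", dvp must be unlocked before locking the parent
		 * vnode so that the normal ancestor-before-descendant lock
		 * order is preserved and deadlock is avoided.
		 */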
821 if ((flags & ISDOTDOT) != 0) {
822 VOP_UNLOCK(dvp);
823 }
824 error = vn_lock(newvp, LK_SHARED);
825 if ((flags & ISDOTDOT) != 0) {
826 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
827 }
828 if (error != 0) {
829 /* newvp has been reclaimed. */
830 vrele(newvp);
831 *vpp = NULLVP;
832 goto dorpc;
833 }
834 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred)
835 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
836 nfsstats.lookupcache_hits++;
837 KASSERT(newvp->v_type != VNON);
838 VOP_UNLOCK(newvp);
839 return (0);
840 }
841 cache_purge1(newvp, NULL, 0, PURGE_PARENTS);
842 vput(newvp);
843 *vpp = NULLVP;
844 }
845 dorpc:
846 #if 0
847 /*
848 * because nfsv3 has the same CREATE semantics as ours,
849 * we don't have to perform LOOKUPs beforehand.
850 *
851 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
852 * XXX although we have no way to know if O_EXCL is requested or not.
853 */
854
855 if (v3 && cnp->cn_nameiop == CREATE &&
856 (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
857 (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
858 return (EJUSTRETURN);
859 }
860 #endif /* 0 */
861
862 error = 0;
863 newvp = NULLVP;
864 nfsstats.lookupcache_misses++;
865 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
866 len = cnp->cn_namelen;
867 nfsm_reqhead(np, NFSPROC_LOOKUP,
868 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
869 nfsm_fhtom(np, v3);
870 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
871 nfsm_request(np, NFSPROC_LOOKUP, curlwp, cnp->cn_cred);
872 if (error) {
873 nfsm_postop_attr(dvp, attrflag, 0);
874 m_freem(mrep);
875 goto nfsmout;
876 }
877 nfsm_getfh(fhp, fhsize, v3);
878
879 /*
880 * Handle RENAME case...
881 */
882 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
883 if (NFS_CMPFH(np, fhp, fhsize)) {
884 m_freem(mrep);
885 return (EISDIR);
886 }
887 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
888 if (error) {
889 m_freem(mrep);
890 return error;
891 }
892 newvp = NFSTOV(np);
893 #ifndef NFS_V2_ONLY
894 if (v3) {
895 nfsm_postop_attr(newvp, attrflag, 0);
896 nfsm_postop_attr(dvp, attrflag, 0);
897 } else
898 #endif
899 nfsm_loadattr(newvp, (struct vattr *)0, 0);
900 *vpp = newvp;
901 m_freem(mrep);
902 goto validate;
903 }
904
905 /*
906 * The postop attr handling is duplicated for each if case,
907 * because it should be done while dvp is locked (unlocking
908 * dvp is different for each case).
909 */
910
911 if (NFS_CMPFH(np, fhp, fhsize)) {
912 /*
913 * As we handle "." lookup locally, this is
914 * a broken server.
915 */
916 m_freem(mrep);
917 return EBADRPC;
918 } else if (flags & ISDOTDOT) {
919 /*
920 * ".." lookup
921 */
922 VOP_UNLOCK(dvp);
923 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
924 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
925 if (error) {
926 m_freem(mrep);
927 return error;
928 }
929 newvp = NFSTOV(np);
930
931 #ifndef NFS_V2_ONLY
932 if (v3) {
933 nfsm_postop_attr(newvp, attrflag, 0);
934 nfsm_postop_attr(dvp, attrflag, 0);
935 } else
936 #endif
937 nfsm_loadattr(newvp, (struct vattr *)0, 0);
938 } else {
939 /*
940 * Other lookups.
941 */
942 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
943 if (error) {
944 m_freem(mrep);
945 return error;
946 }
947 newvp = NFSTOV(np);
948 #ifndef NFS_V2_ONLY
949 if (v3) {
950 nfsm_postop_attr(newvp, attrflag, 0);
951 nfsm_postop_attr(dvp, attrflag, 0);
952 } else
953 #endif
954 nfsm_loadattr(newvp, (struct vattr *)0, 0);
955 }
956 if (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) {
957 nfs_cache_enter(dvp, newvp, cnp);
958 }
959 *vpp = newvp;
960 nfsm_reqdone;
961 if (error) {
962 /*
963 * We get here only because of errors returned by
964 * the RPC. Otherwise we'll have returned above
965 * (the nfsm_* macros will jump to nfsm_reqdone
966 * on error).
967 */
968 if (error == ENOENT && cnp->cn_nameiop != CREATE) {
969 nfs_cache_enter(dvp, NULL, cnp);
970 }
971 if (newvp != NULLVP) {
972 if (newvp == dvp) {
973 vrele(newvp);
974 } else {
975 vput(newvp);
976 }
977 }
978 noentry:
979 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
980 (flags & ISLASTCN) && error == ENOENT) {
981 if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
982 error = EROFS;
983 } else {
984 error = EJUSTRETURN;
985 }
986 }
987 *vpp = NULL;
988 return error;
989 }
990
991 validate:
992 /*
993 * make sure we have valid type and size.
994 */
995
996 newvp = *vpp;
997 if (newvp->v_type == VNON) {
998 struct vattr vattr; /* dummy */
999
1000 KASSERT(VTONFS(newvp)->n_attrstamp == 0);
1001 error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred);
1002 if (error) {
1003 vput(newvp);
1004 *vpp = NULL;
1005 }
1006 }
1007 if (error)
1008 return error;
1009 if (newvp != dvp)
1010 VOP_UNLOCK(newvp);
1011 return 0;
1012 }
1013
1014 /*
1015 * nfs read call.
1016 * Just call nfs_bioread() to do the work.
1017 */
1018 int
1019 nfs_read(void *v)
1020 {
1021 struct vop_read_args /* {
1022 struct vnode *a_vp;
1023 struct uio *a_uio;
1024 int a_ioflag;
1025 kauth_cred_t a_cred;
1026 } */ *ap = v;
1027 struct vnode *vp = ap->a_vp;
1028
1029 if (vp->v_type != VREG)
1030 return EISDIR;
1031 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
1032 }
1033
1034 /*
1035 * nfs readlink call
1036 */
1037 int
1038 nfs_readlink(void *v)
1039 {
1040 struct vop_readlink_args /* {
1041 struct vnode *a_vp;
1042 struct uio *a_uio;
1043 kauth_cred_t a_cred;
1044 } */ *ap = v;
1045 struct vnode *vp = ap->a_vp;
1046 struct nfsnode *np = VTONFS(vp);
1047
1048 if (vp->v_type != VLNK)
1049 return (EPERM);
1050
1051 if (np->n_rcred != NULL) {
1052 kauth_cred_free(np->n_rcred);
1053 }
1054 np->n_rcred = ap->a_cred;
1055 kauth_cred_hold(np->n_rcred);
1056
1057 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
1058 }
1059
1060 /*
1061 * Do a readlink rpc.
1062 * Called by nfs_doio() from below the buffer cache.
1063 */
1064 int
1065 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
1066 {
1067 u_int32_t *tl;
1068 char *cp;
1069 int32_t t1, t2;
1070 char *bpos, *dpos, *cp2;
1071 int error = 0;
1072 uint32_t len;
1073 struct mbuf *mreq, *mrep, *md, *mb;
1074 const int v3 = NFS_ISV3(vp);
1075 struct nfsnode *np = VTONFS(vp);
1076 #ifndef NFS_V2_ONLY
1077 int attrflag;
1078 #endif
1079
1080 nfsstats.rpccnt[NFSPROC_READLINK]++;
1081 nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
1082 nfsm_fhtom(np, v3);
1083 nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
1084 #ifndef NFS_V2_ONLY
1085 if (v3)
1086 nfsm_postop_attr(vp, attrflag, 0);
1087 #endif
1088 if (!error) {
1089 #ifndef NFS_V2_ONLY
1090 if (v3) {
1091 nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
1092 len = fxdr_unsigned(uint32_t, *tl);
1093 if (len > NFS_MAXPATHLEN) {
1094 /*
1095 * this pathname is too long for us.
1096 */
1097 m_freem(mrep);
1098 /* Solaris returns EINVAL. should we follow? */
1099 error = ENAMETOOLONG;
1100 goto nfsmout;
1101 }
1102 } else
1103 #endif
1104 {
1105 nfsm_strsiz(len, NFS_MAXPATHLEN);
1106 }
1107 nfsm_mtouio(uiop, len);
1108 }
1109 nfsm_reqdone;
1110 return (error);
1111 }
1112
1113 /*
1114 * nfs read rpc call
1115 * Ditto above
1116 */
1117 int
1118 nfs_readrpc(struct vnode *vp, struct uio *uiop)
1119 {
1120 u_int32_t *tl;
1121 char *cp;
1122 int32_t t1, t2;
1123 char *bpos, *dpos, *cp2;
1124 struct mbuf *mreq, *mrep, *md, *mb;
1125 struct nfsmount *nmp;
1126 int error = 0, len, retlen, tsiz, eof __unused, byte_count;
1127 const int v3 = NFS_ISV3(vp);
1128 struct nfsnode *np = VTONFS(vp);
1129 #ifndef NFS_V2_ONLY
1130 int attrflag;
1131 #endif
1132
1133 #ifndef nolint
1134 eof = 0;
1135 #endif
1136 nmp = VFSTONFS(vp->v_mount);
1137 tsiz = uiop->uio_resid;
1138 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1139 return (EFBIG);
1140 iostat_busy(nmp->nm_stats);
1141 byte_count = 0; /* count bytes actually transferred */
1142 while (tsiz > 0) {
1143 nfsstats.rpccnt[NFSPROC_READ]++;
1144 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1145 nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1146 nfsm_fhtom(np, v3);
1147 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1148 #ifndef NFS_V2_ONLY
1149 if (v3) {
1150 txdr_hyper(uiop->uio_offset, tl);
1151 *(tl + 2) = txdr_unsigned(len);
1152 } else
1153 #endif
1154 {
1155 *tl++ = txdr_unsigned(uiop->uio_offset);
1156 *tl++ = txdr_unsigned(len);
1157 *tl = 0;
1158 }
1159 nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
1160 #ifndef NFS_V2_ONLY
1161 if (v3) {
1162 nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
1163 if (error) {
1164 m_freem(mrep);
1165 goto nfsmout;
1166 }
1167 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1168 eof = fxdr_unsigned(int, *(tl + 1));
1169 } else
1170 #endif
1171 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1172 nfsm_strsiz(retlen, nmp->nm_rsize);
1173 nfsm_mtouio(uiop, retlen);
1174 m_freem(mrep);
1175 tsiz -= retlen;
1176 byte_count += retlen;
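		/*
		 * Stop once the server reports EOF (v3) or returns a short
		 * read (v2); otherwise keep issuing READ RPCs until the
		 * request is satisfied.
		 */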
1177 #ifndef NFS_V2_ONLY
1178 if (v3) {
1179 if (eof || retlen == 0)
1180 tsiz = 0;
1181 } else
1182 #endif
1183 if (retlen < len)
1184 tsiz = 0;
1185 }
1186 nfsmout:
1187 iostat_unbusy(nmp->nm_stats, byte_count, 1);
1188 return (error);
1189 }
1190
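/*
 * State shared between nfs_writerpc() and nfs_writerpc_extfree(): the
 * count of loaned mbufs that still reference the caller's pages, so
 * nfs_writerpc() can wait for all of them to be freed before returning.
 */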
1191 struct nfs_writerpc_context {
1192 kmutex_t nwc_lock;
1193 kcondvar_t nwc_cv;
1194 int nwc_mbufcount;
1195 };
1196
1197 /*
 1198  * Free an mbuf used to reference protected pages during a write RPC.
 1199  * Called at splvm.
1200 */
1201 static void
1202 nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
1203 {
1204 struct nfs_writerpc_context *ctx = arg;
1205
1206 KASSERT(m != NULL);
1207 KASSERT(ctx != NULL);
1208 pool_cache_put(mb_cache, m);
1209 mutex_enter(&ctx->nwc_lock);
1210 if (--ctx->nwc_mbufcount == 0) {
1211 cv_signal(&ctx->nwc_cv);
1212 }
1213 mutex_exit(&ctx->nwc_lock);
1214 }
1215
1216 /*
1217 * nfs write call
1218 */
1219 int
1220 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, bool pageprotected, bool *stalewriteverfp)
1221 {
1222 u_int32_t *tl;
1223 char *cp;
1224 int32_t t1, t2;
1225 char *bpos, *dpos;
1226 struct mbuf *mreq, *mrep, *md, *mb;
1227 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1228 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
1229 const int v3 = NFS_ISV3(vp);
1230 int committed = NFSV3WRITE_FILESYNC;
1231 struct nfsnode *np = VTONFS(vp);
1232 struct nfs_writerpc_context ctx;
1233 int byte_count;
1234 size_t origresid;
1235 #ifndef NFS_V2_ONLY
1236 char *cp2;
1237 int rlen, commit;
1238 #endif
1239
1240 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1241 panic("writerpc readonly vp %p", vp);
1242 }
1243
1244 #ifdef DIAGNOSTIC
1245 if (uiop->uio_iovcnt != 1)
1246 panic("nfs: writerpc iovcnt > 1");
1247 #endif
1248 tsiz = uiop->uio_resid;
1249 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1250 return EFBIG;
1251
1252 mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
1253 cv_init(&ctx.nwc_cv, "nfsmblk");
1254 ctx.nwc_mbufcount = 1;
1255
1256 retry:
1257 origresid = uiop->uio_resid;
1258 KASSERT(origresid == uiop->uio_iov->iov_len);
1259 iostat_busy(nmp->nm_stats);
1260 byte_count = 0; /* count of bytes actually written */
1261 while (tsiz > 0) {
1262 uint32_t datalen; /* data bytes need to be allocated in mbuf */
1263 size_t backup;
1264 bool stalewriteverf = false;
1265
1266 nfsstats.rpccnt[NFSPROC_WRITE]++;
1267 len = uimin(tsiz, nmp->nm_wsize);
1268 datalen = pageprotected ? 0 : nfsm_rndup(len);
1269 nfsm_reqhead(np, NFSPROC_WRITE,
1270 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
1271 nfsm_fhtom(np, v3);
1272 #ifndef NFS_V2_ONLY
1273 if (v3) {
1274 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1275 txdr_hyper(uiop->uio_offset, tl);
1276 tl += 2;
1277 *tl++ = txdr_unsigned(len);
1278 *tl++ = txdr_unsigned(*iomode);
1279 *tl = txdr_unsigned(len);
1280 } else
1281 #endif
1282 {
1283 u_int32_t x;
1284
1285 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1286 /* Set both "begin" and "current" to non-garbage. */
1287 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1288 *tl++ = x; /* "begin offset" */
1289 *tl++ = x; /* "current offset" */
1290 x = txdr_unsigned(len);
1291 *tl++ = x; /* total to this offset */
1292 *tl = x; /* size of this write */
1293
1294 }
1295 if (pageprotected) {
1296 /*
1297 * since we know pages can't be modified during i/o,
 1298 			 * there is no need to copy them here.
1299 */
1300 struct mbuf *m;
1301 struct iovec *iovp = uiop->uio_iov;
1302
1303 m = m_get(M_WAIT, MT_DATA);
1304 MCLAIM(m, &nfs_mowner);
1305 MEXTADD(m, iovp->iov_base, len, M_MBUF,
1306 nfs_writerpc_extfree, &ctx);
1307 m->m_flags |= M_EXT_ROMAP;
1308 m->m_len = len;
1309 mb->m_next = m;
1310 /*
1311 * no need to maintain mb and bpos here
 1312 			 * because nothing uses them later.
1313 */
1314 #if 0
1315 mb = m;
 1316 			bpos = mtod(mb, char *) + mb->m_len;
1317 #endif
1318 UIO_ADVANCE(uiop, len);
1319 uiop->uio_offset += len;
1320 mutex_enter(&ctx.nwc_lock);
1321 ctx.nwc_mbufcount++;
1322 mutex_exit(&ctx.nwc_lock);
1323 nfs_zeropad(mb, 0, nfsm_padlen(len));
1324 } else {
1325 nfsm_uiotom(uiop, len);
1326 }
1327 nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
1328 #ifndef NFS_V2_ONLY
1329 if (v3) {
1330 wccflag = NFSV3_WCCCHK;
1331 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
1332 if (!error) {
1333 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1334 + NFSX_V3WRITEVERF);
1335 rlen = fxdr_unsigned(int, *tl++);
1336 if (rlen == 0) {
1337 error = NFSERR_IO;
1338 m_freem(mrep);
1339 break;
1340 } else if (rlen < len) {
1341 backup = len - rlen;
1342 UIO_ADVANCE(uiop, -backup);
1343 uiop->uio_offset -= backup;
1344 len = rlen;
1345 }
1346 commit = fxdr_unsigned(int, *tl++);
1347
1348 /*
 1349 				 * Return the lowest commitment level
1350 * obtained by any of the RPCs.
1351 */
1352 if (committed == NFSV3WRITE_FILESYNC)
1353 committed = commit;
1354 else if (committed == NFSV3WRITE_DATASYNC &&
1355 commit == NFSV3WRITE_UNSTABLE)
1356 committed = commit;
1357 mutex_enter(&nmp->nm_lock);
1358 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
1359 memcpy(nmp->nm_writeverf, tl,
1360 NFSX_V3WRITEVERF);
1361 nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
1362 } else if ((nmp->nm_iflag &
1363 NFSMNT_STALEWRITEVERF) ||
1364 memcmp(tl, nmp->nm_writeverf,
1365 NFSX_V3WRITEVERF)) {
1366 memcpy(nmp->nm_writeverf, tl,
1367 NFSX_V3WRITEVERF);
1368 /*
1369 * note NFSMNT_STALEWRITEVERF
1370 * if we're the first thread to
1371 * notice it.
1372 */
1373 if ((nmp->nm_iflag &
1374 NFSMNT_STALEWRITEVERF) == 0) {
1375 stalewriteverf = true;
1376 nmp->nm_iflag |=
1377 NFSMNT_STALEWRITEVERF;
1378 }
1379 }
1380 mutex_exit(&nmp->nm_lock);
1381 }
1382 } else
1383 #endif
1384 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1385 if (wccflag)
1386 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
1387 m_freem(mrep);
1388 if (error)
1389 break;
1390 tsiz -= len;
1391 byte_count += len;
1392 if (stalewriteverf) {
1393 *stalewriteverfp = true;
1394 stalewriteverf = false;
1395 if (committed == NFSV3WRITE_UNSTABLE &&
1396 len != origresid) {
1397 /*
 1398 			 * if our write requests weren't atomic but
 1399 			 * unstable, data from previous iterations
 1400 			 * might already have been lost, so we
 1401 			 * should resend it to nfsd.
1402 */
1403 backup = origresid - tsiz;
1404 UIO_ADVANCE(uiop, -backup);
1405 uiop->uio_offset -= backup;
1406 tsiz = origresid;
1407 goto retry;
1408 }
1409 }
1410 }
1411 nfsmout:
1412 iostat_unbusy(nmp->nm_stats, byte_count, 0);
1413 if (pageprotected) {
1414 /*
1415 * wait until mbufs go away.
1416 * retransmitted mbufs can survive longer than rpc requests
1417 * themselves.
1418 */
1419 mutex_enter(&ctx.nwc_lock);
1420 ctx.nwc_mbufcount--;
1421 while (ctx.nwc_mbufcount > 0) {
1422 cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
1423 }
1424 mutex_exit(&ctx.nwc_lock);
1425 }
1426 mutex_destroy(&ctx.nwc_lock);
1427 cv_destroy(&ctx.nwc_cv);
1428 *iomode = committed;
1429 if (error)
1430 uiop->uio_resid = tsiz;
1431 return (error);
1432 }
1433
1434 /*
1435 * nfs mknod rpc
1436 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1437 * mode set to specify the file type and the size field for rdev.
1438 */
1439 int
1440 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap)
1441 {
1442 struct nfsv2_sattr *sp;
1443 u_int32_t *tl;
1444 char *cp;
1445 int32_t t1, t2;
1446 struct vnode *newvp = (struct vnode *)0;
1447 struct nfsnode *dnp, *np;
1448 char *cp2;
1449 char *bpos, *dpos;
1450 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1451 struct mbuf *mreq, *mrep, *md, *mb;
1452 u_int32_t rdev;
1453 const int v3 = NFS_ISV3(dvp);
1454
1455 if (vap->va_type == VCHR || vap->va_type == VBLK)
1456 rdev = txdr_unsigned(vap->va_rdev);
1457 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1458 rdev = nfs_xdrneg1;
1459 else {
1460 VOP_ABORTOP(dvp, cnp);
1461 return (EOPNOTSUPP);
1462 }
1463 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1464 dnp = VTONFS(dvp);
1465 nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
 1466 		nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1467 nfsm_fhtom(dnp, v3);
1468 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1469 #ifndef NFS_V2_ONLY
1470 if (v3) {
1471 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1472 *tl++ = vtonfsv3_type(vap->va_type);
1473 nfsm_v3attrbuild(vap, false);
1474 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1475 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1476 *tl++ = txdr_unsigned(major(vap->va_rdev));
1477 *tl = txdr_unsigned(minor(vap->va_rdev));
1478 }
1479 } else
1480 #endif
1481 {
1482 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1483 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1484 sp->sa_uid = nfs_xdrneg1;
1485 sp->sa_gid = nfs_xdrneg1;
1486 sp->sa_size = rdev;
1487 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1488 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1489 }
1490 nfsm_request(dnp, NFSPROC_MKNOD, curlwp, cnp->cn_cred);
1491 if (!error) {
1492 nfsm_mtofh(dvp, newvp, v3, gotvp);
1493 if (!gotvp) {
1494 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1495 cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1496 if (!error)
1497 newvp = NFSTOV(np);
1498 }
1499 }
1500 #ifndef NFS_V2_ONLY
1501 if (v3)
1502 nfsm_wcc_data(dvp, wccflag, 0, !error);
1503 #endif
1504 nfsm_reqdone;
1505 if (error) {
1506 if (newvp)
1507 vput(newvp);
1508 } else {
1509 nfs_cache_enter(dvp, newvp, cnp);
1510 *vpp = newvp;
1511 VOP_UNLOCK(newvp);
1512 }
1513 VTONFS(dvp)->n_flag |= NMODIFIED;
1514 if (!wccflag)
1515 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1516 return (error);
1517 }
1518
1519 /*
1520 * nfs mknod vop
1521 * just call nfs_mknodrpc() to do the work.
1522 */
1523 /* ARGSUSED */
1524 int
1525 nfs_mknod(void *v)
1526 {
1527 struct vop_mknod_v3_args /* {
1528 struct vnode *a_dvp;
1529 struct vnode **a_vpp;
1530 struct componentname *a_cnp;
1531 struct vattr *a_vap;
1532 } */ *ap = v;
1533 struct vnode *dvp = ap->a_dvp;
1534 struct componentname *cnp = ap->a_cnp;
1535 int error;
1536
1537 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
1538 if (error == 0 || error == EEXIST)
1539 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1540 return (error);
1541 }
1542
1543 /*
1544 * nfs file create call
1545 */
1546 int
1547 nfs_create(void *v)
1548 {
1549 struct vop_create_v3_args /* {
1550 struct vnode *a_dvp;
1551 struct vnode **a_vpp;
1552 struct componentname *a_cnp;
1553 struct vattr *a_vap;
1554 } */ *ap = v;
1555 struct vnode *dvp = ap->a_dvp;
1556 struct vattr *vap = ap->a_vap;
1557 struct componentname *cnp = ap->a_cnp;
1558 struct nfsv2_sattr *sp;
1559 u_int32_t *tl;
1560 char *cp;
1561 int32_t t1, t2;
1562 struct nfsnode *dnp, *np = (struct nfsnode *)0;
1563 struct vnode *newvp = (struct vnode *)0;
1564 char *bpos, *dpos, *cp2;
1565 int error, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1566 struct mbuf *mreq, *mrep, *md, *mb;
1567 const int v3 = NFS_ISV3(dvp);
1568 u_int32_t excl_mode = NFSV3CREATE_UNCHECKED;
1569
1570 /*
1571 * Oops, not for me..
1572 */
1573 if (vap->va_type == VSOCK)
1574 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1575
1576 KASSERT(vap->va_type == VREG);
1577
1578 #ifdef VA_EXCLUSIVE
1579 if (vap->va_vaflags & VA_EXCLUSIVE) {
1580 excl_mode = NFSV3CREATE_EXCLUSIVE;
1581 }
1582 #endif
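	/*
	 * Try exclusive create first when the caller asked for it; if the
	 * server does not support that, fall back to guarded and finally
	 * to unchecked create (see the ENOTSUP handling below).
	 */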
1583 again:
1584 error = 0;
1585 nfsstats.rpccnt[NFSPROC_CREATE]++;
1586 dnp = VTONFS(dvp);
1587 nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1588 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1589 nfsm_fhtom(dnp, v3);
1590 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1591 #ifndef NFS_V2_ONLY
1592 if (v3) {
1593 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1594 if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1595 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1596 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1597 *tl++ = cprng_fast32();
1598 *tl = cprng_fast32();
1599 } else {
1600 *tl = txdr_unsigned(excl_mode);
1601 nfsm_v3attrbuild(vap, false);
1602 }
1603 } else
1604 #endif
1605 {
1606 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1607 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1608 sp->sa_uid = nfs_xdrneg1;
1609 sp->sa_gid = nfs_xdrneg1;
1610 sp->sa_size = 0;
1611 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1612 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1613 }
1614 nfsm_request(dnp, NFSPROC_CREATE, curlwp, cnp->cn_cred);
1615 if (!error) {
1616 nfsm_mtofh(dvp, newvp, v3, gotvp);
1617 if (!gotvp) {
1618 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1619 cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1620 if (!error)
1621 newvp = NFSTOV(np);
1622 }
1623 }
1624 #ifndef NFS_V2_ONLY
1625 if (v3)
1626 nfsm_wcc_data(dvp, wccflag, 0, !error);
1627 #endif
1628 nfsm_reqdone;
1629 if (error) {
1630 /*
1631 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
1632 */
1633 if (v3 && error == ENOTSUP) {
1634 if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1635 excl_mode = NFSV3CREATE_GUARDED;
1636 goto again;
1637 } else if (excl_mode == NFSV3CREATE_GUARDED) {
1638 excl_mode = NFSV3CREATE_UNCHECKED;
1639 goto again;
1640 }
1641 }
1642 } else if (v3 && (excl_mode == NFSV3CREATE_EXCLUSIVE)) {
1643 struct timespec ts;
1644
1645 getnanotime(&ts);
1646
1647 /*
1648 * make sure that we'll update timestamps as
1649 * most server implementations use them to store
1650 * the create verifier.
1651 *
1652 * XXX it's better to use TOSERVER always.
1653 */
1654
1655 if (vap->va_atime.tv_sec == VNOVAL)
1656 vap->va_atime = ts;
1657 if (vap->va_mtime.tv_sec == VNOVAL)
1658 vap->va_mtime = ts;
1659
1660 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, curlwp);
1661 }
1662 if (error == 0) {
1663 if (cnp->cn_flags & MAKEENTRY)
1664 nfs_cache_enter(dvp, newvp, cnp);
1665 else
1666 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1667 *ap->a_vpp = newvp;
1668 VOP_UNLOCK(newvp);
1669 } else {
1670 if (newvp)
1671 vput(newvp);
1672 if (error == EEXIST)
1673 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1674 }
1675 VTONFS(dvp)->n_flag |= NMODIFIED;
1676 if (!wccflag)
1677 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1678 return (error);
1679 }
1680
1681 /*
1682 * nfs file remove call
1683 * To try and make nfs semantics closer to ufs semantics, a file that has
1684 * other processes using the vnode is renamed instead of removed and then
1685 * removed later on the last close.
1686 * - If vrefcnt(vp) > 1
1687 * If a rename is not already in the works
1688 * call nfs_sillyrename() to set it up
1689 * else
1690 * do the remove rpc
1691 */
1692 int
1693 nfs_remove(void *v)
1694 {
1695 struct vop_remove_v3_args /* {
1696 struct vnodeop_desc *a_desc;
1697 struct vnode * a_dvp;
1698 struct vnode * a_vp;
1699 struct componentname * a_cnp;
1700 nlink_t ctx_vp_new_nlink;
1701 } */ *ap = v;
1702 struct vnode *vp = ap->a_vp;
1703 struct vnode *dvp = ap->a_dvp;
1704 struct componentname *cnp = ap->a_cnp;
1705 struct nfsnode *np = VTONFS(vp);
1706 int error = 0;
1707 struct vattr vattr;
1708
1709 #ifndef DIAGNOSTIC
1710 if (vrefcnt(vp) < 1)
1711 panic("nfs_remove: bad vrefcnt(vp)");
1712 #endif
1713 if (vp->v_type == VDIR)
1714 error = EPERM;
1715 else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
1716 VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
1717 vattr.va_nlink > 1)) {
1718 /*
1719 * Purge the name cache so that the chance of a lookup for
1720 * the name succeeding while the remove is in progress is
1721 * minimized. Without node locking it can still happen, such
1722 * that an I/O op returns ESTALE, but since you get this if
1723 * another host removes the file..
1724 */
1725 cache_purge(vp);
1726 /*
1727 * throw away biocache buffers, mainly to avoid
1728 * unnecessary delayed writes later.
1729 */
1730 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, curlwp, 1);
1731 /* Do the rpc */
1732 if (error != EINTR)
1733 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1734 cnp->cn_namelen, cnp->cn_cred, curlwp);
1735 } else if (!np->n_sillyrename)
1736 error = nfs_sillyrename(dvp, vp, cnp, false);
1737 if (error == 0 && nfs_getattrcache(vp, &vattr) == 0) {
1738 ap->ctx_vp_new_nlink = vattr.va_nlink - 1;
1739 if (vattr.va_nlink == 1)
1740 np->n_flag |= NREMOVED;
1741 }
1742 NFS_INVALIDATE_ATTRCACHE(np);
1743 if (dvp == vp)
1744 vrele(vp);
1745 else
1746 vput(vp);
1747 return (error);
1748 }
1749
1750 /*
1751 * nfs file remove rpc called from nfs_inactive
1752 */
1753 int
1754 nfs_removeit(struct sillyrename *sp)
1755 {
1756
1757 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1758 (struct lwp *)0));
1759 }
1760
1761 /*
1762 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1763 */
1764 int
1765 nfs_removerpc(struct vnode *dvp, const char *name, int namelen, kauth_cred_t cred, struct lwp *l)
1766 {
1767 u_int32_t *tl;
1768 char *cp;
1769 #ifndef NFS_V2_ONLY
1770 int32_t t1;
1771 char *cp2;
1772 #endif
1773 int32_t t2;
1774 char *bpos, *dpos;
1775 int error = 0, wccflag = NFSV3_WCCRATTR;
1776 struct mbuf *mreq, *mrep, *md, *mb;
1777 const int v3 = NFS_ISV3(dvp);
1778 int rexmit = 0;
1779 struct nfsnode *dnp = VTONFS(dvp);
1780
1781 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1782 nfsm_reqhead(dnp, NFSPROC_REMOVE,
1783 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1784 nfsm_fhtom(dnp, v3);
1785 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1786 nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
1787 #ifndef NFS_V2_ONLY
1788 if (v3)
1789 nfsm_wcc_data(dvp, wccflag, 0, !error);
1790 #endif
1791 nfsm_reqdone;
1792 VTONFS(dvp)->n_flag |= NMODIFIED;
1793 if (!wccflag)
1794 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1795 /*
1796 * Kludge City: If the first reply to the remove rpc is lost..
1797 * the reply to the retransmitted request will be ENOENT
 1798 	 * since the file was in fact removed.
1799 * Therefore, we cheat and return success.
1800 */
1801 if (rexmit && error == ENOENT)
1802 error = 0;
1803 return (error);
1804 }
1805
1806 /*
1807 * nfs file rename call
1808 */
1809 int
1810 nfs_rename(void *v)
1811 {
1812 struct vop_rename_args /* {
1813 struct vnode *a_fdvp;
1814 struct vnode *a_fvp;
1815 struct componentname *a_fcnp;
1816 struct vnode *a_tdvp;
1817 struct vnode *a_tvp;
1818 struct componentname *a_tcnp;
1819 } */ *ap = v;
1820 struct vnode *fvp = ap->a_fvp;
1821 struct vnode *tvp = ap->a_tvp;
1822 struct vnode *fdvp = ap->a_fdvp;
1823 struct vnode *tdvp = ap->a_tdvp;
1824 struct componentname *tcnp = ap->a_tcnp;
1825 struct componentname *fcnp = ap->a_fcnp;
1826 int error;
1827
1828 /* Check for cross-device rename */
1829 if ((fvp->v_mount != tdvp->v_mount) ||
1830 (tvp && (fvp->v_mount != tvp->v_mount))) {
1831 error = EXDEV;
1832 goto out;
1833 }
1834
1835 /*
1836 * If the tvp exists and is in use, sillyrename it before doing the
1837 * rename of the new file over it.
1838 *
1839 * Have sillyrename use link instead of rename if possible,
1840 * so that we don't lose the file if the rename fails, and so
1841 * that there's no window when the "to" file doesn't exist.
1842 */
1843 if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
1844 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
1845 VN_KNOTE(tvp, NOTE_DELETE);
1846 vput(tvp);
1847 tvp = NULL;
1848 }
1849
1850 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1851 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1852 curlwp);
1853
1854 VN_KNOTE(fdvp, NOTE_WRITE);
1855 VN_KNOTE(tdvp, NOTE_WRITE);
1856 if (error == 0 || error == EEXIST) {
1857 if (fvp->v_type == VDIR)
1858 cache_purge(fvp);
1859 else
1860 cache_purge1(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1861 0);
1862 if (tvp != NULL && tvp->v_type == VDIR)
1863 cache_purge(tvp);
1864 else
1865 cache_purge1(tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
1866 0);
1867 }
1868 out:
1869 if (tdvp == tvp)
1870 vrele(tdvp);
1871 else
1872 vput(tdvp);
1873 if (tvp)
1874 vput(tvp);
1875 vrele(fdvp);
1876 vrele(fvp);
1877 return (error);
1878 }
1879
1880 /*
1881 * nfs file rename rpc called from nfs_remove() above
1882 */
1883 int
1884 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, struct sillyrename *sp)
1885 {
1886 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1887 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curlwp));
1888 }
1889
1890 /*
1891 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1892 */
1893 int
1894 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen, struct vnode *tdvp, const char *tnameptr, int tnamelen, kauth_cred_t cred, struct lwp *l)
1895 {
1896 u_int32_t *tl;
1897 char *cp;
1898 #ifndef NFS_V2_ONLY
1899 int32_t t1;
1900 char *cp2;
1901 #endif
1902 int32_t t2;
1903 char *bpos, *dpos;
1904 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1905 struct mbuf *mreq, *mrep, *md, *mb;
1906 const int v3 = NFS_ISV3(fdvp);
1907 int rexmit = 0;
1908 struct nfsnode *fdnp = VTONFS(fdvp);
1909
1910 nfsstats.rpccnt[NFSPROC_RENAME]++;
1911 nfsm_reqhead(fdnp, NFSPROC_RENAME,
1912 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1913 nfsm_rndup(tnamelen));
1914 nfsm_fhtom(fdnp, v3);
1915 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1916 nfsm_fhtom(VTONFS(tdvp), v3);
1917 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1918 nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
1919 #ifndef NFS_V2_ONLY
1920 if (v3) {
1921 nfsm_wcc_data(fdvp, fwccflag, 0, !error);
1922 nfsm_wcc_data(tdvp, twccflag, 0, !error);
1923 }
1924 #endif
1925 nfsm_reqdone;
1926 VTONFS(fdvp)->n_flag |= NMODIFIED;
1927 VTONFS(tdvp)->n_flag |= NMODIFIED;
1928 if (!fwccflag)
1929 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
1930 if (!twccflag)
1931 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
1932 /*
1933 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1934 */
1935 if (rexmit && error == ENOENT)
1936 error = 0;
1937 return (error);
1938 }
1939
1940 /*
1941 * NFS link RPC, called from nfs_link.
1942 * Assumes dvp and vp locked, and leaves them that way.
1943 */
1944
1945 static int
1946 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name,
1947 size_t namelen, kauth_cred_t cred, struct lwp *l)
1948 {
1949 u_int32_t *tl;
1950 char *cp;
1951 #ifndef NFS_V2_ONLY
1952 int32_t t1;
1953 char *cp2;
1954 #endif
1955 int32_t t2;
1956 char *bpos, *dpos;
1957 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1958 struct mbuf *mreq, *mrep, *md, *mb;
1959 const int v3 = NFS_ISV3(dvp);
1960 int rexmit = 0;
1961 struct nfsnode *np = VTONFS(vp);
1962
1963 nfsstats.rpccnt[NFSPROC_LINK]++;
1964 nfsm_reqhead(np, NFSPROC_LINK,
1965 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen));
1966 nfsm_fhtom(np, v3);
1967 nfsm_fhtom(VTONFS(dvp), v3);
1968 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1969 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit);
1970 #ifndef NFS_V2_ONLY
1971 if (v3) {
1972 nfsm_postop_attr(vp, attrflag, 0);
1973 nfsm_wcc_data(dvp, wccflag, 0, !error);
1974 }
1975 #endif
1976 nfsm_reqdone;
1977
1978 VTONFS(dvp)->n_flag |= NMODIFIED;
1979 if (!attrflag)
1980 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
1981 if (!wccflag)
1982 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1983
1984 /*
1985 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1986 */
1987 if (rexmit && error == EEXIST)
1988 error = 0;
1989
1990 return error;
1991 }
1992
1993 /*
1994 * nfs hard link create call
1995 */
1996 int
1997 nfs_link(void *v)
1998 {
1999 struct vop_link_v2_args /* {
2000 struct vnode *a_dvp;
2001 struct vnode *a_vp;
2002 struct componentname *a_cnp;
2003 } */ *ap = v;
2004 struct vnode *vp = ap->a_vp;
2005 struct vnode *dvp = ap->a_dvp;
2006 struct componentname *cnp = ap->a_cnp;
2007 int error = 0;
2008
2009 error = vn_lock(vp, LK_EXCLUSIVE);
2010 if (error != 0) {
2011 VOP_ABORTOP(dvp, cnp);
2012 return error;
2013 }
2014
2015 /*
2016 * Push all writes to the server, so that the attribute cache
2017 * doesn't get "out of sync" with the server.
2018 * XXX There should be a better way!
2019 */
2020 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0);
2021
2022 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2023 cnp->cn_cred, curlwp);
2024
2025 if (error == 0) {
2026 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
2027 }
2028 VOP_UNLOCK(vp);
2029 return (error);
2030 }
2031
2032 /*
2033 * nfs symbolic link create call
2034 */
2035 int
2036 nfs_symlink(void *v)
2037 {
2038 struct vop_symlink_v3_args /* {
2039 struct vnode *a_dvp;
2040 struct vnode **a_vpp;
2041 struct componentname *a_cnp;
2042 struct vattr *a_vap;
2043 char *a_target;
2044 } */ *ap = v;
2045 struct vnode *dvp = ap->a_dvp;
2046 struct vattr *vap = ap->a_vap;
2047 struct componentname *cnp = ap->a_cnp;
2048 struct nfsv2_sattr *sp;
2049 u_int32_t *tl;
2050 char *cp;
2051 int32_t t1, t2;
2052 char *bpos, *dpos, *cp2;
2053 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2054 struct mbuf *mreq, *mrep, *md, *mb;
2055 struct vnode *newvp = (struct vnode *)0;
2056 const int v3 = NFS_ISV3(dvp);
2057 int rexmit = 0;
2058 struct nfsnode *dnp = VTONFS(dvp);
2059
2060 *ap->a_vpp = NULL;
2061 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2062 slen = strlen(ap->a_target);
2063 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2064 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2065 nfsm_fhtom(dnp, v3);
2066 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2067 #ifndef NFS_V2_ONLY
2068 if (v3)
2069 nfsm_v3attrbuild(vap, false);
2070 #endif
2071 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2073 if (!v3) {
2074 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2075 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2076 sp->sa_uid = nfs_xdrneg1;
2077 sp->sa_gid = nfs_xdrneg1;
2078 sp->sa_size = nfs_xdrneg1;
2079 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2080 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2081 }
2083 nfsm_request1(dnp, NFSPROC_SYMLINK, curlwp, cnp->cn_cred,
2084 &rexmit);
2085 #ifndef NFS_V2_ONLY
2086 if (v3) {
2087 if (!error)
2088 nfsm_mtofh(dvp, newvp, v3, gotvp);
2089 nfsm_wcc_data(dvp, wccflag, 0, !error);
2090 }
2091 #endif
2092 nfsm_reqdone;
2093 /*
2094 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2095 */
2096 if (rexmit && error == EEXIST)
2097 error = 0;
2098 if (error == 0 || error == EEXIST)
2099 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
2100 if (error == 0 && newvp == NULL) {
2101 struct nfsnode *np = NULL;
2102
2103 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2104 cnp->cn_cred, curlwp, &np);
2105 if (error == 0)
2106 newvp = NFSTOV(np);
2107 }
2108 if (error) {
2109 if (newvp != NULL)
2110 vput(newvp);
2111 } else {
2112 *ap->a_vpp = newvp;
2113 VOP_UNLOCK(newvp);
2114 }
2115 VTONFS(dvp)->n_flag |= NMODIFIED;
2116 if (!wccflag)
2117 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2118 return (error);
2119 }
2120
2121 /*
2122 * nfs make dir call
2123 */
2124 int
2125 nfs_mkdir(void *v)
2126 {
2127 struct vop_mkdir_v3_args /* {
2128 struct vnode *a_dvp;
2129 struct vnode **a_vpp;
2130 struct componentname *a_cnp;
2131 struct vattr *a_vap;
2132 } */ *ap = v;
2133 struct vnode *dvp = ap->a_dvp;
2134 struct vattr *vap = ap->a_vap;
2135 struct componentname *cnp = ap->a_cnp;
2136 struct nfsv2_sattr *sp;
2137 u_int32_t *tl;
2138 char *cp;
2139 int32_t t1, t2;
2140 int len;
2141 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0;
2142 struct vnode *newvp = (struct vnode *)0;
2143 char *bpos, *dpos, *cp2;
2144 int error = 0, wccflag = NFSV3_WCCRATTR;
2145 int gotvp = 0;
2146 int rexmit = 0;
2147 struct mbuf *mreq, *mrep, *md, *mb;
2148 const int v3 = NFS_ISV3(dvp);
2149
2150 len = cnp->cn_namelen;
2151 nfsstats.rpccnt[NFSPROC_MKDIR]++;
2152 nfsm_reqhead(dnp, NFSPROC_MKDIR,
2153 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2154 nfsm_fhtom(dnp, v3);
2155 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2156 #ifndef NFS_V2_ONLY
2157 if (v3) {
2158 nfsm_v3attrbuild(vap, false);
2159 } else
2160 #endif
2161 {
2162 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2163 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2164 sp->sa_uid = nfs_xdrneg1;
2165 sp->sa_gid = nfs_xdrneg1;
2166 sp->sa_size = nfs_xdrneg1;
2167 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2168 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2169 }
2170 nfsm_request1(dnp, NFSPROC_MKDIR, curlwp, cnp->cn_cred, &rexmit);
2171 if (!error)
2172 nfsm_mtofh(dvp, newvp, v3, gotvp);
2173 if (v3)
2174 nfsm_wcc_data(dvp, wccflag, 0, !error);
2175 nfsm_reqdone;
2176 VTONFS(dvp)->n_flag |= NMODIFIED;
2177 if (!wccflag)
2178 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2179 /*
2180 * Kludge: Map EEXIST => 0, assuming it is a reply to a retry,
2181 * if we can succeed in looking up the directory.
2182 */
2183 if ((rexmit && error == EEXIST) || (!error && !gotvp)) {
2184 if (newvp) {
2185 vput(newvp);
2186 newvp = (struct vnode *)0;
2187 }
2188 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
2189 curlwp, &np);
2190 if (!error) {
2191 newvp = NFSTOV(np);
2192 if (newvp->v_type != VDIR || newvp == dvp)
2193 error = EEXIST;
2194 }
2195 }
2196 if (error) {
2197 if (newvp) {
2198 if (dvp != newvp)
2199 vput(newvp);
2200 else
2201 vrele(newvp);
2202 }
2203 } else {
2204 nfs_cache_enter(dvp, newvp, cnp);
2205 *ap->a_vpp = newvp;
2206 VOP_UNLOCK(newvp);
2207 }
2208 return (error);
2209 }
2210
2211 /*
2212 * nfs remove directory call
2213 */
2214 int
2215 nfs_rmdir(void *v)
2216 {
2217 struct vop_rmdir_v2_args /* {
2218 struct vnode *a_dvp;
2219 struct vnode *a_vp;
2220 struct componentname *a_cnp;
2221 } */ *ap = v;
2222 struct vnode *vp = ap->a_vp;
2223 struct vnode *dvp = ap->a_dvp;
2224 struct componentname *cnp = ap->a_cnp;
2225 u_int32_t *tl;
2226 char *cp;
2227 #ifndef NFS_V2_ONLY
2228 int32_t t1;
2229 char *cp2;
2230 #endif
2231 int32_t t2;
2232 char *bpos, *dpos;
2233 int error = 0, wccflag = NFSV3_WCCRATTR;
2234 int rexmit = 0;
2235 struct mbuf *mreq, *mrep, *md, *mb;
2236 const int v3 = NFS_ISV3(dvp);
2237 struct nfsnode *dnp;
2238
2239 if (dvp == vp) {
2240 vrele(vp);
2241 return (EINVAL);
2242 }
2243 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2244 dnp = VTONFS(dvp);
2245 nfsm_reqhead(dnp, NFSPROC_RMDIR,
2246 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2247 nfsm_fhtom(dnp, v3);
2248 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2249 nfsm_request1(dnp, NFSPROC_RMDIR, curlwp, cnp->cn_cred, &rexmit);
2250 #ifndef NFS_V2_ONLY
2251 if (v3)
2252 nfsm_wcc_data(dvp, wccflag, 0, !error);
2253 #endif
2254 nfsm_reqdone;
2255 VTONFS(dvp)->n_flag |= NMODIFIED;
2256 if (!wccflag)
2257 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2258 cache_purge(vp);
2259 vput(vp);
2260 /*
2261 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2262 */
2263 if (rexmit && error == ENOENT)
2264 error = 0;
2265 return (error);
2266 }
2267
2268 /*
2269 * nfs readdir call
2270 */
2271 int
2272 nfs_readdir(void *v)
2273 {
2274 struct vop_readdir_args /* {
2275 struct vnode *a_vp;
2276 struct uio *a_uio;
2277 kauth_cred_t a_cred;
2278 int *a_eofflag;
2279 off_t **a_cookies;
2280 int *a_ncookies;
2281 } */ *ap = v;
2282 struct vnode *vp = ap->a_vp;
2283 struct uio *uio = ap->a_uio;
2284 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2285 char *base = uio->uio_iov->iov_base;
2286 int tresid, error;
2287 size_t count, lost;
2288 struct dirent *dp;
2289 off_t *cookies = NULL;
2290 int ncookies = 0, nc;
2291
2292 if (vp->v_type != VDIR)
2293 return (EPERM);
2294
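
/*
 * Round the request down to a multiple of NFS_DIRFRAGSIZ; the
 * remainder ("lost") is added back to uio_resid before returning,
 * so the caller never sees it as consumed.
 */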
2295 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1);
2296 count = uio->uio_resid - lost;
2297 if (count <= 0)
2298 return (EINVAL);
2299
2300 /*
2301 * Call nfs_bioread() to do the real work.
2302 */
2303 tresid = uio->uio_resid = count;
2304 error = nfs_bioread(vp, uio, 0, ap->a_cred,
2305 ap->a_cookies ? NFSBIO_CACHECOOKIES : 0);
2306
2307 if (!error && ap->a_cookies) {
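/*
 * Conservative upper bound on the cookie array: each dirent
 * record occupies at least 16 bytes, so at most count / 16
 * entries (and hence cookies) can have been returned by
 * nfs_bioread().
 */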
2308 ncookies = count / 16;
2309 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
2310 *ap->a_cookies = cookies;
2311 }
2312
2313 if (!error && uio->uio_resid == tresid) {
2314 uio->uio_resid += lost;
2315 nfsstats.direofcache_misses++;
2316 if (ap->a_cookies)
2317 *ap->a_ncookies = 0;
2318 *ap->a_eofflag = 1;
2319 return (0);
2320 }
2321
2322 if (!error && ap->a_cookies) {
2323 /*
2324 * Only the NFS server and emulations use cookies, and they
2325 * load the directory block into system space, so we can
2326 * just look at it directly.
2327 */
2328 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
2329 uio->uio_iovcnt != 1)
2330 panic("nfs_readdir: lost in space");
2331 for (nc = 0; ncookies-- &&
2332 base < (char *)uio->uio_iov->iov_base; nc++) {
2333 dp = (struct dirent *) base;
2334 if (dp->d_reclen == 0)
2335 break;
2336 if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
2337 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp);
2338 else
2339 *(cookies++) = NFS_GETCOOKIE(dp);
2340 base += dp->d_reclen;
2341 }
2342 uio->uio_resid +=
2343 ((char *)uio->uio_iov->iov_base - base);
2344 uio->uio_iov->iov_len +=
2345 ((char *)uio->uio_iov->iov_base - base);
2346 uio->uio_iov->iov_base = base;
2347 *ap->a_ncookies = nc;
2348 }
2349
2350 uio->uio_resid += lost;
2351 *ap->a_eofflag = 0;
2352 return (error);
2353 }
2354
2355 /*
2356 * Readdir rpc call.
2357 * Called from below the buffer cache by nfs_doio().
2358 */
2359 int
2360 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2361 {
2362 int len, left;
2363 struct dirent *dp = NULL;
2364 u_int32_t *tl;
2365 char *cp;
2366 int32_t t1, t2;
2367 char *bpos, *dpos, *cp2;
2368 struct mbuf *mreq, *mrep, *md, *mb;
2369 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2370 struct nfsnode *dnp = VTONFS(vp);
2371 u_quad_t fileno;
2372 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1;
2373 #ifndef NFS_V2_ONLY
2374 int attrflag;
2375 #endif
2376 int nrpcs = 0, reclen;
2377 const int v3 = NFS_ISV3(vp);
2378
2379 #ifdef DIAGNOSTIC
2380 /*
2381 * Should be called from the buffer cache, so the request will
2382 * always be for exactly NFS_DIRBLKSIZ.
2383 */
2384 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2385 panic("nfs readdirrpc bad uio");
2386 #endif
2387
2388 /*
2389 * Loop around doing readdir rpc's of size nm_readdirsize
2390 * truncated to a multiple of NFS_DIRFRAGSIZ.
2391 * The stopping criterion is EOF or a full buffer.
2392 */
2393 while (more_dirs && bigenough) {
2394 /*
2395 * Heuristic: don't bother to do another RPC to further
2396 * fill up this block if there is not much room left
2397 * (less than 50% of the readdir RPC size).  This wastes some
2398 * buffer space but can save up to 50% in RPC calls.
2399 */
2400 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2401 bigenough = 0;
2402 break;
2403 }
2404 nfsstats.rpccnt[NFSPROC_READDIR]++;
2405 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) +
2406 NFSX_READDIR(v3));
2407 nfsm_fhtom(dnp, v3);
2408 #ifndef NFS_V2_ONLY
2409 if (v3) {
2410 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2411 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2412 txdr_swapcookie3(uiop->uio_offset, tl);
2413 } else {
2414 txdr_cookie3(uiop->uio_offset, tl);
2415 }
2416 tl += 2;
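/*
 * Send back the cookie verifier saved from the previous
 * READDIR reply along with the cookie; the server uses it
 * to detect cookies that have gone stale
 * (NFS3ERR_BAD_COOKIE).
 */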
2417 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2418 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2419 } else
2420 #endif
2421 {
2422 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2423 *tl++ = txdr_unsigned(uiop->uio_offset);
2424 }
2425 *tl = txdr_unsigned(nmp->nm_readdirsize);
2426 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred);
2427 nrpcs++;
2428 #ifndef NFS_V2_ONLY
2429 if (v3) {
2430 nfsm_postop_attr(vp, attrflag, 0);
2431 if (!error) {
2432 nfsm_dissect(tl, u_int32_t *,
2433 2 * NFSX_UNSIGNED);
2434 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2435 dnp->n_cookieverf.nfsuquad[1] = *tl;
2436 } else {
2437 m_freem(mrep);
2438 goto nfsmout;
2439 }
2440 }
2441 #endif
2442 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2443 more_dirs = fxdr_unsigned(int, *tl);
2444
2445 /* loop thru the dir entries, doctoring them to 4bsd form */
2446 while (more_dirs && bigenough) {
2447 #ifndef NFS_V2_ONLY
2448 if (v3) {
2449 nfsm_dissect(tl, u_int32_t *,
2450 3 * NFSX_UNSIGNED);
2451 fileno = fxdr_hyper(tl);
2452 len = fxdr_unsigned(int, *(tl + 2));
2453 } else
2454 #endif
2455 {
2456 nfsm_dissect(tl, u_int32_t *,
2457 2 * NFSX_UNSIGNED);
2458 fileno = fxdr_unsigned(u_quad_t, *tl++);
2459 len = fxdr_unsigned(int, *tl);
2460 }
2461 if (len <= 0 || len > NFS_MAXNAMLEN) {
2462 error = EBADRPC;
2463 m_freem(mrep);
2464 goto nfsmout;
2465 }
2466 /* for cookie stashing */
2467 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
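/*
 * The extra 2 * sizeof(off_t) above leaves room at the end
 * of the record for NFS_STASHCOOKIE() to store this entry's
 * directory cookie, read back later with NFS_GETCOOKIE().
 */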
2468 left = NFS_DIRFRAGSIZ - blksiz;
2469 if (reclen > left) {
2470 memset(uiop->uio_iov->iov_base, 0, left);
2471 dp->d_reclen += left;
2472 UIO_ADVANCE(uiop, left);
2473 blksiz = 0;
2474 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2475 }
2476 if (reclen > uiop->uio_resid)
2477 bigenough = 0;
2478 if (bigenough) {
2479 int tlen;
2480
2481 dp = (struct dirent *)uiop->uio_iov->iov_base;
2482 dp->d_fileno = fileno;
2483 dp->d_namlen = len;
2484 dp->d_reclen = reclen;
2485 dp->d_type = DT_UNKNOWN;
2486 blksiz += reclen;
2487 if (blksiz == NFS_DIRFRAGSIZ)
2488 blksiz = 0;
2489 UIO_ADVANCE(uiop, DIRHDSIZ);
2490 nfsm_mtouio(uiop, len);
2491 tlen = reclen - (DIRHDSIZ + len);
2492 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2493 UIO_ADVANCE(uiop, tlen);
2494 } else
2495 nfsm_adv(nfsm_rndup(len));
2496 #ifndef NFS_V2_ONLY
2497 if (v3) {
2498 nfsm_dissect(tl, u_int32_t *,
2499 3 * NFSX_UNSIGNED);
2500 } else
2501 #endif
2502 {
2503 nfsm_dissect(tl, u_int32_t *,
2504 2 * NFSX_UNSIGNED);
2505 }
2506 if (bigenough) {
2507 #ifndef NFS_V2_ONLY
2508 if (v3) {
2509 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2510 uiop->uio_offset =
2511 fxdr_swapcookie3(tl);
2512 else
2513 uiop->uio_offset =
2514 fxdr_cookie3(tl);
2515 }
2516 else
2517 #endif
2518 {
2519 uiop->uio_offset =
2520 fxdr_unsigned(off_t, *tl);
2521 }
2522 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2523 }
2524 if (v3)
2525 tl += 2;
2526 else
2527 tl++;
2528 more_dirs = fxdr_unsigned(int, *tl);
2529 }
2530 /*
2531 * If at end of rpc data, get the eof boolean
2532 */
2533 if (!more_dirs) {
2534 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2535 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2536
2537 /*
2538 * Kludge: if we got no entries, treat it as EOF.
2539 * Some servers sometimes send a reply without any
2540 * entries or an EOF flag.
2541 * Although it might mean the server has a very long name,
2542 * we can't handle such entries anyway.
2543 */
2544
2545 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2546 more_dirs = 0;
2547 }
2548 m_freem(mrep);
2549 }
2550 /*
2551 * Fill the last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2552 * by increasing d_reclen for the last record.
2553 */
2554 if (blksiz > 0) {
2555 left = NFS_DIRFRAGSIZ - blksiz;
2556 memset(uiop->uio_iov->iov_base, 0, left);
2557 dp->d_reclen += left;
2558 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2559 UIO_ADVANCE(uiop, left);
2560 }
2561
2562 /*
2563 * We are now either at the end of the directory or have filled the
2564 * block.
2565 */
2566 if (bigenough) {
2567 dnp->n_direofoffset = uiop->uio_offset;
2568 dnp->n_flag |= NEOFVALID;
2569 }
2570 nfsmout:
2571 return (error);
2572 }
2573
2574 #ifndef NFS_V2_ONLY
2575 /*
2576 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2577 */
2578 int
2579 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2580 {
2581 int len, left;
2582 struct dirent *dp = NULL;
2583 u_int32_t *tl;
2584 char *cp;
2585 int32_t t1, t2;
2586 struct vnode *newvp;
2587 char *bpos, *dpos, *cp2;
2588 struct mbuf *mreq, *mrep, *md, *mb;
2589 struct nameidata nami, *ndp = &nami;
2590 struct componentname *cnp = &ndp->ni_cnd;
2591 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2592 struct nfsnode *dnp = VTONFS(vp), *np;
2593 nfsfh_t *fhp;
2594 u_quad_t fileno;
2595 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2596 int attrflag, fhsize, nrpcs = 0, reclen;
2597 struct nfs_fattr fattr, *fp;
2598
2599 #ifdef DIAGNOSTIC
2600 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2601 panic("nfs readdirplusrpc bad uio");
2602 #endif
2603 ndp->ni_dvp = vp;
2604 newvp = NULLVP;
2605
2606 /*
2607 * Loop around doing readdir rpc's of size nm_readdirsize
2608 * truncated to a multiple of NFS_DIRFRAGSIZ.
2609 * The stopping criterion is EOF or a full buffer.
2610 */
2611 while (more_dirs && bigenough) {
2612 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2613 bigenough = 0;
2614 break;
2615 }
2616 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2617 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS,
2618 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2619 nfsm_fhtom(dnp, 1);
2620 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2621 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2622 txdr_swapcookie3(uiop->uio_offset, tl);
2623 } else {
2624 txdr_cookie3(uiop->uio_offset, tl);
2625 }
2626 tl += 2;
2627 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2628 *tl++ = dnp->n_cookieverf.nfsuquad[1];
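/*
 * READDIRPLUS takes two limits: "dircount" (nm_readdirsize)
 * bounds the directory entry data alone, while "maxcount"
 * (nm_rsize) bounds the entire reply including the attributes
 * and file handles.
 */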
2629 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2630 *tl = txdr_unsigned(nmp->nm_rsize);
2631 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred);
2632 nfsm_postop_attr(vp, attrflag, 0);
2633 if (error) {
2634 m_freem(mrep);
2635 goto nfsmout;
2636 }
2637 nrpcs++;
2638 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2639 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2640 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2641 more_dirs = fxdr_unsigned(int, *tl);
2642
2643 /* loop thru the dir entries, doctoring them to 4bsd form */
2644 while (more_dirs && bigenough) {
2645 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2646 fileno = fxdr_hyper(tl);
2647 len = fxdr_unsigned(int, *(tl + 2));
2648 if (len <= 0 || len > NFS_MAXNAMLEN) {
2649 error = EBADRPC;
2650 m_freem(mrep);
2651 goto nfsmout;
2652 }
2653 /* for cookie stashing */
2654 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2655 left = NFS_DIRFRAGSIZ - blksiz;
2656 if (reclen > left) {
2657 /*
2658 * DIRFRAGSIZ is aligned, no need to align
2659 * again here.
2660 */
2661 memset(uiop->uio_iov->iov_base, 0, left);
2662 dp->d_reclen += left;
2663 UIO_ADVANCE(uiop, left);
2664 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2665 blksiz = 0;
2666 }
2667 if (reclen > uiop->uio_resid)
2668 bigenough = 0;
2669 if (bigenough) {
2670 int tlen;
2671
2672 dp = (struct dirent *)uiop->uio_iov->iov_base;
2673 dp->d_fileno = fileno;
2674 dp->d_namlen = len;
2675 dp->d_reclen = reclen;
2676 dp->d_type = DT_UNKNOWN;
2677 blksiz += reclen;
2678 if (blksiz == NFS_DIRFRAGSIZ)
2679 blksiz = 0;
2680 UIO_ADVANCE(uiop, DIRHDSIZ);
2681 nfsm_mtouio(uiop, len);
2682 tlen = reclen - (DIRHDSIZ + len);
2683 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2684 UIO_ADVANCE(uiop, tlen);
2685 cnp->cn_nameptr = dp->d_name;
2686 cnp->cn_namelen = dp->d_namlen;
2687 } else
2688 nfsm_adv(nfsm_rndup(len));
2689 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2690 if (bigenough) {
2691 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2692 uiop->uio_offset =
2693 fxdr_swapcookie3(tl);
2694 else
2695 uiop->uio_offset =
2696 fxdr_cookie3(tl);
2697 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2698 }
2699 tl += 2;
2700
2701 /*
2702 * Since the attributes come before the file handle
2703 * (sigh), save a copy of them now and apply them with
2704 * nfs_loadattrcache() once the vnode they belong to is known.
2705 */
2706 attrflag = fxdr_unsigned(int, *tl);
2707 if (attrflag) {
2708 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR);
2709 memcpy(&fattr, fp, NFSX_V3FATTR);
2710 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2711 doit = fxdr_unsigned(int, *tl);
2712 if (doit) {
2713 nfsm_getfh(fhp, fhsize, 1);
2714 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2715 vref(vp);
2716 newvp = vp;
2717 np = dnp;
2718 } else {
2719 error = nfs_nget1(vp->v_mount, fhp,
2720 fhsize, &np, LK_NOWAIT);
2721 if (!error)
2722 newvp = NFSTOV(np);
2723 }
2724 if (!error) {
2725 nfs_loadattrcache(&newvp, &fattr, 0, 0);
2726 if (bigenough) {
2727 dp->d_type =
2728 IFTODT(VTTOIF(np->n_vattr->va_type));
2729 ndp->ni_vp = newvp;
2730 nfs_cache_enter(ndp->ni_dvp,
2731 ndp->ni_vp, cnp);
2732 }
2733 }
2734 error = 0;
2735 }
2736 } else {
2737 /* Just skip over the file handle */
2738 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2739 i = fxdr_unsigned(int, *tl);
2740 nfsm_adv(nfsm_rndup(i));
2741 }
2742 if (newvp != NULLVP) {
2743 if (newvp == vp)
2744 vrele(newvp);
2745 else
2746 vput(newvp);
2747 newvp = NULLVP;
2748 }
2749 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2750 more_dirs = fxdr_unsigned(int, *tl);
2751 }
2752 /*
2753 * If at end of rpc data, get the eof boolean
2754 */
2755 if (!more_dirs) {
2756 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2757 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2758
2759 /*
2760 * kludge: see a comment in nfs_readdirrpc.
2761 */
2762
2763 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2764 more_dirs = 0;
2765 }
2766 m_freem(mrep);
2767 }
2768 /*
2769 * Fill the last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2770 * by increasing d_reclen for the last record.
2771 */
2772 if (blksiz > 0) {
2773 left = NFS_DIRFRAGSIZ - blksiz;
2774 memset(uiop->uio_iov->iov_base, 0, left);
2775 dp->d_reclen += left;
2776 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2777 UIO_ADVANCE(uiop, left);
2778 }
2779
2780 /*
2781 * We are now either at the end of the directory or have filled the
2782 * block.
2783 */
2784 if (bigenough) {
2785 dnp->n_direofoffset = uiop->uio_offset;
2786 dnp->n_flag |= NEOFVALID;
2787 }
2788 nfsmout:
2789 if (newvp != NULLVP) {
2790 if (newvp == vp)
2791 vrele(newvp);
2792 else
2793 vput(newvp);
2794 }
2795 return (error);
2796 }
2797 #endif
2798
2799 /*
2800 * Silly rename. To make the stateless NFS filesystem look a little more
2801 * like "ufs", a remove of an active vnode is translated into a rename to
2802 * a funny looking filename that is removed by nfs_inactive() on the
2803 * nfsnode. There is the potential for another process on a different client
2804 * to create the same funny name between the time nfs_lookitup() fails and
2805 * nfs_rename() completes, but...
2806 */
2807 int
2808 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, bool dolink)
2809 {
2810 struct sillyrename *sp;
2811 struct nfsnode *np;
2812 int error;
2813 pid_t pid;
2814
2815 cache_purge(dvp);
2816 np = VTONFS(vp);
2817 #ifdef DIAGNOSTIC
2818 if (vp->v_type == VDIR)
2819 panic("nfs: sillyrename dir");
2820 #endif
2821 sp = kmem_alloc(sizeof(*sp), KM_SLEEP);
2822 sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2823 sp->s_dvp = dvp;
2824 vref(dvp);
2825
2826 /* Fudge together a funny name */
2827 pid = curlwp->l_proc->p_pid;
2828 memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
2829 sp->s_namlen = 12;
2830 sp->s_name[8] = hexdigits[pid & 0xf];
2831 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
2832 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
2833 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
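
/*
 * E.g. for pid 0x1234 the name built above is ".nfsA12344.4";
 * s_name[4] ('A') is bumped in the loop below until a name is found
 * that does not already exist in the directory.
 */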
2834
2835 /* Try lookitups until we get one that isn't there */
2836 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2837 curlwp, (struct nfsnode **)0) == 0) {
2838 sp->s_name[4]++;
2839 if (sp->s_name[4] > 'z') {
2840 error = EINVAL;
2841 goto bad;
2842 }
2843 }
2844 if (dolink) {
2845 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
2846 sp->s_cred, curlwp);
2847 /*
2848 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
2849 */
2850 if (error == ENOTSUP) {
2851 error = nfs_renameit(dvp, cnp, sp);
2852 }
2853 } else {
2854 error = nfs_renameit(dvp, cnp, sp);
2855 }
2856 if (error)
2857 goto bad;
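/*
 * np still points at vp's own nfsnode (set above), so this lookitup
 * serves to refresh that node's file handle and attributes via the
 * new name; any error it returns is ignored here.
 */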
2858 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2859 curlwp, &np);
2860 np->n_sillyrename = sp;
2861 return (0);
2862 bad:
2863 vrele(sp->s_dvp);
2864 kauth_cred_free(sp->s_cred);
2865 kmem_free(sp, sizeof(*sp));
2866 return (error);
2867 }
2868
2869 /*
2870 * Look up a file name and optionally either update the file handle or
2871 * allocate an nfsnode, depending on the value of npp.
2872 * npp == NULL --> just do the lookup
2873 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2874 * handled too
2875 * *npp != NULL --> update the file handle in the vnode
2876 */
2877 int
2878 nfs_lookitup(struct vnode *dvp, const char *name, int len, kauth_cred_t cred, struct lwp *l, struct nfsnode **npp)
2879 {
2880 u_int32_t *tl;
2881 char *cp;
2882 int32_t t1, t2;
2883 struct vnode *newvp = (struct vnode *)0;
2884 struct nfsnode *np, *dnp = VTONFS(dvp);
2885 char *bpos, *dpos, *cp2;
2886 int error = 0, ofhlen, fhlen;
2887 #ifndef NFS_V2_ONLY
2888 int attrflag;
2889 #endif
2890 struct mbuf *mreq, *mrep, *md, *mb;
2891 nfsfh_t *ofhp, *nfhp;
2892 const int v3 = NFS_ISV3(dvp);
2893
2894 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2895 nfsm_reqhead(dnp, NFSPROC_LOOKUP,
2896 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2897 nfsm_fhtom(dnp, v3);
2898 nfsm_strtom(name, len, NFS_MAXNAMLEN);
2899 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred);
2900 if (npp && !error) {
2901 nfsm_getfh(nfhp, fhlen, v3);
2902 if (*npp) {
2903 np = *npp;
2904 newvp = NFSTOV(np);
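/*
 * The file handle is the key this vnode is hashed under in
 * the vnode cache, so replacing it is bracketed by
 * vcache_rekey_enter()/vcache_rekey_exit().
 */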
2905 ofhlen = np->n_fhsize;
2906 ofhp = kmem_alloc(ofhlen, KM_SLEEP);
2907 memcpy(ofhp, np->n_fhp, ofhlen);
2908 error = vcache_rekey_enter(newvp->v_mount, newvp,
2909 ofhp, ofhlen, nfhp, fhlen);
2910 if (error) {
2911 kmem_free(ofhp, ofhlen);
2912 m_freem(mrep);
2913 return error;
2914 }
2915 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2916 kmem_free(np->n_fhp, np->n_fhsize);
2917 np->n_fhp = &np->n_fh;
2918 }
2919 #if NFS_SMALLFH < NFSX_V3FHMAX
2920 else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
2921 np->n_fhp = kmem_alloc(fhlen, KM_SLEEP);
2922 #endif
2923 memcpy(np->n_fhp, nfhp, fhlen);
2924 np->n_fhsize = fhlen;
2925 vcache_rekey_exit(newvp->v_mount, newvp,
2926 ofhp, ofhlen, np->n_fhp, fhlen);
2927 kmem_free(ofhp, ofhlen);
2928 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2929 vref(dvp);
2930 newvp = dvp;
2931 np = dnp;
2932 } else {
2933 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2934 if (error) {
2935 m_freem(mrep);
2936 return (error);
2937 }
2938 newvp = NFSTOV(np);
2939 }
2940 #ifndef NFS_V2_ONLY
2941 if (v3) {
2942 nfsm_postop_attr(newvp, attrflag, 0);
2943 if (!attrflag && *npp == NULL) {
2944 m_freem(mrep);
2945 vput(newvp);
2946 return (ENOENT);
2947 }
2948 } else
2949 #endif
2950 nfsm_loadattr(newvp, (struct vattr *)0, 0);
2951 }
2952 nfsm_reqdone;
2953 if (npp && *npp == NULL) {
2954 if (error) {
2955 if (newvp)
2956 vput(newvp);
2957 } else
2958 *npp = np;
2959 }
2960 return (error);
2961 }
2962
2963 #ifndef NFS_V2_ONLY
2964 /*
2965 * Nfs Version 3 commit rpc
2966 */
2967 int
2968 nfs_commit(struct vnode *vp, off_t offset, uint32_t cnt, struct lwp *l)
2969 {
2970 char *cp;
2971 u_int32_t *tl;
2972 int32_t t1, t2;
2973 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2974 char *bpos, *dpos, *cp2;
2975 int error = 0, wccflag = NFSV3_WCCRATTR;
2976 struct mbuf *mreq, *mrep, *md, *mb;
2977 struct nfsnode *np;
2978
2979 KASSERT(NFS_ISV3(vp));
2980
2981 #ifdef NFS_DEBUG_COMMIT
2982 printf("commit %lu - %lu\n", (unsigned long)offset,
2983 (unsigned long)(offset + cnt));
2984 #endif
2985
2986 mutex_enter(&nmp->nm_lock);
2987 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) {
2988 mutex_exit(&nmp->nm_lock);
2989 return (0);
2990 }
2991 mutex_exit(&nmp->nm_lock);
2992 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2993 np = VTONFS(vp);
2994 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1));
2995 nfsm_fhtom(np, 1);
2996 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2997 txdr_hyper(offset, tl);
2998 tl += 2;
2999 *tl = txdr_unsigned(cnt);
3000 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3001 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
3002 if (!error) {
3003 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
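/*
 * The reply carries the server's current write verifier.  If it
 * differs from the one cached for this mount, the server has
 * likely rebooted and lost uncommitted writes, so mark the
 * verifier stale so that callers can rewrite the affected data.
 */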
3004 mutex_enter(&nmp->nm_lock);
3005 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3006 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3007 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3008 error = NFSERR_STALEWRITEVERF;
3009 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3010 }
3011 mutex_exit(&nmp->nm_lock);
3012 }
3013 nfsm_reqdone;
3014 return (error);
3015 }
3016 #endif
3017
3018 /*
3019 * Kludge City..
3020 * - make nfs_bmap() essentially a no-op that does no translation
3021 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3022 * (Maybe I could use the process's page mapping, but I was concerned that
3023 * Kernel Write might not be enabled, and also figured copyout() would do
3024 * a lot more work than memcpy(); it also currently happens in the
3025 * context of the swapper process (2).)
3026 */
3027 int
3028 nfs_bmap(void *v)
3029 {
3030 struct vop_bmap_args /* {
3031 struct vnode *a_vp;
3032 daddr_t a_bn;
3033 struct vnode **a_vpp;
3034 daddr_t *a_bnp;
3035 int *a_runp;
3036 } */ *ap = v;
3037 struct vnode *vp = ap->a_vp;
3038 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3039
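/*
 * There is no local device to translate to; the shift below simply
 * rescales the block number from filesystem blocks (mnt_fs_bshift)
 * to DEV_BSIZE-sized units (mnt_dev_bshift) for the genfs/UVM code.
 */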
3040 if (ap->a_vpp != NULL)
3041 *ap->a_vpp = vp;
3042 if (ap->a_bnp != NULL)
3043 *ap->a_bnp = ap->a_bn << bshift;
3044 if (ap->a_runp != NULL)
3045 *ap->a_runp = 1024 * 1024; /* XXX */
3046 return (0);
3047 }
3048
3049 /*
3050 * Strategy routine.
3051 * For async requests when nfsiod(s) are running, queue the request by
3052 * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
3053 * request.
3054 */
3055 int
3056 nfs_strategy(void *v)
3057 {
3058 struct vop_strategy_args *ap = v;
3059 struct buf *bp = ap->a_bp;
3060 int error = 0;
3061
3062 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3063 panic("nfs physio/async");
3064
3065 /*
3066 * If the op is asynchronous and an i/o daemon is waiting,
3067 * queue the request, wake it up and wait for completion;
3068 * otherwise just do it ourselves.
3069 */
3070 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3071 error = nfs_doio(bp);
3072 return (error);
3073 }
3074
3075 /*
3076 * fsync vnode op. Just call nfs_flush() with commit == 1.
3077 */
3078 /* ARGSUSED */
3079 int
3080 nfs_fsync(void *v)
3081 {
3082 struct vop_fsync_args /* {
3083 struct vnodeop_desc *a_desc;
3084 struct vnode * a_vp;
3085 kauth_cred_t a_cred;
3086 int a_flags;
3087 off_t a_offlo;
3088 off_t a_offhi;
3089 struct lwp * a_l;
3090 } */ *ap = v;
3091
3092 struct vnode *vp = ap->a_vp;
3093
3094 if (vp->v_type != VREG)
3095 return 0;
3096
3097 return (nfs_flush(vp, ap->a_cred,
3098 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, curlwp, 1));
3099 }
3100
3101 /*
3102 * Flush all the data associated with a vnode.
3103 */
3104 int
3105 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3106 int commit)
3107 {
3108 struct nfsnode *np = VTONFS(vp);
3109 int error;
3110 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3111 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3112
3113 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
3114 error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3115 if (np->n_flag & NWRITEERR) {
3116 error = np->n_error;
3117 np->n_flag &= ~NWRITEERR;
3118 }
3119 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3120 return (error);
3121 }
3122
3123 /*
3124 * Return POSIX pathconf information applicable to nfs.
3125 *
3126 * N.B. The NFS V2 protocol doesn't support this RPC.
3127 */
3128 /* ARGSUSED */
3129 int
3130 nfs_pathconf(void *v)
3131 {
3132 struct vop_pathconf_args /* {
3133 struct vnode *a_vp;
3134 int a_name;
3135 register_t *a_retval;
3136 } */ *ap = v;
3137 struct nfsv3_pathconf *pcp;
3138 struct vnode *vp = ap->a_vp;
3139 struct mbuf *mreq, *mrep, *md, *mb;
3140 int32_t t1, t2;
3141 u_int32_t *tl;
3142 char *bpos, *dpos, *cp, *cp2;
3143 int error = 0, attrflag;
3144 #ifndef NFS_V2_ONLY
3145 struct nfsmount *nmp;
3146 unsigned int l;
3147 u_int64_t maxsize;
3148 #endif
3149 const int v3 = NFS_ISV3(vp);
3150 struct nfsnode *np = VTONFS(vp);
3151
3152 switch (ap->a_name) {
3153 /* Names that can be resolved locally. */
3154 case _PC_PIPE_BUF:
3155 *ap->a_retval = PIPE_BUF;
3156 break;
3157 case _PC_SYNC_IO:
3158 *ap->a_retval = 1;
3159 break;
3160 /* Names that cannot be resolved locally; do an RPC, if possible. */
3161 case _PC_LINK_MAX:
3162 case _PC_NAME_MAX:
3163 case _PC_CHOWN_RESTRICTED:
3164 case _PC_NO_TRUNC:
3165 if (!v3) {
3166 error = EINVAL;
3167 break;
3168 }
3169 nfsstats.rpccnt[NFSPROC_PATHCONF]++;
3170 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1));
3171 nfsm_fhtom(np, 1);
3172 nfsm_request(np, NFSPROC_PATHCONF,
3173 curlwp, curlwp->l_cred); /* XXX */
3174 nfsm_postop_attr(vp, attrflag, 0);
3175 if (!error) {
3176 nfsm_dissect(pcp, struct nfsv3_pathconf *,
3177 NFSX_V3PATHCONF);
3178 switch (ap->a_name) {
3179 case _PC_LINK_MAX:
3180 *ap->a_retval =
3181 fxdr_unsigned(register_t, pcp->pc_linkmax);
3182 break;
3183 case _PC_NAME_MAX:
3184 *ap->a_retval =
3185 fxdr_unsigned(register_t, pcp->pc_namemax);
3186 break;
3187 case _PC_CHOWN_RESTRICTED:
3188 *ap->a_retval =
3189 (pcp->pc_chownrestricted == nfs_true);
3190 break;
3191 case _PC_NO_TRUNC:
3192 *ap->a_retval =
3193 (pcp->pc_notrunc == nfs_true);
3194 break;
3195 }
3196 }
3197 nfsm_reqdone;
3198 break;
3199 case _PC_FILESIZEBITS:
3200 #ifndef NFS_V2_ONLY
3201 if (v3) {
3202 nmp = VFSTONFS(vp->v_mount);
3203 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
3204 if ((error = nfs_fsinfo(nmp, vp,
3205 curlwp->l_cred, curlwp)) != 0) /* XXX */
3206 break;
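/*
 * Count the significant bits in the maximum file size; the
 * extra 1 is the sign bit, since _PC_FILESIZEBITS is the
 * number of bits needed to represent the size as a signed
 * value.
 */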
3207 for (l = 0, maxsize = nmp->nm_maxfilesize;
3208 (maxsize >> l) > 0; l++)
3209 ;
3210 *ap->a_retval = l + 1;
3211 } else
3212 #endif
3213 {
3214 *ap->a_retval = 32; /* NFS V2 limitation */
3215 }
3216 break;
3217 default:
3218 error = genfs_pathconf(ap);
3219 break;
3220 }
3221
3222 return (error);
3223 }
3224
3225 /*
3226 * NFS advisory byte-level locks.
3227 */
3228 int
3229 nfs_advlock(void *v)
3230 {
3231 struct vop_advlock_args /* {
3232 struct vnode *a_vp;
3233 void *a_id;
3234 int a_op;
3235 struct flock *a_fl;
3236 int a_flags;
3237 } */ *ap = v;
3238 struct nfsnode *np = VTONFS(ap->a_vp);
3239
3240 return lf_advlock(ap, &np->n_lockf, np->n_size);
3241 }
3242
3243 /*
3244 * Print out the contents of an nfsnode.
3245 */
3246 int
3247 nfs_print(void *v)
3248 {
3249 struct vop_print_args /* {
3250 struct vnode *a_vp;
3251 } */ *ap = v;
3252 struct vnode *vp = ap->a_vp;
3253 struct nfsnode *np = VTONFS(vp);
3254
3255 printf("tag VT_NFS, fileid %lld fsid 0x%llx",
3256 (unsigned long long)np->n_vattr->va_fileid,
3257 (unsigned long long)np->n_vattr->va_fsid);
3258 if (vp->v_type == VFIFO)
3259 VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v);
3260 printf("\n");
3261 return (0);
3262 }
3263
3264 /*
3265 * nfs unlock wrapper.
3266 */
3267 int
3268 nfs_unlock(void *v)
3269 {
3270 struct vop_unlock_args /* {
3271 struct vnode *a_vp;
3272 int a_flags;
3273 } */ *ap = v;
3274 struct vnode *vp = ap->a_vp;
3275
3276 /*
3277 * VOP_UNLOCK can be called by nfs_loadattrcache
3278 * with v_data == 0.
3279 */
3280 if (VTONFS(vp)) {
3281 nfs_delayedtruncate(vp);
3282 }
3283
3284 return genfs_unlock(v);
3285 }
3286
3287 /*
3288 * nfs special file access vnode op.
3289 * Essentially just get vattr and then imitate iaccess() since the device is
3290 * local to the client.
3291 */
3292 int
3293 nfsspec_access(void *v)
3294 {
3295 struct vop_access_args /* {
3296 struct vnode *a_vp;
3297 accmode_t a_accmode;
3298 kauth_cred_t a_cred;
3299 struct lwp *a_l;
3300 } */ *ap = v;
3301 struct vattr va;
3302 struct vnode *vp = ap->a_vp;
3303 int error;
3304
3305 error = VOP_GETATTR(vp, &va, ap->a_cred);
3306 if (error)
3307 return (error);
3308
3309 /*
3310 * Disallow write attempts on filesystems mounted read-only;
3311 * unless the file is a socket, fifo, or a block or character
3312 * device resident on the filesystem.
3313 */
3314 if ((ap->a_accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3315 switch (vp->v_type) {
3316 case VREG:
3317 case VDIR:
3318 case VLNK:
3319 return (EROFS);
3320 default:
3321 break;
3322 }
3323 }
3324
3325 return kauth_authorize_vnode(ap->a_cred, KAUTH_ACCESS_ACTION(
3326 ap->a_accmode, va.va_type, va.va_mode), vp, NULL, genfs_can_access(
3327 vp, ap->a_cred, va.va_uid, va.va_gid, va.va_mode, NULL,
3328 ap->a_accmode));
3329 }
3330
3331 /*
3332 * Read wrapper for special devices.
3333 */
3334 int
3335 nfsspec_read(void *v)
3336 {
3337 struct vop_read_args /* {
3338 struct vnode *a_vp;
3339 struct uio *a_uio;
3340 int a_ioflag;
3341 kauth_cred_t a_cred;
3342 } */ *ap = v;
3343 struct nfsnode *np = VTONFS(ap->a_vp);
3344
3345 /*
3346 * Set access flag.
3347 */
3348 np->n_flag |= NACC;
3349 getnanotime(&np->n_atim);
3350 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3351 }
3352
3353 /*
3354 * Write wrapper for special devices.
3355 */
3356 int
3357 nfsspec_write(void *v)
3358 {
3359 struct vop_write_args /* {
3360 struct vnode *a_vp;
3361 struct uio *a_uio;
3362 int a_ioflag;
3363 kauth_cred_t a_cred;
3364 } */ *ap = v;
3365 struct nfsnode *np = VTONFS(ap->a_vp);
3366
3367 /*
3368 * Set update flag.
3369 */
3370 np->n_flag |= NUPD;
3371 getnanotime(&np->n_mtim);
3372 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3373 }
3374
3375 /*
3376 * Close wrapper for special devices.
3377 *
3378 * Update the times on the nfsnode then do device close.
3379 */
3380 int
3381 nfsspec_close(void *v)
3382 {
3383 struct vop_close_args /* {
3384 struct vnode *a_vp;
3385 int a_fflag;
3386 kauth_cred_t a_cred;
3387 struct lwp *a_l;
3388 } */ *ap = v;
3389 struct vnode *vp = ap->a_vp;
3390 struct nfsnode *np = VTONFS(vp);
3391 struct vattr vattr;
3392
3393 if (np->n_flag & (NACC | NUPD)) {
3394 np->n_flag |= NCHG;
3395 if (vrefcnt(vp) == 1 &&
3396 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3397 vattr_null(&vattr);
3398 if (np->n_flag & NACC)
3399 vattr.va_atime = np->n_atim;
3400 if (np->n_flag & NUPD)
3401 vattr.va_mtime = np->n_mtim;
3402 (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3403 }
3404 }
3405 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3406 }
3407
3408 /*
3409 * Read wrapper for fifos.
3410 */
3411 int
3412 nfsfifo_read(void *v)
3413 {
3414 struct vop_read_args /* {
3415 struct vnode *a_vp;
3416 struct uio *a_uio;
3417 int a_ioflag;
3418 kauth_cred_t a_cred;
3419 } */ *ap = v;
3420 struct nfsnode *np = VTONFS(ap->a_vp);
3421
3422 /*
3423 * Set access flag.
3424 */
3425 np->n_flag |= NACC;
3426 getnanotime(&np->n_atim);
3427 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3428 }
3429
3430 /*
3431 * Write wrapper for fifos.
3432 */
3433 int
3434 nfsfifo_write(void *v)
3435 {
3436 struct vop_write_args /* {
3437 struct vnode *a_vp;
3438 struct uio *a_uio;
3439 int a_ioflag;
3440 kauth_cred_t a_cred;
3441 } */ *ap = v;
3442 struct nfsnode *np = VTONFS(ap->a_vp);
3443
3444 /*
3445 * Set update flag.
3446 */
3447 np->n_flag |= NUPD;
3448 getnanotime(&np->n_mtim);
3449 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3450 }
3451
3452 /*
3453 * Close wrapper for fifos.
3454 *
3455 * Update the times on the nfsnode then do fifo close.
3456 */
3457 int
3458 nfsfifo_close(void *v)
3459 {
3460 struct vop_close_args /* {
3461 struct vnode *a_vp;
3462 int a_fflag;
3463 kauth_cred_t a_cred;
3464 struct lwp *a_l;
3465 } */ *ap = v;
3466 struct vnode *vp = ap->a_vp;
3467 struct nfsnode *np = VTONFS(vp);
3468 struct vattr vattr;
3469
3470 if (np->n_flag & (NACC | NUPD)) {
3471 struct timespec ts;
3472
3473 getnanotime(&ts);
3474 if (np->n_flag & NACC)
3475 np->n_atim = ts;
3476 if (np->n_flag & NUPD)
3477 np->n_mtim = ts;
3478 np->n_flag |= NCHG;
3479 if (vrefcnt(vp) == 1 &&
3480 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3481 vattr_null(&vattr);
3482 if (np->n_flag & NACC)
3483 vattr.va_atime = np->n_atim;
3484 if (np->n_flag & NUPD)
3485 vattr.va_mtime = np->n_mtim;
3486 (void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3487 }
3488 }
3489 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3490 }
3491