1 /* $NetBSD: nfs_vnops.c,v 1.253 2007/04/29 08:41:10 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)nfs_vnops.c 8.19 (Berkeley) 7/31/95
35 */
36
37 /*
38 * vnode op calls for Sun NFS version 2 and 3
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.253 2007/04/29 08:41:10 yamt Exp $");
43
44 #include "opt_inet.h"
45 #include "opt_nfs.h"
46 #include "opt_uvmhist.h"
47
48 #include <sys/param.h>
49 #include <sys/proc.h>
50 #include <sys/kernel.h>
51 #include <sys/systm.h>
52 #include <sys/resourcevar.h>
53 #include <sys/mount.h>
54 #include <sys/buf.h>
55 #include <sys/condvar.h>
56 #include <sys/disk.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/mutex.h>
60 #include <sys/namei.h>
61 #include <sys/vnode.h>
62 #include <sys/dirent.h>
63 #include <sys/fcntl.h>
64 #include <sys/hash.h>
65 #include <sys/lockf.h>
66 #include <sys/stat.h>
67 #include <sys/unistd.h>
68 #include <sys/kauth.h>
69
70 #include <uvm/uvm_extern.h>
71 #include <uvm/uvm.h>
72
73 #include <miscfs/fifofs/fifo.h>
74 #include <miscfs/genfs/genfs.h>
75 #include <miscfs/genfs/genfs_node.h>
76 #include <miscfs/specfs/specdev.h>
77
78 #include <nfs/rpcv2.h>
79 #include <nfs/nfsproto.h>
80 #include <nfs/nfs.h>
81 #include <nfs/nfsnode.h>
82 #include <nfs/nfsmount.h>
83 #include <nfs/xdr_subs.h>
84 #include <nfs/nfsm_subs.h>
85 #include <nfs/nfs_var.h>
86
87 #include <net/if.h>
88 #include <netinet/in.h>
89 #include <netinet/in_var.h>
90
91 /*
92 * Global vfs data structures for nfs
93 */
94 int (**nfsv2_vnodeop_p) __P((void *));
95 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
96 { &vop_default_desc, vn_default_error },
97 { &vop_lookup_desc, nfs_lookup }, /* lookup */
98 { &vop_create_desc, nfs_create }, /* create */
99 { &vop_mknod_desc, nfs_mknod }, /* mknod */
100 { &vop_open_desc, nfs_open }, /* open */
101 { &vop_close_desc, nfs_close }, /* close */
102 { &vop_access_desc, nfs_access }, /* access */
103 { &vop_getattr_desc, nfs_getattr }, /* getattr */
104 { &vop_setattr_desc, nfs_setattr }, /* setattr */
105 { &vop_read_desc, nfs_read }, /* read */
106 { &vop_write_desc, nfs_write }, /* write */
107 { &vop_lease_desc, nfs_lease_check }, /* lease */
108 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
109 { &vop_ioctl_desc, nfs_ioctl }, /* ioctl */
110 { &vop_poll_desc, nfs_poll }, /* poll */
111 { &vop_kqfilter_desc, nfs_kqfilter }, /* kqfilter */
112 { &vop_revoke_desc, nfs_revoke }, /* revoke */
113 { &vop_mmap_desc, nfs_mmap }, /* mmap */
114 { &vop_fsync_desc, nfs_fsync }, /* fsync */
115 { &vop_seek_desc, nfs_seek }, /* seek */
116 { &vop_remove_desc, nfs_remove }, /* remove */
117 { &vop_link_desc, nfs_link }, /* link */
118 { &vop_rename_desc, nfs_rename }, /* rename */
119 { &vop_mkdir_desc, nfs_mkdir }, /* mkdir */
120 { &vop_rmdir_desc, nfs_rmdir }, /* rmdir */
121 { &vop_symlink_desc, nfs_symlink }, /* symlink */
122 { &vop_readdir_desc, nfs_readdir }, /* readdir */
123 { &vop_readlink_desc, nfs_readlink }, /* readlink */
124 { &vop_abortop_desc, nfs_abortop }, /* abortop */
125 { &vop_inactive_desc, nfs_inactive }, /* inactive */
126 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
127 { &vop_lock_desc, nfs_lock }, /* lock */
128 { &vop_unlock_desc, nfs_unlock }, /* unlock */
129 { &vop_bmap_desc, nfs_bmap }, /* bmap */
130 { &vop_strategy_desc, nfs_strategy }, /* strategy */
131 { &vop_print_desc, nfs_print }, /* print */
132 { &vop_islocked_desc, nfs_islocked }, /* islocked */
133 { &vop_pathconf_desc, nfs_pathconf }, /* pathconf */
134 { &vop_advlock_desc, nfs_advlock }, /* advlock */
135 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
136 { &vop_getpages_desc, nfs_getpages }, /* getpages */
137 { &vop_putpages_desc, genfs_putpages }, /* putpages */
138 { NULL, NULL }
139 };
140 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
141 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
142
143 /*
144 * Special device vnode ops
145 */
146 int (**spec_nfsv2nodeop_p) __P((void *));
147 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
148 { &vop_default_desc, vn_default_error },
149 { &vop_lookup_desc, spec_lookup }, /* lookup */
150 { &vop_create_desc, spec_create }, /* create */
151 { &vop_mknod_desc, spec_mknod }, /* mknod */
152 { &vop_open_desc, spec_open }, /* open */
153 { &vop_close_desc, nfsspec_close }, /* close */
154 { &vop_access_desc, nfsspec_access }, /* access */
155 { &vop_getattr_desc, nfs_getattr }, /* getattr */
156 { &vop_setattr_desc, nfs_setattr }, /* setattr */
157 { &vop_read_desc, nfsspec_read }, /* read */
158 { &vop_write_desc, nfsspec_write }, /* write */
159 { &vop_lease_desc, spec_lease_check }, /* lease */
160 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
161 { &vop_ioctl_desc, spec_ioctl }, /* ioctl */
162 { &vop_poll_desc, spec_poll }, /* poll */
163 { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */
164 { &vop_revoke_desc, spec_revoke }, /* revoke */
165 { &vop_mmap_desc, spec_mmap }, /* mmap */
166 { &vop_fsync_desc, spec_fsync }, /* fsync */
167 { &vop_seek_desc, spec_seek }, /* seek */
168 { &vop_remove_desc, spec_remove }, /* remove */
169 { &vop_link_desc, spec_link }, /* link */
170 { &vop_rename_desc, spec_rename }, /* rename */
171 { &vop_mkdir_desc, spec_mkdir }, /* mkdir */
172 { &vop_rmdir_desc, spec_rmdir }, /* rmdir */
173 { &vop_symlink_desc, spec_symlink }, /* symlink */
174 { &vop_readdir_desc, spec_readdir }, /* readdir */
175 { &vop_readlink_desc, spec_readlink }, /* readlink */
176 { &vop_abortop_desc, spec_abortop }, /* abortop */
177 { &vop_inactive_desc, nfs_inactive }, /* inactive */
178 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
179 { &vop_lock_desc, nfs_lock }, /* lock */
180 { &vop_unlock_desc, nfs_unlock }, /* unlock */
181 { &vop_bmap_desc, spec_bmap }, /* bmap */
182 { &vop_strategy_desc, spec_strategy }, /* strategy */
183 { &vop_print_desc, nfs_print }, /* print */
184 { &vop_islocked_desc, nfs_islocked }, /* islocked */
185 { &vop_pathconf_desc, spec_pathconf }, /* pathconf */
186 { &vop_advlock_desc, spec_advlock }, /* advlock */
187 { &vop_bwrite_desc, spec_bwrite }, /* bwrite */
188 { &vop_getpages_desc, spec_getpages }, /* getpages */
189 { &vop_putpages_desc, spec_putpages }, /* putpages */
190 { NULL, NULL }
191 };
192 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
193 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
194
195 int (**fifo_nfsv2nodeop_p) __P((void *));
196 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
197 { &vop_default_desc, vn_default_error },
198 { &vop_lookup_desc, fifo_lookup }, /* lookup */
199 { &vop_create_desc, fifo_create }, /* create */
200 { &vop_mknod_desc, fifo_mknod }, /* mknod */
201 { &vop_open_desc, fifo_open }, /* open */
202 { &vop_close_desc, nfsfifo_close }, /* close */
203 { &vop_access_desc, nfsspec_access }, /* access */
204 { &vop_getattr_desc, nfs_getattr }, /* getattr */
205 { &vop_setattr_desc, nfs_setattr }, /* setattr */
206 { &vop_read_desc, nfsfifo_read }, /* read */
207 { &vop_write_desc, nfsfifo_write }, /* write */
208 { &vop_lease_desc, fifo_lease_check }, /* lease */
209 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
210 { &vop_ioctl_desc, fifo_ioctl }, /* ioctl */
211 { &vop_poll_desc, fifo_poll }, /* poll */
212 { &vop_kqfilter_desc, fifo_kqfilter }, /* kqfilter */
213 { &vop_revoke_desc, fifo_revoke }, /* revoke */
214 { &vop_mmap_desc, fifo_mmap }, /* mmap */
215 { &vop_fsync_desc, nfs_fsync }, /* fsync */
216 { &vop_seek_desc, fifo_seek }, /* seek */
217 { &vop_remove_desc, fifo_remove }, /* remove */
218 { &vop_link_desc, fifo_link }, /* link */
219 { &vop_rename_desc, fifo_rename }, /* rename */
220 { &vop_mkdir_desc, fifo_mkdir }, /* mkdir */
221 { &vop_rmdir_desc, fifo_rmdir }, /* rmdir */
222 { &vop_symlink_desc, fifo_symlink }, /* symlink */
223 { &vop_readdir_desc, fifo_readdir }, /* readdir */
224 { &vop_readlink_desc, fifo_readlink }, /* readlink */
225 { &vop_abortop_desc, fifo_abortop }, /* abortop */
226 { &vop_inactive_desc, nfs_inactive }, /* inactive */
227 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
228 { &vop_lock_desc, nfs_lock }, /* lock */
229 { &vop_unlock_desc, nfs_unlock }, /* unlock */
230 { &vop_bmap_desc, fifo_bmap }, /* bmap */
231 { &vop_strategy_desc, genfs_badop }, /* strategy */
232 { &vop_print_desc, nfs_print }, /* print */
233 { &vop_islocked_desc, nfs_islocked }, /* islocked */
234 { &vop_pathconf_desc, fifo_pathconf }, /* pathconf */
235 { &vop_advlock_desc, fifo_advlock }, /* advlock */
236 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
237 { &vop_putpages_desc, fifo_putpages }, /* putpages */
238 { NULL, NULL }
239 };
240 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
241 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
242
243 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
244 size_t, kauth_cred_t, struct lwp *);
245 static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);
246
247 /*
248 * Global variables
249 */
250 extern u_int32_t nfs_true, nfs_false;
251 extern u_int32_t nfs_xdrneg1;
252 extern const nfstype nfsv3_type[9];
253
254 int nfs_numasync = 0;
255 #define DIRHDSIZ _DIRENT_NAMEOFF(dp)
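/*
 * UIO_ADVANCE: step a uio forward by siz bytes within its current iovec,
 * shrinking uio_resid and the iovec length and advancing iov_base.
 */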
256 #define UIO_ADVANCE(uio, siz) \
257 (void)((uio)->uio_resid -= (siz), \
258 (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
259 (uio)->uio_iov->iov_len -= (siz))
260
261 static void nfs_cache_enter(struct vnode *, struct vnode *,
262 struct componentname *);
263
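/*
 * Enter a lookup result into the name cache. Record the ctime of the
 * looked-up node and, if not already set, the directory's mtime, so that
 * nfs_lookup() can later detect whether the cached entry has gone stale.
 */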
264 static void
265 nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
266 struct componentname *cnp)
267 {
268 struct nfsnode *dnp = VTONFS(dvp);
269
270 if (vp != NULL) {
271 struct nfsnode *np = VTONFS(vp);
272
273 np->n_ctime = np->n_vattr->va_ctime.tv_sec;
274 }
275
276 if (!timespecisset(&dnp->n_nctime))
277 dnp->n_nctime = dnp->n_vattr->va_mtime;
278
279 cache_enter(dvp, vp, cnp);
280 }
281
282 /*
283 * nfs null call from vfs.
284 */
285 int
286 nfs_null(vp, cred, l)
287 struct vnode *vp;
288 kauth_cred_t cred;
289 struct lwp *l;
290 {
291 char *bpos, *dpos;
292 int error = 0;
293 struct mbuf *mreq, *mrep, *md, *mb;
294 struct nfsnode *np = VTONFS(vp);
295
296 nfsm_reqhead(np, NFSPROC_NULL, 0);
297 nfsm_request(np, NFSPROC_NULL, l, cred);
298 nfsm_reqdone;
299 return (error);
300 }
301
302 /*
303 * nfs access vnode op.
304 * For nfs version 2, just return ok. File accesses may fail later.
305 * For nfs version 3, use the access rpc to check accessibility. If file modes
306 * are changed on the server, accesses might still fail later.
307 */
308 int
309 nfs_access(v)
310 void *v;
311 {
312 struct vop_access_args /* {
313 struct vnode *a_vp;
314 int a_mode;
315 kauth_cred_t a_cred;
316 struct lwp *a_l;
317 } */ *ap = v;
318 struct vnode *vp = ap->a_vp;
319 #ifndef NFS_V2_ONLY
320 u_int32_t *tl;
321 char *cp;
322 int32_t t1, t2;
323 char *bpos, *dpos, *cp2;
324 int error = 0, attrflag;
325 struct mbuf *mreq, *mrep, *md, *mb;
326 u_int32_t mode, rmode;
327 const int v3 = NFS_ISV3(vp);
328 #endif
329 int cachevalid;
330 struct nfsnode *np = VTONFS(vp);
331 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
332
333 cachevalid = (np->n_accstamp != -1 &&
334 (time_uptime - np->n_accstamp) < NFS_ATTRTIMEO(nmp, np) &&
335 np->n_accuid == kauth_cred_geteuid(ap->a_cred));
336
337 /*
338 * Check access cache first. If this request has been made for this
339 * uid shortly before, use the cached result.
340 */
341 if (cachevalid) {
342 if (!np->n_accerror) {
343 if ((np->n_accmode & ap->a_mode) == ap->a_mode)
344 return np->n_accerror;
345 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode)
346 return np->n_accerror;
347 }
348
349 #ifndef NFS_V2_ONLY
350 /*
351 * For nfs v3, do an access rpc, otherwise you are stuck emulating
352 * ufs_access() locally using the vattr. This may not be correct,
353 * since the server may apply other access criteria such as
354 * client uid-->server uid mapping that we do not know about, but
355 * this is better than just returning anything that is lying about
356 * in the cache.
357 */
358 if (v3) {
359 nfsstats.rpccnt[NFSPROC_ACCESS]++;
360 nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
361 nfsm_fhtom(np, v3);
362 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
363 if (ap->a_mode & VREAD)
364 mode = NFSV3ACCESS_READ;
365 else
366 mode = 0;
367 if (vp->v_type != VDIR) {
368 if (ap->a_mode & VWRITE)
369 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
370 if (ap->a_mode & VEXEC)
371 mode |= NFSV3ACCESS_EXECUTE;
372 } else {
373 if (ap->a_mode & VWRITE)
374 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
375 NFSV3ACCESS_DELETE);
376 if (ap->a_mode & VEXEC)
377 mode |= NFSV3ACCESS_LOOKUP;
378 }
379 *tl = txdr_unsigned(mode);
380 nfsm_request(np, NFSPROC_ACCESS, ap->a_l, ap->a_cred);
381 nfsm_postop_attr(vp, attrflag, 0);
382 if (!error) {
383 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
384 rmode = fxdr_unsigned(u_int32_t, *tl);
385 /*
386 * The NFS V3 spec does not clarify whether or not
387 * the returned access bits can be a superset of
388 * the ones requested, so...
389 */
390 if ((rmode & mode) != mode)
391 error = EACCES;
392 }
393 nfsm_reqdone;
394 } else
395 #endif
396 return (nfsspec_access(ap));
397 #ifndef NFS_V2_ONLY
398 /*
399 * Disallow write attempts on filesystems mounted read-only;
400 * unless the file is a socket, fifo, or a block or character
401 * device resident on the filesystem.
402 */
403 if (!error && (ap->a_mode & VWRITE) &&
404 (vp->v_mount->mnt_flag & MNT_RDONLY)) {
405 switch (vp->v_type) {
406 case VREG:
407 case VDIR:
408 case VLNK:
409 error = EROFS;
410 default:
411 break;
412 }
413 }
414
415 if (!error || error == EACCES) {
416 /*
417 * If we got the same result as for a previous,
418 * different request, OR it in. Don't update
419 * the timestamp in that case.
420 */
421 if (cachevalid && np->n_accstamp != -1 &&
422 error == np->n_accerror) {
423 if (!error)
424 np->n_accmode |= ap->a_mode;
425 else if ((np->n_accmode & ap->a_mode) == ap->a_mode)
426 np->n_accmode = ap->a_mode;
427 } else {
428 np->n_accstamp = time_uptime;
429 np->n_accuid = kauth_cred_geteuid(ap->a_cred);
430 np->n_accmode = ap->a_mode;
431 np->n_accerror = error;
432 }
433 }
434
435 return (error);
436 #endif
437 }
438
439 /*
440 * nfs open vnode op
441 * Check to see if the type is ok
442 * and that deletion is not in progress.
443 * For paged in text files, you will need to flush the page cache
444 * if consistency is lost.
445 */
446 /* ARGSUSED */
447 int
448 nfs_open(v)
449 void *v;
450 {
451 struct vop_open_args /* {
452 struct vnode *a_vp;
453 int a_mode;
454 kauth_cred_t a_cred;
455 struct lwp *a_l;
456 } */ *ap = v;
457 struct vnode *vp = ap->a_vp;
458 struct nfsnode *np = VTONFS(vp);
459 int error;
460
461 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
462 return (EACCES);
463 }
464
465 if (ap->a_mode & FREAD) {
466 if (np->n_rcred != NULL)
467 kauth_cred_free(np->n_rcred);
468 np->n_rcred = ap->a_cred;
469 kauth_cred_hold(np->n_rcred);
470 }
471 if (ap->a_mode & FWRITE) {
472 if (np->n_wcred != NULL)
473 kauth_cred_free(np->n_wcred);
474 np->n_wcred = ap->a_cred;
475 kauth_cred_hold(np->n_wcred);
476 }
477
478 error = nfs_flushstalebuf(vp, ap->a_cred, ap->a_l, 0);
479 if (error)
480 return error;
481
482 NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */
483
484 return (0);
485 }
486
487 /*
488 * nfs close vnode op
489 * What an NFS client should do upon close after writing is a debatable issue.
490 * Most NFS clients push delayed writes to the server upon close, basically for
491 * two reasons:
492 * 1 - So that any write errors may be reported back to the client process
493 * doing the close system call. By far the two most likely errors are
494 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
495 * 2 - To put a worst case upper bound on cache inconsistency between
496 * multiple clients for the file.
497 * There is also a consistency problem for Version 2 of the protocol w.r.t.
498 * not being able to tell if other clients are writing a file concurrently,
499 * since there is no way of knowing if the changed modify time in the reply
500 * is only due to the write for this client.
501 * (NFS Version 3 provides weak cache consistency data in the reply that
502 * should be sufficient to detect and handle this case.)
503 *
504 * The current code does the following:
505 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
506 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
507 * or commit them (this satisfies 1 and 2 except for the
508 * case where the server crashes after this close but
509 * before the commit RPC, which is felt to be "good
510 * enough". Changing the last argument to nfs_flush() to
511 * a 1 would force a commit operation, if it is felt a
512 * commit is necessary now.)
513 */
514 /* ARGSUSED */
515 int
516 nfs_close(v)
517 void *v;
518 {
519 struct vop_close_args /* {
520 struct vnodeop_desc *a_desc;
521 struct vnode *a_vp;
522 int a_fflag;
523 kauth_cred_t a_cred;
524 struct lwp *a_l;
525 } */ *ap = v;
526 struct vnode *vp = ap->a_vp;
527 struct nfsnode *np = VTONFS(vp);
528 int error = 0;
529 UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);
530
531 if (vp->v_type == VREG) {
532 if (np->n_flag & NMODIFIED) {
533 #ifndef NFS_V2_ONLY
534 if (NFS_ISV3(vp)) {
535 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_l, 0);
536 np->n_flag &= ~NMODIFIED;
537 } else
538 #endif
539 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_l, 1);
540 NFS_INVALIDATE_ATTRCACHE(np);
541 }
542 if (np->n_flag & NWRITEERR) {
543 np->n_flag &= ~NWRITEERR;
544 error = np->n_error;
545 }
546 }
547 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
548 return (error);
549 }
550
551 /*
552 * nfs getattr call from vfs.
553 */
554 int
555 nfs_getattr(v)
556 void *v;
557 {
558 struct vop_getattr_args /* {
559 struct vnode *a_vp;
560 struct vattr *a_vap;
561 kauth_cred_t a_cred;
562 struct lwp *a_l;
563 } */ *ap = v;
564 struct vnode *vp = ap->a_vp;
565 struct nfsnode *np = VTONFS(vp);
566 char *cp;
567 u_int32_t *tl;
568 int32_t t1, t2;
569 char *bpos, *dpos;
570 int error = 0;
571 struct mbuf *mreq, *mrep, *md, *mb;
572 const int v3 = NFS_ISV3(vp);
573
574 /*
575 * Update local times for special files.
576 */
577 if (np->n_flag & (NACC | NUPD))
578 np->n_flag |= NCHG;
579
580 /*
581 * if we have delayed truncation, do it now.
582 */
583 nfs_delayedtruncate(vp);
584
585 /*
586 * First look in the cache.
587 */
588 if (nfs_getattrcache(vp, ap->a_vap) == 0)
589 return (0);
590 nfsstats.rpccnt[NFSPROC_GETATTR]++;
591 nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
592 nfsm_fhtom(np, v3);
593 nfsm_request(np, NFSPROC_GETATTR, ap->a_l, ap->a_cred);
594 if (!error) {
595 nfsm_loadattr(vp, ap->a_vap, 0);
596 if (vp->v_type == VDIR &&
597 ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
598 ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
599 }
600 nfsm_reqdone;
601 return (error);
602 }
603
604 /*
605 * nfs setattr call.
606 */
607 int
608 nfs_setattr(v)
609 void *v;
610 {
611 struct vop_setattr_args /* {
612 struct vnodeop_desc *a_desc;
613 struct vnode *a_vp;
614 struct vattr *a_vap;
615 kauth_cred_t a_cred;
616 struct lwp *a_l;
617 } */ *ap = v;
618 struct vnode *vp = ap->a_vp;
619 struct nfsnode *np = VTONFS(vp);
620 struct vattr *vap = ap->a_vap;
621 int error = 0;
622 u_quad_t tsize = 0;
623
624 /*
625 * Setting of flags is not supported.
626 */
627 if (vap->va_flags != VNOVAL)
628 return (EOPNOTSUPP);
629
630 /*
631 * Disallow write attempts if the filesystem is mounted read-only.
632 */
633 if ((vap->va_uid != (uid_t)VNOVAL ||
634 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
635 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
636 (vp->v_mount->mnt_flag & MNT_RDONLY))
637 return (EROFS);
638 if (vap->va_size != VNOVAL) {
639 switch (vp->v_type) {
640 case VDIR:
641 return (EISDIR);
642 case VCHR:
643 case VBLK:
644 case VSOCK:
645 case VFIFO:
646 if (vap->va_mtime.tv_sec == VNOVAL &&
647 vap->va_atime.tv_sec == VNOVAL &&
648 vap->va_mode == (mode_t)VNOVAL &&
649 vap->va_uid == (uid_t)VNOVAL &&
650 vap->va_gid == (gid_t)VNOVAL)
651 return (0);
652 vap->va_size = VNOVAL;
653 break;
654 default:
655 /*
656 * Disallow write attempts if the filesystem is
657 * mounted read-only.
658 */
659 if (vp->v_mount->mnt_flag & MNT_RDONLY)
660 return (EROFS);
661 genfs_node_wrlock(vp);
662 uvm_vnp_setsize(vp, vap->va_size);
663 tsize = np->n_size;
664 np->n_size = vap->va_size;
665 if (vap->va_size == 0)
666 error = nfs_vinvalbuf(vp, 0,
667 ap->a_cred, ap->a_l, 1);
668 else
669 error = nfs_vinvalbuf(vp, V_SAVE,
670 ap->a_cred, ap->a_l, 1);
671 if (error) {
672 uvm_vnp_setsize(vp, tsize);
673 genfs_node_unlock(vp);
674 return (error);
675 }
676 np->n_vattr->va_size = vap->va_size;
677 }
678 } else {
679 /*
680 * flush files before setattr because a later write of
681 * cached data might change timestamps or reset sugid bits
682 */
683 if ((vap->va_mtime.tv_sec != VNOVAL ||
684 vap->va_atime.tv_sec != VNOVAL ||
685 vap->va_mode != VNOVAL) &&
686 vp->v_type == VREG &&
687 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
688 ap->a_l, 1)) == EINTR)
689 return (error);
690 }
691 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_l);
692 if (vap->va_size != VNOVAL) {
693 if (error) {
694 np->n_size = np->n_vattr->va_size = tsize;
695 uvm_vnp_setsize(vp, np->n_size);
696 }
697 genfs_node_unlock(vp);
698 }
699 VN_KNOTE(vp, NOTE_ATTRIB);
700 return (error);
701 }
702
703 /*
704 * Do an nfs setattr rpc.
705 */
706 int
707 nfs_setattrrpc(vp, vap, cred, l)
708 struct vnode *vp;
709 struct vattr *vap;
710 kauth_cred_t cred;
711 struct lwp *l;
712 {
713 struct nfsv2_sattr *sp;
714 char *cp;
715 int32_t t1, t2;
716 char *bpos, *dpos;
717 u_int32_t *tl;
718 int error = 0;
719 struct mbuf *mreq, *mrep, *md, *mb;
720 const int v3 = NFS_ISV3(vp);
721 struct nfsnode *np = VTONFS(vp);
722 #ifndef NFS_V2_ONLY
723 int wccflag = NFSV3_WCCRATTR;
724 char *cp2;
725 #endif
726
727 nfsstats.rpccnt[NFSPROC_SETATTR]++;
728 nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
729 nfsm_fhtom(np, v3);
730 #ifndef NFS_V2_ONLY
731 if (v3) {
732 nfsm_v3attrbuild(vap, true);
733 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
734 *tl = nfs_false;
735 } else {
736 #endif
737 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
738 if (vap->va_mode == (mode_t)VNOVAL)
739 sp->sa_mode = nfs_xdrneg1;
740 else
741 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
742 if (vap->va_uid == (uid_t)VNOVAL)
743 sp->sa_uid = nfs_xdrneg1;
744 else
745 sp->sa_uid = txdr_unsigned(vap->va_uid);
746 if (vap->va_gid == (gid_t)VNOVAL)
747 sp->sa_gid = nfs_xdrneg1;
748 else
749 sp->sa_gid = txdr_unsigned(vap->va_gid);
750 sp->sa_size = txdr_unsigned(vap->va_size);
751 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
752 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
753 #ifndef NFS_V2_ONLY
754 }
755 #endif
756 nfsm_request(np, NFSPROC_SETATTR, l, cred);
757 #ifndef NFS_V2_ONLY
758 if (v3) {
759 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
760 } else
761 #endif
762 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
763 nfsm_reqdone;
764 return (error);
765 }
766
767 /*
768 * nfs lookup call, one step at a time...
769 * First look in cache
770 * If not found, unlock the directory nfsnode and do the rpc
771 *
772 * This code is full of lock/unlock statements and checks, because
773 * we continue after cache_lookup has finished (we need to check
774 * with the attr cache and do an rpc if it has timed out). This means
775 * that the locking effects of cache_lookup have to be taken into
776 * account.
777 */
778 int
779 nfs_lookup(v)
780 void *v;
781 {
782 struct vop_lookup_args /* {
783 struct vnodeop_desc *a_desc;
784 struct vnode *a_dvp;
785 struct vnode **a_vpp;
786 struct componentname *a_cnp;
787 } */ *ap = v;
788 struct componentname *cnp = ap->a_cnp;
789 struct vnode *dvp = ap->a_dvp;
790 struct vnode **vpp = ap->a_vpp;
791 int flags;
792 struct vnode *newvp;
793 u_int32_t *tl;
794 char *cp;
795 int32_t t1, t2;
796 char *bpos, *dpos, *cp2;
797 struct mbuf *mreq, *mrep, *md, *mb;
798 long len;
799 nfsfh_t *fhp;
800 struct nfsnode *np;
801 int error = 0, attrflag, fhsize;
802 const int v3 = NFS_ISV3(dvp);
803
804 flags = cnp->cn_flags;
805
806 *vpp = NULLVP;
807 newvp = NULLVP;
808 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
809 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
810 return (EROFS);
811 if (dvp->v_type != VDIR)
812 return (ENOTDIR);
813
814 /*
815 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
816 */
817 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
818 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_lwp);
819 if (error)
820 return error;
821 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
822 return EISDIR;
823 VREF(dvp);
824 *vpp = dvp;
825 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
826 cnp->cn_flags |= SAVENAME;
827 return 0;
828 }
829
830 np = VTONFS(dvp);
831
832 /*
833 * Before tediously performing a linear scan of the directory,
834 * check the name cache to see if the directory/name pair
835 * we are looking for is known already.
836 * If the directory/name pair is found in the name cache,
837 * we have to ensure the directory has not changed from
838 * the time the cache entry has been created. If it has,
839 * the cache entry has to be ignored.
840 */
841 error = cache_lookup_raw(dvp, vpp, cnp);
842 KASSERT(dvp != *vpp);
843 if (error >= 0) {
844 struct vattr vattr;
845 int err2;
846
847 if (error && error != ENOENT) {
848 *vpp = NULLVP;
849 return error;
850 }
851
852 err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_lwp);
853 if (err2 != 0) {
854 if (error == 0)
855 vrele(*vpp);
856 *vpp = NULLVP;
857 return err2;
858 }
859
860 if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred,
861 cnp->cn_lwp) || timespeccmp(&vattr.va_mtime,
862 &VTONFS(dvp)->n_nctime, !=)) {
863 if (error == 0) {
864 vrele(*vpp);
865 *vpp = NULLVP;
866 }
867 cache_purge1(dvp, NULL, PURGE_CHILDREN);
868 timespecclear(&np->n_nctime);
869 goto dorpc;
870 }
871
872 if (error == ENOENT) {
873 goto noentry;
874 }
875
876 newvp = *vpp;
877 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_lwp)
878 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
879 nfsstats.lookupcache_hits++;
880 if ((flags & ISDOTDOT) != 0) {
881 VOP_UNLOCK(dvp, 0);
882 }
883 error = vn_lock(newvp, LK_EXCLUSIVE);
884 if ((flags & ISDOTDOT) != 0) {
885 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
886 }
887 if (error) {
888 /* newvp has been revoked. */
889 vrele(newvp);
890 *vpp = NULL;
891 return error;
892 }
893 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
894 cnp->cn_flags |= SAVENAME;
895 KASSERT(newvp->v_type != VNON);
896 return (0);
897 }
898 cache_purge1(newvp, NULL, PURGE_PARENTS);
899 vrele(newvp);
900 *vpp = NULLVP;
901 }
902 dorpc:
903 #if 0
904 /*
905 * because nfsv3 has the same CREATE semantics as ours,
906 * we don't have to perform LOOKUPs beforehand.
907 *
908 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
909 * XXX although we have no way to know if O_EXCL is requested or not.
910 */
911
912 if (v3 && cnp->cn_nameiop == CREATE &&
913 (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
914 (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
915 cnp->cn_flags |= SAVENAME;
916 return (EJUSTRETURN);
917 }
918 #endif /* 0 */
919
920 error = 0;
921 newvp = NULLVP;
922 nfsstats.lookupcache_misses++;
923 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
924 len = cnp->cn_namelen;
925 nfsm_reqhead(np, NFSPROC_LOOKUP,
926 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
927 nfsm_fhtom(np, v3);
928 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
929 nfsm_request(np, NFSPROC_LOOKUP, cnp->cn_lwp, cnp->cn_cred);
930 if (error) {
931 nfsm_postop_attr(dvp, attrflag, 0);
932 m_freem(mrep);
933 goto nfsmout;
934 }
935 nfsm_getfh(fhp, fhsize, v3);
936
937 /*
938 * Handle RENAME case...
939 */
940 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
941 if (NFS_CMPFH(np, fhp, fhsize)) {
942 m_freem(mrep);
943 return (EISDIR);
944 }
945 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
946 if (error) {
947 m_freem(mrep);
948 return error;
949 }
950 newvp = NFSTOV(np);
951 #ifndef NFS_V2_ONLY
952 if (v3) {
953 nfsm_postop_attr(newvp, attrflag, 0);
954 nfsm_postop_attr(dvp, attrflag, 0);
955 } else
956 #endif
957 nfsm_loadattr(newvp, (struct vattr *)0, 0);
958 *vpp = newvp;
959 m_freem(mrep);
960 cnp->cn_flags |= SAVENAME;
961 goto validate;
962 }
963
964 /*
965 * The postop attr handling is duplicated for each if case,
966 * because it should be done while dvp is locked (unlocking
967 * dvp is different for each case).
968 */
969
970 if (NFS_CMPFH(np, fhp, fhsize)) {
971 /*
972 * "." lookup
973 */
974 VREF(dvp);
975 newvp = dvp;
976 #ifndef NFS_V2_ONLY
977 if (v3) {
978 nfsm_postop_attr(newvp, attrflag, 0);
979 nfsm_postop_attr(dvp, attrflag, 0);
980 } else
981 #endif
982 nfsm_loadattr(newvp, (struct vattr *)0, 0);
983 } else if (flags & ISDOTDOT) {
984 /*
985 * ".." lookup
986 */
987 VOP_UNLOCK(dvp, 0);
988 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
989 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
990 if (error) {
991 m_freem(mrep);
992 return error;
993 }
994 newvp = NFSTOV(np);
995
996 #ifndef NFS_V2_ONLY
997 if (v3) {
998 nfsm_postop_attr(newvp, attrflag, 0);
999 nfsm_postop_attr(dvp, attrflag, 0);
1000 } else
1001 #endif
1002 nfsm_loadattr(newvp, (struct vattr *)0, 0);
1003 } else {
1004 /*
1005 * Other lookups.
1006 */
1007 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
1008 if (error) {
1009 m_freem(mrep);
1010 return error;
1011 }
1012 newvp = NFSTOV(np);
1013 #ifndef NFS_V2_ONLY
1014 if (v3) {
1015 nfsm_postop_attr(newvp, attrflag, 0);
1016 nfsm_postop_attr(dvp, attrflag, 0);
1017 } else
1018 #endif
1019 nfsm_loadattr(newvp, (struct vattr *)0, 0);
1020 }
1021 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
1022 cnp->cn_flags |= SAVENAME;
1023 if ((cnp->cn_flags & MAKEENTRY) &&
1024 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
1025 nfs_cache_enter(dvp, newvp, cnp);
1026 }
1027 *vpp = newvp;
1028 nfsm_reqdone;
1029 if (error) {
1030 /*
1031 * We get here only because of errors returned by
1032 * the RPC. Otherwise we'll have returned above
1033 * (the nfsm_* macros will jump to nfsm_reqdone
1034 * on error).
1035 */
1036 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) &&
1037 cnp->cn_nameiop != CREATE) {
1038 nfs_cache_enter(dvp, NULL, cnp);
1039 }
1040 if (newvp != NULLVP) {
1041 if (newvp == dvp) {
1042 vrele(newvp);
1043 } else {
1044 vput(newvp);
1045 }
1046 }
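/*
 * Negative lookup result: if the name was not found (ENOENT) and this is
 * the last component of a CREATE or RENAME on a writable filesystem,
 * return EJUSTRETURN (with SAVENAME) so the caller can go ahead and
 * create the name; otherwise return the error as is.
 */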
1047 noentry:
1048 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
1049 (flags & ISLASTCN) && error == ENOENT) {
1050 if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
1051 error = EROFS;
1052 } else {
1053 error = EJUSTRETURN;
1054 cnp->cn_flags |= SAVENAME;
1055 }
1056 }
1057 *vpp = NULL;
1058 return error;
1059 }
1060
1061 validate:
1062 /*
1063 * make sure we have valid type and size.
1064 */
1065
1066 newvp = *vpp;
1067 if (newvp->v_type == VNON) {
1068 struct vattr vattr; /* dummy */
1069
1070 KASSERT(VTONFS(newvp)->n_attrstamp == 0);
1071 error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_lwp);
1072 if (error) {
1073 vput(newvp);
1074 *vpp = NULL;
1075 }
1076 }
1077
1078 return error;
1079 }
1080
1081 /*
1082 * nfs read call.
1083 * Just call nfs_bioread() to do the work.
1084 */
1085 int
1086 nfs_read(v)
1087 void *v;
1088 {
1089 struct vop_read_args /* {
1090 struct vnode *a_vp;
1091 struct uio *a_uio;
1092 int a_ioflag;
1093 kauth_cred_t a_cred;
1094 } */ *ap = v;
1095 struct vnode *vp = ap->a_vp;
1096
1097 if (vp->v_type != VREG)
1098 return EISDIR;
1099 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
1100 }
1101
1102 /*
1103 * nfs readlink call
1104 */
1105 int
1106 nfs_readlink(v)
1107 void *v;
1108 {
1109 struct vop_readlink_args /* {
1110 struct vnode *a_vp;
1111 struct uio *a_uio;
1112 kauth_cred_t a_cred;
1113 } */ *ap = v;
1114 struct vnode *vp = ap->a_vp;
1115 struct nfsnode *np = VTONFS(vp);
1116
1117 if (vp->v_type != VLNK)
1118 return (EPERM);
1119
1120 if (np->n_rcred != NULL) {
1121 kauth_cred_free(np->n_rcred);
1122 }
1123 np->n_rcred = ap->a_cred;
1124 kauth_cred_hold(np->n_rcred);
1125
1126 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
1127 }
1128
1129 /*
1130 * Do a readlink rpc.
1131 * Called by nfs_doio() from below the buffer cache.
1132 */
1133 int
1134 nfs_readlinkrpc(vp, uiop, cred)
1135 struct vnode *vp;
1136 struct uio *uiop;
1137 kauth_cred_t cred;
1138 {
1139 u_int32_t *tl;
1140 char *cp;
1141 int32_t t1, t2;
1142 char *bpos, *dpos, *cp2;
1143 int error = 0;
1144 uint32_t len;
1145 struct mbuf *mreq, *mrep, *md, *mb;
1146 const int v3 = NFS_ISV3(vp);
1147 struct nfsnode *np = VTONFS(vp);
1148 #ifndef NFS_V2_ONLY
1149 int attrflag;
1150 #endif
1151
1152 nfsstats.rpccnt[NFSPROC_READLINK]++;
1153 nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
1154 nfsm_fhtom(np, v3);
1155 nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
1156 #ifndef NFS_V2_ONLY
1157 if (v3)
1158 nfsm_postop_attr(vp, attrflag, 0);
1159 #endif
1160 if (!error) {
1161 #ifndef NFS_V2_ONLY
1162 if (v3) {
1163 nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
1164 len = fxdr_unsigned(uint32_t, *tl);
1165 if (len > MAXPATHLEN) {
1166 /*
1167 * this pathname is too long for us.
1168 */
1169 m_freem(mrep);
1170 /* Solaris returns EINVAL. should we follow? */
1171 error = ENAMETOOLONG;
1172 goto nfsmout;
1173 }
1174 } else
1175 #endif
1176 {
1177 nfsm_strsiz(len, NFS_MAXPATHLEN);
1178 }
1179 nfsm_mtouio(uiop, len);
1180 }
1181 nfsm_reqdone;
1182 return (error);
1183 }
1184
1185 /*
1186 * nfs read rpc call
1187 * Ditto above
1188 */
1189 int
1190 nfs_readrpc(vp, uiop)
1191 struct vnode *vp;
1192 struct uio *uiop;
1193 {
1194 u_int32_t *tl;
1195 char *cp;
1196 int32_t t1, t2;
1197 char *bpos, *dpos, *cp2;
1198 struct mbuf *mreq, *mrep, *md, *mb;
1199 struct nfsmount *nmp;
1200 int error = 0, len, retlen, tsiz, eof, byte_count;
1201 const int v3 = NFS_ISV3(vp);
1202 struct nfsnode *np = VTONFS(vp);
1203 #ifndef NFS_V2_ONLY
1204 int attrflag;
1205 #endif
1206
1207 #ifndef nolint
1208 eof = 0;
1209 #endif
1210 nmp = VFSTONFS(vp->v_mount);
1211 tsiz = uiop->uio_resid;
1212 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1213 return (EFBIG);
1214 iostat_busy(nmp->nm_stats);
1215 byte_count = 0; /* count bytes actually transferred */
1216 while (tsiz > 0) {
1217 nfsstats.rpccnt[NFSPROC_READ]++;
1218 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1219 nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1220 nfsm_fhtom(np, v3);
1221 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1222 #ifndef NFS_V2_ONLY
1223 if (v3) {
1224 txdr_hyper(uiop->uio_offset, tl);
1225 *(tl + 2) = txdr_unsigned(len);
1226 } else
1227 #endif
1228 {
1229 *tl++ = txdr_unsigned(uiop->uio_offset);
1230 *tl++ = txdr_unsigned(len);
1231 *tl = 0;
1232 }
1233 nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
1234 #ifndef NFS_V2_ONLY
1235 if (v3) {
1236 nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
1237 if (error) {
1238 m_freem(mrep);
1239 goto nfsmout;
1240 }
1241 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1242 eof = fxdr_unsigned(int, *(tl + 1));
1243 } else
1244 #endif
1245 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1246 nfsm_strsiz(retlen, nmp->nm_rsize);
1247 nfsm_mtouio(uiop, retlen);
1248 m_freem(mrep);
1249 tsiz -= retlen;
1250 byte_count += retlen;
1251 #ifndef NFS_V2_ONLY
1252 if (v3) {
1253 if (eof || retlen == 0)
1254 tsiz = 0;
1255 } else
1256 #endif
1257 if (retlen < len)
1258 tsiz = 0;
1259 }
1260 nfsmout:
1261 iostat_unbusy(nmp->nm_stats, byte_count, 1);
1262 return (error);
1263 }
1264
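/*
 * Context shared between nfs_writerpc() and nfs_writerpc_extfree():
 * tracks how many loaned mbufs are still outstanding so the writer
 * can wait for all of them to be freed before returning.
 */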
1265 struct nfs_writerpc_context {
1266 kmutex_t nwc_lock;
1267 kcondvar_t nwc_cv;
1268 int nwc_mbufcount;
1269 };
1270
1271 /*
1272 * Free an mbuf that was used to refer to protected pages during a
1273 * write RPC call. Called at splvm.
1274 */
1275 static void
1276 nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
1277 {
1278 struct nfs_writerpc_context *ctx = arg;
1279
1280 KASSERT(m != NULL);
1281 KASSERT(ctx != NULL);
1282 pool_cache_put(&mbpool_cache, m);
1283 mutex_enter(&ctx->nwc_lock);
1284 if (--ctx->nwc_mbufcount == 0) {
1285 cv_signal(&ctx->nwc_cv);
1286 }
1287 mutex_exit(&ctx->nwc_lock);
1288 }
1289
1290 /*
1291 * nfs write call
1292 */
1293 int
1294 nfs_writerpc(vp, uiop, iomode, pageprotected, stalewriteverfp)
1295 struct vnode *vp;
1296 struct uio *uiop;
1297 int *iomode;
1298 bool pageprotected;
1299 bool *stalewriteverfp;
1300 {
1301 u_int32_t *tl;
1302 char *cp;
1303 int32_t t1, t2;
1304 char *bpos, *dpos;
1305 struct mbuf *mreq, *mrep, *md, *mb;
1306 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1307 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
1308 const int v3 = NFS_ISV3(vp);
1309 int committed = NFSV3WRITE_FILESYNC;
1310 struct nfsnode *np = VTONFS(vp);
1311 struct nfs_writerpc_context ctx;
1312 int byte_count;
1313 struct lwp *l = NULL;
1314 size_t origresid;
1315 #ifndef NFS_V2_ONLY
1316 char *cp2;
1317 int rlen, commit;
1318 #endif
1319
1320 mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
1321 cv_init(&ctx.nwc_cv, "nfsmblk");
1322 ctx.nwc_mbufcount = 1;
1323
1324 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1325 panic("writerpc readonly vp %p", vp);
1326 }
1327
1328 #ifdef DIAGNOSTIC
1329 if (uiop->uio_iovcnt != 1)
1330 panic("nfs: writerpc iovcnt > 1");
1331 #endif
1332 tsiz = uiop->uio_resid;
1333 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1334 return (EFBIG);
1335 if (pageprotected) {
1336 l = curlwp;
1337 PHOLD(l);
1338 }
1339 retry:
1340 origresid = uiop->uio_resid;
1341 KASSERT(origresid == uiop->uio_iov->iov_len);
1342 iostat_busy(nmp->nm_stats);
1343 byte_count = 0; /* count of bytes actually written */
1344 while (tsiz > 0) {
1345 uint32_t datalen; /* data bytes need to be allocated in mbuf */
1346 uint32_t backup;
1347 bool stalewriteverf = false;
1348
1349 nfsstats.rpccnt[NFSPROC_WRITE]++;
1350 len = min(tsiz, nmp->nm_wsize);
1351 datalen = pageprotected ? 0 : nfsm_rndup(len);
1352 nfsm_reqhead(np, NFSPROC_WRITE,
1353 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
1354 nfsm_fhtom(np, v3);
1355 #ifndef NFS_V2_ONLY
1356 if (v3) {
1357 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1358 txdr_hyper(uiop->uio_offset, tl);
1359 tl += 2;
1360 *tl++ = txdr_unsigned(len);
1361 *tl++ = txdr_unsigned(*iomode);
1362 *tl = txdr_unsigned(len);
1363 } else
1364 #endif
1365 {
1366 u_int32_t x;
1367
1368 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1369 /* Set both "begin" and "current" to non-garbage. */
1370 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1371 *tl++ = x; /* "begin offset" */
1372 *tl++ = x; /* "current offset" */
1373 x = txdr_unsigned(len);
1374 *tl++ = x; /* total to this offset */
1375 *tl = x; /* size of this write */
1376
1377 }
1378 if (pageprotected) {
1379 /*
1380 * since we know pages can't be modified during i/o,
1381 * no need to copy them for us.
1382 */
1383 struct mbuf *m;
1384 struct iovec *iovp = uiop->uio_iov;
1385
1386 m = m_get(M_WAIT, MT_DATA);
1387 MCLAIM(m, &nfs_mowner);
1388 MEXTADD(m, iovp->iov_base, len, M_MBUF,
1389 nfs_writerpc_extfree, &ctx);
1390 m->m_flags |= M_EXT_ROMAP;
1391 m->m_len = len;
1392 mb->m_next = m;
1393 /*
1394 * no need to maintain mb and bpos here
1395 * because nobody cares about them later.
1396 */
1397 #if 0
1398 mb = m;
1399 bpos = mtod(mb, char *) + mb->m_len;
1400 #endif
1401 UIO_ADVANCE(uiop, len);
1402 uiop->uio_offset += len;
1403 mutex_enter(&ctx.nwc_lock);
1404 ctx.nwc_mbufcount++;
1405 mutex_exit(&ctx.nwc_lock);
1406 nfs_zeropad(mb, 0, nfsm_padlen(len));
1407 } else {
1408 nfsm_uiotom(uiop, len);
1409 }
1410 nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
1411 #ifndef NFS_V2_ONLY
1412 if (v3) {
1413 wccflag = NFSV3_WCCCHK;
1414 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
1415 if (!error) {
1416 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1417 + NFSX_V3WRITEVERF);
1418 rlen = fxdr_unsigned(int, *tl++);
1419 if (rlen == 0) {
1420 error = NFSERR_IO;
1421 m_freem(mrep);
1422 break;
1423 } else if (rlen < len) {
1424 backup = len - rlen;
1425 UIO_ADVANCE(uiop, -backup);
1426 uiop->uio_offset -= backup;
1427 len = rlen;
1428 }
1429 commit = fxdr_unsigned(int, *tl++);
1430
1431 /*
1432 * Return the lowest commitment level
1433 * obtained by any of the RPCs.
1434 */
1435 if (committed == NFSV3WRITE_FILESYNC)
1436 committed = commit;
1437 else if (committed == NFSV3WRITE_DATASYNC &&
1438 commit == NFSV3WRITE_UNSTABLE)
1439 committed = commit;
1440 simple_lock(&nmp->nm_slock);
1441 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
1442 memcpy(nmp->nm_writeverf, tl,
1443 NFSX_V3WRITEVERF);
1444 nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
1445 } else if ((nmp->nm_iflag &
1446 NFSMNT_STALEWRITEVERF) ||
1447 memcmp(tl, nmp->nm_writeverf,
1448 NFSX_V3WRITEVERF)) {
1449 memcpy(nmp->nm_writeverf, tl,
1450 NFSX_V3WRITEVERF);
1451 /*
1452 * note NFSMNT_STALEWRITEVERF
1453 * if we're the first thread to
1454 * notice it.
1455 */
1456 if ((nmp->nm_iflag &
1457 NFSMNT_STALEWRITEVERF) == 0) {
1458 stalewriteverf = true;
1459 nmp->nm_iflag |=
1460 NFSMNT_STALEWRITEVERF;
1461 }
1462 }
1463 simple_unlock(&nmp->nm_slock);
1464 }
1465 } else
1466 #endif
1467 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1468 if (wccflag)
1469 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
1470 m_freem(mrep);
1471 if (error)
1472 break;
1473 tsiz -= len;
1474 byte_count += len;
1475 if (stalewriteverf) {
1476 *stalewriteverfp = true;
1477 stalewriteverf = false;
1478 if (committed == NFSV3WRITE_UNSTABLE &&
1479 len != origresid) {
1480 /*
1481 * if our write requests weren't atomic but
1482 * unstable, data from previous iterations
1483 * might already have been lost by now.
1484 * In that case, we should resend it to the nfsd.
1485 */
1486 backup = origresid - tsiz;
1487 UIO_ADVANCE(uiop, -backup);
1488 uiop->uio_offset -= backup;
1489 tsiz = origresid;
1490 goto retry;
1491 }
1492 }
1493 }
1494 nfsmout:
1495 iostat_unbusy(nmp->nm_stats, byte_count, 0);
1496 if (pageprotected) {
1497 /*
1498 * wait until mbufs go away.
1499 * retransmitted mbufs can survive longer than rpc requests
1500 * themselves.
1501 */
1502 mutex_enter(&ctx.nwc_lock);
1503 ctx.nwc_mbufcount--;
1504 while (ctx.nwc_mbufcount > 0) {
1505 cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
1506 }
1507 mutex_exit(&ctx.nwc_lock);
1508 PRELE(l);
1509 }
1510 *iomode = committed;
1511 if (error)
1512 uiop->uio_resid = tsiz;
1513 return (error);
1514 }
1515
1516 /*
1517 * nfs mknod rpc
1518 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1519 * mode set to specify the file type and the size field for rdev.
1520 */
1521 int
1522 nfs_mknodrpc(dvp, vpp, cnp, vap)
1523 struct vnode *dvp;
1524 struct vnode **vpp;
1525 struct componentname *cnp;
1526 struct vattr *vap;
1527 {
1528 struct nfsv2_sattr *sp;
1529 u_int32_t *tl;
1530 char *cp;
1531 int32_t t1, t2;
1532 struct vnode *newvp = (struct vnode *)0;
1533 struct nfsnode *dnp, *np;
1534 char *cp2;
1535 char *bpos, *dpos;
1536 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1537 struct mbuf *mreq, *mrep, *md, *mb;
1538 u_int32_t rdev;
1539 const int v3 = NFS_ISV3(dvp);
1540
1541 if (vap->va_type == VCHR || vap->va_type == VBLK)
1542 rdev = txdr_unsigned(vap->va_rdev);
1543 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1544 rdev = nfs_xdrneg1;
1545 else {
1546 VOP_ABORTOP(dvp, cnp);
1547 vput(dvp);
1548 return (EOPNOTSUPP);
1549 }
1550 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1551 dnp = VTONFS(dvp);
1552 nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1553 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1554 nfsm_fhtom(dnp, v3);
1555 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1556 #ifndef NFS_V2_ONLY
1557 if (v3) {
1558 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1559 *tl++ = vtonfsv3_type(vap->va_type);
1560 nfsm_v3attrbuild(vap, false);
1561 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1562 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1563 *tl++ = txdr_unsigned(major(vap->va_rdev));
1564 *tl = txdr_unsigned(minor(vap->va_rdev));
1565 }
1566 } else
1567 #endif
1568 {
1569 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1570 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1571 sp->sa_uid = nfs_xdrneg1;
1572 sp->sa_gid = nfs_xdrneg1;
1573 sp->sa_size = rdev;
1574 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1575 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1576 }
1577 nfsm_request(dnp, NFSPROC_MKNOD, cnp->cn_lwp, cnp->cn_cred);
1578 if (!error) {
1579 nfsm_mtofh(dvp, newvp, v3, gotvp);
1580 if (!gotvp) {
1581 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1582 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp, &np);
1583 if (!error)
1584 newvp = NFSTOV(np);
1585 }
1586 }
1587 #ifndef NFS_V2_ONLY
1588 if (v3)
1589 nfsm_wcc_data(dvp, wccflag, 0, !error);
1590 #endif
1591 nfsm_reqdone;
1592 if (error) {
1593 if (newvp)
1594 vput(newvp);
1595 } else {
1596 if (cnp->cn_flags & MAKEENTRY)
1597 nfs_cache_enter(dvp, newvp, cnp);
1598 *vpp = newvp;
1599 }
1600 PNBUF_PUT(cnp->cn_pnbuf);
1601 VTONFS(dvp)->n_flag |= NMODIFIED;
1602 if (!wccflag)
1603 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1604 vput(dvp);
1605 return (error);
1606 }
1607
1608 /*
1609 * nfs mknod vop
1610 * just call nfs_mknodrpc() to do the work.
1611 */
1612 /* ARGSUSED */
1613 int
1614 nfs_mknod(v)
1615 void *v;
1616 {
1617 struct vop_mknod_args /* {
1618 struct vnode *a_dvp;
1619 struct vnode **a_vpp;
1620 struct componentname *a_cnp;
1621 struct vattr *a_vap;
1622 } */ *ap = v;
1623 struct vnode *dvp = ap->a_dvp;
1624 struct componentname *cnp = ap->a_cnp;
1625 int error;
1626
1627 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
1628 VN_KNOTE(dvp, NOTE_WRITE);
1629 if (error == 0 || error == EEXIST)
1630 cache_purge1(dvp, cnp, 0);
1631 return (error);
1632 }
1633
1634 #ifndef NFS_V2_ONLY
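/*
 * Counter used, together with the primary IPv4 address when one is
 * configured, to build the NFSv3 exclusive-create verifier.
 */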
1635 static u_long create_verf;
1636 #endif
1637 /*
1638 * nfs file create call
1639 */
1640 int
1641 nfs_create(v)
1642 void *v;
1643 {
1644 struct vop_create_args /* {
1645 struct vnode *a_dvp;
1646 struct vnode **a_vpp;
1647 struct componentname *a_cnp;
1648 struct vattr *a_vap;
1649 } */ *ap = v;
1650 struct vnode *dvp = ap->a_dvp;
1651 struct vattr *vap = ap->a_vap;
1652 struct componentname *cnp = ap->a_cnp;
1653 struct nfsv2_sattr *sp;
1654 u_int32_t *tl;
1655 char *cp;
1656 int32_t t1, t2;
1657 struct nfsnode *dnp, *np = (struct nfsnode *)0;
1658 struct vnode *newvp = (struct vnode *)0;
1659 char *bpos, *dpos, *cp2;
1660 int error, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1661 struct mbuf *mreq, *mrep, *md, *mb;
1662 const int v3 = NFS_ISV3(dvp);
1663
1664 /*
1665 * Oops, not for me..
1666 */
1667 if (vap->va_type == VSOCK)
1668 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1669
1670 KASSERT(vap->va_type == VREG);
1671
1672 #ifdef VA_EXCLUSIVE
1673 if (vap->va_vaflags & VA_EXCLUSIVE)
1674 fmode |= O_EXCL;
1675 #endif
1676 again:
1677 error = 0;
1678 nfsstats.rpccnt[NFSPROC_CREATE]++;
1679 dnp = VTONFS(dvp);
1680 nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1681 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1682 nfsm_fhtom(dnp, v3);
1683 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1684 #ifndef NFS_V2_ONLY
1685 if (v3) {
1686 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1687 if (fmode & O_EXCL) {
1688 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1689 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1690 #ifdef INET
1691 if (TAILQ_FIRST(&in_ifaddrhead))
1692 *tl++ = TAILQ_FIRST(&in_ifaddrhead)->
1693 ia_addr.sin_addr.s_addr;
1694 else
1695 *tl++ = create_verf;
1696 #else
1697 *tl++ = create_verf;
1698 #endif
1699 *tl = ++create_verf;
1700 } else {
1701 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1702 nfsm_v3attrbuild(vap, false);
1703 }
1704 } else
1705 #endif
1706 {
1707 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1708 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1709 sp->sa_uid = nfs_xdrneg1;
1710 sp->sa_gid = nfs_xdrneg1;
1711 sp->sa_size = 0;
1712 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1713 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1714 }
1715 nfsm_request(dnp, NFSPROC_CREATE, cnp->cn_lwp, cnp->cn_cred);
1716 if (!error) {
1717 nfsm_mtofh(dvp, newvp, v3, gotvp);
1718 if (!gotvp) {
1719 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1720 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp, &np);
1721 if (!error)
1722 newvp = NFSTOV(np);
1723 }
1724 }
1725 #ifndef NFS_V2_ONLY
1726 if (v3)
1727 nfsm_wcc_data(dvp, wccflag, 0, !error);
1728 #endif
1729 nfsm_reqdone;
1730 if (error) {
1731 /*
1732 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
1733 */
1734 if (v3 && (fmode & O_EXCL) && error == ENOTSUP) {
1735 fmode &= ~O_EXCL;
1736 goto again;
1737 }
1738 } else if (v3 && (fmode & O_EXCL)) {
1739 struct timespec ts;
1740
1741 getnanotime(&ts);
1742
1743 /*
1744 * make sure that we'll update timestamps as
1745 * most server implementations use them to store
1746 * the create verifier.
1747 *
1748 * XXX it's better to use TOSERVER always.
1749 */
1750
1751 if (vap->va_atime.tv_sec == VNOVAL)
1752 vap->va_atime = ts;
1753 if (vap->va_mtime.tv_sec == VNOVAL)
1754 vap->va_mtime = ts;
1755
1756 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_lwp);
1757 }
1758 if (error == 0) {
1759 if (cnp->cn_flags & MAKEENTRY)
1760 nfs_cache_enter(dvp, newvp, cnp);
1761 else
1762 cache_purge1(dvp, cnp, 0);
1763 *ap->a_vpp = newvp;
1764 } else {
1765 if (newvp)
1766 vput(newvp);
1767 if (error == EEXIST)
1768 cache_purge1(dvp, cnp, 0);
1769 }
1770 PNBUF_PUT(cnp->cn_pnbuf);
1771 VTONFS(dvp)->n_flag |= NMODIFIED;
1772 if (!wccflag)
1773 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1774 VN_KNOTE(ap->a_dvp, NOTE_WRITE);
1775 vput(dvp);
1776 return (error);
1777 }
1778
1779 /*
1780 * nfs file remove call
1781 * To try and make nfs semantics closer to ufs semantics, a file that has
1782 * other processes using the vnode is renamed instead of removed and then
1783 * removed later on the last close.
1784 * - If v_usecount > 1
1785 * If a rename is not already in the works
1786 * call nfs_sillyrename() to set it up
1787 * else
1788 * do the remove rpc
1789 */
1790 int
1791 nfs_remove(v)
1792 void *v;
1793 {
1794 struct vop_remove_args /* {
1795 struct vnodeop_desc *a_desc;
1796 struct vnode * a_dvp;
1797 struct vnode * a_vp;
1798 struct componentname * a_cnp;
1799 } */ *ap = v;
1800 struct vnode *vp = ap->a_vp;
1801 struct vnode *dvp = ap->a_dvp;
1802 struct componentname *cnp = ap->a_cnp;
1803 struct nfsnode *np = VTONFS(vp);
1804 int error = 0;
1805 struct vattr vattr;
1806
1807 #ifndef DIAGNOSTIC
1808 if ((cnp->cn_flags & HASBUF) == 0)
1809 panic("nfs_remove: no name");
1810 if (vp->v_usecount < 1)
1811 panic("nfs_remove: bad v_usecount");
1812 #endif
1813 if (vp->v_type == VDIR)
1814 error = EPERM;
1815 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1816 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_lwp) == 0 &&
1817 vattr.va_nlink > 1)) {
1818 /*
1819 * Purge the name cache so that the chance of a lookup for
1820 * the name succeeding while the remove is in progress is
1821 * minimized. Without node locking it can still happen, such
1822 * that an I/O op returns ESTALE, but since you can get that
1823 * anyway when another host removes the file, it is tolerable.
1824 */
1825 cache_purge(vp);
1826 /*
1827 * throw away biocache buffers, mainly to avoid
1828 * unnecessary delayed writes later.
1829 */
1830 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_lwp, 1);
1831 /* Do the rpc */
1832 if (error != EINTR)
1833 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1834 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp);
1835 } else if (!np->n_sillyrename)
1836 error = nfs_sillyrename(dvp, vp, cnp, false);
1837 PNBUF_PUT(cnp->cn_pnbuf);
1838 if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
1839 vattr.va_nlink == 1) {
1840 np->n_flag |= NREMOVED;
1841 }
1842 NFS_INVALIDATE_ATTRCACHE(np);
1843 VN_KNOTE(vp, NOTE_DELETE);
1844 VN_KNOTE(dvp, NOTE_WRITE);
1845 if (dvp == vp)
1846 vrele(vp);
1847 else
1848 vput(vp);
1849 vput(dvp);
1850 return (error);
1851 }
1852
1853 /*
1854 * nfs file remove rpc called from nfs_inactive
1855 */
1856 int
1857 nfs_removeit(sp)
1858 struct sillyrename *sp;
1859 {
1860
1861 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1862 (struct lwp *)0));
1863 }
1864
1865 /*
1866 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1867 */
1868 int
1869 nfs_removerpc(dvp, name, namelen, cred, l)
1870 struct vnode *dvp;
1871 const char *name;
1872 int namelen;
1873 kauth_cred_t cred;
1874 struct lwp *l;
1875 {
1876 u_int32_t *tl;
1877 char *cp;
1878 #ifndef NFS_V2_ONLY
1879 int32_t t1;
1880 char *cp2;
1881 #endif
1882 int32_t t2;
1883 char *bpos, *dpos;
1884 int error = 0, wccflag = NFSV3_WCCRATTR;
1885 struct mbuf *mreq, *mrep, *md, *mb;
1886 const int v3 = NFS_ISV3(dvp);
1887 int rexmit = 0;
1888 struct nfsnode *dnp = VTONFS(dvp);
1889
1890 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1891 nfsm_reqhead(dnp, NFSPROC_REMOVE,
1892 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1893 nfsm_fhtom(dnp, v3);
1894 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1895 nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
1896 #ifndef NFS_V2_ONLY
1897 if (v3)
1898 nfsm_wcc_data(dvp, wccflag, 0, !error);
1899 #endif
1900 nfsm_reqdone;
1901 VTONFS(dvp)->n_flag |= NMODIFIED;
1902 if (!wccflag)
1903 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1904 /*
1905 * Kludge City: if the first reply to the remove rpc is lost,
1906 * the reply to the retransmitted request will be ENOENT,
1907 * since the file was in fact removed.
1908 * Therefore, we cheat and return success.
1909 */
1910 if (rexmit && error == ENOENT)
1911 error = 0;
1912 return (error);
1913 }
1914
1915 /*
1916 * nfs file rename call
1917 */
1918 int
1919 nfs_rename(v)
1920 void *v;
1921 {
1922 struct vop_rename_args /* {
1923 struct vnode *a_fdvp;
1924 struct vnode *a_fvp;
1925 struct componentname *a_fcnp;
1926 struct vnode *a_tdvp;
1927 struct vnode *a_tvp;
1928 struct componentname *a_tcnp;
1929 } */ *ap = v;
1930 struct vnode *fvp = ap->a_fvp;
1931 struct vnode *tvp = ap->a_tvp;
1932 struct vnode *fdvp = ap->a_fdvp;
1933 struct vnode *tdvp = ap->a_tdvp;
1934 struct componentname *tcnp = ap->a_tcnp;
1935 struct componentname *fcnp = ap->a_fcnp;
1936 int error;
1937
1938 #ifndef DIAGNOSTIC
1939 if ((tcnp->cn_flags & HASBUF) == 0 ||
1940 (fcnp->cn_flags & HASBUF) == 0)
1941 panic("nfs_rename: no name");
1942 #endif
1943 /* Check for cross-device rename */
1944 if ((fvp->v_mount != tdvp->v_mount) ||
1945 (tvp && (fvp->v_mount != tvp->v_mount))) {
1946 error = EXDEV;
1947 goto out;
1948 }
1949
1950 /*
1951 * If the tvp exists and is in use, sillyrename it before doing the
1952 * rename of the new file over it.
1953 *
1954 * Have sillyrename use link instead of rename if possible,
1955 * so that we don't lose the file if the rename fails, and so
1956 * that there's no window when the "to" file doesn't exist.
1957 */
1958 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1959 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
1960 VN_KNOTE(tvp, NOTE_DELETE);
1961 vput(tvp);
1962 tvp = NULL;
1963 }
1964
1965 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1966 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1967 tcnp->cn_lwp);
1968
1969 VN_KNOTE(fdvp, NOTE_WRITE);
1970 VN_KNOTE(tdvp, NOTE_WRITE);
1971 if (error == 0 || error == EEXIST) {
1972 if (fvp->v_type == VDIR)
1973 cache_purge(fvp);
1974 else
1975 cache_purge1(fdvp, fcnp, 0);
1976 if (tvp != NULL && tvp->v_type == VDIR)
1977 cache_purge(tvp);
1978 else
1979 cache_purge1(tdvp, tcnp, 0);
1980 }
1981 out:
1982 if (tdvp == tvp)
1983 vrele(tdvp);
1984 else
1985 vput(tdvp);
1986 if (tvp)
1987 vput(tvp);
1988 vrele(fdvp);
1989 vrele(fvp);
1990 return (error);
1991 }
1992
1993 /*
1994  * nfs file rename rpc called from nfs_sillyrename() below
1995 */
1996 int
1997 nfs_renameit(sdvp, scnp, sp)
1998 struct vnode *sdvp;
1999 struct componentname *scnp;
2000 struct sillyrename *sp;
2001 {
2002 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
2003 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_lwp));
2004 }
2005
2006 /*
2007 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
2008 */
2009 int
2010 nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, l)
2011 struct vnode *fdvp;
2012 const char *fnameptr;
2013 int fnamelen;
2014 struct vnode *tdvp;
2015 const char *tnameptr;
2016 int tnamelen;
2017 kauth_cred_t cred;
2018 struct lwp *l;
2019 {
2020 u_int32_t *tl;
2021 char *cp;
2022 #ifndef NFS_V2_ONLY
2023 int32_t t1;
2024 char *cp2;
2025 #endif
2026 int32_t t2;
2027 char *bpos, *dpos;
2028 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
2029 struct mbuf *mreq, *mrep, *md, *mb;
2030 const int v3 = NFS_ISV3(fdvp);
2031 int rexmit = 0;
2032 struct nfsnode *fdnp = VTONFS(fdvp);
2033
2034 nfsstats.rpccnt[NFSPROC_RENAME]++;
2035 nfsm_reqhead(fdnp, NFSPROC_RENAME,
2036 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
2037 nfsm_rndup(tnamelen));
2038 nfsm_fhtom(fdnp, v3);
2039 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
2040 nfsm_fhtom(VTONFS(tdvp), v3);
2041 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
2042 nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
2043 #ifndef NFS_V2_ONLY
2044 if (v3) {
2045 nfsm_wcc_data(fdvp, fwccflag, 0, !error);
2046 nfsm_wcc_data(tdvp, twccflag, 0, !error);
2047 }
2048 #endif
2049 nfsm_reqdone;
2050 VTONFS(fdvp)->n_flag |= NMODIFIED;
2051 VTONFS(tdvp)->n_flag |= NMODIFIED;
2052 if (!fwccflag)
2053 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
2054 if (!twccflag)
2055 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
2056 /*
2057 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2058 */
2059 if (rexmit && error == ENOENT)
2060 error = 0;
2061 return (error);
2062 }
2063
2064 /*
2065 * NFS link RPC, called from nfs_link.
2066 * Assumes dvp and vp locked, and leaves them that way.
2067 */
2068
2069 static int
2070 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name,
2071 size_t namelen, kauth_cred_t cred, struct lwp *l)
2072 {
2073 u_int32_t *tl;
2074 char *cp;
2075 #ifndef NFS_V2_ONLY
2076 int32_t t1;
2077 char *cp2;
2078 #endif
2079 int32_t t2;
2080 char *bpos, *dpos;
2081 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
2082 struct mbuf *mreq, *mrep, *md, *mb;
2083 const int v3 = NFS_ISV3(dvp);
2084 int rexmit = 0;
2085 struct nfsnode *np = VTONFS(vp);
2086
2087 nfsstats.rpccnt[NFSPROC_LINK]++;
2088 nfsm_reqhead(np, NFSPROC_LINK,
2089 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen));
2090 nfsm_fhtom(np, v3);
2091 nfsm_fhtom(VTONFS(dvp), v3);
2092 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
2093 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit);
2094 #ifndef NFS_V2_ONLY
2095 if (v3) {
2096 nfsm_postop_attr(vp, attrflag, 0);
2097 nfsm_wcc_data(dvp, wccflag, 0, !error);
2098 }
2099 #endif
2100 nfsm_reqdone;
2101
2102 VTONFS(dvp)->n_flag |= NMODIFIED;
2103 if (!attrflag)
2104 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
2105 if (!wccflag)
2106 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2107
2108 /*
2109 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2110 */
2111 if (rexmit && error == EEXIST)
2112 error = 0;
2113
2114 return error;
2115 }
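/*
 * A minimal sketch of the attribute-cache discipline used above, assuming a
 * simple "valid bit plus copy" cache: post-op attributes in a reply refresh
 * the cache, and a reply without them forces the cache to be invalidated so
 * a later GETATTR fetches fresh data.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct attr { long size, mtime; };	/* stand-in for cached attributes */

struct attrcache {
	bool		valid;
	struct attr	a;
};

static void
post_rpc_update(struct attrcache *c, const struct attr *postop /* or NULL */)
{
	if (postop != NULL) {
		c->a = *postop;		/* refresh from the reply */
		c->valid = true;
	} else {
		c->valid = false;	/* like NFS_INVALIDATE_ATTRCACHE */
	}
}
#endif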
2116
2117 /*
2118 * nfs hard link create call
2119 */
2120 int
2121 nfs_link(v)
2122 void *v;
2123 {
2124 struct vop_link_args /* {
2125 struct vnode *a_dvp;
2126 struct vnode *a_vp;
2127 struct componentname *a_cnp;
2128 } */ *ap = v;
2129 struct vnode *vp = ap->a_vp;
2130 struct vnode *dvp = ap->a_dvp;
2131 struct componentname *cnp = ap->a_cnp;
2132 int error = 0;
2133
2134 if (dvp->v_mount != vp->v_mount) {
2135 VOP_ABORTOP(dvp, cnp);
2136 vput(dvp);
2137 return (EXDEV);
2138 }
2139 if (dvp != vp) {
2140 error = vn_lock(vp, LK_EXCLUSIVE);
2141 if (error != 0) {
2142 VOP_ABORTOP(dvp, cnp);
2143 vput(dvp);
2144 return error;
2145 }
2146 }
2147
2148 /*
2149 * Push all writes to the server, so that the attribute cache
2150 * doesn't get "out of sync" with the server.
2151 * XXX There should be a better way!
2152 */
2153 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0, cnp->cn_lwp);
2154
2155 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2156 cnp->cn_cred, cnp->cn_lwp);
2157
2158 if (error == 0)
2159 cache_purge1(dvp, cnp, 0);
2160 PNBUF_PUT(cnp->cn_pnbuf);
2161 if (dvp != vp)
2162 VOP_UNLOCK(vp, 0);
2163 VN_KNOTE(vp, NOTE_LINK);
2164 VN_KNOTE(dvp, NOTE_WRITE);
2165 vput(dvp);
2166 return (error);
2167 }
2168
2169 /*
2170 * nfs symbolic link create call
2171 */
2172 int
2173 nfs_symlink(v)
2174 void *v;
2175 {
2176 struct vop_symlink_args /* {
2177 struct vnode *a_dvp;
2178 struct vnode **a_vpp;
2179 struct componentname *a_cnp;
2180 struct vattr *a_vap;
2181 char *a_target;
2182 } */ *ap = v;
2183 struct vnode *dvp = ap->a_dvp;
2184 struct vattr *vap = ap->a_vap;
2185 struct componentname *cnp = ap->a_cnp;
2186 struct nfsv2_sattr *sp;
2187 u_int32_t *tl;
2188 char *cp;
2189 int32_t t1, t2;
2190 char *bpos, *dpos, *cp2;
2191 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2192 struct mbuf *mreq, *mrep, *md, *mb;
2193 struct vnode *newvp = (struct vnode *)0;
2194 const int v3 = NFS_ISV3(dvp);
2195 int rexmit = 0;
2196 struct nfsnode *dnp = VTONFS(dvp);
2197
2198 *ap->a_vpp = NULL;
2199 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2200 slen = strlen(ap->a_target);
2201 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2202 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2203 nfsm_fhtom(dnp, v3);
2204 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2205 #ifndef NFS_V2_ONLY
2206 if (v3)
2207 nfsm_v3attrbuild(vap, false);
2208 #endif
2209 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2210 #ifndef NFS_V2_ONLY
2211 if (!v3) {
2212 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2213 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2214 sp->sa_uid = nfs_xdrneg1;
2215 sp->sa_gid = nfs_xdrneg1;
2216 sp->sa_size = nfs_xdrneg1;
2217 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2218 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2219 }
2220 #endif
2221 nfsm_request1(dnp, NFSPROC_SYMLINK, cnp->cn_lwp, cnp->cn_cred,
2222 &rexmit);
2223 #ifndef NFS_V2_ONLY
2224 if (v3) {
2225 if (!error)
2226 nfsm_mtofh(dvp, newvp, v3, gotvp);
2227 nfsm_wcc_data(dvp, wccflag, 0, !error);
2228 }
2229 #endif
2230 nfsm_reqdone;
2231 /*
2232 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2233 */
2234 if (rexmit && error == EEXIST)
2235 error = 0;
2236 if (error == 0 || error == EEXIST)
2237 cache_purge1(dvp, cnp, 0);
2238 if (error == 0 && newvp == NULL) {
2239 struct nfsnode *np = NULL;
2240
2241 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2242 cnp->cn_cred, cnp->cn_lwp, &np);
2243 if (error == 0)
2244 newvp = NFSTOV(np);
2245 }
2246 if (error) {
2247 if (newvp != NULL)
2248 vput(newvp);
2249 } else {
2250 *ap->a_vpp = newvp;
2251 }
2252 PNBUF_PUT(cnp->cn_pnbuf);
2253 VTONFS(dvp)->n_flag |= NMODIFIED;
2254 if (!wccflag)
2255 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2256 VN_KNOTE(dvp, NOTE_WRITE);
2257 vput(dvp);
2258 return (error);
2259 }
2260
2261 /*
2262 * nfs make dir call
2263 */
2264 int
2265 nfs_mkdir(v)
2266 void *v;
2267 {
2268 struct vop_mkdir_args /* {
2269 struct vnode *a_dvp;
2270 struct vnode **a_vpp;
2271 struct componentname *a_cnp;
2272 struct vattr *a_vap;
2273 } */ *ap = v;
2274 struct vnode *dvp = ap->a_dvp;
2275 struct vattr *vap = ap->a_vap;
2276 struct componentname *cnp = ap->a_cnp;
2277 struct nfsv2_sattr *sp;
2278 u_int32_t *tl;
2279 char *cp;
2280 int32_t t1, t2;
2281 int len;
2282 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0;
2283 struct vnode *newvp = (struct vnode *)0;
2284 char *bpos, *dpos, *cp2;
2285 int error = 0, wccflag = NFSV3_WCCRATTR;
2286 int gotvp = 0;
2287 int rexmit = 0;
2288 struct mbuf *mreq, *mrep, *md, *mb;
2289 const int v3 = NFS_ISV3(dvp);
2290
2291 len = cnp->cn_namelen;
2292 nfsstats.rpccnt[NFSPROC_MKDIR]++;
2293 nfsm_reqhead(dnp, NFSPROC_MKDIR,
2294 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2295 nfsm_fhtom(dnp, v3);
2296 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2297 #ifndef NFS_V2_ONLY
2298 if (v3) {
2299 nfsm_v3attrbuild(vap, false);
2300 } else
2301 #endif
2302 {
2303 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2304 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2305 sp->sa_uid = nfs_xdrneg1;
2306 sp->sa_gid = nfs_xdrneg1;
2307 sp->sa_size = nfs_xdrneg1;
2308 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2309 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2310 }
2311 nfsm_request1(dnp, NFSPROC_MKDIR, cnp->cn_lwp, cnp->cn_cred, &rexmit);
2312 if (!error)
2313 nfsm_mtofh(dvp, newvp, v3, gotvp);
2314 if (v3)
2315 nfsm_wcc_data(dvp, wccflag, 0, !error);
2316 nfsm_reqdone;
2317 VTONFS(dvp)->n_flag |= NMODIFIED;
2318 if (!wccflag)
2319 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2320 /*
2321 	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry,
2322 	 * provided we can succeed in looking up the directory.
2323 */
2324 if ((rexmit && error == EEXIST) || (!error && !gotvp)) {
2325 if (newvp) {
2326 vput(newvp);
2327 newvp = (struct vnode *)0;
2328 }
2329 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
2330 cnp->cn_lwp, &np);
2331 if (!error) {
2332 newvp = NFSTOV(np);
2333 if (newvp->v_type != VDIR || newvp == dvp)
2334 error = EEXIST;
2335 }
2336 }
2337 if (error) {
2338 if (newvp) {
2339 if (dvp != newvp)
2340 vput(newvp);
2341 else
2342 vrele(newvp);
2343 }
2344 } else {
2345 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2346 if (cnp->cn_flags & MAKEENTRY)
2347 nfs_cache_enter(dvp, newvp, cnp);
2348 *ap->a_vpp = newvp;
2349 }
2350 PNBUF_PUT(cnp->cn_pnbuf);
2351 vput(dvp);
2352 return (error);
2353 }
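/*
 * A minimal sketch of the MKDIR retry recovery above, using hypothetical
 * in-memory stand-ins for the MKDIR and LOOKUP rpcs: EEXIST after a
 * retransmission is only forgiven when the name can be looked up and really
 * names a directory.
 */
#if 0
#include <errno.h>
#include <stdbool.h>

struct node { int type; };
#define T_DIR	1

static struct node the_node = { T_DIR };
static bool name_exists = false;

static int
do_mkdir(const char *name)		/* hypothetical MKDIR stand-in */
{
	(void)name;
	if (name_exists)
		return EEXIST;
	name_exists = true;
	return 0;
}

static int
do_lookup(const char *name, struct node **out)	/* hypothetical LOOKUP stand-in */
{
	(void)name;
	if (!name_exists)
		return ENOENT;
	*out = &the_node;
	return 0;
}

static int
mkdir_with_retry_recovery(const char *name, bool rexmit)
{
	struct node *np = NULL;
	int error = do_mkdir(name);

	if (rexmit && error == EEXIST) {
		error = do_lookup(name, &np);
		if (error == 0 && np->type != T_DIR)
			error = EEXIST;		/* existing name is not a dir */
	}
	return error;
}
#endif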
2354
2355 /*
2356 * nfs remove directory call
2357 */
2358 int
2359 nfs_rmdir(v)
2360 void *v;
2361 {
2362 struct vop_rmdir_args /* {
2363 struct vnode *a_dvp;
2364 struct vnode *a_vp;
2365 struct componentname *a_cnp;
2366 } */ *ap = v;
2367 struct vnode *vp = ap->a_vp;
2368 struct vnode *dvp = ap->a_dvp;
2369 struct componentname *cnp = ap->a_cnp;
2370 u_int32_t *tl;
2371 char *cp;
2372 #ifndef NFS_V2_ONLY
2373 int32_t t1;
2374 char *cp2;
2375 #endif
2376 int32_t t2;
2377 char *bpos, *dpos;
2378 int error = 0, wccflag = NFSV3_WCCRATTR;
2379 int rexmit = 0;
2380 struct mbuf *mreq, *mrep, *md, *mb;
2381 const int v3 = NFS_ISV3(dvp);
2382 struct nfsnode *dnp;
2383
2384 if (dvp == vp) {
2385 vrele(dvp);
2386 vput(dvp);
2387 PNBUF_PUT(cnp->cn_pnbuf);
2388 return (EINVAL);
2389 }
2390 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2391 dnp = VTONFS(dvp);
2392 nfsm_reqhead(dnp, NFSPROC_RMDIR,
2393 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2394 nfsm_fhtom(dnp, v3);
2395 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2396 nfsm_request1(dnp, NFSPROC_RMDIR, cnp->cn_lwp, cnp->cn_cred, &rexmit);
2397 #ifndef NFS_V2_ONLY
2398 if (v3)
2399 nfsm_wcc_data(dvp, wccflag, 0, !error);
2400 #endif
2401 nfsm_reqdone;
2402 PNBUF_PUT(cnp->cn_pnbuf);
2403 VTONFS(dvp)->n_flag |= NMODIFIED;
2404 if (!wccflag)
2405 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2406 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2407 VN_KNOTE(vp, NOTE_DELETE);
2408 cache_purge(vp);
2409 vput(vp);
2410 vput(dvp);
2411 /*
2412 	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2413 */
2414 if (rexmit && error == ENOENT)
2415 error = 0;
2416 return (error);
2417 }
2418
2419 /*
2420 * nfs readdir call
2421 */
2422 int
2423 nfs_readdir(v)
2424 void *v;
2425 {
2426 struct vop_readdir_args /* {
2427 struct vnode *a_vp;
2428 struct uio *a_uio;
2429 kauth_cred_t a_cred;
2430 int *a_eofflag;
2431 off_t **a_cookies;
2432 int *a_ncookies;
2433 } */ *ap = v;
2434 struct vnode *vp = ap->a_vp;
2435 struct uio *uio = ap->a_uio;
2436 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2437 char *base = uio->uio_iov->iov_base;
2438 int tresid, error;
2439 size_t count, lost;
2440 struct dirent *dp;
2441 off_t *cookies = NULL;
2442 int ncookies = 0, nc;
2443
2444 if (vp->v_type != VDIR)
2445 return (EPERM);
2446
2447 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1);
2448 count = uio->uio_resid - lost;
2449 if (count <= 0)
2450 return (EINVAL);
2451
2452 /*
2453 * Call nfs_bioread() to do the real work.
2454 */
2455 tresid = uio->uio_resid = count;
2456 error = nfs_bioread(vp, uio, 0, ap->a_cred,
2457 ap->a_cookies ? NFSBIO_CACHECOOKIES : 0);
2458
2459 if (!error && ap->a_cookies) {
2460 ncookies = count / 16;
2461 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
2462 *ap->a_cookies = cookies;
2463 }
2464
2465 if (!error && uio->uio_resid == tresid) {
2466 uio->uio_resid += lost;
2467 nfsstats.direofcache_misses++;
2468 if (ap->a_cookies)
2469 *ap->a_ncookies = 0;
2470 *ap->a_eofflag = 1;
2471 return (0);
2472 }
2473
2474 if (!error && ap->a_cookies) {
2475 /*
2476 * Only the NFS server and emulations use cookies, and they
2477 * load the directory block into system space, so we can
2478 * just look at it directly.
2479 */
2480 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
2481 uio->uio_iovcnt != 1)
2482 panic("nfs_readdir: lost in space");
2483 for (nc = 0; ncookies-- &&
2484 		    base < (char *)uio->uio_iov->iov_base; nc++) {
2485 dp = (struct dirent *) base;
2486 if (dp->d_reclen == 0)
2487 break;
2488 if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
2489 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp);
2490 else
2491 *(cookies++) = NFS_GETCOOKIE(dp);
2492 base += dp->d_reclen;
2493 }
2494 uio->uio_resid +=
2495 ((char *)uio->uio_iov->iov_base - base);
2496 uio->uio_iov->iov_len +=
2497 ((char *)uio->uio_iov->iov_base - base);
2498 uio->uio_iov->iov_base = base;
2499 *ap->a_ncookies = nc;
2500 }
2501
2502 uio->uio_resid += lost;
2503 *ap->a_eofflag = 0;
2504 return (error);
2505 }
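/*
 * A minimal sketch of the cookie harvest done above, assuming a simplified
 * record layout in which each directory record is reclen bytes long and an
 * off_t cookie has been stashed in its last sizeof(off_t) bytes (in the
 * spirit of NFS_STASHCOOKIE/NFS_GETCOOKIE): walk the block record by record
 * and copy the cookies out.
 */
#if 0
#include <stddef.h>
#include <string.h>
#include <sys/types.h>

struct dirrec {				/* simplified record header */
	off_t		fileno;
	unsigned short	reclen;		/* total length, cookie included */
};

static int
harvest_cookies(const char *blk, size_t blklen, off_t *cookies, int maxc)
{
	const char *p = blk, *end = blk + blklen;
	struct dirrec d;
	int n = 0;

	while (p + sizeof(d) <= end && n < maxc) {
		memcpy(&d, p, sizeof(d));
		if (d.reclen == 0)
			break;
		memcpy(&cookies[n++], p + d.reclen - sizeof(off_t),
		    sizeof(off_t));
		p += d.reclen;
	}
	return n;			/* number of cookies copied out */
}
#endif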
2506
2507 /*
2508 * Readdir rpc call.
2509 * Called from below the buffer cache by nfs_doio().
2510 */
2511 int
2512 nfs_readdirrpc(vp, uiop, cred)
2513 struct vnode *vp;
2514 struct uio *uiop;
2515 kauth_cred_t cred;
2516 {
2517 int len, left;
2518 struct dirent *dp = NULL;
2519 u_int32_t *tl;
2520 char *cp;
2521 int32_t t1, t2;
2522 char *bpos, *dpos, *cp2;
2523 struct mbuf *mreq, *mrep, *md, *mb;
2524 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2525 struct nfsnode *dnp = VTONFS(vp);
2526 u_quad_t fileno;
2527 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1;
2528 #ifndef NFS_V2_ONLY
2529 int attrflag;
2530 #endif
2531 int nrpcs = 0, reclen;
2532 const int v3 = NFS_ISV3(vp);
2533
2534 #ifdef DIAGNOSTIC
2535 /*
2536 	 * Should be called from the buffer cache, so exactly
2537 	 * NFS_DIRBLKSIZ will be requested.
2538 */
2539 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2540 panic("nfs readdirrpc bad uio");
2541 #endif
2542
2543 /*
2544 * Loop around doing readdir rpc's of size nm_readdirsize
2545 * truncated to a multiple of NFS_DIRFRAGSIZ.
2546 	 * The stopping criterion is EOF or a full buffer.
2547 */
2548 while (more_dirs && bigenough) {
2549 /*
2550 * Heuristic: don't bother to do another RPC to further
2551 * fill up this block if there is not much room left. (< 50%
2552 * of the readdir RPC size). This wastes some buffer space
2553 * but can save up to 50% in RPC calls.
2554 */
2555 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2556 bigenough = 0;
2557 break;
2558 }
2559 nfsstats.rpccnt[NFSPROC_READDIR]++;
2560 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) +
2561 NFSX_READDIR(v3));
2562 nfsm_fhtom(dnp, v3);
2563 #ifndef NFS_V2_ONLY
2564 if (v3) {
2565 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2566 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2567 txdr_swapcookie3(uiop->uio_offset, tl);
2568 } else {
2569 txdr_cookie3(uiop->uio_offset, tl);
2570 }
2571 tl += 2;
2572 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2573 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2574 } else
2575 #endif
2576 {
2577 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2578 *tl++ = txdr_unsigned(uiop->uio_offset);
2579 }
2580 *tl = txdr_unsigned(nmp->nm_readdirsize);
2581 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred);
2582 nrpcs++;
2583 #ifndef NFS_V2_ONLY
2584 if (v3) {
2585 nfsm_postop_attr(vp, attrflag, 0);
2586 if (!error) {
2587 nfsm_dissect(tl, u_int32_t *,
2588 2 * NFSX_UNSIGNED);
2589 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2590 dnp->n_cookieverf.nfsuquad[1] = *tl;
2591 } else {
2592 m_freem(mrep);
2593 goto nfsmout;
2594 }
2595 }
2596 #endif
2597 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2598 more_dirs = fxdr_unsigned(int, *tl);
2599
2600 /* loop thru the dir entries, doctoring them to 4bsd form */
2601 while (more_dirs && bigenough) {
2602 #ifndef NFS_V2_ONLY
2603 if (v3) {
2604 nfsm_dissect(tl, u_int32_t *,
2605 3 * NFSX_UNSIGNED);
2606 fileno = fxdr_hyper(tl);
2607 len = fxdr_unsigned(int, *(tl + 2));
2608 } else
2609 #endif
2610 {
2611 nfsm_dissect(tl, u_int32_t *,
2612 2 * NFSX_UNSIGNED);
2613 fileno = fxdr_unsigned(u_quad_t, *tl++);
2614 len = fxdr_unsigned(int, *tl);
2615 }
2616 if (len <= 0 || len > NFS_MAXNAMLEN) {
2617 error = EBADRPC;
2618 m_freem(mrep);
2619 goto nfsmout;
2620 }
2621 /* for cookie stashing */
2622 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2623 left = NFS_DIRFRAGSIZ - blksiz;
2624 if (reclen > left) {
2625 memset(uiop->uio_iov->iov_base, 0, left);
2626 dp->d_reclen += left;
2627 UIO_ADVANCE(uiop, left);
2628 blksiz = 0;
2629 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2630 }
2631 if (reclen > uiop->uio_resid)
2632 bigenough = 0;
2633 if (bigenough) {
2634 int tlen;
2635
2636 dp = (struct dirent *)uiop->uio_iov->iov_base;
2637 dp->d_fileno = fileno;
2638 dp->d_namlen = len;
2639 dp->d_reclen = reclen;
2640 dp->d_type = DT_UNKNOWN;
2641 blksiz += reclen;
2642 if (blksiz == NFS_DIRFRAGSIZ)
2643 blksiz = 0;
2644 UIO_ADVANCE(uiop, DIRHDSIZ);
2645 nfsm_mtouio(uiop, len);
2646 tlen = reclen - (DIRHDSIZ + len);
2647 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2648 UIO_ADVANCE(uiop, tlen);
2649 } else
2650 nfsm_adv(nfsm_rndup(len));
2651 #ifndef NFS_V2_ONLY
2652 if (v3) {
2653 nfsm_dissect(tl, u_int32_t *,
2654 3 * NFSX_UNSIGNED);
2655 } else
2656 #endif
2657 {
2658 nfsm_dissect(tl, u_int32_t *,
2659 2 * NFSX_UNSIGNED);
2660 }
2661 if (bigenough) {
2662 #ifndef NFS_V2_ONLY
2663 if (v3) {
2664 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2665 uiop->uio_offset =
2666 fxdr_swapcookie3(tl);
2667 else
2668 uiop->uio_offset =
2669 fxdr_cookie3(tl);
2670 }
2671 else
2672 #endif
2673 {
2674 uiop->uio_offset =
2675 fxdr_unsigned(off_t, *tl);
2676 }
2677 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2678 }
2679 if (v3)
2680 tl += 2;
2681 else
2682 tl++;
2683 more_dirs = fxdr_unsigned(int, *tl);
2684 }
2685 /*
2686 * If at end of rpc data, get the eof boolean
2687 */
2688 if (!more_dirs) {
2689 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2690 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2691
2692 /*
2693 * kludge: if we got no entries, treat it as EOF.
2694 	 * some servers occasionally send a reply without any
2695 	 * entries and without the EOF flag set.
2696 	 * although it might mean the server has a very long name,
2697 * we can't handle such entries anyway.
2698 */
2699
2700 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2701 more_dirs = 0;
2702 }
2703 m_freem(mrep);
2704 }
2705 /*
2706 	 * Fill the last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2707 * by increasing d_reclen for the last record.
2708 */
2709 if (blksiz > 0) {
2710 left = NFS_DIRFRAGSIZ - blksiz;
2711 memset(uiop->uio_iov->iov_base, 0, left);
2712 dp->d_reclen += left;
2713 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2714 UIO_ADVANCE(uiop, left);
2715 }
2716
2717 /*
2718 * We are now either at the end of the directory or have filled the
2719 * block.
2720 */
2721 if (bigenough) {
2722 dnp->n_direofoffset = uiop->uio_offset;
2723 dnp->n_flag |= NEOFVALID;
2724 }
2725 nfsmout:
2726 return (error);
2727 }
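/*
 * A minimal sketch of the fragment padding done above, assuming a fragment
 * size FRAGSIZ and a record header whose reclen field can be grown: the
 * unused tail of the fragment is zeroed and folded into the last record so
 * that every fragment in the directory buffer ends on a FRAGSIZ boundary.
 */
#if 0
#include <stddef.h>
#include <string.h>

#define FRAGSIZ	512			/* stand-in for NFS_DIRFRAGSIZ */

struct rec { unsigned short reclen; };	/* simplified record header */

static void
pad_fragment(char *fragstart, struct rec *last, size_t used)
{
	size_t left = FRAGSIZ - (used % FRAGSIZ);

	if (left != FRAGSIZ) {
		memset(fragstart + used, 0, left);	/* zero the tail */
		last->reclen += left;			/* grow the last record */
	}
}
#endif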
2728
2729 #ifndef NFS_V2_ONLY
2730 /*
2731 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2732 */
2733 int
2734 nfs_readdirplusrpc(vp, uiop, cred)
2735 struct vnode *vp;
2736 struct uio *uiop;
2737 kauth_cred_t cred;
2738 {
2739 int len, left;
2740 struct dirent *dp = NULL;
2741 u_int32_t *tl;
2742 char *cp;
2743 int32_t t1, t2;
2744 struct vnode *newvp;
2745 char *bpos, *dpos, *cp2;
2746 struct mbuf *mreq, *mrep, *md, *mb;
2747 struct nameidata nami, *ndp = &nami;
2748 struct componentname *cnp = &ndp->ni_cnd;
2749 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2750 struct nfsnode *dnp = VTONFS(vp), *np;
2751 nfsfh_t *fhp;
2752 u_quad_t fileno;
2753 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2754 int attrflag, fhsize, nrpcs = 0, reclen;
2755 struct nfs_fattr fattr, *fp;
2756
2757 #ifdef DIAGNOSTIC
2758 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2759 panic("nfs readdirplusrpc bad uio");
2760 #endif
2761 ndp->ni_dvp = vp;
2762 newvp = NULLVP;
2763
2764 /*
2765 * Loop around doing readdir rpc's of size nm_readdirsize
2766 * truncated to a multiple of NFS_DIRFRAGSIZ.
2767 	 * The stopping criterion is EOF or a full buffer.
2768 */
2769 while (more_dirs && bigenough) {
2770 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2771 bigenough = 0;
2772 break;
2773 }
2774 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2775 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS,
2776 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2777 nfsm_fhtom(dnp, 1);
2778 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2779 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2780 txdr_swapcookie3(uiop->uio_offset, tl);
2781 } else {
2782 txdr_cookie3(uiop->uio_offset, tl);
2783 }
2784 tl += 2;
2785 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2786 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2787 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2788 *tl = txdr_unsigned(nmp->nm_rsize);
2789 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred);
2790 nfsm_postop_attr(vp, attrflag, 0);
2791 if (error) {
2792 m_freem(mrep);
2793 goto nfsmout;
2794 }
2795 nrpcs++;
2796 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2797 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2798 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2799 more_dirs = fxdr_unsigned(int, *tl);
2800
2801 /* loop thru the dir entries, doctoring them to 4bsd form */
2802 while (more_dirs && bigenough) {
2803 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2804 fileno = fxdr_hyper(tl);
2805 len = fxdr_unsigned(int, *(tl + 2));
2806 if (len <= 0 || len > NFS_MAXNAMLEN) {
2807 error = EBADRPC;
2808 m_freem(mrep);
2809 goto nfsmout;
2810 }
2811 /* for cookie stashing */
2812 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2813 left = NFS_DIRFRAGSIZ - blksiz;
2814 if (reclen > left) {
2815 /*
2816 * DIRFRAGSIZ is aligned, no need to align
2817 * again here.
2818 */
2819 memset(uiop->uio_iov->iov_base, 0, left);
2820 dp->d_reclen += left;
2821 UIO_ADVANCE(uiop, left);
2822 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2823 blksiz = 0;
2824 }
2825 if (reclen > uiop->uio_resid)
2826 bigenough = 0;
2827 if (bigenough) {
2828 int tlen;
2829
2830 dp = (struct dirent *)uiop->uio_iov->iov_base;
2831 dp->d_fileno = fileno;
2832 dp->d_namlen = len;
2833 dp->d_reclen = reclen;
2834 dp->d_type = DT_UNKNOWN;
2835 blksiz += reclen;
2836 if (blksiz == NFS_DIRFRAGSIZ)
2837 blksiz = 0;
2838 UIO_ADVANCE(uiop, DIRHDSIZ);
2839 nfsm_mtouio(uiop, len);
2840 tlen = reclen - (DIRHDSIZ + len);
2841 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2842 UIO_ADVANCE(uiop, tlen);
2843 cnp->cn_nameptr = dp->d_name;
2844 cnp->cn_namelen = dp->d_namlen;
2845 } else
2846 nfsm_adv(nfsm_rndup(len));
2847 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2848 if (bigenough) {
2849 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2850 uiop->uio_offset =
2851 fxdr_swapcookie3(tl);
2852 else
2853 uiop->uio_offset =
2854 fxdr_cookie3(tl);
2855 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2856 }
2857 tl += 2;
2858
2859 /*
2860 			 * Since the attributes come before the file handle
2861 			 * (sigh), we must save them and load them into the
2862 			 * cache only after the file handle has been parsed.
2863 */
2864 attrflag = fxdr_unsigned(int, *tl);
2865 if (attrflag) {
2866 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR);
2867 memcpy(&fattr, fp, NFSX_V3FATTR);
2868 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2869 doit = fxdr_unsigned(int, *tl);
2870 if (doit) {
2871 nfsm_getfh(fhp, fhsize, 1);
2872 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2873 VREF(vp);
2874 newvp = vp;
2875 np = dnp;
2876 } else {
2877 error = nfs_nget1(vp->v_mount, fhp,
2878 fhsize, &np, LK_NOWAIT);
2879 if (!error)
2880 newvp = NFSTOV(np);
2881 }
2882 if (!error) {
2883 const char *xcp;
2884
2885 nfs_loadattrcache(&newvp, &fattr, 0, 0);
2886 if (bigenough) {
2887 dp->d_type =
2888 IFTODT(VTTOIF(np->n_vattr->va_type));
2889 if (cnp->cn_namelen <= NCHNAMLEN) {
2890 ndp->ni_vp = newvp;
2891 xcp = cnp->cn_nameptr +
2892 cnp->cn_namelen;
2893 cnp->cn_hash =
2894 namei_hash(cnp->cn_nameptr, &xcp);
2895 nfs_cache_enter(ndp->ni_dvp,
2896 ndp->ni_vp, cnp);
2897 }
2898 }
2899 }
2900 error = 0;
2901 }
2902 } else {
2903 /* Just skip over the file handle */
2904 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2905 i = fxdr_unsigned(int, *tl);
2906 nfsm_adv(nfsm_rndup(i));
2907 }
2908 if (newvp != NULLVP) {
2909 if (newvp == vp)
2910 vrele(newvp);
2911 else
2912 vput(newvp);
2913 newvp = NULLVP;
2914 }
2915 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2916 more_dirs = fxdr_unsigned(int, *tl);
2917 }
2918 /*
2919 * If at end of rpc data, get the eof boolean
2920 */
2921 if (!more_dirs) {
2922 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2923 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2924
2925 /*
2926 * kludge: see a comment in nfs_readdirrpc.
2927 */
2928
2929 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2930 more_dirs = 0;
2931 }
2932 m_freem(mrep);
2933 }
2934 /*
2935 	 * Fill the last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2936 * by increasing d_reclen for the last record.
2937 */
2938 if (blksiz > 0) {
2939 left = NFS_DIRFRAGSIZ - blksiz;
2940 memset(uiop->uio_iov->iov_base, 0, left);
2941 dp->d_reclen += left;
2942 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2943 UIO_ADVANCE(uiop, left);
2944 }
2945
2946 /*
2947 * We are now either at the end of the directory or have filled the
2948 * block.
2949 */
2950 if (bigenough) {
2951 dnp->n_direofoffset = uiop->uio_offset;
2952 dnp->n_flag |= NEOFVALID;
2953 }
2954 nfsmout:
2955 if (newvp != NULLVP) {
2956 		if (newvp == vp)
2957 vrele(newvp);
2958 else
2959 vput(newvp);
2960 }
2961 return (error);
2962 }
2963 #endif
2964
2965 /*
2966  * Silly rename. To make the stateless NFS filesystem look a little more
2967  * like "ufs", a remove of an active vnode is translated into a rename to
2968  * a funny looking filename that is removed by nfs_inactive on the
2969  * nfsnode. There is the potential for another process on a different client
2970  * to create the same funny name between the time nfs_lookitup() fails and
2971  * nfs_rename() completes, but...
2972 */
2973 int
2974 nfs_sillyrename(dvp, vp, cnp, dolink)
2975 struct vnode *dvp, *vp;
2976 struct componentname *cnp;
2977 bool dolink;
2978 {
2979 struct sillyrename *sp;
2980 struct nfsnode *np;
2981 int error;
2982 short pid;
2983
2984 cache_purge(dvp);
2985 np = VTONFS(vp);
2986 #ifdef DIAGNOSTIC
2987 if (vp->v_type == VDIR)
2988 panic("nfs: sillyrename dir");
2989 #endif
2990 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2991 M_NFSREQ, M_WAITOK);
2992 sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2993 sp->s_dvp = dvp;
2994 VREF(dvp);
2995
2996 /* Fudge together a funny name */
2997 pid = cnp->cn_lwp->l_proc->p_pid;
2998 memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
2999 sp->s_namlen = 12;
3000 sp->s_name[8] = hexdigits[pid & 0xf];
3001 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
3002 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
3003 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
3004
3005 /* Try lookitups until we get one that isn't there */
3006 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
3007 cnp->cn_lwp, (struct nfsnode **)0) == 0) {
3008 sp->s_name[4]++;
3009 if (sp->s_name[4] > 'z') {
3010 error = EINVAL;
3011 goto bad;
3012 }
3013 }
3014 if (dolink) {
3015 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
3016 sp->s_cred, cnp->cn_lwp);
3017 /*
3018 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
3019 */
3020 if (error == ENOTSUP) {
3021 error = nfs_renameit(dvp, cnp, sp);
3022 }
3023 } else {
3024 error = nfs_renameit(dvp, cnp, sp);
3025 }
3026 if (error)
3027 goto bad;
3028 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
3029 cnp->cn_lwp, &np);
3030 np->n_sillyrename = sp;
3031 return (0);
3032 bad:
3033 vrele(sp->s_dvp);
3034 kauth_cred_free(sp->s_cred);
3035 free((void *)sp, M_NFSREQ);
3036 return (error);
3037 }
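/*
 * A minimal userland sketch of the funny-name generation above, assuming
 * access(2) as a stand-in for nfs_lookitup(): the pid is folded into four
 * hex digits and the fifth character is bumped until a name is found that
 * does not already exist, giving up past 'z' just as the code above does.
 */
#if 0
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static const char hexdigits[] = "0123456789abcdef";

static int
make_silly_name(char name[14], pid_t pid)
{
	memcpy(name, ".nfsAxxxx4.4", 13);
	name[8] = hexdigits[pid & 0xf];
	name[7] = hexdigits[(pid >> 4) & 0xf];
	name[6] = hexdigits[(pid >> 8) & 0xf];
	name[5] = hexdigits[(pid >> 12) & 0xf];

	/* keep bumping the 'A' until the name is not taken */
	while (access(name, F_OK) == 0) {
		name[4]++;
		if (name[4] > 'z')
			return -1;
	}
	return 0;
}
#endif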
3038
3039 /*
3040 * Look up a file name and optionally either update the file handle or
3041 * allocate an nfsnode, depending on the value of npp.
3042 * npp == NULL --> just do the lookup
3043 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
3044 * handled too
3045 * *npp != NULL --> update the file handle in the vnode
3046 */
3047 int
3048 nfs_lookitup(dvp, name, len, cred, l, npp)
3049 struct vnode *dvp;
3050 const char *name;
3051 int len;
3052 kauth_cred_t cred;
3053 struct lwp *l;
3054 struct nfsnode **npp;
3055 {
3056 u_int32_t *tl;
3057 char *cp;
3058 int32_t t1, t2;
3059 struct vnode *newvp = (struct vnode *)0;
3060 struct nfsnode *np, *dnp = VTONFS(dvp);
3061 char *bpos, *dpos, *cp2;
3062 int error = 0, fhlen;
3063 #ifndef NFS_V2_ONLY
3064 int attrflag;
3065 #endif
3066 struct mbuf *mreq, *mrep, *md, *mb;
3067 nfsfh_t *nfhp;
3068 const int v3 = NFS_ISV3(dvp);
3069
3070 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
3071 nfsm_reqhead(dnp, NFSPROC_LOOKUP,
3072 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
3073 nfsm_fhtom(dnp, v3);
3074 nfsm_strtom(name, len, NFS_MAXNAMLEN);
3075 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred);
3076 if (npp && !error) {
3077 nfsm_getfh(nfhp, fhlen, v3);
3078 if (*npp) {
3079 np = *npp;
3080 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
3081 free((void *)np->n_fhp, M_NFSBIGFH);
3082 np->n_fhp = &np->n_fh;
3083 }
3084 #if NFS_SMALLFH < NFSX_V3FHMAX
3085 else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH)
3086 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK);
3087 #endif
3088 memcpy((void *)np->n_fhp, (void *)nfhp, fhlen);
3089 np->n_fhsize = fhlen;
3090 newvp = NFSTOV(np);
3091 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
3092 VREF(dvp);
3093 newvp = dvp;
3094 np = dnp;
3095 } else {
3096 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
3097 if (error) {
3098 m_freem(mrep);
3099 return (error);
3100 }
3101 newvp = NFSTOV(np);
3102 }
3103 #ifndef NFS_V2_ONLY
3104 if (v3) {
3105 nfsm_postop_attr(newvp, attrflag, 0);
3106 if (!attrflag && *npp == NULL) {
3107 m_freem(mrep);
3108 vput(newvp);
3109 return (ENOENT);
3110 }
3111 } else
3112 #endif
3113 nfsm_loadattr(newvp, (struct vattr *)0, 0);
3114 }
3115 nfsm_reqdone;
3116 if (npp && *npp == NULL) {
3117 if (error) {
3118 if (newvp)
3119 vput(newvp);
3120 } else
3121 *npp = np;
3122 }
3123 return (error);
3124 }
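/*
 * A minimal usage sketch of the npp convention above, with the surrounding
 * variables (dvp, name, len, cred, l, error) assumed to be in scope: the
 * value passed selects between a bare existence probe, allocation of a new
 * nfsnode, and a file handle refresh on an existing one.
 */
#if 0
	struct nfsnode *np;

	/* 1. npp == NULL: just probe whether the name exists. */
	error = nfs_lookitup(dvp, name, len, cred, l, NULL);

	/* 2. *npp == NULL: allocate a fresh nfsnode with valid attributes. */
	np = NULL;
	error = nfs_lookitup(dvp, name, len, cred, l, &np);

	/* 3. *npp != NULL: refresh the file handle stored in the node. */
	error = nfs_lookitup(dvp, name, len, cred, l, &np);
#endif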
3125
3126 #ifndef NFS_V2_ONLY
3127 /*
3128 * Nfs Version 3 commit rpc
3129 */
3130 int
3131 nfs_commit(vp, offset, cnt, l)
3132 struct vnode *vp;
3133 off_t offset;
3134 uint32_t cnt;
3135 struct lwp *l;
3136 {
3137 char *cp;
3138 u_int32_t *tl;
3139 int32_t t1, t2;
3140 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
3141 char *bpos, *dpos, *cp2;
3142 int error = 0, wccflag = NFSV3_WCCRATTR;
3143 struct mbuf *mreq, *mrep, *md, *mb;
3144 struct nfsnode *np;
3145
3146 KASSERT(NFS_ISV3(vp));
3147
3148 #ifdef NFS_DEBUG_COMMIT
3149 printf("commit %lu - %lu\n", (unsigned long)offset,
3150 (unsigned long)(offset + cnt));
3151 #endif
3152
3153 simple_lock(&nmp->nm_slock);
3154 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) {
3155 simple_unlock(&nmp->nm_slock);
3156 return (0);
3157 }
3158 simple_unlock(&nmp->nm_slock);
3159 nfsstats.rpccnt[NFSPROC_COMMIT]++;
3160 np = VTONFS(vp);
3161 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1));
3162 nfsm_fhtom(np, 1);
3163 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3164 txdr_hyper(offset, tl);
3165 tl += 2;
3166 *tl = txdr_unsigned(cnt);
3167 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3168 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
3169 if (!error) {
3170 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
3171 simple_lock(&nmp->nm_slock);
3172 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3173 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3174 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3175 error = NFSERR_STALEWRITEVERF;
3176 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3177 }
3178 simple_unlock(&nmp->nm_slock);
3179 }
3180 nfsm_reqdone;
3181 return (error);
3182 }
3183 #endif
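/*
 * A minimal sketch of the write-verifier check above, assuming an 8-byte
 * verifier: when the verifier returned by COMMIT differs from the one saved
 * at write time, the server has rebooted and lost its cache, so the caller
 * must redo every uncommitted write.
 */
#if 0
#include <stdbool.h>
#include <string.h>

#define VERFSIZE 8			/* stand-in for NFSX_V3WRITEVERF */

static bool
commit_verifier_stale(unsigned char saved[VERFSIZE],
    const unsigned char fromreply[VERFSIZE])
{
	if (memcmp(saved, fromreply, VERFSIZE) != 0) {
		memcpy(saved, fromreply, VERFSIZE);	/* adopt the new one */
		return true;		/* resend uncommitted writes */
	}
	return false;
}
#endif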
3184
3185 /*
3186 * Kludge City..
3187 * - make nfs_bmap() essentially a no-op that does no translation
3188 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3189 * (Maybe I could use the process's page mapping, but I was concerned that
3190 * Kernel Write might not be enabled and also figured copyout() would do
3191 * a lot more work than memcpy() and also it currently happens in the
3192  *   context of the swapper process (2).)
3193 */
3194 int
3195 nfs_bmap(v)
3196 void *v;
3197 {
3198 struct vop_bmap_args /* {
3199 struct vnode *a_vp;
3200 daddr_t a_bn;
3201 struct vnode **a_vpp;
3202 daddr_t *a_bnp;
3203 int *a_runp;
3204 } */ *ap = v;
3205 struct vnode *vp = ap->a_vp;
3206 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3207
3208 if (ap->a_vpp != NULL)
3209 *ap->a_vpp = vp;
3210 if (ap->a_bnp != NULL)
3211 *ap->a_bnp = ap->a_bn << bshift;
3212 if (ap->a_runp != NULL)
3213 *ap->a_runp = 1024 * 1024; /* XXX */
3214 return (0);
3215 }
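/*
 * A minimal sketch of the block-number translation above, assuming a
 * filesystem block shift and a device block shift (say 16 and 9): the
 * logical block number is just shifted by their difference, so no per-file
 * block map is consulted at all.
 */
#if 0
#include <stdint.h>

static int64_t
fsblk_to_devblk(int64_t bn, int fs_bshift, int dev_bshift)
{
	return bn << (fs_bshift - dev_bshift);	/* e.g. 1 -> 128 for 64k/512 */
}
#endif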
3216
3217 /*
3218 * Strategy routine.
3219 * For async requests when nfsiod(s) are running, queue the request by
3220  * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
3221 * request.
3222 */
3223 int
3224 nfs_strategy(v)
3225 void *v;
3226 {
3227 struct vop_strategy_args *ap = v;
3228 struct buf *bp = ap->a_bp;
3229 int error = 0;
3230
3231 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3232 panic("nfs physio/async");
3233
3234 /*
3235 * If the op is asynchronous and an i/o daemon is waiting
3236 * queue the request, wake it up and wait for completion
3237 * otherwise just do it ourselves.
3238 */
3239 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3240 error = nfs_doio(bp);
3241 return (error);
3242 }
3243
3244 /*
3245 * fsync vnode op. Just call nfs_flush() with commit == 1.
3246 */
3247 /* ARGSUSED */
3248 int
3249 nfs_fsync(v)
3250 void *v;
3251 {
3252 struct vop_fsync_args /* {
3253 struct vnodeop_desc *a_desc;
3254 struct vnode * a_vp;
3255 kauth_cred_t a_cred;
3256 int a_flags;
3257 		off_t a_offlo;
3258 		off_t a_offhi;
3259 struct lwp * a_l;
3260 } */ *ap = v;
3261
3262 struct vnode *vp = ap->a_vp;
3263
3264 if (vp->v_type != VREG)
3265 return 0;
3266
3267 return (nfs_flush(vp, ap->a_cred,
3268 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, ap->a_l, 1));
3269 }
3270
3271 /*
3272 * Flush all the data associated with a vnode.
3273 */
3274 int
3275 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3276 int commit)
3277 {
3278 struct nfsnode *np = VTONFS(vp);
3279 int error;
3280 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3281 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3282
3283 simple_lock(&vp->v_interlock);
3284 error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3285 if (np->n_flag & NWRITEERR) {
3286 error = np->n_error;
3287 np->n_flag &= ~NWRITEERR;
3288 }
3289 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3290 return (error);
3291 }
3292
3293 /*
3294 * Return POSIX pathconf information applicable to nfs.
3295 *
3296 * N.B. The NFS V2 protocol doesn't support this RPC.
3297 */
3298 /* ARGSUSED */
3299 int
3300 nfs_pathconf(v)
3301 void *v;
3302 {
3303 struct vop_pathconf_args /* {
3304 struct vnode *a_vp;
3305 int a_name;
3306 register_t *a_retval;
3307 } */ *ap = v;
3308 struct nfsv3_pathconf *pcp;
3309 struct vnode *vp = ap->a_vp;
3310 struct mbuf *mreq, *mrep, *md, *mb;
3311 int32_t t1, t2;
3312 u_int32_t *tl;
3313 char *bpos, *dpos, *cp, *cp2;
3314 int error = 0, attrflag;
3315 #ifndef NFS_V2_ONLY
3316 struct nfsmount *nmp;
3317 unsigned int l;
3318 u_int64_t maxsize;
3319 #endif
3320 const int v3 = NFS_ISV3(vp);
3321 struct nfsnode *np = VTONFS(vp);
3322
3323 switch (ap->a_name) {
3324 /* Names that can be resolved locally. */
3325 case _PC_PIPE_BUF:
3326 *ap->a_retval = PIPE_BUF;
3327 break;
3328 case _PC_SYNC_IO:
3329 *ap->a_retval = 1;
3330 break;
3331 /* Names that cannot be resolved locally; do an RPC, if possible. */
3332 case _PC_LINK_MAX:
3333 case _PC_NAME_MAX:
3334 case _PC_CHOWN_RESTRICTED:
3335 case _PC_NO_TRUNC:
3336 if (!v3) {
3337 error = EINVAL;
3338 break;
3339 }
3340 nfsstats.rpccnt[NFSPROC_PATHCONF]++;
3341 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1));
3342 nfsm_fhtom(np, 1);
3343 nfsm_request(np, NFSPROC_PATHCONF,
3344 curlwp, curlwp->l_cred); /* XXX */
3345 nfsm_postop_attr(vp, attrflag, 0);
3346 if (!error) {
3347 nfsm_dissect(pcp, struct nfsv3_pathconf *,
3348 NFSX_V3PATHCONF);
3349 switch (ap->a_name) {
3350 case _PC_LINK_MAX:
3351 *ap->a_retval =
3352 fxdr_unsigned(register_t, pcp->pc_linkmax);
3353 break;
3354 case _PC_NAME_MAX:
3355 *ap->a_retval =
3356 fxdr_unsigned(register_t, pcp->pc_namemax);
3357 break;
3358 case _PC_CHOWN_RESTRICTED:
3359 *ap->a_retval =
3360 (pcp->pc_chownrestricted == nfs_true);
3361 break;
3362 case _PC_NO_TRUNC:
3363 *ap->a_retval =
3364 (pcp->pc_notrunc == nfs_true);
3365 break;
3366 }
3367 }
3368 nfsm_reqdone;
3369 break;
3370 case _PC_FILESIZEBITS:
3371 #ifndef NFS_V2_ONLY
3372 if (v3) {
3373 nmp = VFSTONFS(vp->v_mount);
3374 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
3375 if ((error = nfs_fsinfo(nmp, vp,
3376 curlwp->l_cred, curlwp)) != 0) /* XXX */
3377 break;
3378 for (l = 0, maxsize = nmp->nm_maxfilesize;
3379 (maxsize >> l) > 0; l++)
3380 ;
3381 *ap->a_retval = l + 1;
3382 } else
3383 #endif
3384 {
3385 *ap->a_retval = 32; /* NFS V2 limitation */
3386 }
3387 break;
3388 default:
3389 error = EINVAL;
3390 break;
3391 }
3392
3393 return (error);
3394 }
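/*
 * A minimal sketch of the _PC_FILESIZEBITS computation above: count how
 * many shifts are needed to reduce the maximum file size to zero and add
 * one, exactly as the loop over nm_maxfilesize does.
 */
#if 0
#include <stdint.h>

static int
filesizebits(uint64_t maxsize)
{
	unsigned int l;

	for (l = 0; (maxsize >> l) > 0; l++)
		continue;
	return l + 1;		/* e.g. 0x7fffffffffffffff -> 64 */
}
#endif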
3395
3396 /*
3397 * NFS advisory byte-level locks.
3398 */
3399 int
3400 nfs_advlock(v)
3401 void *v;
3402 {
3403 struct vop_advlock_args /* {
3404 struct vnode *a_vp;
3405 void *a_id;
3406 int a_op;
3407 struct flock *a_fl;
3408 int a_flags;
3409 } */ *ap = v;
3410 struct nfsnode *np = VTONFS(ap->a_vp);
3411
3412 return lf_advlock(ap, &np->n_lockf, np->n_size);
3413 }
3414
3415 /*
3416 * Print out the contents of an nfsnode.
3417 */
3418 int
3419 nfs_print(v)
3420 void *v;
3421 {
3422 struct vop_print_args /* {
3423 struct vnode *a_vp;
3424 } */ *ap = v;
3425 struct vnode *vp = ap->a_vp;
3426 struct nfsnode *np = VTONFS(vp);
3427
3428 printf("tag VT_NFS, fileid %lld fsid 0x%lx",
3429 (unsigned long long)np->n_vattr->va_fileid, np->n_vattr->va_fsid);
3430 if (vp->v_type == VFIFO)
3431 fifo_printinfo(vp);
3432 printf("\n");
3433 return (0);
3434 }
3435
3436 /*
3437 * nfs unlock wrapper.
3438 */
3439 int
3440 nfs_unlock(void *v)
3441 {
3442 struct vop_unlock_args /* {
3443 struct vnode *a_vp;
3444 int a_flags;
3445 } */ *ap = v;
3446 struct vnode *vp = ap->a_vp;
3447
3448 /*
3449 * VOP_UNLOCK can be called by nfs_loadattrcache
3450 * with v_data == 0.
3451 */
3452 if (VTONFS(vp)) {
3453 nfs_delayedtruncate(vp);
3454 }
3455
3456 return genfs_unlock(v);
3457 }
3458
3459 /*
3460 * nfs special file access vnode op.
3461 * Essentially just get vattr and then imitate iaccess() since the device is
3462 * local to the client.
3463 */
3464 int
3465 nfsspec_access(v)
3466 void *v;
3467 {
3468 struct vop_access_args /* {
3469 struct vnode *a_vp;
3470 int a_mode;
3471 kauth_cred_t a_cred;
3472 struct lwp *a_l;
3473 } */ *ap = v;
3474 struct vattr va;
3475 struct vnode *vp = ap->a_vp;
3476 int error;
3477
3478 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_l);
3479 if (error)
3480 return (error);
3481
3482 /*
3483 * Disallow write attempts on filesystems mounted read-only;
3484 * unless the file is a socket, fifo, or a block or character
3485 * device resident on the filesystem.
3486 */
3487 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3488 switch (vp->v_type) {
3489 case VREG:
3490 case VDIR:
3491 case VLNK:
3492 return (EROFS);
3493 default:
3494 break;
3495 }
3496 }
3497
3498 return (vaccess(va.va_type, va.va_mode,
3499 va.va_uid, va.va_gid, ap->a_mode, ap->a_cred));
3500 }
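/*
 * A minimal sketch of a local permission check in the spirit of the
 * vaccess() call above, assuming classic owner/group/other mode bits only
 * (no superuser or supplementary-group handling): pick the bit triplet that
 * applies to the caller and compare it with the requested access.
 */
#if 0
#include <sys/types.h>
#include <stdbool.h>

static bool
simple_access_ok(mode_t fmode, uid_t fuid, gid_t fgid,
    uid_t uid, gid_t gid, mode_t wanted)	/* wanted: rwx bits (0-7) */
{
	mode_t granted;

	if (uid == fuid)
		granted = (fmode >> 6) & 7;	/* owner bits */
	else if (gid == fgid)
		granted = (fmode >> 3) & 7;	/* group bits */
	else
		granted = fmode & 7;		/* other bits */
	return (wanted & ~granted) == 0;
}
#endif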
3501
3502 /*
3503 * Read wrapper for special devices.
3504 */
3505 int
3506 nfsspec_read(v)
3507 void *v;
3508 {
3509 struct vop_read_args /* {
3510 struct vnode *a_vp;
3511 struct uio *a_uio;
3512 int a_ioflag;
3513 kauth_cred_t a_cred;
3514 } */ *ap = v;
3515 struct nfsnode *np = VTONFS(ap->a_vp);
3516
3517 /*
3518 * Set access flag.
3519 */
3520 np->n_flag |= NACC;
3521 getnanotime(&np->n_atim);
3522 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3523 }
3524
3525 /*
3526 * Write wrapper for special devices.
3527 */
3528 int
3529 nfsspec_write(v)
3530 void *v;
3531 {
3532 struct vop_write_args /* {
3533 struct vnode *a_vp;
3534 struct uio *a_uio;
3535 int a_ioflag;
3536 kauth_cred_t a_cred;
3537 } */ *ap = v;
3538 struct nfsnode *np = VTONFS(ap->a_vp);
3539
3540 /*
3541 * Set update flag.
3542 */
3543 np->n_flag |= NUPD;
3544 getnanotime(&np->n_mtim);
3545 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3546 }
3547
3548 /*
3549 * Close wrapper for special devices.
3550 *
3551 * Update the times on the nfsnode then do device close.
3552 */
3553 int
3554 nfsspec_close(v)
3555 void *v;
3556 {
3557 struct vop_close_args /* {
3558 struct vnode *a_vp;
3559 int a_fflag;
3560 kauth_cred_t a_cred;
3561 struct lwp *a_l;
3562 } */ *ap = v;
3563 struct vnode *vp = ap->a_vp;
3564 struct nfsnode *np = VTONFS(vp);
3565 struct vattr vattr;
3566
3567 if (np->n_flag & (NACC | NUPD)) {
3568 np->n_flag |= NCHG;
3569 if (vp->v_usecount == 1 &&
3570 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3571 VATTR_NULL(&vattr);
3572 if (np->n_flag & NACC)
3573 vattr.va_atime = np->n_atim;
3574 if (np->n_flag & NUPD)
3575 vattr.va_mtime = np->n_mtim;
3576 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_l);
3577 }
3578 }
3579 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3580 }
3581
3582 /*
3583 * Read wrapper for fifos.
3584 */
3585 int
3586 nfsfifo_read(v)
3587 void *v;
3588 {
3589 struct vop_read_args /* {
3590 struct vnode *a_vp;
3591 struct uio *a_uio;
3592 int a_ioflag;
3593 kauth_cred_t a_cred;
3594 } */ *ap = v;
3595 struct nfsnode *np = VTONFS(ap->a_vp);
3596
3597 /*
3598 * Set access flag.
3599 */
3600 np->n_flag |= NACC;
3601 getnanotime(&np->n_atim);
3602 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3603 }
3604
3605 /*
3606 * Write wrapper for fifos.
3607 */
3608 int
3609 nfsfifo_write(v)
3610 void *v;
3611 {
3612 struct vop_write_args /* {
3613 struct vnode *a_vp;
3614 struct uio *a_uio;
3615 int a_ioflag;
3616 kauth_cred_t a_cred;
3617 } */ *ap = v;
3618 struct nfsnode *np = VTONFS(ap->a_vp);
3619
3620 /*
3621 * Set update flag.
3622 */
3623 np->n_flag |= NUPD;
3624 getnanotime(&np->n_mtim);
3625 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3626 }
3627
3628 /*
3629 * Close wrapper for fifos.
3630 *
3631 * Update the times on the nfsnode then do fifo close.
3632 */
3633 int
3634 nfsfifo_close(v)
3635 void *v;
3636 {
3637 struct vop_close_args /* {
3638 struct vnode *a_vp;
3639 int a_fflag;
3640 kauth_cred_t a_cred;
3641 struct lwp *a_l;
3642 } */ *ap = v;
3643 struct vnode *vp = ap->a_vp;
3644 struct nfsnode *np = VTONFS(vp);
3645 struct vattr vattr;
3646
3647 if (np->n_flag & (NACC | NUPD)) {
3648 struct timespec ts;
3649
3650 getnanotime(&ts);
3651 if (np->n_flag & NACC)
3652 np->n_atim = ts;
3653 if (np->n_flag & NUPD)
3654 np->n_mtim = ts;
3655 np->n_flag |= NCHG;
3656 if (vp->v_usecount == 1 &&
3657 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3658 VATTR_NULL(&vattr);
3659 if (np->n_flag & NACC)
3660 vattr.va_atime = np->n_atim;
3661 if (np->n_flag & NUPD)
3662 vattr.va_mtime = np->n_mtim;
3663 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_l);
3664 }
3665 }
3666 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3667 }
3668