1 /* $NetBSD: nfs_vnops.c,v 1.257 2007/10/28 22:24:29 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)nfs_vnops.c 8.19 (Berkeley) 7/31/95
35 */
36
37 /*
38 * vnode op calls for Sun NFS version 2 and 3
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.257 2007/10/28 22:24:29 yamt Exp $");
43
44 #include "opt_inet.h"
45 #include "opt_nfs.h"
46 #include "opt_uvmhist.h"
47
48 #include <sys/param.h>
49 #include <sys/proc.h>
50 #include <sys/kernel.h>
51 #include <sys/systm.h>
52 #include <sys/resourcevar.h>
53 #include <sys/mount.h>
54 #include <sys/buf.h>
55 #include <sys/condvar.h>
56 #include <sys/disk.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/mutex.h>
60 #include <sys/namei.h>
61 #include <sys/vnode.h>
62 #include <sys/dirent.h>
63 #include <sys/fcntl.h>
64 #include <sys/hash.h>
65 #include <sys/lockf.h>
66 #include <sys/stat.h>
67 #include <sys/unistd.h>
68 #include <sys/kauth.h>
69
70 #include <uvm/uvm_extern.h>
71 #include <uvm/uvm.h>
72
73 #include <miscfs/fifofs/fifo.h>
74 #include <miscfs/genfs/genfs.h>
75 #include <miscfs/genfs/genfs_node.h>
76 #include <miscfs/specfs/specdev.h>
77
78 #include <nfs/rpcv2.h>
79 #include <nfs/nfsproto.h>
80 #include <nfs/nfs.h>
81 #include <nfs/nfsnode.h>
82 #include <nfs/nfsmount.h>
83 #include <nfs/xdr_subs.h>
84 #include <nfs/nfsm_subs.h>
85 #include <nfs/nfs_var.h>
86
87 #include <net/if.h>
88 #include <netinet/in.h>
89 #include <netinet/in_var.h>
90
91 /*
92 * Global vfs data structures for nfs
93 */
94 int (**nfsv2_vnodeop_p) __P((void *));
95 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
96 { &vop_default_desc, vn_default_error },
97 { &vop_lookup_desc, nfs_lookup }, /* lookup */
98 { &vop_create_desc, nfs_create }, /* create */
99 { &vop_mknod_desc, nfs_mknod }, /* mknod */
100 { &vop_open_desc, nfs_open }, /* open */
101 { &vop_close_desc, nfs_close }, /* close */
102 { &vop_access_desc, nfs_access }, /* access */
103 { &vop_getattr_desc, nfs_getattr }, /* getattr */
104 { &vop_setattr_desc, nfs_setattr }, /* setattr */
105 { &vop_read_desc, nfs_read }, /* read */
106 { &vop_write_desc, nfs_write }, /* write */
107 { &vop_lease_desc, nfs_lease_check }, /* lease */
108 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
109 { &vop_ioctl_desc, nfs_ioctl }, /* ioctl */
110 { &vop_poll_desc, nfs_poll }, /* poll */
111 { &vop_kqfilter_desc, nfs_kqfilter }, /* kqfilter */
112 { &vop_revoke_desc, nfs_revoke }, /* revoke */
113 { &vop_mmap_desc, nfs_mmap }, /* mmap */
114 { &vop_fsync_desc, nfs_fsync }, /* fsync */
115 { &vop_seek_desc, nfs_seek }, /* seek */
116 { &vop_remove_desc, nfs_remove }, /* remove */
117 { &vop_link_desc, nfs_link }, /* link */
118 { &vop_rename_desc, nfs_rename }, /* rename */
119 { &vop_mkdir_desc, nfs_mkdir }, /* mkdir */
120 { &vop_rmdir_desc, nfs_rmdir }, /* rmdir */
121 { &vop_symlink_desc, nfs_symlink }, /* symlink */
122 { &vop_readdir_desc, nfs_readdir }, /* readdir */
123 { &vop_readlink_desc, nfs_readlink }, /* readlink */
124 { &vop_abortop_desc, nfs_abortop }, /* abortop */
125 { &vop_inactive_desc, nfs_inactive }, /* inactive */
126 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
127 { &vop_lock_desc, nfs_lock }, /* lock */
128 { &vop_unlock_desc, nfs_unlock }, /* unlock */
129 { &vop_bmap_desc, nfs_bmap }, /* bmap */
130 { &vop_strategy_desc, nfs_strategy }, /* strategy */
131 { &vop_print_desc, nfs_print }, /* print */
132 { &vop_islocked_desc, nfs_islocked }, /* islocked */
133 { &vop_pathconf_desc, nfs_pathconf }, /* pathconf */
134 { &vop_advlock_desc, nfs_advlock }, /* advlock */
135 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
136 { &vop_getpages_desc, nfs_getpages }, /* getpages */
137 { &vop_putpages_desc, genfs_putpages }, /* putpages */
138 { NULL, NULL }
139 };
140 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
141 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
142
143 /*
144 * Special device vnode ops
145 */
146 int (**spec_nfsv2nodeop_p) __P((void *));
147 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
148 { &vop_default_desc, vn_default_error },
149 { &vop_lookup_desc, spec_lookup }, /* lookup */
150 { &vop_create_desc, spec_create }, /* create */
151 { &vop_mknod_desc, spec_mknod }, /* mknod */
152 { &vop_open_desc, spec_open }, /* open */
153 { &vop_close_desc, nfsspec_close }, /* close */
154 { &vop_access_desc, nfsspec_access }, /* access */
155 { &vop_getattr_desc, nfs_getattr }, /* getattr */
156 { &vop_setattr_desc, nfs_setattr }, /* setattr */
157 { &vop_read_desc, nfsspec_read }, /* read */
158 { &vop_write_desc, nfsspec_write }, /* write */
159 { &vop_lease_desc, spec_lease_check }, /* lease */
160 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
161 { &vop_ioctl_desc, spec_ioctl }, /* ioctl */
162 { &vop_poll_desc, spec_poll }, /* poll */
163 { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */
164 { &vop_revoke_desc, spec_revoke }, /* revoke */
165 { &vop_mmap_desc, spec_mmap }, /* mmap */
166 { &vop_fsync_desc, spec_fsync }, /* fsync */
167 { &vop_seek_desc, spec_seek }, /* seek */
168 { &vop_remove_desc, spec_remove }, /* remove */
169 { &vop_link_desc, spec_link }, /* link */
170 { &vop_rename_desc, spec_rename }, /* rename */
171 { &vop_mkdir_desc, spec_mkdir }, /* mkdir */
172 { &vop_rmdir_desc, spec_rmdir }, /* rmdir */
173 { &vop_symlink_desc, spec_symlink }, /* symlink */
174 { &vop_readdir_desc, spec_readdir }, /* readdir */
175 { &vop_readlink_desc, spec_readlink }, /* readlink */
176 { &vop_abortop_desc, spec_abortop }, /* abortop */
177 { &vop_inactive_desc, nfs_inactive }, /* inactive */
178 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
179 { &vop_lock_desc, nfs_lock }, /* lock */
180 { &vop_unlock_desc, nfs_unlock }, /* unlock */
181 { &vop_bmap_desc, spec_bmap }, /* bmap */
182 { &vop_strategy_desc, spec_strategy }, /* strategy */
183 { &vop_print_desc, nfs_print }, /* print */
184 { &vop_islocked_desc, nfs_islocked }, /* islocked */
185 { &vop_pathconf_desc, spec_pathconf }, /* pathconf */
186 { &vop_advlock_desc, spec_advlock }, /* advlock */
187 { &vop_bwrite_desc, spec_bwrite }, /* bwrite */
188 { &vop_getpages_desc, spec_getpages }, /* getpages */
189 { &vop_putpages_desc, spec_putpages }, /* putpages */
190 { NULL, NULL }
191 };
192 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
193 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
194
195 int (**fifo_nfsv2nodeop_p) __P((void *));
196 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
197 { &vop_default_desc, vn_default_error },
198 { &vop_lookup_desc, fifo_lookup }, /* lookup */
199 { &vop_create_desc, fifo_create }, /* create */
200 { &vop_mknod_desc, fifo_mknod }, /* mknod */
201 { &vop_open_desc, fifo_open }, /* open */
202 { &vop_close_desc, nfsfifo_close }, /* close */
203 { &vop_access_desc, nfsspec_access }, /* access */
204 { &vop_getattr_desc, nfs_getattr }, /* getattr */
205 { &vop_setattr_desc, nfs_setattr }, /* setattr */
206 { &vop_read_desc, nfsfifo_read }, /* read */
207 { &vop_write_desc, nfsfifo_write }, /* write */
208 { &vop_lease_desc, fifo_lease_check }, /* lease */
209 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
210 { &vop_ioctl_desc, fifo_ioctl }, /* ioctl */
211 { &vop_poll_desc, fifo_poll }, /* poll */
212 { &vop_kqfilter_desc, fifo_kqfilter }, /* kqfilter */
213 { &vop_revoke_desc, fifo_revoke }, /* revoke */
214 { &vop_mmap_desc, fifo_mmap }, /* mmap */
215 { &vop_fsync_desc, nfs_fsync }, /* fsync */
216 { &vop_seek_desc, fifo_seek }, /* seek */
217 { &vop_remove_desc, fifo_remove }, /* remove */
218 { &vop_link_desc, fifo_link }, /* link */
219 { &vop_rename_desc, fifo_rename }, /* rename */
220 { &vop_mkdir_desc, fifo_mkdir }, /* mkdir */
221 { &vop_rmdir_desc, fifo_rmdir }, /* rmdir */
222 { &vop_symlink_desc, fifo_symlink }, /* symlink */
223 { &vop_readdir_desc, fifo_readdir }, /* readdir */
224 { &vop_readlink_desc, fifo_readlink }, /* readlink */
225 { &vop_abortop_desc, fifo_abortop }, /* abortop */
226 { &vop_inactive_desc, nfs_inactive }, /* inactive */
227 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
228 { &vop_lock_desc, nfs_lock }, /* lock */
229 { &vop_unlock_desc, nfs_unlock }, /* unlock */
230 { &vop_bmap_desc, fifo_bmap }, /* bmap */
231 { &vop_strategy_desc, genfs_badop }, /* strategy */
232 { &vop_print_desc, nfs_print }, /* print */
233 { &vop_islocked_desc, nfs_islocked }, /* islocked */
234 { &vop_pathconf_desc, fifo_pathconf }, /* pathconf */
235 { &vop_advlock_desc, fifo_advlock }, /* advlock */
236 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
237 { &vop_putpages_desc, fifo_putpages }, /* putpages */
238 { NULL, NULL }
239 };
240 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
241 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
242
243 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
244 size_t, kauth_cred_t, struct lwp *);
245 static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);
246
247 /*
248 * Global variables
249 */
250 extern u_int32_t nfs_true, nfs_false;
251 extern u_int32_t nfs_xdrneg1;
252 extern const nfstype nfsv3_type[9];
253
254 int nfs_numasync = 0;
255 #define DIRHDSIZ _DIRENT_NAMEOFF(dp)
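/*
 * UIO_ADVANCE moves a uio forward by siz bytes within its current
 * iovec only; callers must ensure the current iovec covers the span.
 * A negative siz backs the uio up, as done below when short or lost
 * writes have to be resent.
 */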
256 #define UIO_ADVANCE(uio, siz) \
257 (void)((uio)->uio_resid -= (siz), \
258 (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
259 (uio)->uio_iov->iov_len -= (siz))
260
261 static void nfs_cache_enter(struct vnode *, struct vnode *,
262 struct componentname *);
263
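/*
 * Enter a name cache entry for dvp/cnp -> vp, recording the child's
 * ctime and (if not already recorded) the directory's mtime, so that
 * nfs_lookup() can later decide whether the cached entry is still
 * trustworthy.
 */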
264 static void
265 nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
266 struct componentname *cnp)
267 {
268 struct nfsnode *dnp = VTONFS(dvp);
269
270 if (vp != NULL) {
271 struct nfsnode *np = VTONFS(vp);
272
273 np->n_ctime = np->n_vattr->va_ctime.tv_sec;
274 }
275
276 if (!timespecisset(&dnp->n_nctime))
277 dnp->n_nctime = dnp->n_vattr->va_mtime;
278
279 cache_enter(dvp, vp, cnp);
280 }
281
282 /*
283 * nfs null call from vfs.
284 */
285 int
286 nfs_null(vp, cred, l)
287 struct vnode *vp;
288 kauth_cred_t cred;
289 struct lwp *l;
290 {
291 char *bpos, *dpos;
292 int error = 0;
293 struct mbuf *mreq, *mrep, *md, *mb;
294 struct nfsnode *np = VTONFS(vp);
295
296 nfsm_reqhead(np, NFSPROC_NULL, 0);
297 nfsm_request(np, NFSPROC_NULL, l, cred);
298 nfsm_reqdone;
299 return (error);
300 }
301
302 /*
303 * nfs access vnode op.
304 * For nfs version 2, just return ok. File accesses may fail later.
305 * For nfs version 3, use the access rpc to check accessibility. If file modes
306 * are changed on the server, accesses might still fail later.
307 */
308 int
309 nfs_access(v)
310 void *v;
311 {
312 struct vop_access_args /* {
313 struct vnode *a_vp;
314 int a_mode;
315 kauth_cred_t a_cred;
316 struct lwp *a_l;
317 } */ *ap = v;
318 struct vnode *vp = ap->a_vp;
319 #ifndef NFS_V2_ONLY
320 u_int32_t *tl;
321 char *cp;
322 int32_t t1, t2;
323 char *bpos, *dpos, *cp2;
324 int error = 0, attrflag;
325 struct mbuf *mreq, *mrep, *md, *mb;
326 u_int32_t mode, rmode;
327 const int v3 = NFS_ISV3(vp);
328 #endif
329 int cachevalid;
330 struct nfsnode *np = VTONFS(vp);
331 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
332
333 cachevalid = (np->n_accstamp != -1 &&
334 (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) &&
335 np->n_accuid == kauth_cred_geteuid(ap->a_cred));
336
337 /*
338 * Check access cache first. If this request has been made for this
339 * uid shortly before, use the cached result.
340 */
341 if (cachevalid) {
342 if (!np->n_accerror) {
343 if ((np->n_accmode & ap->a_mode) == ap->a_mode)
344 return np->n_accerror;
345 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode)
346 return np->n_accerror;
347 }
348
349 #ifndef NFS_V2_ONLY
350 /*
351 * For nfs v3, do an access rpc, otherwise you are stuck emulating
352 * ufs_access() locally using the vattr. This may not be correct,
353 * since the server may apply other access criteria such as
354 * client uid-->server uid mapping that we do not know about, but
355 * this is better than just returning anything that is lying about
356 * in the cache.
357 */
358 if (v3) {
359 nfsstats.rpccnt[NFSPROC_ACCESS]++;
360 nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
361 nfsm_fhtom(np, v3);
362 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
363 if (ap->a_mode & VREAD)
364 mode = NFSV3ACCESS_READ;
365 else
366 mode = 0;
367 if (vp->v_type != VDIR) {
368 if (ap->a_mode & VWRITE)
369 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
370 if (ap->a_mode & VEXEC)
371 mode |= NFSV3ACCESS_EXECUTE;
372 } else {
373 if (ap->a_mode & VWRITE)
374 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
375 NFSV3ACCESS_DELETE);
376 if (ap->a_mode & VEXEC)
377 mode |= NFSV3ACCESS_LOOKUP;
378 }
379 *tl = txdr_unsigned(mode);
380 nfsm_request(np, NFSPROC_ACCESS, ap->a_l, ap->a_cred);
381 nfsm_postop_attr(vp, attrflag, 0);
382 if (!error) {
383 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
384 rmode = fxdr_unsigned(u_int32_t, *tl);
385 /*
386 * The NFS V3 spec does not clarify whether or not
387 * the returned access bits can be a superset of
388 * the ones requested, so...
389 */
390 if ((rmode & mode) != mode)
391 error = EACCES;
392 }
393 nfsm_reqdone;
394 } else
395 #endif
396 return (nfsspec_access(ap));
397 #ifndef NFS_V2_ONLY
398 /*
399 * Disallow write attempts on filesystems mounted read-only;
400 * unless the file is a socket, fifo, or a block or character
401 * device resident on the filesystem.
402 */
403 if (!error && (ap->a_mode & VWRITE) &&
404 (vp->v_mount->mnt_flag & MNT_RDONLY)) {
405 switch (vp->v_type) {
406 case VREG:
407 case VDIR:
408 case VLNK:
409 error = EROFS;
410 default:
411 break;
412 }
413 }
414
415 if (!error || error == EACCES) {
416 /*
417 * If we got the same result as for a previous,
418 * different request, OR it in. Don't update
419 * the timestamp in that case.
420 */
421 if (cachevalid && np->n_accstamp != -1 &&
422 error == np->n_accerror) {
423 if (!error)
424 np->n_accmode |= ap->a_mode;
425 else if ((np->n_accmode & ap->a_mode) == ap->a_mode)
426 np->n_accmode = ap->a_mode;
427 } else {
428 np->n_accstamp = time_uptime;
429 np->n_accuid = kauth_cred_geteuid(ap->a_cred);
430 np->n_accmode = ap->a_mode;
431 np->n_accerror = error;
432 }
433 }
434
435 return (error);
436 #endif
437 }
438
439 /*
440 * nfs open vnode op
441 * Check to see if the type is ok
442 * and that deletion is not in progress.
443 * For paged in text files, you will need to flush the page cache
444 * if consistency is lost.
445 */
446 /* ARGSUSED */
447 int
448 nfs_open(v)
449 void *v;
450 {
451 struct vop_open_args /* {
452 struct vnode *a_vp;
453 int a_mode;
454 kauth_cred_t a_cred;
455 struct lwp *a_l;
456 } */ *ap = v;
457 struct vnode *vp = ap->a_vp;
458 struct nfsnode *np = VTONFS(vp);
459 int error;
460
461 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
462 return (EACCES);
463 }
464
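/*
 * Stash the credentials used to open the file; they are reused for
 * read and write RPCs issued from below the buffer cache, where the
 * opener's credentials are otherwise unavailable.
 */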
465 if (ap->a_mode & FREAD) {
466 if (np->n_rcred != NULL)
467 kauth_cred_free(np->n_rcred);
468 np->n_rcred = ap->a_cred;
469 kauth_cred_hold(np->n_rcred);
470 }
471 if (ap->a_mode & FWRITE) {
472 if (np->n_wcred != NULL)
473 kauth_cred_free(np->n_wcred);
474 np->n_wcred = ap->a_cred;
475 kauth_cred_hold(np->n_wcred);
476 }
477
478 error = nfs_flushstalebuf(vp, ap->a_cred, ap->a_l, 0);
479 if (error)
480 return error;
481
482 NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */
483
484 return (0);
485 }
486
487 /*
488 * nfs close vnode op
489 * What an NFS client should do upon close after writing is a debatable issue.
490 * Most NFS clients push delayed writes to the server upon close, basically for
491 * two reasons:
492 * 1 - So that any write errors may be reported back to the client process
493 * doing the close system call. By far the two most likely errors are
494 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
495 * 2 - To put a worst case upper bound on cache inconsistency between
496 * multiple clients for the file.
497 * There is also a consistency problem for Version 2 of the protocol w.r.t.
498 * not being able to tell if other clients are writing a file concurrently,
499 * since there is no way of knowing if the changed modify time in the reply
500 * is only due to the write for this client.
501 * (NFS Version 3 provides weak cache consistency data in the reply that
502 * should be sufficient to detect and handle this case.)
503 *
504 * The current code does the following:
505 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
506 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
507 * or commit them (this satisfies 1 and 2 except for the
508 * case where the server crashes after this close but
509 * before the commit RPC, which is felt to be "good
510 * enough". Changing the last argument to nfs_flush() to
511 * a 1 would force a commit operation, if it is felt a
512 * commit is necessary now.
513 */
514 /* ARGSUSED */
515 int
516 nfs_close(v)
517 void *v;
518 {
519 struct vop_close_args /* {
520 struct vnodeop_desc *a_desc;
521 struct vnode *a_vp;
522 int a_fflag;
523 kauth_cred_t a_cred;
524 struct lwp *a_l;
525 } */ *ap = v;
526 struct vnode *vp = ap->a_vp;
527 struct nfsnode *np = VTONFS(vp);
528 int error = 0;
529 UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);
530
531 if (vp->v_type == VREG) {
532 if (np->n_flag & NMODIFIED) {
533 #ifndef NFS_V2_ONLY
534 if (NFS_ISV3(vp)) {
535 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_l, 0);
536 np->n_flag &= ~NMODIFIED;
537 } else
538 #endif
539 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_l, 1);
540 NFS_INVALIDATE_ATTRCACHE(np);
541 }
542 if (np->n_flag & NWRITEERR) {
543 np->n_flag &= ~NWRITEERR;
544 error = np->n_error;
545 }
546 }
547 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
548 return (error);
549 }
550
551 /*
552 * nfs getattr call from vfs.
553 */
554 int
555 nfs_getattr(v)
556 void *v;
557 {
558 struct vop_getattr_args /* {
559 struct vnode *a_vp;
560 struct vattr *a_vap;
561 kauth_cred_t a_cred;
562 struct lwp *a_l;
563 } */ *ap = v;
564 struct vnode *vp = ap->a_vp;
565 struct nfsnode *np = VTONFS(vp);
566 char *cp;
567 u_int32_t *tl;
568 int32_t t1, t2;
569 char *bpos, *dpos;
570 int error = 0;
571 struct mbuf *mreq, *mrep, *md, *mb;
572 const int v3 = NFS_ISV3(vp);
573
574 /*
575 * Update local times for special files.
576 */
577 if (np->n_flag & (NACC | NUPD))
578 np->n_flag |= NCHG;
579
580 /*
581 * if we have delayed truncation, do it now.
582 */
583 nfs_delayedtruncate(vp);
584
585 /*
586 * First look in the cache.
587 */
588 if (nfs_getattrcache(vp, ap->a_vap) == 0)
589 return (0);
590 nfsstats.rpccnt[NFSPROC_GETATTR]++;
591 nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
592 nfsm_fhtom(np, v3);
593 nfsm_request(np, NFSPROC_GETATTR, ap->a_l, ap->a_cred);
594 if (!error) {
595 nfsm_loadattr(vp, ap->a_vap, 0);
596 if (vp->v_type == VDIR &&
597 ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
598 ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
599 }
600 nfsm_reqdone;
601 return (error);
602 }
603
604 /*
605 * nfs setattr call.
606 */
607 int
608 nfs_setattr(v)
609 void *v;
610 {
611 struct vop_setattr_args /* {
612 struct vnodeop_desc *a_desc;
613 struct vnode *a_vp;
614 struct vattr *a_vap;
615 kauth_cred_t a_cred;
616 struct lwp *a_l;
617 } */ *ap = v;
618 struct vnode *vp = ap->a_vp;
619 struct nfsnode *np = VTONFS(vp);
620 struct vattr *vap = ap->a_vap;
621 int error = 0;
622 u_quad_t tsize = 0;
623
624 /*
625 * Setting of flags is not supported.
626 */
627 if (vap->va_flags != VNOVAL)
628 return (EOPNOTSUPP);
629
630 /*
631 * Disallow write attempts if the filesystem is mounted read-only.
632 */
633 if ((vap->va_uid != (uid_t)VNOVAL ||
634 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
635 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
636 (vp->v_mount->mnt_flag & MNT_RDONLY))
637 return (EROFS);
638 if (vap->va_size != VNOVAL) {
639 switch (vp->v_type) {
640 case VDIR:
641 return (EISDIR);
642 case VCHR:
643 case VBLK:
644 case VSOCK:
645 case VFIFO:
646 if (vap->va_mtime.tv_sec == VNOVAL &&
647 vap->va_atime.tv_sec == VNOVAL &&
648 vap->va_mode == (mode_t)VNOVAL &&
649 vap->va_uid == (uid_t)VNOVAL &&
650 vap->va_gid == (gid_t)VNOVAL)
651 return (0);
652 vap->va_size = VNOVAL;
653 break;
654 default:
655 /*
656 * Disallow write attempts if the filesystem is
657 * mounted read-only.
658 */
659 if (vp->v_mount->mnt_flag & MNT_RDONLY)
660 return (EROFS);
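/*
 * Hold the genfs node lock across the size change and the
 * SETATTR RPC below so that the size seen by the page cache
 * stays consistent with what the server is told.
 */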
661 genfs_node_wrlock(vp);
662 uvm_vnp_setsize(vp, vap->va_size);
663 tsize = np->n_size;
664 np->n_size = vap->va_size;
665 if (vap->va_size == 0)
666 error = nfs_vinvalbuf(vp, 0,
667 ap->a_cred, ap->a_l, 1);
668 else
669 error = nfs_vinvalbuf(vp, V_SAVE,
670 ap->a_cred, ap->a_l, 1);
671 if (error) {
672 uvm_vnp_setsize(vp, tsize);
673 genfs_node_unlock(vp);
674 return (error);
675 }
676 np->n_vattr->va_size = vap->va_size;
677 }
678 } else {
679 /*
680 * flush files before setattr because a later write of
681 * cached data might change timestamps or reset sugid bits
682 */
683 if ((vap->va_mtime.tv_sec != VNOVAL ||
684 vap->va_atime.tv_sec != VNOVAL ||
685 vap->va_mode != VNOVAL) &&
686 vp->v_type == VREG &&
687 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
688 ap->a_l, 1)) == EINTR)
689 return (error);
690 }
691 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_l);
692 if (vap->va_size != VNOVAL) {
693 if (error) {
694 np->n_size = np->n_vattr->va_size = tsize;
695 uvm_vnp_setsize(vp, np->n_size);
696 }
697 genfs_node_unlock(vp);
698 }
699 VN_KNOTE(vp, NOTE_ATTRIB);
700 return (error);
701 }
702
703 /*
704 * Do an nfs setattr rpc.
705 */
706 int
707 nfs_setattrrpc(vp, vap, cred, l)
708 struct vnode *vp;
709 struct vattr *vap;
710 kauth_cred_t cred;
711 struct lwp *l;
712 {
713 struct nfsv2_sattr *sp;
714 char *cp;
715 int32_t t1, t2;
716 char *bpos, *dpos;
717 u_int32_t *tl;
718 int error = 0;
719 struct mbuf *mreq, *mrep, *md, *mb;
720 const int v3 = NFS_ISV3(vp);
721 struct nfsnode *np = VTONFS(vp);
722 #ifndef NFS_V2_ONLY
723 int wccflag = NFSV3_WCCRATTR;
724 char *cp2;
725 #endif
726
727 nfsstats.rpccnt[NFSPROC_SETATTR]++;
728 nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
729 nfsm_fhtom(np, v3);
730 #ifndef NFS_V2_ONLY
731 if (v3) {
732 nfsm_v3attrbuild(vap, true);
733 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
734 *tl = nfs_false;
735 } else {
736 #endif
737 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
738 if (vap->va_mode == (mode_t)VNOVAL)
739 sp->sa_mode = nfs_xdrneg1;
740 else
741 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
742 if (vap->va_uid == (uid_t)VNOVAL)
743 sp->sa_uid = nfs_xdrneg1;
744 else
745 sp->sa_uid = txdr_unsigned(vap->va_uid);
746 if (vap->va_gid == (gid_t)VNOVAL)
747 sp->sa_gid = nfs_xdrneg1;
748 else
749 sp->sa_gid = txdr_unsigned(vap->va_gid);
750 sp->sa_size = txdr_unsigned(vap->va_size);
751 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
752 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
753 #ifndef NFS_V2_ONLY
754 }
755 #endif
756 nfsm_request(np, NFSPROC_SETATTR, l, cred);
757 #ifndef NFS_V2_ONLY
758 if (v3) {
759 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
760 } else
761 #endif
762 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
763 nfsm_reqdone;
764 return (error);
765 }
766
767 /*
768 * nfs lookup call, one step at a time...
769 * First look in cache
770 * If not found, unlock the directory nfsnode and do the rpc
771 *
772 * This code is full of lock/unlock statements and checks, because
773 * we continue after cache_lookup has finished (we need to check
774 * with the attr cache and do an rpc if it has timed out). This means
775 * that the locking effects of cache_lookup have to be taken into
776 * account.
777 */
778 int
779 nfs_lookup(v)
780 void *v;
781 {
782 struct vop_lookup_args /* {
783 struct vnodeop_desc *a_desc;
784 struct vnode *a_dvp;
785 struct vnode **a_vpp;
786 struct componentname *a_cnp;
787 } */ *ap = v;
788 struct componentname *cnp = ap->a_cnp;
789 struct vnode *dvp = ap->a_dvp;
790 struct vnode **vpp = ap->a_vpp;
791 int flags;
792 struct vnode *newvp;
793 u_int32_t *tl;
794 char *cp;
795 int32_t t1, t2;
796 char *bpos, *dpos, *cp2;
797 struct mbuf *mreq, *mrep, *md, *mb;
798 long len;
799 nfsfh_t *fhp;
800 struct nfsnode *np;
801 int error = 0, attrflag, fhsize;
802 const int v3 = NFS_ISV3(dvp);
803
804 flags = cnp->cn_flags;
805
806 *vpp = NULLVP;
807 newvp = NULLVP;
808 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
809 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
810 return (EROFS);
811 if (dvp->v_type != VDIR)
812 return (ENOTDIR);
813
814 /*
815 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
816 */
817 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
818 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_lwp);
819 if (error)
820 return error;
821 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
822 return EISDIR;
823 VREF(dvp);
824 *vpp = dvp;
825 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
826 cnp->cn_flags |= SAVENAME;
827 return 0;
828 }
829
830 np = VTONFS(dvp);
831
832 /*
833 * Before tediously performing a linear scan of the directory,
834 * check the name cache to see if the directory/name pair
835 * we are looking for is known already.
836 * If the directory/name pair is found in the name cache,
837 * we have to ensure the directory has not changed from
838 * the time the cache entry has been created. If it has,
839 * the cache entry has to be ignored.
840 */
841 error = cache_lookup_raw(dvp, vpp, cnp);
842 KASSERT(dvp != *vpp);
843 if (error >= 0) {
844 struct vattr vattr;
845 int err2;
846
847 if (error && error != ENOENT) {
848 *vpp = NULLVP;
849 return error;
850 }
851
852 err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_lwp);
853 if (err2 != 0) {
854 if (error == 0)
855 vrele(*vpp);
856 *vpp = NULLVP;
857 return err2;
858 }
859
860 if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred,
861 cnp->cn_lwp) || timespeccmp(&vattr.va_mtime,
862 &VTONFS(dvp)->n_nctime, !=)) {
863 if (error == 0) {
864 vrele(*vpp);
865 *vpp = NULLVP;
866 }
867 cache_purge1(dvp, NULL, PURGE_CHILDREN);
868 timespecclear(&np->n_nctime);
869 goto dorpc;
870 }
871
872 if (error == ENOENT) {
873 goto noentry;
874 }
875
876 newvp = *vpp;
877 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_lwp)
878 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
879 nfsstats.lookupcache_hits++;
880 if ((flags & ISDOTDOT) != 0) {
881 VOP_UNLOCK(dvp, 0);
882 }
883 error = vn_lock(newvp, LK_EXCLUSIVE);
884 if ((flags & ISDOTDOT) != 0) {
885 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
886 }
887 if (error) {
888 /* newvp has been revoked. */
889 vrele(newvp);
890 *vpp = NULL;
891 return error;
892 }
893 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
894 cnp->cn_flags |= SAVENAME;
895 KASSERT(newvp->v_type != VNON);
896 return (0);
897 }
898 cache_purge1(newvp, NULL, PURGE_PARENTS);
899 vrele(newvp);
900 *vpp = NULLVP;
901 }
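/*
 * Name cache miss, or the cached entry could not be revalidated:
 * fall through and do the LOOKUP RPC.
 */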
902 dorpc:
903 #if 0
904 /*
905 * because nfsv3 has the same CREATE semantics as ours,
906 * we don't have to perform LOOKUPs beforehand.
907 *
908 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
909 * XXX although we have no way to know if O_EXCL is requested or not.
910 */
911
912 if (v3 && cnp->cn_nameiop == CREATE &&
913 (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
914 (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
915 cnp->cn_flags |= SAVENAME;
916 return (EJUSTRETURN);
917 }
918 #endif /* 0 */
919
920 error = 0;
921 newvp = NULLVP;
922 nfsstats.lookupcache_misses++;
923 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
924 len = cnp->cn_namelen;
925 nfsm_reqhead(np, NFSPROC_LOOKUP,
926 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
927 nfsm_fhtom(np, v3);
928 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
929 nfsm_request(np, NFSPROC_LOOKUP, cnp->cn_lwp, cnp->cn_cred);
930 if (error) {
931 nfsm_postop_attr(dvp, attrflag, 0);
932 m_freem(mrep);
933 goto nfsmout;
934 }
935 nfsm_getfh(fhp, fhsize, v3);
936
937 /*
938 * Handle RENAME case...
939 */
940 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
941 if (NFS_CMPFH(np, fhp, fhsize)) {
942 m_freem(mrep);
943 return (EISDIR);
944 }
945 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
946 if (error) {
947 m_freem(mrep);
948 return error;
949 }
950 newvp = NFSTOV(np);
951 #ifndef NFS_V2_ONLY
952 if (v3) {
953 nfsm_postop_attr(newvp, attrflag, 0);
954 nfsm_postop_attr(dvp, attrflag, 0);
955 } else
956 #endif
957 nfsm_loadattr(newvp, (struct vattr *)0, 0);
958 *vpp = newvp;
959 m_freem(mrep);
960 cnp->cn_flags |= SAVENAME;
961 goto validate;
962 }
963
964 /*
965 * The postop attr handling is duplicated for each if case,
966 * because it should be done while dvp is locked (unlocking
967 * dvp is different for each case).
968 */
969
970 if (NFS_CMPFH(np, fhp, fhsize)) {
971 /*
972 * "." lookup
973 */
974 VREF(dvp);
975 newvp = dvp;
976 #ifndef NFS_V2_ONLY
977 if (v3) {
978 nfsm_postop_attr(newvp, attrflag, 0);
979 nfsm_postop_attr(dvp, attrflag, 0);
980 } else
981 #endif
982 nfsm_loadattr(newvp, (struct vattr *)0, 0);
983 } else if (flags & ISDOTDOT) {
984 /*
985 * ".." lookup
986 */
987 VOP_UNLOCK(dvp, 0);
988 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
989 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
990 if (error) {
991 m_freem(mrep);
992 return error;
993 }
994 newvp = NFSTOV(np);
995
996 #ifndef NFS_V2_ONLY
997 if (v3) {
998 nfsm_postop_attr(newvp, attrflag, 0);
999 nfsm_postop_attr(dvp, attrflag, 0);
1000 } else
1001 #endif
1002 nfsm_loadattr(newvp, (struct vattr *)0, 0);
1003 } else {
1004 /*
1005 * Other lookups.
1006 */
1007 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
1008 if (error) {
1009 m_freem(mrep);
1010 return error;
1011 }
1012 newvp = NFSTOV(np);
1013 #ifndef NFS_V2_ONLY
1014 if (v3) {
1015 nfsm_postop_attr(newvp, attrflag, 0);
1016 nfsm_postop_attr(dvp, attrflag, 0);
1017 } else
1018 #endif
1019 nfsm_loadattr(newvp, (struct vattr *)0, 0);
1020 }
1021 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
1022 cnp->cn_flags |= SAVENAME;
1023 if ((cnp->cn_flags & MAKEENTRY) &&
1024 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
1025 nfs_cache_enter(dvp, newvp, cnp);
1026 }
1027 *vpp = newvp;
1028 nfsm_reqdone;
1029 if (error) {
1030 /*
1031 * We get here only because of errors returned by
1032 * the RPC. Otherwise we'll have returned above
1033 * (the nfsm_* macros will jump to nfsm_reqdone
1034 * on error).
1035 */
1036 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) &&
1037 cnp->cn_nameiop != CREATE) {
1038 nfs_cache_enter(dvp, NULL, cnp);
1039 }
1040 if (newvp != NULLVP) {
1041 if (newvp == dvp) {
1042 vrele(newvp);
1043 } else {
1044 vput(newvp);
1045 }
1046 }
1047 noentry:
1048 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
1049 (flags & ISLASTCN) && error == ENOENT) {
1050 if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
1051 error = EROFS;
1052 } else {
1053 error = EJUSTRETURN;
1054 cnp->cn_flags |= SAVENAME;
1055 }
1056 }
1057 *vpp = NULL;
1058 return error;
1059 }
1060
1061 validate:
1062 /*
1063 * make sure we have valid type and size.
1064 */
1065
1066 newvp = *vpp;
1067 if (newvp->v_type == VNON) {
1068 struct vattr vattr; /* dummy */
1069
1070 KASSERT(VTONFS(newvp)->n_attrstamp == 0);
1071 error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_lwp);
1072 if (error) {
1073 vput(newvp);
1074 *vpp = NULL;
1075 }
1076 }
1077
1078 return error;
1079 }
1080
1081 /*
1082 * nfs read call.
1083 * Just call nfs_bioread() to do the work.
1084 */
1085 int
1086 nfs_read(v)
1087 void *v;
1088 {
1089 struct vop_read_args /* {
1090 struct vnode *a_vp;
1091 struct uio *a_uio;
1092 int a_ioflag;
1093 kauth_cred_t a_cred;
1094 } */ *ap = v;
1095 struct vnode *vp = ap->a_vp;
1096
1097 if (vp->v_type != VREG)
1098 return EISDIR;
1099 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
1100 }
1101
1102 /*
1103 * nfs readlink call
1104 */
1105 int
1106 nfs_readlink(v)
1107 void *v;
1108 {
1109 struct vop_readlink_args /* {
1110 struct vnode *a_vp;
1111 struct uio *a_uio;
1112 kauth_cred_t a_cred;
1113 } */ *ap = v;
1114 struct vnode *vp = ap->a_vp;
1115 struct nfsnode *np = VTONFS(vp);
1116
1117 if (vp->v_type != VLNK)
1118 return (EPERM);
1119
1120 if (np->n_rcred != NULL) {
1121 kauth_cred_free(np->n_rcred);
1122 }
1123 np->n_rcred = ap->a_cred;
1124 kauth_cred_hold(np->n_rcred);
1125
1126 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
1127 }
1128
1129 /*
1130 * Do a readlink rpc.
1131 * Called by nfs_doio() from below the buffer cache.
1132 */
1133 int
1134 nfs_readlinkrpc(vp, uiop, cred)
1135 struct vnode *vp;
1136 struct uio *uiop;
1137 kauth_cred_t cred;
1138 {
1139 u_int32_t *tl;
1140 char *cp;
1141 int32_t t1, t2;
1142 char *bpos, *dpos, *cp2;
1143 int error = 0;
1144 uint32_t len;
1145 struct mbuf *mreq, *mrep, *md, *mb;
1146 const int v3 = NFS_ISV3(vp);
1147 struct nfsnode *np = VTONFS(vp);
1148 #ifndef NFS_V2_ONLY
1149 int attrflag;
1150 #endif
1151
1152 nfsstats.rpccnt[NFSPROC_READLINK]++;
1153 nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
1154 nfsm_fhtom(np, v3);
1155 nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
1156 #ifndef NFS_V2_ONLY
1157 if (v3)
1158 nfsm_postop_attr(vp, attrflag, 0);
1159 #endif
1160 if (!error) {
1161 #ifndef NFS_V2_ONLY
1162 if (v3) {
1163 nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
1164 len = fxdr_unsigned(uint32_t, *tl);
1165 if (len > MAXPATHLEN) {
1166 /*
1167 * this pathname is too long for us.
1168 */
1169 m_freem(mrep);
1170 /* Solaris returns EINVAL. should we follow? */
1171 error = ENAMETOOLONG;
1172 goto nfsmout;
1173 }
1174 } else
1175 #endif
1176 {
1177 nfsm_strsiz(len, NFS_MAXPATHLEN);
1178 }
1179 nfsm_mtouio(uiop, len);
1180 }
1181 nfsm_reqdone;
1182 return (error);
1183 }
1184
1185 /*
1186 * nfs read rpc call
1187 * Ditto above
1188 */
1189 int
1190 nfs_readrpc(vp, uiop)
1191 struct vnode *vp;
1192 struct uio *uiop;
1193 {
1194 u_int32_t *tl;
1195 char *cp;
1196 int32_t t1, t2;
1197 char *bpos, *dpos, *cp2;
1198 struct mbuf *mreq, *mrep, *md, *mb;
1199 struct nfsmount *nmp;
1200 int error = 0, len, retlen, tsiz, eof, byte_count;
1201 const int v3 = NFS_ISV3(vp);
1202 struct nfsnode *np = VTONFS(vp);
1203 #ifndef NFS_V2_ONLY
1204 int attrflag;
1205 #endif
1206
1207 #ifndef nolint
1208 eof = 0;
1209 #endif
1210 nmp = VFSTONFS(vp->v_mount);
1211 tsiz = uiop->uio_resid;
1212 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1213 return (EFBIG);
1214 iostat_busy(nmp->nm_stats);
1215 byte_count = 0; /* count bytes actually transferred */
1216 while (tsiz > 0) {
1217 nfsstats.rpccnt[NFSPROC_READ]++;
1218 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1219 nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1220 nfsm_fhtom(np, v3);
1221 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1222 #ifndef NFS_V2_ONLY
1223 if (v3) {
1224 txdr_hyper(uiop->uio_offset, tl);
1225 *(tl + 2) = txdr_unsigned(len);
1226 } else
1227 #endif
1228 {
1229 *tl++ = txdr_unsigned(uiop->uio_offset);
1230 *tl++ = txdr_unsigned(len);
1231 *tl = 0;
1232 }
1233 nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
1234 #ifndef NFS_V2_ONLY
1235 if (v3) {
1236 nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
1237 if (error) {
1238 m_freem(mrep);
1239 goto nfsmout;
1240 }
1241 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1242 eof = fxdr_unsigned(int, *(tl + 1));
1243 } else
1244 #endif
1245 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1246 nfsm_strsiz(retlen, nmp->nm_rsize);
1247 nfsm_mtouio(uiop, retlen);
1248 m_freem(mrep);
1249 tsiz -= retlen;
1250 byte_count += retlen;
1251 #ifndef NFS_V2_ONLY
1252 if (v3) {
1253 if (eof || retlen == 0)
1254 tsiz = 0;
1255 } else
1256 #endif
1257 if (retlen < len)
1258 tsiz = 0;
1259 }
1260 nfsmout:
1261 iostat_unbusy(nmp->nm_stats, byte_count, 1);
1262 return (error);
1263 }
1264
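/*
 * Context shared between nfs_writerpc() and nfs_writerpc_extfree():
 * it counts the loaned (M_EXT_ROMAP) mbufs that still reference the
 * caller's pages, so the writer can sleep on nwc_cv until the last
 * one has been freed.
 */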
1265 struct nfs_writerpc_context {
1266 kmutex_t nwc_lock;
1267 kcondvar_t nwc_cv;
1268 int nwc_mbufcount;
1269 };
1270
1271 /*
1272 * free the mbuf used to refer to protected pages during a write rpc call.
1273 * called at splvm.
1274 */
1275 static void
1276 nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
1277 {
1278 struct nfs_writerpc_context *ctx = arg;
1279
1280 KASSERT(m != NULL);
1281 KASSERT(ctx != NULL);
1282 pool_cache_put(&mbpool_cache, m);
1283 mutex_enter(&ctx->nwc_lock);
1284 if (--ctx->nwc_mbufcount == 0) {
1285 cv_signal(&ctx->nwc_cv);
1286 }
1287 mutex_exit(&ctx->nwc_lock);
1288 }
1289
1290 /*
1291 * nfs write call
1292 */
1293 int
1294 nfs_writerpc(vp, uiop, iomode, pageprotected, stalewriteverfp)
1295 struct vnode *vp;
1296 struct uio *uiop;
1297 int *iomode;
1298 bool pageprotected;
1299 bool *stalewriteverfp;
1300 {
1301 u_int32_t *tl;
1302 char *cp;
1303 int32_t t1, t2;
1304 char *bpos, *dpos;
1305 struct mbuf *mreq, *mrep, *md, *mb;
1306 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1307 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
1308 const int v3 = NFS_ISV3(vp);
1309 int committed = NFSV3WRITE_FILESYNC;
1310 struct nfsnode *np = VTONFS(vp);
1311 struct nfs_writerpc_context ctx;
1312 int byte_count;
1313 struct lwp *l = NULL;
1314 size_t origresid;
1315 #ifndef NFS_V2_ONLY
1316 char *cp2;
1317 int rlen, commit;
1318 #endif
1319
1320 mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
1321 cv_init(&ctx.nwc_cv, "nfsmblk");
1322 ctx.nwc_mbufcount = 1;
1323
1324 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1325 panic("writerpc readonly vp %p", vp);
1326 }
1327
1328 #ifdef DIAGNOSTIC
1329 if (uiop->uio_iovcnt != 1)
1330 panic("nfs: writerpc iovcnt > 1");
1331 #endif
1332 tsiz = uiop->uio_resid;
1333 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1334 return (EFBIG);
1335 if (pageprotected) {
1336 l = curlwp;
1337 uvm_lwp_hold(l);
1338 }
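/*
 * Restart point: if the server's write verifier changes while a
 * multi-RPC transfer of UNSTABLE writes is in progress, data from
 * earlier iterations may have been lost, so the transfer is backed
 * up and resent from here (see the stalewriteverf handling below).
 */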
1339 retry:
1340 origresid = uiop->uio_resid;
1341 KASSERT(origresid == uiop->uio_iov->iov_len);
1342 iostat_busy(nmp->nm_stats);
1343 byte_count = 0; /* count of bytes actually written */
1344 while (tsiz > 0) {
1345 uint32_t datalen; /* data bytes need to be allocated in mbuf */
1346 uint32_t backup;
1347 bool stalewriteverf = false;
1348
1349 nfsstats.rpccnt[NFSPROC_WRITE]++;
1350 len = min(tsiz, nmp->nm_wsize);
1351 datalen = pageprotected ? 0 : nfsm_rndup(len);
1352 nfsm_reqhead(np, NFSPROC_WRITE,
1353 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
1354 nfsm_fhtom(np, v3);
1355 #ifndef NFS_V2_ONLY
1356 if (v3) {
1357 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1358 txdr_hyper(uiop->uio_offset, tl);
1359 tl += 2;
1360 *tl++ = txdr_unsigned(len);
1361 *tl++ = txdr_unsigned(*iomode);
1362 *tl = txdr_unsigned(len);
1363 } else
1364 #endif
1365 {
1366 u_int32_t x;
1367
1368 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1369 /* Set both "begin" and "current" to non-garbage. */
1370 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1371 *tl++ = x; /* "begin offset" */
1372 *tl++ = x; /* "current offset" */
1373 x = txdr_unsigned(len);
1374 *tl++ = x; /* total to this offset */
1375 *tl = x; /* size of this write */
1376
1377 }
1378 if (pageprotected) {
1379 /*
1380 * since we know pages can't be modified during i/o,
1381 * no need to copy them for us.
1382 */
1383 struct mbuf *m;
1384 struct iovec *iovp = uiop->uio_iov;
1385
1386 m = m_get(M_WAIT, MT_DATA);
1387 MCLAIM(m, &nfs_mowner);
1388 MEXTADD(m, iovp->iov_base, len, M_MBUF,
1389 nfs_writerpc_extfree, &ctx);
1390 m->m_flags |= M_EXT_ROMAP;
1391 m->m_len = len;
1392 mb->m_next = m;
1393 /*
1394 * no need to maintain mb and bpos here
1395 * because no one cares about them later.
1396 */
1397 #if 0
1398 mb = m;
1399 bpos = mtod(mb, char *) + mb->m_len;
1400 #endif
1401 UIO_ADVANCE(uiop, len);
1402 uiop->uio_offset += len;
1403 mutex_enter(&ctx.nwc_lock);
1404 ctx.nwc_mbufcount++;
1405 mutex_exit(&ctx.nwc_lock);
1406 nfs_zeropad(mb, 0, nfsm_padlen(len));
1407 } else {
1408 nfsm_uiotom(uiop, len);
1409 }
1410 nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
1411 #ifndef NFS_V2_ONLY
1412 if (v3) {
1413 wccflag = NFSV3_WCCCHK;
1414 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
1415 if (!error) {
1416 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1417 + NFSX_V3WRITEVERF);
1418 rlen = fxdr_unsigned(int, *tl++);
1419 if (rlen == 0) {
1420 error = NFSERR_IO;
1421 m_freem(mrep);
1422 break;
1423 } else if (rlen < len) {
1424 backup = len - rlen;
1425 UIO_ADVANCE(uiop, -backup);
1426 uiop->uio_offset -= backup;
1427 len = rlen;
1428 }
1429 commit = fxdr_unsigned(int, *tl++);
1430
1431 /*
1432 * Return the lowest commitment level
1433 * obtained by any of the RPCs.
1434 */
1435 if (committed == NFSV3WRITE_FILESYNC)
1436 committed = commit;
1437 else if (committed == NFSV3WRITE_DATASYNC &&
1438 commit == NFSV3WRITE_UNSTABLE)
1439 committed = commit;
1440 mutex_enter(&nmp->nm_lock);
1441 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
1442 memcpy(nmp->nm_writeverf, tl,
1443 NFSX_V3WRITEVERF);
1444 nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
1445 } else if ((nmp->nm_iflag &
1446 NFSMNT_STALEWRITEVERF) ||
1447 memcmp(tl, nmp->nm_writeverf,
1448 NFSX_V3WRITEVERF)) {
1449 memcpy(nmp->nm_writeverf, tl,
1450 NFSX_V3WRITEVERF);
1451 /*
1452 * note NFSMNT_STALEWRITEVERF
1453 * if we're the first thread to
1454 * notice it.
1455 */
1456 if ((nmp->nm_iflag &
1457 NFSMNT_STALEWRITEVERF) == 0) {
1458 stalewriteverf = true;
1459 nmp->nm_iflag |=
1460 NFSMNT_STALEWRITEVERF;
1461 }
1462 }
1463 mutex_exit(&nmp->nm_lock);
1464 }
1465 } else
1466 #endif
1467 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1468 if (wccflag)
1469 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
1470 m_freem(mrep);
1471 if (error)
1472 break;
1473 tsiz -= len;
1474 byte_count += len;
1475 if (stalewriteverf) {
1476 *stalewriteverfp = true;
1477 stalewriteverf = false;
1478 if (committed == NFSV3WRITE_UNSTABLE &&
1479 len != origresid) {
1480 /*
1481 * if our write requests weren't atomic but
1482 * unstable, data from previous iterations
1483 * might already have been lost by now.
1484 * in that case, we should resend it to the nfsd.
1485 */
1486 backup = origresid - tsiz;
1487 UIO_ADVANCE(uiop, -backup);
1488 uiop->uio_offset -= backup;
1489 tsiz = origresid;
1490 goto retry;
1491 }
1492 }
1493 }
1494 nfsmout:
1495 iostat_unbusy(nmp->nm_stats, byte_count, 0);
1496 if (pageprotected) {
1497 /*
1498 * wait until mbufs go away.
1499 * retransmitted mbufs can survive longer than rpc requests
1500 * themselves.
1501 */
1502 mutex_enter(&ctx.nwc_lock);
1503 ctx.nwc_mbufcount--;
1504 while (ctx.nwc_mbufcount > 0) {
1505 cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
1506 }
1507 mutex_exit(&ctx.nwc_lock);
1508 uvm_lwp_rele(l);
1509 }
1510 mutex_destroy(&ctx.nwc_lock);
1511 cv_destroy(&ctx.nwc_cv);
1512 *iomode = committed;
1513 if (error)
1514 uiop->uio_resid = tsiz;
1515 return (error);
1516 }
1517
1518 /*
1519 * nfs mknod rpc
1520 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1521 * mode set to specify the file type and the size field used for rdev.
1522 */
1523 int
1524 nfs_mknodrpc(dvp, vpp, cnp, vap)
1525 struct vnode *dvp;
1526 struct vnode **vpp;
1527 struct componentname *cnp;
1528 struct vattr *vap;
1529 {
1530 struct nfsv2_sattr *sp;
1531 u_int32_t *tl;
1532 char *cp;
1533 int32_t t1, t2;
1534 struct vnode *newvp = (struct vnode *)0;
1535 struct nfsnode *dnp, *np;
1536 char *cp2;
1537 char *bpos, *dpos;
1538 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1539 struct mbuf *mreq, *mrep, *md, *mb;
1540 u_int32_t rdev;
1541 const int v3 = NFS_ISV3(dvp);
1542
1543 if (vap->va_type == VCHR || vap->va_type == VBLK)
1544 rdev = txdr_unsigned(vap->va_rdev);
1545 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1546 rdev = nfs_xdrneg1;
1547 else {
1548 VOP_ABORTOP(dvp, cnp);
1549 vput(dvp);
1550 return (EOPNOTSUPP);
1551 }
1552 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1553 dnp = VTONFS(dvp);
1554 nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1555 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1556 nfsm_fhtom(dnp, v3);
1557 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1558 #ifndef NFS_V2_ONLY
1559 if (v3) {
1560 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1561 *tl++ = vtonfsv3_type(vap->va_type);
1562 nfsm_v3attrbuild(vap, false);
1563 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1564 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1565 *tl++ = txdr_unsigned(major(vap->va_rdev));
1566 *tl = txdr_unsigned(minor(vap->va_rdev));
1567 }
1568 } else
1569 #endif
1570 {
1571 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1572 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1573 sp->sa_uid = nfs_xdrneg1;
1574 sp->sa_gid = nfs_xdrneg1;
1575 sp->sa_size = rdev;
1576 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1577 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1578 }
1579 nfsm_request(dnp, NFSPROC_MKNOD, cnp->cn_lwp, cnp->cn_cred);
1580 if (!error) {
1581 nfsm_mtofh(dvp, newvp, v3, gotvp);
1582 if (!gotvp) {
1583 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1584 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp, &np);
1585 if (!error)
1586 newvp = NFSTOV(np);
1587 }
1588 }
1589 #ifndef NFS_V2_ONLY
1590 if (v3)
1591 nfsm_wcc_data(dvp, wccflag, 0, !error);
1592 #endif
1593 nfsm_reqdone;
1594 if (error) {
1595 if (newvp)
1596 vput(newvp);
1597 } else {
1598 if (cnp->cn_flags & MAKEENTRY)
1599 nfs_cache_enter(dvp, newvp, cnp);
1600 *vpp = newvp;
1601 }
1602 PNBUF_PUT(cnp->cn_pnbuf);
1603 VTONFS(dvp)->n_flag |= NMODIFIED;
1604 if (!wccflag)
1605 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1606 vput(dvp);
1607 return (error);
1608 }
1609
1610 /*
1611 * nfs mknod vop
1612 * just call nfs_mknodrpc() to do the work.
1613 */
1614 /* ARGSUSED */
1615 int
1616 nfs_mknod(v)
1617 void *v;
1618 {
1619 struct vop_mknod_args /* {
1620 struct vnode *a_dvp;
1621 struct vnode **a_vpp;
1622 struct componentname *a_cnp;
1623 struct vattr *a_vap;
1624 } */ *ap = v;
1625 struct vnode *dvp = ap->a_dvp;
1626 struct componentname *cnp = ap->a_cnp;
1627 int error;
1628
1629 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
1630 VN_KNOTE(dvp, NOTE_WRITE);
1631 if (error == 0 || error == EEXIST)
1632 cache_purge1(dvp, cnp, 0);
1633 return (error);
1634 }
1635
1636 #ifndef NFS_V2_ONLY
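/*
 * Seed for the NFSv3 exclusive-create verifier; it is combined with
 * the primary IPv4 address (when one is configured) to make the
 * verifier reasonably unique across clients and calls.
 */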
1637 static u_long create_verf;
1638 #endif
1639 /*
1640 * nfs file create call
1641 */
1642 int
1643 nfs_create(v)
1644 void *v;
1645 {
1646 struct vop_create_args /* {
1647 struct vnode *a_dvp;
1648 struct vnode **a_vpp;
1649 struct componentname *a_cnp;
1650 struct vattr *a_vap;
1651 } */ *ap = v;
1652 struct vnode *dvp = ap->a_dvp;
1653 struct vattr *vap = ap->a_vap;
1654 struct componentname *cnp = ap->a_cnp;
1655 struct nfsv2_sattr *sp;
1656 u_int32_t *tl;
1657 char *cp;
1658 int32_t t1, t2;
1659 struct nfsnode *dnp, *np = (struct nfsnode *)0;
1660 struct vnode *newvp = (struct vnode *)0;
1661 char *bpos, *dpos, *cp2;
1662 int error, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1663 struct mbuf *mreq, *mrep, *md, *mb;
1664 const int v3 = NFS_ISV3(dvp);
1665
1666 /*
1667 * Oops, not for me..
1668 */
1669 if (vap->va_type == VSOCK)
1670 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1671
1672 KASSERT(vap->va_type == VREG);
1673
1674 #ifdef VA_EXCLUSIVE
1675 if (vap->va_vaflags & VA_EXCLUSIVE)
1676 fmode |= O_EXCL;
1677 #endif
1678 again:
1679 error = 0;
1680 nfsstats.rpccnt[NFSPROC_CREATE]++;
1681 dnp = VTONFS(dvp);
1682 nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1683 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1684 nfsm_fhtom(dnp, v3);
1685 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1686 #ifndef NFS_V2_ONLY
1687 if (v3) {
1688 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1689 if (fmode & O_EXCL) {
1690 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1691 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1692 #ifdef INET
1693 if (TAILQ_FIRST(&in_ifaddrhead))
1694 *tl++ = TAILQ_FIRST(&in_ifaddrhead)->
1695 ia_addr.sin_addr.s_addr;
1696 else
1697 *tl++ = create_verf;
1698 #else
1699 *tl++ = create_verf;
1700 #endif
1701 *tl = ++create_verf;
1702 } else {
1703 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1704 nfsm_v3attrbuild(vap, false);
1705 }
1706 } else
1707 #endif
1708 {
1709 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1710 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1711 sp->sa_uid = nfs_xdrneg1;
1712 sp->sa_gid = nfs_xdrneg1;
1713 sp->sa_size = 0;
1714 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1715 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1716 }
1717 nfsm_request(dnp, NFSPROC_CREATE, cnp->cn_lwp, cnp->cn_cred);
1718 if (!error) {
1719 nfsm_mtofh(dvp, newvp, v3, gotvp);
1720 if (!gotvp) {
1721 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1722 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp, &np);
1723 if (!error)
1724 newvp = NFSTOV(np);
1725 }
1726 }
1727 #ifndef NFS_V2_ONLY
1728 if (v3)
1729 nfsm_wcc_data(dvp, wccflag, 0, !error);
1730 #endif
1731 nfsm_reqdone;
1732 if (error) {
1733 /*
1734 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
1735 */
1736 if (v3 && (fmode & O_EXCL) && error == ENOTSUP) {
1737 fmode &= ~O_EXCL;
1738 goto again;
1739 }
1740 } else if (v3 && (fmode & O_EXCL)) {
1741 struct timespec ts;
1742
1743 getnanotime(&ts);
1744
1745 /*
1746 * make sure that we'll update timestamps as
1747 * most server implementations use them to store
1748 * the create verifier.
1749 *
1750 * XXX it's better to use TOSERVER always.
1751 */
1752
1753 if (vap->va_atime.tv_sec == VNOVAL)
1754 vap->va_atime = ts;
1755 if (vap->va_mtime.tv_sec == VNOVAL)
1756 vap->va_mtime = ts;
1757
1758 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_lwp);
1759 }
1760 if (error == 0) {
1761 if (cnp->cn_flags & MAKEENTRY)
1762 nfs_cache_enter(dvp, newvp, cnp);
1763 else
1764 cache_purge1(dvp, cnp, 0);
1765 *ap->a_vpp = newvp;
1766 } else {
1767 if (newvp)
1768 vput(newvp);
1769 if (error == EEXIST)
1770 cache_purge1(dvp, cnp, 0);
1771 }
1772 PNBUF_PUT(cnp->cn_pnbuf);
1773 VTONFS(dvp)->n_flag |= NMODIFIED;
1774 if (!wccflag)
1775 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1776 VN_KNOTE(ap->a_dvp, NOTE_WRITE);
1777 vput(dvp);
1778 return (error);
1779 }
1780
1781 /*
1782 * nfs file remove call
1783 * To try and make nfs semantics closer to ufs semantics, a file that has
1784 * other processes using the vnode is renamed instead of removed and then
1785 * removed later on the last close.
1786 * - If v_usecount > 1
1787 * If a rename is not already in the works
1788 * call nfs_sillyrename() to set it up
1789 * else
1790 * do the remove rpc
1791 */
1792 int
1793 nfs_remove(v)
1794 void *v;
1795 {
1796 struct vop_remove_args /* {
1797 struct vnodeop_desc *a_desc;
1798 struct vnode * a_dvp;
1799 struct vnode * a_vp;
1800 struct componentname * a_cnp;
1801 } */ *ap = v;
1802 struct vnode *vp = ap->a_vp;
1803 struct vnode *dvp = ap->a_dvp;
1804 struct componentname *cnp = ap->a_cnp;
1805 struct nfsnode *np = VTONFS(vp);
1806 int error = 0;
1807 struct vattr vattr;
1808
1809 #ifndef DIAGNOSTIC
1810 if ((cnp->cn_flags & HASBUF) == 0)
1811 panic("nfs_remove: no name");
1812 if (vp->v_usecount < 1)
1813 panic("nfs_remove: bad v_usecount");
1814 #endif
1815 if (vp->v_type == VDIR)
1816 error = EPERM;
1817 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1818 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_lwp) == 0 &&
1819 vattr.va_nlink > 1)) {
1820 /*
1821 * Purge the name cache so that the chance of a lookup for
1822 * the name succeeding while the remove is in progress is
1823 * minimized. Without node locking it can still happen, such
1824 * that an I/O op returns ESTALE, but that can happen anyway
1825 * when another host removes the file, so it is tolerated here.
1826 */
1827 cache_purge(vp);
1828 /*
1829 * throw away biocache buffers, mainly to avoid
1830 * unnecessary delayed writes later.
1831 */
1832 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_lwp, 1);
1833 /* Do the rpc */
1834 if (error != EINTR)
1835 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1836 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp);
1837 } else if (!np->n_sillyrename)
1838 error = nfs_sillyrename(dvp, vp, cnp, false);
1839 PNBUF_PUT(cnp->cn_pnbuf);
1840 if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
1841 vattr.va_nlink == 1) {
1842 np->n_flag |= NREMOVED;
1843 }
1844 NFS_INVALIDATE_ATTRCACHE(np);
1845 VN_KNOTE(vp, NOTE_DELETE);
1846 VN_KNOTE(dvp, NOTE_WRITE);
1847 if (dvp == vp)
1848 vrele(vp);
1849 else
1850 vput(vp);
1851 vput(dvp);
1852 return (error);
1853 }
1854
1855 /*
1856 * nfs file remove rpc called from nfs_inactive
1857 */
1858 int
1859 nfs_removeit(sp)
1860 struct sillyrename *sp;
1861 {
1862
1863 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1864 (struct lwp *)0));
1865 }
1866
1867 /*
1868 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1869 */
1870 int
1871 nfs_removerpc(dvp, name, namelen, cred, l)
1872 struct vnode *dvp;
1873 const char *name;
1874 int namelen;
1875 kauth_cred_t cred;
1876 struct lwp *l;
1877 {
1878 u_int32_t *tl;
1879 char *cp;
1880 #ifndef NFS_V2_ONLY
1881 int32_t t1;
1882 char *cp2;
1883 #endif
1884 int32_t t2;
1885 char *bpos, *dpos;
1886 int error = 0, wccflag = NFSV3_WCCRATTR;
1887 struct mbuf *mreq, *mrep, *md, *mb;
1888 const int v3 = NFS_ISV3(dvp);
1889 int rexmit = 0;
1890 struct nfsnode *dnp = VTONFS(dvp);
1891
1892 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1893 nfsm_reqhead(dnp, NFSPROC_REMOVE,
1894 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1895 nfsm_fhtom(dnp, v3);
1896 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1897 nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
1898 #ifndef NFS_V2_ONLY
1899 if (v3)
1900 nfsm_wcc_data(dvp, wccflag, 0, !error);
1901 #endif
1902 nfsm_reqdone;
1903 VTONFS(dvp)->n_flag |= NMODIFIED;
1904 if (!wccflag)
1905 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1906 /*
1907 * Kludge City: If the first reply to the remove rpc is lost,
1908 * the reply to the retransmitted request will be ENOENT
1909 * since the file was in fact removed.
1910 * Therefore, we cheat and return success.
1911 */
1912 if (rexmit && error == ENOENT)
1913 error = 0;
1914 return (error);
1915 }
1916
1917 /*
1918 * nfs file rename call
1919 */
1920 int
1921 nfs_rename(v)
1922 void *v;
1923 {
1924 struct vop_rename_args /* {
1925 struct vnode *a_fdvp;
1926 struct vnode *a_fvp;
1927 struct componentname *a_fcnp;
1928 struct vnode *a_tdvp;
1929 struct vnode *a_tvp;
1930 struct componentname *a_tcnp;
1931 } */ *ap = v;
1932 struct vnode *fvp = ap->a_fvp;
1933 struct vnode *tvp = ap->a_tvp;
1934 struct vnode *fdvp = ap->a_fdvp;
1935 struct vnode *tdvp = ap->a_tdvp;
1936 struct componentname *tcnp = ap->a_tcnp;
1937 struct componentname *fcnp = ap->a_fcnp;
1938 int error;
1939
1940 #ifndef DIAGNOSTIC
1941 if ((tcnp->cn_flags & HASBUF) == 0 ||
1942 (fcnp->cn_flags & HASBUF) == 0)
1943 panic("nfs_rename: no name");
1944 #endif
1945 /* Check for cross-device rename */
1946 if ((fvp->v_mount != tdvp->v_mount) ||
1947 (tvp && (fvp->v_mount != tvp->v_mount))) {
1948 error = EXDEV;
1949 goto out;
1950 }
1951
1952 /*
1953 * If the tvp exists and is in use, sillyrename it before doing the
1954 * rename of the new file over it.
1955 *
1956 * Have sillyrename use link instead of rename if possible,
1957 * so that we don't lose the file if the rename fails, and so
1958 * that there's no window when the "to" file doesn't exist.
1959 */
1960 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1961 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
1962 VN_KNOTE(tvp, NOTE_DELETE);
1963 vput(tvp);
1964 tvp = NULL;
1965 }
1966
1967 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1968 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1969 tcnp->cn_lwp);
1970
1971 VN_KNOTE(fdvp, NOTE_WRITE);
1972 VN_KNOTE(tdvp, NOTE_WRITE);
1973 if (error == 0 || error == EEXIST) {
1974 if (fvp->v_type == VDIR)
1975 cache_purge(fvp);
1976 else
1977 cache_purge1(fdvp, fcnp, 0);
1978 if (tvp != NULL && tvp->v_type == VDIR)
1979 cache_purge(tvp);
1980 else
1981 cache_purge1(tdvp, tcnp, 0);
1982 }
1983 out:
1984 if (tdvp == tvp)
1985 vrele(tdvp);
1986 else
1987 vput(tdvp);
1988 if (tvp)
1989 vput(tvp);
1990 vrele(fdvp);
1991 vrele(fvp);
1992 return (error);
1993 }
1994
1995 /*
1996 * nfs file rename rpc, called from nfs_sillyrename() below
1997 */
1998 int
1999 nfs_renameit(sdvp, scnp, sp)
2000 struct vnode *sdvp;
2001 struct componentname *scnp;
2002 struct sillyrename *sp;
2003 {
2004 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
2005 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_lwp));
2006 }
2007
2008 /*
2009 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
2010 */
2011 int
2012 nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, l)
2013 struct vnode *fdvp;
2014 const char *fnameptr;
2015 int fnamelen;
2016 struct vnode *tdvp;
2017 const char *tnameptr;
2018 int tnamelen;
2019 kauth_cred_t cred;
2020 struct lwp *l;
2021 {
2022 u_int32_t *tl;
2023 char *cp;
2024 #ifndef NFS_V2_ONLY
2025 int32_t t1;
2026 char *cp2;
2027 #endif
2028 int32_t t2;
2029 char *bpos, *dpos;
2030 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
2031 struct mbuf *mreq, *mrep, *md, *mb;
2032 const int v3 = NFS_ISV3(fdvp);
2033 int rexmit = 0;
2034 struct nfsnode *fdnp = VTONFS(fdvp);
2035
2036 nfsstats.rpccnt[NFSPROC_RENAME]++;
2037 nfsm_reqhead(fdnp, NFSPROC_RENAME,
2038 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
2039 nfsm_rndup(tnamelen));
2040 nfsm_fhtom(fdnp, v3);
2041 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
2042 nfsm_fhtom(VTONFS(tdvp), v3);
2043 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
2044 nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
2045 #ifndef NFS_V2_ONLY
2046 if (v3) {
2047 nfsm_wcc_data(fdvp, fwccflag, 0, !error);
2048 nfsm_wcc_data(tdvp, twccflag, 0, !error);
2049 }
2050 #endif
2051 nfsm_reqdone;
2052 VTONFS(fdvp)->n_flag |= NMODIFIED;
2053 VTONFS(tdvp)->n_flag |= NMODIFIED;
2054 if (!fwccflag)
2055 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
2056 if (!twccflag)
2057 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
2058 /*
2059 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2060 */
2061 if (rexmit && error == ENOENT)
2062 error = 0;
2063 return (error);
2064 }
2065
2066 /*
2067 * NFS link RPC, called from nfs_link() and nfs_sillyrename().
2068 * Assumes dvp and vp locked, and leaves them that way.
2069 */
2070
2071 static int
2072 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name,
2073 size_t namelen, kauth_cred_t cred, struct lwp *l)
2074 {
2075 u_int32_t *tl;
2076 char *cp;
2077 #ifndef NFS_V2_ONLY
2078 int32_t t1;
2079 char *cp2;
2080 #endif
2081 int32_t t2;
2082 char *bpos, *dpos;
2083 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
2084 struct mbuf *mreq, *mrep, *md, *mb;
2085 const int v3 = NFS_ISV3(dvp);
2086 int rexmit = 0;
2087 struct nfsnode *np = VTONFS(vp);
2088
2089 nfsstats.rpccnt[NFSPROC_LINK]++;
2090 nfsm_reqhead(np, NFSPROC_LINK,
2091 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen));
2092 nfsm_fhtom(np, v3);
2093 nfsm_fhtom(VTONFS(dvp), v3);
2094 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
2095 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit);
2096 #ifndef NFS_V2_ONLY
2097 if (v3) {
2098 nfsm_postop_attr(vp, attrflag, 0);
2099 nfsm_wcc_data(dvp, wccflag, 0, !error);
2100 }
2101 #endif
2102 nfsm_reqdone;
2103
2104 VTONFS(dvp)->n_flag |= NMODIFIED;
2105 if (!attrflag)
2106 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
2107 if (!wccflag)
2108 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2109
2110 /*
2111 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2112 */
2113 if (rexmit && error == EEXIST)
2114 error = 0;
2115
2116 return error;
2117 }
2118
2119 /*
2120 * nfs hard link create call
2121 */
2122 int
2123 nfs_link(v)
2124 void *v;
2125 {
2126 struct vop_link_args /* {
2127 struct vnode *a_dvp;
2128 struct vnode *a_vp;
2129 struct componentname *a_cnp;
2130 } */ *ap = v;
2131 struct vnode *vp = ap->a_vp;
2132 struct vnode *dvp = ap->a_dvp;
2133 struct componentname *cnp = ap->a_cnp;
2134 int error = 0;
2135
2136 if (dvp->v_mount != vp->v_mount) {
2137 VOP_ABORTOP(dvp, cnp);
2138 vput(dvp);
2139 return (EXDEV);
2140 }
2141 if (dvp != vp) {
2142 error = vn_lock(vp, LK_EXCLUSIVE);
2143 if (error != 0) {
2144 VOP_ABORTOP(dvp, cnp);
2145 vput(dvp);
2146 return error;
2147 }
2148 }
2149
2150 /*
2151 * Push all writes to the server, so that the attribute cache
2152 * doesn't get "out of sync" with the server.
2153 * XXX There should be a better way!
2154 */
2155 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0, cnp->cn_lwp);
2156
2157 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2158 cnp->cn_cred, cnp->cn_lwp);
2159
2160 if (error == 0)
2161 cache_purge1(dvp, cnp, 0);
2162 PNBUF_PUT(cnp->cn_pnbuf);
2163 if (dvp != vp)
2164 VOP_UNLOCK(vp, 0);
2165 VN_KNOTE(vp, NOTE_LINK);
2166 VN_KNOTE(dvp, NOTE_WRITE);
2167 vput(dvp);
2168 return (error);
2169 }
2170
2171 /*
2172 * nfs symbolic link create call
2173 */
2174 int
2175 nfs_symlink(v)
2176 void *v;
2177 {
2178 struct vop_symlink_args /* {
2179 struct vnode *a_dvp;
2180 struct vnode **a_vpp;
2181 struct componentname *a_cnp;
2182 struct vattr *a_vap;
2183 char *a_target;
2184 } */ *ap = v;
2185 struct vnode *dvp = ap->a_dvp;
2186 struct vattr *vap = ap->a_vap;
2187 struct componentname *cnp = ap->a_cnp;
2188 struct nfsv2_sattr *sp;
2189 u_int32_t *tl;
2190 char *cp;
2191 int32_t t1, t2;
2192 char *bpos, *dpos, *cp2;
2193 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2194 struct mbuf *mreq, *mrep, *md, *mb;
2195 struct vnode *newvp = (struct vnode *)0;
2196 const int v3 = NFS_ISV3(dvp);
2197 int rexmit = 0;
2198 struct nfsnode *dnp = VTONFS(dvp);
2199
2200 *ap->a_vpp = NULL;
2201 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2202 slen = strlen(ap->a_target);
2203 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2204 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2205 nfsm_fhtom(dnp, v3);
2206 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2207 #ifndef NFS_V2_ONLY
2208 if (v3)
2209 nfsm_v3attrbuild(vap, false);
2210 #endif
2211 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2213 if (!v3) {
2214 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2215 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2216 sp->sa_uid = nfs_xdrneg1;
2217 sp->sa_gid = nfs_xdrneg1;
2218 sp->sa_size = nfs_xdrneg1;
2219 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2220 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2221 }
2223 nfsm_request1(dnp, NFSPROC_SYMLINK, cnp->cn_lwp, cnp->cn_cred,
2224 &rexmit);
2225 #ifndef NFS_V2_ONLY
2226 if (v3) {
2227 if (!error)
2228 nfsm_mtofh(dvp, newvp, v3, gotvp);
2229 nfsm_wcc_data(dvp, wccflag, 0, !error);
2230 }
2231 #endif
2232 nfsm_reqdone;
2233 /*
2234 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2235 */
2236 if (rexmit && error == EEXIST)
2237 error = 0;
2238 if (error == 0 || error == EEXIST)
2239 cache_purge1(dvp, cnp, 0);
2240 if (error == 0 && newvp == NULL) {
2241 struct nfsnode *np = NULL;
2242
2243 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2244 cnp->cn_cred, cnp->cn_lwp, &np);
2245 if (error == 0)
2246 newvp = NFSTOV(np);
2247 }
2248 if (error) {
2249 if (newvp != NULL)
2250 vput(newvp);
2251 } else {
2252 *ap->a_vpp = newvp;
2253 }
2254 PNBUF_PUT(cnp->cn_pnbuf);
2255 VTONFS(dvp)->n_flag |= NMODIFIED;
2256 if (!wccflag)
2257 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2258 VN_KNOTE(dvp, NOTE_WRITE);
2259 vput(dvp);
2260 return (error);
2261 }
2262
2263 /*
2264 * nfs make dir call
2265 */
2266 int
2267 nfs_mkdir(v)
2268 void *v;
2269 {
2270 struct vop_mkdir_args /* {
2271 struct vnode *a_dvp;
2272 struct vnode **a_vpp;
2273 struct componentname *a_cnp;
2274 struct vattr *a_vap;
2275 } */ *ap = v;
2276 struct vnode *dvp = ap->a_dvp;
2277 struct vattr *vap = ap->a_vap;
2278 struct componentname *cnp = ap->a_cnp;
2279 struct nfsv2_sattr *sp;
2280 u_int32_t *tl;
2281 char *cp;
2282 int32_t t1, t2;
2283 int len;
2284 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0;
2285 struct vnode *newvp = (struct vnode *)0;
2286 char *bpos, *dpos, *cp2;
2287 int error = 0, wccflag = NFSV3_WCCRATTR;
2288 int gotvp = 0;
2289 int rexmit = 0;
2290 struct mbuf *mreq, *mrep, *md, *mb;
2291 const int v3 = NFS_ISV3(dvp);
2292
2293 len = cnp->cn_namelen;
2294 nfsstats.rpccnt[NFSPROC_MKDIR]++;
2295 nfsm_reqhead(dnp, NFSPROC_MKDIR,
2296 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2297 nfsm_fhtom(dnp, v3);
2298 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2299 #ifndef NFS_V2_ONLY
2300 if (v3) {
2301 nfsm_v3attrbuild(vap, false);
2302 } else
2303 #endif
2304 {
2305 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2306 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2307 sp->sa_uid = nfs_xdrneg1;
2308 sp->sa_gid = nfs_xdrneg1;
2309 sp->sa_size = nfs_xdrneg1;
2310 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2311 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2312 }
2313 nfsm_request1(dnp, NFSPROC_MKDIR, cnp->cn_lwp, cnp->cn_cred, &rexmit);
2314 if (!error)
2315 nfsm_mtofh(dvp, newvp, v3, gotvp);
2316 if (v3)
2317 nfsm_wcc_data(dvp, wccflag, 0, !error);
2318 nfsm_reqdone;
2319 VTONFS(dvp)->n_flag |= NMODIFIED;
2320 if (!wccflag)
2321 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2322 /*
2323 * Kludge: Map EEXIST => 0, assuming that it is a reply to a retry,
2324 * provided we can succeed in looking up the directory.
2325 */
2326 if ((rexmit && error == EEXIST) || (!error && !gotvp)) {
2327 if (newvp) {
2328 vput(newvp);
2329 newvp = (struct vnode *)0;
2330 }
2331 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
2332 cnp->cn_lwp, &np);
2333 if (!error) {
2334 newvp = NFSTOV(np);
2335 if (newvp->v_type != VDIR || newvp == dvp)
2336 error = EEXIST;
2337 }
2338 }
2339 if (error) {
2340 if (newvp) {
2341 if (dvp != newvp)
2342 vput(newvp);
2343 else
2344 vrele(newvp);
2345 }
2346 } else {
2347 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2348 if (cnp->cn_flags & MAKEENTRY)
2349 nfs_cache_enter(dvp, newvp, cnp);
2350 *ap->a_vpp = newvp;
2351 }
2352 PNBUF_PUT(cnp->cn_pnbuf);
2353 vput(dvp);
2354 return (error);
2355 }
2356
2357 /*
2358 * nfs remove directory call
2359 */
2360 int
2361 nfs_rmdir(v)
2362 void *v;
2363 {
2364 struct vop_rmdir_args /* {
2365 struct vnode *a_dvp;
2366 struct vnode *a_vp;
2367 struct componentname *a_cnp;
2368 } */ *ap = v;
2369 struct vnode *vp = ap->a_vp;
2370 struct vnode *dvp = ap->a_dvp;
2371 struct componentname *cnp = ap->a_cnp;
2372 u_int32_t *tl;
2373 char *cp;
2374 #ifndef NFS_V2_ONLY
2375 int32_t t1;
2376 char *cp2;
2377 #endif
2378 int32_t t2;
2379 char *bpos, *dpos;
2380 int error = 0, wccflag = NFSV3_WCCRATTR;
2381 int rexmit = 0;
2382 struct mbuf *mreq, *mrep, *md, *mb;
2383 const int v3 = NFS_ISV3(dvp);
2384 struct nfsnode *dnp;
2385
2386 if (dvp == vp) {
2387 vrele(dvp);
2388 vput(dvp);
2389 PNBUF_PUT(cnp->cn_pnbuf);
2390 return (EINVAL);
2391 }
2392 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2393 dnp = VTONFS(dvp);
2394 nfsm_reqhead(dnp, NFSPROC_RMDIR,
2395 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2396 nfsm_fhtom(dnp, v3);
2397 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2398 nfsm_request1(dnp, NFSPROC_RMDIR, cnp->cn_lwp, cnp->cn_cred, &rexmit);
2399 #ifndef NFS_V2_ONLY
2400 if (v3)
2401 nfsm_wcc_data(dvp, wccflag, 0, !error);
2402 #endif
2403 nfsm_reqdone;
2404 PNBUF_PUT(cnp->cn_pnbuf);
2405 VTONFS(dvp)->n_flag |= NMODIFIED;
2406 if (!wccflag)
2407 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2408 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2409 VN_KNOTE(vp, NOTE_DELETE);
2410 cache_purge(vp);
2411 vput(vp);
2412 vput(dvp);
2413 /*
2414 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2415 */
2416 if (rexmit && error == ENOENT)
2417 error = 0;
2418 return (error);
2419 }
2420
2421 /*
2422 * nfs readdir call
2423 */
2424 int
2425 nfs_readdir(v)
2426 void *v;
2427 {
2428 struct vop_readdir_args /* {
2429 struct vnode *a_vp;
2430 struct uio *a_uio;
2431 kauth_cred_t a_cred;
2432 int *a_eofflag;
2433 off_t **a_cookies;
2434 int *a_ncookies;
2435 } */ *ap = v;
2436 struct vnode *vp = ap->a_vp;
2437 struct uio *uio = ap->a_uio;
2438 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2439 char *base = uio->uio_iov->iov_base;
2440 int tresid, error;
2441 size_t count, lost;
2442 struct dirent *dp;
2443 off_t *cookies = NULL;
2444 int ncookies = 0, nc;
2445
2446 if (vp->v_type != VDIR)
2447 return (EPERM);
2448
2449 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1);
2450 count = uio->uio_resid - lost;
2451 if (count <= 0)
2452 return (EINVAL);
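	/*
	 * For example, assuming the usual NFS_DIRFRAGSIZ of 512: a caller
	 * asking for 1000 bytes gets lost = 488 and count = 512, and the
	 * 488 "lost" bytes are added back to uio_resid before returning.
	 */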
2453
2454 /*
2455 * Call nfs_bioread() to do the real work.
2456 */
2457 tresid = uio->uio_resid = count;
2458 error = nfs_bioread(vp, uio, 0, ap->a_cred,
2459 ap->a_cookies ? NFSBIO_CACHECOOKIES : 0);
2460
2461 if (!error && ap->a_cookies) {
2462 ncookies = count / 16;
2463 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
2464 *ap->a_cookies = cookies;
2465 }
2466
2467 if (!error && uio->uio_resid == tresid) {
2468 uio->uio_resid += lost;
2469 nfsstats.direofcache_misses++;
2470 if (ap->a_cookies)
2471 *ap->a_ncookies = 0;
2472 *ap->a_eofflag = 1;
2473 return (0);
2474 }
2475
2476 if (!error && ap->a_cookies) {
2477 /*
2478 * Only the NFS server and emulations use cookies, and they
2479 * load the directory block into system space, so we can
2480 * just look at it directly.
2481 */
2482 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
2483 uio->uio_iovcnt != 1)
2484 panic("nfs_readdir: lost in space");
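		/*
		 * Walk the entries nfs_bioread() just copied out (from
		 * 'base' up to the current iov_base) and hand the directory
		 * cookie stashed with each record back to the caller;
		 * mounts doing 32-bit cookie translation return the
		 * 32-bit form instead.
		 */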
2485 for (nc = 0; ncookies-- &&
2486 base < (char *)uio->uio_iov->iov_base; nc++){
2487 dp = (struct dirent *) base;
2488 if (dp->d_reclen == 0)
2489 break;
2490 if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
2491 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp);
2492 else
2493 *(cookies++) = NFS_GETCOOKIE(dp);
2494 base += dp->d_reclen;
2495 }
2496 uio->uio_resid +=
2497 ((char *)uio->uio_iov->iov_base - base);
2498 uio->uio_iov->iov_len +=
2499 ((char *)uio->uio_iov->iov_base - base);
2500 uio->uio_iov->iov_base = base;
2501 *ap->a_ncookies = nc;
2502 }
2503
2504 uio->uio_resid += lost;
2505 *ap->a_eofflag = 0;
2506 return (error);
2507 }
2508
2509 /*
2510 * Readdir rpc call.
2511 * Called from below the buffer cache by nfs_doio().
2512 */
2513 int
2514 nfs_readdirrpc(vp, uiop, cred)
2515 struct vnode *vp;
2516 struct uio *uiop;
2517 kauth_cred_t cred;
2518 {
2519 int len, left;
2520 struct dirent *dp = NULL;
2521 u_int32_t *tl;
2522 char *cp;
2523 int32_t t1, t2;
2524 char *bpos, *dpos, *cp2;
2525 struct mbuf *mreq, *mrep, *md, *mb;
2526 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2527 struct nfsnode *dnp = VTONFS(vp);
2528 u_quad_t fileno;
2529 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1;
2530 #ifndef NFS_V2_ONLY
2531 int attrflag;
2532 #endif
2533 int nrpcs = 0, reclen;
2534 const int v3 = NFS_ISV3(vp);
2535
2536 #ifdef DIAGNOSTIC
2537 /*
2538 * Should be called from the buffer cache, so only requests of
2539 * exactly NFS_DIRBLKSIZ will be made.
2540 */
2541 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2542 panic("nfs readdirrpc bad uio");
2543 #endif
2544
2545 /*
2546 * Loop around doing readdir rpc's of size nm_readdirsize
2547 * truncated to a multiple of NFS_DIRFRAGSIZ.
2548 * The stopping criterion is EOF or a full buffer.
2549 */
2550 while (more_dirs && bigenough) {
2551 /*
2552 * Heuristic: don't bother to do another RPC to further
2553 * fill up this block if there is not much room left. (< 50%
2554 * of the readdir RPC size). This wastes some buffer space
2555 * but can save up to 50% in RPC calls.
2556 */
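		/*
		 * For example, with an nm_readdirsize of 8 KB another RPC
		 * is issued only while at least 4 KB of the block is still
		 * unfilled.
		 */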
2557 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2558 bigenough = 0;
2559 break;
2560 }
2561 nfsstats.rpccnt[NFSPROC_READDIR]++;
2562 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) +
2563 NFSX_READDIR(v3));
2564 nfsm_fhtom(dnp, v3);
2565 #ifndef NFS_V2_ONLY
2566 if (v3) {
2567 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2568 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2569 txdr_swapcookie3(uiop->uio_offset, tl);
2570 } else {
2571 txdr_cookie3(uiop->uio_offset, tl);
2572 }
2573 tl += 2;
2574 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2575 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2576 } else
2577 #endif
2578 {
2579 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2580 *tl++ = txdr_unsigned(uiop->uio_offset);
2581 }
2582 *tl = txdr_unsigned(nmp->nm_readdirsize);
2583 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred);
2584 nrpcs++;
2585 #ifndef NFS_V2_ONLY
2586 if (v3) {
2587 nfsm_postop_attr(vp, attrflag, 0);
2588 if (!error) {
2589 nfsm_dissect(tl, u_int32_t *,
2590 2 * NFSX_UNSIGNED);
2591 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2592 dnp->n_cookieverf.nfsuquad[1] = *tl;
2593 } else {
2594 m_freem(mrep);
2595 goto nfsmout;
2596 }
2597 }
2598 #endif
2599 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2600 more_dirs = fxdr_unsigned(int, *tl);
2601
2602 /* loop thru the dir entries, doctoring them to 4bsd form */
2603 while (more_dirs && bigenough) {
2604 #ifndef NFS_V2_ONLY
2605 if (v3) {
2606 nfsm_dissect(tl, u_int32_t *,
2607 3 * NFSX_UNSIGNED);
2608 fileno = fxdr_hyper(tl);
2609 len = fxdr_unsigned(int, *(tl + 2));
2610 } else
2611 #endif
2612 {
2613 nfsm_dissect(tl, u_int32_t *,
2614 2 * NFSX_UNSIGNED);
2615 fileno = fxdr_unsigned(u_quad_t, *tl++);
2616 len = fxdr_unsigned(int, *tl);
2617 }
2618 if (len <= 0 || len > NFS_MAXNAMLEN) {
2619 error = EBADRPC;
2620 m_freem(mrep);
2621 goto nfsmout;
2622 }
2623 /* for cookie stashing */
2624 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2625 left = NFS_DIRFRAGSIZ - blksiz;
2626 if (reclen > left) {
2627 memset(uiop->uio_iov->iov_base, 0, left);
2628 dp->d_reclen += left;
2629 UIO_ADVANCE(uiop, left);
2630 blksiz = 0;
2631 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2632 }
2633 if (reclen > uiop->uio_resid)
2634 bigenough = 0;
2635 if (bigenough) {
2636 int tlen;
2637
2638 dp = (struct dirent *)uiop->uio_iov->iov_base;
2639 dp->d_fileno = fileno;
2640 dp->d_namlen = len;
2641 dp->d_reclen = reclen;
2642 dp->d_type = DT_UNKNOWN;
2643 blksiz += reclen;
2644 if (blksiz == NFS_DIRFRAGSIZ)
2645 blksiz = 0;
2646 UIO_ADVANCE(uiop, DIRHDSIZ);
2647 nfsm_mtouio(uiop, len);
2648 tlen = reclen - (DIRHDSIZ + len);
2649 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2650 UIO_ADVANCE(uiop, tlen);
2651 } else
2652 nfsm_adv(nfsm_rndup(len));
2653 #ifndef NFS_V2_ONLY
2654 if (v3) {
2655 nfsm_dissect(tl, u_int32_t *,
2656 3 * NFSX_UNSIGNED);
2657 } else
2658 #endif
2659 {
2660 nfsm_dissect(tl, u_int32_t *,
2661 2 * NFSX_UNSIGNED);
2662 }
2663 if (bigenough) {
2664 #ifndef NFS_V2_ONLY
2665 if (v3) {
2666 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2667 uiop->uio_offset =
2668 fxdr_swapcookie3(tl);
2669 else
2670 uiop->uio_offset =
2671 fxdr_cookie3(tl);
2672 }
2673 else
2674 #endif
2675 {
2676 uiop->uio_offset =
2677 fxdr_unsigned(off_t, *tl);
2678 }
2679 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2680 }
2681 if (v3)
2682 tl += 2;
2683 else
2684 tl++;
2685 more_dirs = fxdr_unsigned(int, *tl);
2686 }
2687 /*
2688 * If at end of rpc data, get the eof boolean
2689 */
2690 if (!more_dirs) {
2691 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2692 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2693
2694 /*
2695 * kludge: if we got no entries, treat it as EOF.
2696 * some servers occasionally send a reply with neither
2697 * entries nor the EOF flag set.
2698 * although that might mean the server has an entry with a
2699 * very long name, we can't handle such entries anyway.
2700 */
2701
2702 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2703 more_dirs = 0;
2704 }
2705 m_freem(mrep);
2706 }
2707 /*
2708 * Fill the last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2709 * by increasing d_reclen for the last record.
2710 */
2711 if (blksiz > 0) {
2712 left = NFS_DIRFRAGSIZ - blksiz;
2713 memset(uiop->uio_iov->iov_base, 0, left);
2714 dp->d_reclen += left;
2715 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2716 UIO_ADVANCE(uiop, left);
2717 }
2718
2719 /*
2720 * We are now either at the end of the directory or have filled the
2721 * block.
2722 */
2723 if (bigenough) {
2724 dnp->n_direofoffset = uiop->uio_offset;
2725 dnp->n_flag |= NEOFVALID;
2726 }
2727 nfsmout:
2728 return (error);
2729 }
2730
2731 #ifndef NFS_V2_ONLY
2732 /*
2733 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2734 */
2735 int
2736 nfs_readdirplusrpc(vp, uiop, cred)
2737 struct vnode *vp;
2738 struct uio *uiop;
2739 kauth_cred_t cred;
2740 {
2741 int len, left;
2742 struct dirent *dp = NULL;
2743 u_int32_t *tl;
2744 char *cp;
2745 int32_t t1, t2;
2746 struct vnode *newvp;
2747 char *bpos, *dpos, *cp2;
2748 struct mbuf *mreq, *mrep, *md, *mb;
2749 struct nameidata nami, *ndp = &nami;
2750 struct componentname *cnp = &ndp->ni_cnd;
2751 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2752 struct nfsnode *dnp = VTONFS(vp), *np;
2753 nfsfh_t *fhp;
2754 u_quad_t fileno;
2755 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2756 int attrflag, fhsize, nrpcs = 0, reclen;
2757 struct nfs_fattr fattr, *fp;
2758
2759 #ifdef DIAGNOSTIC
2760 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2761 panic("nfs readdirplusrpc bad uio");
2762 #endif
2763 ndp->ni_dvp = vp;
2764 newvp = NULLVP;
2765
2766 /*
2767 * Loop around doing readdir rpc's of size nm_readdirsize
2768 * truncated to a multiple of NFS_DIRFRAGSIZ.
2769 * The stopping criterion is EOF or a full buffer.
2770 */
2771 while (more_dirs && bigenough) {
2772 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2773 bigenough = 0;
2774 break;
2775 }
2776 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2777 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS,
2778 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2779 nfsm_fhtom(dnp, 1);
2780 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2781 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2782 txdr_swapcookie3(uiop->uio_offset, tl);
2783 } else {
2784 txdr_cookie3(uiop->uio_offset, tl);
2785 }
2786 tl += 2;
2787 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2788 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2789 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2790 *tl = txdr_unsigned(nmp->nm_rsize);
2791 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred);
2792 nfsm_postop_attr(vp, attrflag, 0);
2793 if (error) {
2794 m_freem(mrep);
2795 goto nfsmout;
2796 }
2797 nrpcs++;
2798 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2799 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2800 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2801 more_dirs = fxdr_unsigned(int, *tl);
2802
2803 /* loop thru the dir entries, doctoring them to 4bsd form */
2804 while (more_dirs && bigenough) {
2805 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2806 fileno = fxdr_hyper(tl);
2807 len = fxdr_unsigned(int, *(tl + 2));
2808 if (len <= 0 || len > NFS_MAXNAMLEN) {
2809 error = EBADRPC;
2810 m_freem(mrep);
2811 goto nfsmout;
2812 }
2813 /* for cookie stashing */
2814 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2815 left = NFS_DIRFRAGSIZ - blksiz;
2816 if (reclen > left) {
2817 /*
2818 * DIRFRAGSIZ is aligned, no need to align
2819 * again here.
2820 */
2821 memset(uiop->uio_iov->iov_base, 0, left);
2822 dp->d_reclen += left;
2823 UIO_ADVANCE(uiop, left);
2824 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2825 blksiz = 0;
2826 }
2827 if (reclen > uiop->uio_resid)
2828 bigenough = 0;
2829 if (bigenough) {
2830 int tlen;
2831
2832 dp = (struct dirent *)uiop->uio_iov->iov_base;
2833 dp->d_fileno = fileno;
2834 dp->d_namlen = len;
2835 dp->d_reclen = reclen;
2836 dp->d_type = DT_UNKNOWN;
2837 blksiz += reclen;
2838 if (blksiz == NFS_DIRFRAGSIZ)
2839 blksiz = 0;
2840 UIO_ADVANCE(uiop, DIRHDSIZ);
2841 nfsm_mtouio(uiop, len);
2842 tlen = reclen - (DIRHDSIZ + len);
2843 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2844 UIO_ADVANCE(uiop, tlen);
2845 cnp->cn_nameptr = dp->d_name;
2846 cnp->cn_namelen = dp->d_namlen;
2847 } else
2848 nfsm_adv(nfsm_rndup(len));
2849 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2850 if (bigenough) {
2851 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2852 uiop->uio_offset =
2853 fxdr_swapcookie3(tl);
2854 else
2855 uiop->uio_offset =
2856 fxdr_cookie3(tl);
2857 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2858 }
2859 tl += 2;
2860
2861 /*
2862 * Since the attributes arrive before the file handle
2863 * (sigh), save a copy of them now and apply them once the
2864 * file handle has given us a vnode.
2865 */
2866 attrflag = fxdr_unsigned(int, *tl);
2867 if (attrflag) {
2868 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR);
2869 memcpy(&fattr, fp, NFSX_V3FATTR);
2870 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2871 doit = fxdr_unsigned(int, *tl);
2872 if (doit) {
2873 nfsm_getfh(fhp, fhsize, 1);
2874 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2875 VREF(vp);
2876 newvp = vp;
2877 np = dnp;
2878 } else {
2879 error = nfs_nget1(vp->v_mount, fhp,
2880 fhsize, &np, LK_NOWAIT);
2881 if (!error)
2882 newvp = NFSTOV(np);
2883 }
2884 if (!error) {
2885 const char *xcp;
2886
2887 nfs_loadattrcache(&newvp, &fattr, 0, 0);
2888 if (bigenough) {
2889 dp->d_type =
2890 IFTODT(VTTOIF(np->n_vattr->va_type));
2891 if (cnp->cn_namelen <= NCHNAMLEN) {
2892 ndp->ni_vp = newvp;
2893 xcp = cnp->cn_nameptr +
2894 cnp->cn_namelen;
2895 cnp->cn_hash =
2896 namei_hash(cnp->cn_nameptr, &xcp);
2897 nfs_cache_enter(ndp->ni_dvp,
2898 ndp->ni_vp, cnp);
2899 }
2900 }
2901 }
2902 error = 0;
2903 }
2904 } else {
2905 /* Just skip over the file handle */
2906 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2907 i = fxdr_unsigned(int, *tl);
2908 nfsm_adv(nfsm_rndup(i));
2909 }
2910 if (newvp != NULLVP) {
2911 if (newvp == vp)
2912 vrele(newvp);
2913 else
2914 vput(newvp);
2915 newvp = NULLVP;
2916 }
2917 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2918 more_dirs = fxdr_unsigned(int, *tl);
2919 }
2920 /*
2921 * If at end of rpc data, get the eof boolean
2922 */
2923 if (!more_dirs) {
2924 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2925 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2926
2927 /*
2928 * kludge: see a comment in nfs_readdirrpc.
2929 */
2930
2931 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2932 more_dirs = 0;
2933 }
2934 m_freem(mrep);
2935 }
2936 /*
2937 * Fill the last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2938 * by increasing d_reclen for the last record.
2939 */
2940 if (blksiz > 0) {
2941 left = NFS_DIRFRAGSIZ - blksiz;
2942 memset(uiop->uio_iov->iov_base, 0, left);
2943 dp->d_reclen += left;
2944 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2945 UIO_ADVANCE(uiop, left);
2946 }
2947
2948 /*
2949 * We are now either at the end of the directory or have filled the
2950 * block.
2951 */
2952 if (bigenough) {
2953 dnp->n_direofoffset = uiop->uio_offset;
2954 dnp->n_flag |= NEOFVALID;
2955 }
2956 nfsmout:
2957 if (newvp != NULLVP) {
2958 if (newvp == vp)
2959 vrele(newvp);
2960 else
2961 vput(newvp);
2962 }
2963 return (error);
2964 }
2965 #endif
2966
2967 /*
2968 * Silly rename. To make the stateless NFS filesystem look a little
2969 * more like "ufs", a remove of an active vnode is translated to a rename
2970 * to a funny looking filename that is removed by nfs_inactive on the
2971 * nfsnode. There is the potential for another process on a different
2972 * client to create the same funny name between the time the nfs_lookitup()
2973 * fails and the nfs_rename() completes, but...
2974 */
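/*
 * For illustration, a minimal user-space sketch of the silly-name scheme
 * used below (a sketch only; it assumes hexdigits is the usual
 * "0123456789abcdef" table and that the pid fits in 16 bits, as the code
 * below does):
 *
 *	#include <stdio.h>
 *
 *	static void
 *	silly_name(char buf[13], unsigned pid, char generation)
 *	{
 *		const char *hexdigits = "0123456789abcdef";
 *
 *		snprintf(buf, 13, ".nfs%c%c%c%c%c4.4", generation,
 *		    hexdigits[(pid >> 12) & 0xf], hexdigits[(pid >> 8) & 0xf],
 *		    hexdigits[(pid >> 4) & 0xf], hexdigits[pid & 0xf]);
 *	}
 *
 * e.g. pid 0x1a2b with generation 'A' yields ".nfsA1a2b4.4"; on a name
 * collision nfs_sillyrename() advances the generation character until it
 * passes 'z'.
 */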
2975 int
2976 nfs_sillyrename(dvp, vp, cnp, dolink)
2977 struct vnode *dvp, *vp;
2978 struct componentname *cnp;
2979 bool dolink;
2980 {
2981 struct sillyrename *sp;
2982 struct nfsnode *np;
2983 int error;
2984 short pid;
2985
2986 cache_purge(dvp);
2987 np = VTONFS(vp);
2988 #ifdef DIAGNOSTIC
2989 if (vp->v_type == VDIR)
2990 panic("nfs: sillyrename dir");
2991 #endif
2992 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2993 M_NFSREQ, M_WAITOK);
2994 sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2995 sp->s_dvp = dvp;
2996 VREF(dvp);
2997
2998 /* Fudge together a funny name */
2999 pid = cnp->cn_lwp->l_proc->p_pid;
3000 memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
3001 sp->s_namlen = 12;
3002 sp->s_name[8] = hexdigits[pid & 0xf];
3003 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
3004 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
3005 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
3006
3007 /* Try lookitups until we get one that isn't there */
3008 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
3009 cnp->cn_lwp, (struct nfsnode **)0) == 0) {
3010 sp->s_name[4]++;
3011 if (sp->s_name[4] > 'z') {
3012 error = EINVAL;
3013 goto bad;
3014 }
3015 }
3016 if (dolink) {
3017 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
3018 sp->s_cred, cnp->cn_lwp);
3019 /*
3020 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
3021 */
3022 if (error == ENOTSUP) {
3023 error = nfs_renameit(dvp, cnp, sp);
3024 }
3025 } else {
3026 error = nfs_renameit(dvp, cnp, sp);
3027 }
3028 if (error)
3029 goto bad;
3030 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
3031 cnp->cn_lwp, &np);
3032 np->n_sillyrename = sp;
3033 return (0);
3034 bad:
3035 vrele(sp->s_dvp);
3036 kauth_cred_free(sp->s_cred);
3037 free((void *)sp, M_NFSREQ);
3038 return (error);
3039 }
3040
3041 /*
3042 * Look up a file name and optionally either update the file handle or
3043 * allocate an nfsnode, depending on the value of npp.
3044 * npp == NULL --> just do the lookup
3045 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
3046 * handled too
3047 * *npp != NULL --> update the file handle in the vnode
3048 */
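/*
 * A sketch of the three calling conventions above (hypothetical call
 * sites, for illustration only):
 *
 *	struct nfsnode *np;
 *
 *	// probe: does the name exist on the server?
 *	error = nfs_lookitup(dvp, name, len, cred, l, NULL);
 *
 *	// allocate a new nfsnode (attributes are loaded as well)
 *	np = NULL;
 *	error = nfs_lookitup(dvp, name, len, cred, l, &np);
 *
 *	// refresh the file handle of an existing nfsnode
 *	np = VTONFS(vp);
 *	error = nfs_lookitup(dvp, name, len, cred, l, &np);
 */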
3049 int
3050 nfs_lookitup(dvp, name, len, cred, l, npp)
3051 struct vnode *dvp;
3052 const char *name;
3053 int len;
3054 kauth_cred_t cred;
3055 struct lwp *l;
3056 struct nfsnode **npp;
3057 {
3058 u_int32_t *tl;
3059 char *cp;
3060 int32_t t1, t2;
3061 struct vnode *newvp = (struct vnode *)0;
3062 struct nfsnode *np, *dnp = VTONFS(dvp);
3063 char *bpos, *dpos, *cp2;
3064 int error = 0, fhlen;
3065 #ifndef NFS_V2_ONLY
3066 int attrflag;
3067 #endif
3068 struct mbuf *mreq, *mrep, *md, *mb;
3069 nfsfh_t *nfhp;
3070 const int v3 = NFS_ISV3(dvp);
3071
3072 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
3073 nfsm_reqhead(dnp, NFSPROC_LOOKUP,
3074 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
3075 nfsm_fhtom(dnp, v3);
3076 nfsm_strtom(name, len, NFS_MAXNAMLEN);
3077 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred);
3078 if (npp && !error) {
3079 nfsm_getfh(nfhp, fhlen, v3);
3080 if (*npp) {
3081 np = *npp;
3082 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
3083 free((void *)np->n_fhp, M_NFSBIGFH);
3084 np->n_fhp = &np->n_fh;
3085 }
3086 #if NFS_SMALLFH < NFSX_V3FHMAX
3087 else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
3088 np->n_fhp = (nfsfh_t *)malloc(fhlen, M_NFSBIGFH, M_WAITOK);
3089 #endif
3090 memcpy((void *)np->n_fhp, (void *)nfhp, fhlen);
3091 np->n_fhsize = fhlen;
3092 newvp = NFSTOV(np);
3093 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
3094 VREF(dvp);
3095 newvp = dvp;
3096 np = dnp;
3097 } else {
3098 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
3099 if (error) {
3100 m_freem(mrep);
3101 return (error);
3102 }
3103 newvp = NFSTOV(np);
3104 }
3105 #ifndef NFS_V2_ONLY
3106 if (v3) {
3107 nfsm_postop_attr(newvp, attrflag, 0);
3108 if (!attrflag && *npp == NULL) {
3109 m_freem(mrep);
3110 vput(newvp);
3111 return (ENOENT);
3112 }
3113 } else
3114 #endif
3115 nfsm_loadattr(newvp, (struct vattr *)0, 0);
3116 }
3117 nfsm_reqdone;
3118 if (npp && *npp == NULL) {
3119 if (error) {
3120 if (newvp)
3121 vput(newvp);
3122 } else
3123 *npp = np;
3124 }
3125 return (error);
3126 }
3127
3128 #ifndef NFS_V2_ONLY
3129 /*
3130 * Nfs Version 3 commit rpc
3131 */
3132 int
3133 nfs_commit(vp, offset, cnt, l)
3134 struct vnode *vp;
3135 off_t offset;
3136 uint32_t cnt;
3137 struct lwp *l;
3138 {
3139 char *cp;
3140 u_int32_t *tl;
3141 int32_t t1, t2;
3142 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
3143 char *bpos, *dpos, *cp2;
3144 int error = 0, wccflag = NFSV3_WCCRATTR;
3145 struct mbuf *mreq, *mrep, *md, *mb;
3146 struct nfsnode *np;
3147
3148 KASSERT(NFS_ISV3(vp));
3149
3150 #ifdef NFS_DEBUG_COMMIT
3151 printf("commit %lu - %lu\n", (unsigned long)offset,
3152 (unsigned long)(offset + cnt));
3153 #endif
3154
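	/*
	 * If no write verifier has been received from the server yet, no
	 * unstable WRITEs have completed on this mount, so there is
	 * nothing to commit.
	 */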
3155 mutex_enter(&nmp->nm_lock);
3156 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) {
3157 mutex_exit(&nmp->nm_lock);
3158 return (0);
3159 }
3160 mutex_exit(&nmp->nm_lock);
3161 nfsstats.rpccnt[NFSPROC_COMMIT]++;
3162 np = VTONFS(vp);
3163 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1));
3164 nfsm_fhtom(np, 1);
3165 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3166 txdr_hyper(offset, tl);
3167 tl += 2;
3168 *tl = txdr_unsigned(cnt);
3169 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3170 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
3171 if (!error) {
3172 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
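		/*
		 * Compare the write verifier returned by the server with the
		 * one cached for this mount.  A change means the server has
		 * likely rebooted and may have lost our unstable writes, so
		 * record the new verifier and flag the mount so the dirty
		 * data gets written (and committed) again.
		 */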
3173 mutex_enter(&nmp->nm_lock);
3174 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3175 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3176 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3177 error = NFSERR_STALEWRITEVERF;
3178 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3179 }
3180 mutex_exit(&nmp->nm_lock);
3181 }
3182 nfsm_reqdone;
3183 return (error);
3184 }
3185 #endif
3186
3187 /*
3188 * Kludge City..
3189 * - make nfs_bmap() essentially a no-op that does no translation
3190 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3191 * (Maybe I could use the process's page mapping, but I was concerned that
3192 * Kernel Write might not be enabled, and also figured copyout() would do
3193 * a lot more work than memcpy(), and also it currently happens in the
3194 * context of the swapper process (2).)
3195 */
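/*
 * A worked example of the translation done by nfs_bmap() below (assuming
 * an 8 KB file system block size and the usual 512 byte DEV_BSIZE, i.e.
 * mnt_fs_bshift == 13 and mnt_dev_bshift == 9):
 *
 *	bshift = 13 - 9 = 4
 *	logical block 3 -> 3 << 4 = 48 device blocks = 24 KB into the file
 *
 * i.e. the "mapping" is a pure shift; no RPC is involved.
 */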
3196 int
3197 nfs_bmap(v)
3198 void *v;
3199 {
3200 struct vop_bmap_args /* {
3201 struct vnode *a_vp;
3202 daddr_t a_bn;
3203 struct vnode **a_vpp;
3204 daddr_t *a_bnp;
3205 int *a_runp;
3206 } */ *ap = v;
3207 struct vnode *vp = ap->a_vp;
3208 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3209
3210 if (ap->a_vpp != NULL)
3211 *ap->a_vpp = vp;
3212 if (ap->a_bnp != NULL)
3213 *ap->a_bnp = ap->a_bn << bshift;
3214 if (ap->a_runp != NULL)
3215 *ap->a_runp = 1024 * 1024; /* XXX */
3216 return (0);
3217 }
3218
3219 /*
3220 * Strategy routine.
3221 * For async requests when nfsiod(s) are running, queue the request by
3222 * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
3223 * request.
3224 */
3225 int
3226 nfs_strategy(v)
3227 void *v;
3228 {
3229 struct vop_strategy_args *ap = v;
3230 struct buf *bp = ap->a_bp;
3231 int error = 0;
3232
3233 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3234 panic("nfs physio/async");
3235
3236 /*
3237 * If the op is asynchronous and an i/o daemon is waiting,
3238 * queue the request, wake it up and wait for completion;
3239 * otherwise just do it ourselves.
3240 */
3241 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3242 error = nfs_doio(bp);
3243 return (error);
3244 }
3245
3246 /*
3247 * fsync vnode op. Just call nfs_flush() with commit == 1.
3248 */
3249 /* ARGSUSED */
3250 int
3251 nfs_fsync(v)
3252 void *v;
3253 {
3254 struct vop_fsync_args /* {
3255 struct vnodeop_desc *a_desc;
3256 struct vnode * a_vp;
3257 kauth_cred_t a_cred;
3258 int a_flags;
3259 off_t offlo;
3260 off_t offhi;
3261 struct lwp * a_l;
3262 } */ *ap = v;
3263
3264 struct vnode *vp = ap->a_vp;
3265
3266 if (vp->v_type != VREG)
3267 return 0;
3268
3269 return (nfs_flush(vp, ap->a_cred,
3270 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, ap->a_l, 1));
3271 }
3272
3273 /*
3274 * Flush all the data associated with a vnode.
3275 */
3276 int
3277 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3278 int commit)
3279 {
3280 struct nfsnode *np = VTONFS(vp);
3281 int error;
3282 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3283 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3284
3285 simple_lock(&vp->v_interlock);
3286 error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3287 if (np->n_flag & NWRITEERR) {
3288 error = np->n_error;
3289 np->n_flag &= ~NWRITEERR;
3290 }
3291 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3292 return (error);
3293 }
3294
3295 /*
3296 * Return POSIX pathconf information applicable to nfs.
3297 *
3298 * N.B. The NFS V2 protocol doesn't support this RPC.
3299 */
3300 /* ARGSUSED */
3301 int
3302 nfs_pathconf(v)
3303 void *v;
3304 {
3305 struct vop_pathconf_args /* {
3306 struct vnode *a_vp;
3307 int a_name;
3308 register_t *a_retval;
3309 } */ *ap = v;
3310 struct nfsv3_pathconf *pcp;
3311 struct vnode *vp = ap->a_vp;
3312 struct mbuf *mreq, *mrep, *md, *mb;
3313 int32_t t1, t2;
3314 u_int32_t *tl;
3315 char *bpos, *dpos, *cp, *cp2;
3316 int error = 0, attrflag;
3317 #ifndef NFS_V2_ONLY
3318 struct nfsmount *nmp;
3319 unsigned int l;
3320 u_int64_t maxsize;
3321 #endif
3322 const int v3 = NFS_ISV3(vp);
3323 struct nfsnode *np = VTONFS(vp);
3324
3325 switch (ap->a_name) {
3326 /* Names that can be resolved locally. */
3327 case _PC_PIPE_BUF:
3328 *ap->a_retval = PIPE_BUF;
3329 break;
3330 case _PC_SYNC_IO:
3331 *ap->a_retval = 1;
3332 break;
3333 /* Names that cannot be resolved locally; do an RPC, if possible. */
3334 case _PC_LINK_MAX:
3335 case _PC_NAME_MAX:
3336 case _PC_CHOWN_RESTRICTED:
3337 case _PC_NO_TRUNC:
3338 if (!v3) {
3339 error = EINVAL;
3340 break;
3341 }
3342 nfsstats.rpccnt[NFSPROC_PATHCONF]++;
3343 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1));
3344 nfsm_fhtom(np, 1);
3345 nfsm_request(np, NFSPROC_PATHCONF,
3346 curlwp, curlwp->l_cred); /* XXX */
3347 nfsm_postop_attr(vp, attrflag, 0);
3348 if (!error) {
3349 nfsm_dissect(pcp, struct nfsv3_pathconf *,
3350 NFSX_V3PATHCONF);
3351 switch (ap->a_name) {
3352 case _PC_LINK_MAX:
3353 *ap->a_retval =
3354 fxdr_unsigned(register_t, pcp->pc_linkmax);
3355 break;
3356 case _PC_NAME_MAX:
3357 *ap->a_retval =
3358 fxdr_unsigned(register_t, pcp->pc_namemax);
3359 break;
3360 case _PC_CHOWN_RESTRICTED:
3361 *ap->a_retval =
3362 (pcp->pc_chownrestricted == nfs_true);
3363 break;
3364 case _PC_NO_TRUNC:
3365 *ap->a_retval =
3366 (pcp->pc_notrunc == nfs_true);
3367 break;
3368 }
3369 }
3370 nfsm_reqdone;
3371 break;
3372 case _PC_FILESIZEBITS:
3373 #ifndef NFS_V2_ONLY
3374 if (v3) {
3375 nmp = VFSTONFS(vp->v_mount);
3376 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
3377 if ((error = nfs_fsinfo(nmp, vp,
3378 curlwp->l_cred, curlwp)) != 0) /* XXX */
3379 break;
3380 for (l = 0, maxsize = nmp->nm_maxfilesize;
3381 (maxsize >> l) > 0; l++)
3382 ;
3383 *ap->a_retval = l + 1;
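			/*
			 * e.g. a server advertising nm_maxfilesize of
			 * 0x7fffffffffffffff leaves the loop with l == 63,
			 * so FILESIZEBITS is reported as 64.
			 */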
3384 } else
3385 #endif
3386 {
3387 *ap->a_retval = 32; /* NFS V2 limitation */
3388 }
3389 break;
3390 default:
3391 error = EINVAL;
3392 break;
3393 }
3394
3395 return (error);
3396 }
3397
3398 /*
3399 * NFS advisory byte-level locks, handled locally via lf_advlock().
3400 */
3401 int
3402 nfs_advlock(v)
3403 void *v;
3404 {
3405 struct vop_advlock_args /* {
3406 struct vnode *a_vp;
3407 void *a_id;
3408 int a_op;
3409 struct flock *a_fl;
3410 int a_flags;
3411 } */ *ap = v;
3412 struct nfsnode *np = VTONFS(ap->a_vp);
3413
3414 return lf_advlock(ap, &np->n_lockf, np->n_size);
3415 }
3416
3417 /*
3418 * Print out the contents of an nfsnode.
3419 */
3420 int
3421 nfs_print(v)
3422 void *v;
3423 {
3424 struct vop_print_args /* {
3425 struct vnode *a_vp;
3426 } */ *ap = v;
3427 struct vnode *vp = ap->a_vp;
3428 struct nfsnode *np = VTONFS(vp);
3429
3430 printf("tag VT_NFS, fileid %lld fsid 0x%lx",
3431 (unsigned long long)np->n_vattr->va_fileid, np->n_vattr->va_fsid);
3432 if (vp->v_type == VFIFO)
3433 fifo_printinfo(vp);
3434 printf("\n");
3435 return (0);
3436 }
3437
3438 /*
3439 * nfs unlock wrapper.
3440 */
3441 int
3442 nfs_unlock(void *v)
3443 {
3444 struct vop_unlock_args /* {
3445 struct vnode *a_vp;
3446 int a_flags;
3447 } */ *ap = v;
3448 struct vnode *vp = ap->a_vp;
3449
3450 /*
3451 * VOP_UNLOCK can be called by nfs_loadattrcache
3452 * with v_data == 0.
3453 */
3454 if (VTONFS(vp)) {
3455 nfs_delayedtruncate(vp);
3456 }
3457
3458 return genfs_unlock(v);
3459 }
3460
3461 /*
3462 * nfs special file access vnode op.
3463 * Essentially just get vattr and then do the access check locally, via
3464 * vaccess(), since the device is local to the client.
3465 */
3466 int
3467 nfsspec_access(v)
3468 void *v;
3469 {
3470 struct vop_access_args /* {
3471 struct vnode *a_vp;
3472 int a_mode;
3473 kauth_cred_t a_cred;
3474 struct lwp *a_l;
3475 } */ *ap = v;
3476 struct vattr va;
3477 struct vnode *vp = ap->a_vp;
3478 int error;
3479
3480 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_l);
3481 if (error)
3482 return (error);
3483
3484 /*
3485 * Disallow write attempts on filesystems mounted read-only,
3486 * unless the file is a socket, fifo, or a block or character
3487 * device resident on the filesystem.
3488 */
3489 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3490 switch (vp->v_type) {
3491 case VREG:
3492 case VDIR:
3493 case VLNK:
3494 return (EROFS);
3495 default:
3496 break;
3497 }
3498 }
3499
3500 return (vaccess(va.va_type, va.va_mode,
3501 va.va_uid, va.va_gid, ap->a_mode, ap->a_cred));
3502 }
3503
3504 /*
3505 * Read wrapper for special devices.
3506 */
3507 int
3508 nfsspec_read(v)
3509 void *v;
3510 {
3511 struct vop_read_args /* {
3512 struct vnode *a_vp;
3513 struct uio *a_uio;
3514 int a_ioflag;
3515 kauth_cred_t a_cred;
3516 } */ *ap = v;
3517 struct nfsnode *np = VTONFS(ap->a_vp);
3518
3519 /*
3520 * Set access flag.
3521 */
3522 np->n_flag |= NACC;
3523 getnanotime(&np->n_atim);
3524 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3525 }
3526
3527 /*
3528 * Write wrapper for special devices.
3529 */
3530 int
3531 nfsspec_write(v)
3532 void *v;
3533 {
3534 struct vop_write_args /* {
3535 struct vnode *a_vp;
3536 struct uio *a_uio;
3537 int a_ioflag;
3538 kauth_cred_t a_cred;
3539 } */ *ap = v;
3540 struct nfsnode *np = VTONFS(ap->a_vp);
3541
3542 /*
3543 * Set update flag.
3544 */
3545 np->n_flag |= NUPD;
3546 getnanotime(&np->n_mtim);
3547 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3548 }
3549
3550 /*
3551 * Close wrapper for special devices.
3552 *
3553 * Update the times on the nfsnode then do device close.
3554 */
3555 int
3556 nfsspec_close(v)
3557 void *v;
3558 {
3559 struct vop_close_args /* {
3560 struct vnode *a_vp;
3561 int a_fflag;
3562 kauth_cred_t a_cred;
3563 struct lwp *a_l;
3564 } */ *ap = v;
3565 struct vnode *vp = ap->a_vp;
3566 struct nfsnode *np = VTONFS(vp);
3567 struct vattr vattr;
3568
3569 if (np->n_flag & (NACC | NUPD)) {
3570 np->n_flag |= NCHG;
3571 if (vp->v_usecount == 1 &&
3572 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3573 VATTR_NULL(&vattr);
3574 if (np->n_flag & NACC)
3575 vattr.va_atime = np->n_atim;
3576 if (np->n_flag & NUPD)
3577 vattr.va_mtime = np->n_mtim;
3578 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_l);
3579 }
3580 }
3581 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3582 }
3583
3584 /*
3585 * Read wrapper for fifos.
3586 */
3587 int
3588 nfsfifo_read(v)
3589 void *v;
3590 {
3591 struct vop_read_args /* {
3592 struct vnode *a_vp;
3593 struct uio *a_uio;
3594 int a_ioflag;
3595 kauth_cred_t a_cred;
3596 } */ *ap = v;
3597 struct nfsnode *np = VTONFS(ap->a_vp);
3598
3599 /*
3600 * Set access flag.
3601 */
3602 np->n_flag |= NACC;
3603 getnanotime(&np->n_atim);
3604 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3605 }
3606
3607 /*
3608 * Write wrapper for fifos.
3609 */
3610 int
3611 nfsfifo_write(v)
3612 void *v;
3613 {
3614 struct vop_write_args /* {
3615 struct vnode *a_vp;
3616 struct uio *a_uio;
3617 int a_ioflag;
3618 kauth_cred_t a_cred;
3619 } */ *ap = v;
3620 struct nfsnode *np = VTONFS(ap->a_vp);
3621
3622 /*
3623 * Set update flag.
3624 */
3625 np->n_flag |= NUPD;
3626 getnanotime(&np->n_mtim);
3627 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3628 }
3629
3630 /*
3631 * Close wrapper for fifos.
3632 *
3633 * Update the times on the nfsnode then do fifo close.
3634 */
3635 int
3636 nfsfifo_close(v)
3637 void *v;
3638 {
3639 struct vop_close_args /* {
3640 struct vnode *a_vp;
3641 int a_fflag;
3642 kauth_cred_t a_cred;
3643 struct lwp *a_l;
3644 } */ *ap = v;
3645 struct vnode *vp = ap->a_vp;
3646 struct nfsnode *np = VTONFS(vp);
3647 struct vattr vattr;
3648
3649 if (np->n_flag & (NACC | NUPD)) {
3650 struct timespec ts;
3651
3652 getnanotime(&ts);
3653 if (np->n_flag & NACC)
3654 np->n_atim = ts;
3655 if (np->n_flag & NUPD)
3656 np->n_mtim = ts;
3657 np->n_flag |= NCHG;
3658 if (vp->v_usecount == 1 &&
3659 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3660 VATTR_NULL(&vattr);
3661 if (np->n_flag & NACC)
3662 vattr.va_atime = np->n_atim;
3663 if (np->n_flag & NUPD)
3664 vattr.va_mtime = np->n_mtim;
3665 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_l);
3666 }
3667 }
3668 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3669 }
3670