     1 /*	$NetBSD: nfs_vnops.c,v 1.248 2006/12/27 12:51:22 yamt Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)nfs_vnops.c 8.19 (Berkeley) 7/31/95
35 */
36
37 /*
38 * vnode op calls for Sun NFS version 2 and 3
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.248 2006/12/27 12:51:22 yamt Exp $");
43
44 #include "opt_inet.h"
45 #include "opt_nfs.h"
46 #include "opt_uvmhist.h"
47
48 #include <sys/param.h>
49 #include <sys/proc.h>
50 #include <sys/kernel.h>
51 #include <sys/systm.h>
52 #include <sys/resourcevar.h>
53 #include <sys/proc.h>
54 #include <sys/mount.h>
55 #include <sys/buf.h>
56 #include <sys/disk.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/namei.h>
60 #include <sys/vnode.h>
61 #include <sys/dirent.h>
62 #include <sys/fcntl.h>
63 #include <sys/hash.h>
64 #include <sys/lockf.h>
65 #include <sys/stat.h>
66 #include <sys/unistd.h>
67 #include <sys/kauth.h>
68
69 #include <uvm/uvm_extern.h>
70 #include <uvm/uvm.h>
71
72 #include <miscfs/fifofs/fifo.h>
73 #include <miscfs/genfs/genfs.h>
74 #include <miscfs/genfs/genfs_node.h>
75 #include <miscfs/specfs/specdev.h>
76
77 #include <nfs/rpcv2.h>
78 #include <nfs/nfsproto.h>
79 #include <nfs/nfs.h>
80 #include <nfs/nfsnode.h>
81 #include <nfs/nfsmount.h>
82 #include <nfs/xdr_subs.h>
83 #include <nfs/nfsm_subs.h>
84 #include <nfs/nfs_var.h>
85
86 #include <net/if.h>
87 #include <netinet/in.h>
88 #include <netinet/in_var.h>
89
90 /*
91 * Global vfs data structures for nfs
92 */
93 int (**nfsv2_vnodeop_p) __P((void *));
94 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
95 { &vop_default_desc, vn_default_error },
96 { &vop_lookup_desc, nfs_lookup }, /* lookup */
97 { &vop_create_desc, nfs_create }, /* create */
98 { &vop_mknod_desc, nfs_mknod }, /* mknod */
99 { &vop_open_desc, nfs_open }, /* open */
100 { &vop_close_desc, nfs_close }, /* close */
101 { &vop_access_desc, nfs_access }, /* access */
102 { &vop_getattr_desc, nfs_getattr }, /* getattr */
103 { &vop_setattr_desc, nfs_setattr }, /* setattr */
104 { &vop_read_desc, nfs_read }, /* read */
105 { &vop_write_desc, nfs_write }, /* write */
106 { &vop_lease_desc, nfs_lease_check }, /* lease */
107 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
108 { &vop_ioctl_desc, nfs_ioctl }, /* ioctl */
109 { &vop_poll_desc, nfs_poll }, /* poll */
110 { &vop_kqfilter_desc, nfs_kqfilter }, /* kqfilter */
111 { &vop_revoke_desc, nfs_revoke }, /* revoke */
112 { &vop_mmap_desc, nfs_mmap }, /* mmap */
113 { &vop_fsync_desc, nfs_fsync }, /* fsync */
114 { &vop_seek_desc, nfs_seek }, /* seek */
115 { &vop_remove_desc, nfs_remove }, /* remove */
116 { &vop_link_desc, nfs_link }, /* link */
117 { &vop_rename_desc, nfs_rename }, /* rename */
118 { &vop_mkdir_desc, nfs_mkdir }, /* mkdir */
119 { &vop_rmdir_desc, nfs_rmdir }, /* rmdir */
120 { &vop_symlink_desc, nfs_symlink }, /* symlink */
121 { &vop_readdir_desc, nfs_readdir }, /* readdir */
122 { &vop_readlink_desc, nfs_readlink }, /* readlink */
123 { &vop_abortop_desc, nfs_abortop }, /* abortop */
124 { &vop_inactive_desc, nfs_inactive }, /* inactive */
125 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
126 { &vop_lock_desc, nfs_lock }, /* lock */
127 { &vop_unlock_desc, nfs_unlock }, /* unlock */
128 { &vop_bmap_desc, nfs_bmap }, /* bmap */
129 { &vop_strategy_desc, nfs_strategy }, /* strategy */
130 { &vop_print_desc, nfs_print }, /* print */
131 { &vop_islocked_desc, nfs_islocked }, /* islocked */
132 { &vop_pathconf_desc, nfs_pathconf }, /* pathconf */
133 { &vop_advlock_desc, nfs_advlock }, /* advlock */
134 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
135 { &vop_getpages_desc, nfs_getpages }, /* getpages */
136 { &vop_putpages_desc, genfs_putpages }, /* putpages */
137 { NULL, NULL }
138 };
139 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
140 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
141
142 /*
143 * Special device vnode ops
144 */
145 int (**spec_nfsv2nodeop_p) __P((void *));
146 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
147 { &vop_default_desc, vn_default_error },
148 { &vop_lookup_desc, spec_lookup }, /* lookup */
149 { &vop_create_desc, spec_create }, /* create */
150 { &vop_mknod_desc, spec_mknod }, /* mknod */
151 { &vop_open_desc, spec_open }, /* open */
152 { &vop_close_desc, nfsspec_close }, /* close */
153 { &vop_access_desc, nfsspec_access }, /* access */
154 { &vop_getattr_desc, nfs_getattr }, /* getattr */
155 { &vop_setattr_desc, nfs_setattr }, /* setattr */
156 { &vop_read_desc, nfsspec_read }, /* read */
157 { &vop_write_desc, nfsspec_write }, /* write */
158 { &vop_lease_desc, spec_lease_check }, /* lease */
159 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
160 { &vop_ioctl_desc, spec_ioctl }, /* ioctl */
161 { &vop_poll_desc, spec_poll }, /* poll */
162 { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */
163 { &vop_revoke_desc, spec_revoke }, /* revoke */
164 { &vop_mmap_desc, spec_mmap }, /* mmap */
165 { &vop_fsync_desc, spec_fsync }, /* fsync */
166 { &vop_seek_desc, spec_seek }, /* seek */
167 { &vop_remove_desc, spec_remove }, /* remove */
168 { &vop_link_desc, spec_link }, /* link */
169 { &vop_rename_desc, spec_rename }, /* rename */
170 { &vop_mkdir_desc, spec_mkdir }, /* mkdir */
171 { &vop_rmdir_desc, spec_rmdir }, /* rmdir */
172 { &vop_symlink_desc, spec_symlink }, /* symlink */
173 { &vop_readdir_desc, spec_readdir }, /* readdir */
174 { &vop_readlink_desc, spec_readlink }, /* readlink */
175 { &vop_abortop_desc, spec_abortop }, /* abortop */
176 { &vop_inactive_desc, nfs_inactive }, /* inactive */
177 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
178 { &vop_lock_desc, nfs_lock }, /* lock */
179 { &vop_unlock_desc, nfs_unlock }, /* unlock */
180 { &vop_bmap_desc, spec_bmap }, /* bmap */
181 { &vop_strategy_desc, spec_strategy }, /* strategy */
182 { &vop_print_desc, nfs_print }, /* print */
183 { &vop_islocked_desc, nfs_islocked }, /* islocked */
184 { &vop_pathconf_desc, spec_pathconf }, /* pathconf */
185 { &vop_advlock_desc, spec_advlock }, /* advlock */
186 { &vop_bwrite_desc, spec_bwrite }, /* bwrite */
187 { &vop_getpages_desc, spec_getpages }, /* getpages */
188 { &vop_putpages_desc, spec_putpages }, /* putpages */
189 { NULL, NULL }
190 };
191 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
192 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
193
194 int (**fifo_nfsv2nodeop_p) __P((void *));
195 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
196 { &vop_default_desc, vn_default_error },
197 { &vop_lookup_desc, fifo_lookup }, /* lookup */
198 { &vop_create_desc, fifo_create }, /* create */
199 { &vop_mknod_desc, fifo_mknod }, /* mknod */
200 { &vop_open_desc, fifo_open }, /* open */
201 { &vop_close_desc, nfsfifo_close }, /* close */
202 { &vop_access_desc, nfsspec_access }, /* access */
203 { &vop_getattr_desc, nfs_getattr }, /* getattr */
204 { &vop_setattr_desc, nfs_setattr }, /* setattr */
205 { &vop_read_desc, nfsfifo_read }, /* read */
206 { &vop_write_desc, nfsfifo_write }, /* write */
207 { &vop_lease_desc, fifo_lease_check }, /* lease */
208 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
209 { &vop_ioctl_desc, fifo_ioctl }, /* ioctl */
210 { &vop_poll_desc, fifo_poll }, /* poll */
211 { &vop_kqfilter_desc, fifo_kqfilter }, /* kqfilter */
212 { &vop_revoke_desc, fifo_revoke }, /* revoke */
213 { &vop_mmap_desc, fifo_mmap }, /* mmap */
214 { &vop_fsync_desc, nfs_fsync }, /* fsync */
215 { &vop_seek_desc, fifo_seek }, /* seek */
216 { &vop_remove_desc, fifo_remove }, /* remove */
217 { &vop_link_desc, fifo_link }, /* link */
218 { &vop_rename_desc, fifo_rename }, /* rename */
219 { &vop_mkdir_desc, fifo_mkdir }, /* mkdir */
220 { &vop_rmdir_desc, fifo_rmdir }, /* rmdir */
221 { &vop_symlink_desc, fifo_symlink }, /* symlink */
222 { &vop_readdir_desc, fifo_readdir }, /* readdir */
223 { &vop_readlink_desc, fifo_readlink }, /* readlink */
224 { &vop_abortop_desc, fifo_abortop }, /* abortop */
225 { &vop_inactive_desc, nfs_inactive }, /* inactive */
226 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
227 { &vop_lock_desc, nfs_lock }, /* lock */
228 { &vop_unlock_desc, nfs_unlock }, /* unlock */
229 { &vop_bmap_desc, fifo_bmap }, /* bmap */
230 { &vop_strategy_desc, genfs_badop }, /* strategy */
231 { &vop_print_desc, nfs_print }, /* print */
232 { &vop_islocked_desc, nfs_islocked }, /* islocked */
233 { &vop_pathconf_desc, fifo_pathconf }, /* pathconf */
234 { &vop_advlock_desc, fifo_advlock }, /* advlock */
235 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
236 { &vop_putpages_desc, fifo_putpages }, /* putpages */
237 { NULL, NULL }
238 };
239 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
240 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
241
242 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
243 size_t, kauth_cred_t, struct lwp *);
244 static void nfs_writerpc_extfree(struct mbuf *, caddr_t, size_t, void *);
245
246 /*
247 * Global variables
248 */
249 extern u_int32_t nfs_true, nfs_false;
250 extern u_int32_t nfs_xdrneg1;
251 extern const nfstype nfsv3_type[9];
252
253 int nfs_numasync = 0;
254 #define DIRHDSIZ _DIRENT_NAMEOFF(dp)
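/*
 * Advance a uio that has a single iovec by siz bytes: consume resid,
 * move the iovec base forward and shrink the iovec length to match.
 */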
255 #define UIO_ADVANCE(uio, siz) \
256 (void)((uio)->uio_resid -= (siz), \
257 (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
258 (uio)->uio_iov->iov_len -= (siz))
259
260 static void nfs_cache_enter(struct vnode *, struct vnode *,
261 struct componentname *);
262
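/*
 * Enter a name into the namecache, recording the child's ctime and the
 * directory's mtime so that nfs_lookup() can later detect stale entries.
 */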
263 static void
264 nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
265 struct componentname *cnp)
266 {
267 struct nfsnode *dnp = VTONFS(dvp);
268
269 if (vp != NULL) {
270 struct nfsnode *np = VTONFS(vp);
271
272 np->n_ctime = np->n_vattr->va_ctime.tv_sec;
273 }
274
275 if (!timespecisset(&dnp->n_nctime))
276 dnp->n_nctime = dnp->n_vattr->va_mtime;
277
278 cache_enter(dvp, vp, cnp);
279 }
280
281 /*
282 * nfs null call from vfs.
283 */
284 int
285 nfs_null(vp, cred, l)
286 struct vnode *vp;
287 kauth_cred_t cred;
288 struct lwp *l;
289 {
290 caddr_t bpos, dpos;
291 int error = 0;
292 struct mbuf *mreq, *mrep, *md, *mb;
293 struct nfsnode *np = VTONFS(vp);
294
295 nfsm_reqhead(np, NFSPROC_NULL, 0);
296 nfsm_request(np, NFSPROC_NULL, l, cred);
297 nfsm_reqdone;
298 return (error);
299 }
300
301 /*
302 * nfs access vnode op.
303 * For nfs version 2, just return ok. File accesses may fail later.
304 * For nfs version 3, use the access rpc to check accessibility. If file modes
305 * are changed on the server, accesses might still fail later.
306 */
307 int
308 nfs_access(v)
309 void *v;
310 {
311 struct vop_access_args /* {
312 struct vnode *a_vp;
313 int a_mode;
314 kauth_cred_t a_cred;
315 struct lwp *a_l;
316 } */ *ap = v;
317 struct vnode *vp = ap->a_vp;
318 #ifndef NFS_V2_ONLY
319 u_int32_t *tl;
320 caddr_t cp;
321 int32_t t1, t2;
322 caddr_t bpos, dpos, cp2;
323 int error = 0, attrflag;
324 struct mbuf *mreq, *mrep, *md, *mb;
325 u_int32_t mode, rmode;
326 const int v3 = NFS_ISV3(vp);
327 #endif
328 int cachevalid;
329 struct nfsnode *np = VTONFS(vp);
330 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
331
332 cachevalid = (np->n_accstamp != -1 &&
333 (time_uptime - np->n_accstamp) < NFS_ATTRTIMEO(nmp, np) &&
334 np->n_accuid == kauth_cred_geteuid(ap->a_cred));
335
336 /*
337 * Check access cache first. If this request has been made for this
338 * uid shortly before, use the cached result.
339 */
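	/*
	 * A cached success answers a request only if it covers every bit
	 * being asked for; a cached failure answers any request that asks
	 * for at least the bits that were previously denied.
	 */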
340 if (cachevalid) {
341 if (!np->n_accerror) {
342 if ((np->n_accmode & ap->a_mode) == ap->a_mode)
343 return np->n_accerror;
344 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode)
345 return np->n_accerror;
346 }
347
348 #ifndef NFS_V2_ONLY
349 /*
350 * For nfs v3, do an access rpc, otherwise you are stuck emulating
351 * ufs_access() locally using the vattr. This may not be correct,
352 * since the server may apply other access criteria such as
353 * client uid-->server uid mapping that we do not know about, but
354 * this is better than just returning anything that is lying about
355 * in the cache.
356 */
357 if (v3) {
358 nfsstats.rpccnt[NFSPROC_ACCESS]++;
359 nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
360 nfsm_fhtom(np, v3);
361 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
362 if (ap->a_mode & VREAD)
363 mode = NFSV3ACCESS_READ;
364 else
365 mode = 0;
366 if (vp->v_type != VDIR) {
367 if (ap->a_mode & VWRITE)
368 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
369 if (ap->a_mode & VEXEC)
370 mode |= NFSV3ACCESS_EXECUTE;
371 } else {
372 if (ap->a_mode & VWRITE)
373 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
374 NFSV3ACCESS_DELETE);
375 if (ap->a_mode & VEXEC)
376 mode |= NFSV3ACCESS_LOOKUP;
377 }
378 *tl = txdr_unsigned(mode);
379 nfsm_request(np, NFSPROC_ACCESS, ap->a_l, ap->a_cred);
380 nfsm_postop_attr(vp, attrflag, 0);
381 if (!error) {
382 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
383 rmode = fxdr_unsigned(u_int32_t, *tl);
384 /*
385 * The NFS V3 spec does not clarify whether or not
386 * the returned access bits can be a superset of
387 * the ones requested, so...
388 */
389 if ((rmode & mode) != mode)
390 error = EACCES;
391 }
392 nfsm_reqdone;
393 } else
394 #endif
395 return (nfsspec_access(ap));
396 #ifndef NFS_V2_ONLY
397 /*
398 * Disallow write attempts on filesystems mounted read-only;
399 * unless the file is a socket, fifo, or a block or character
400 * device resident on the filesystem.
401 */
402 if (!error && (ap->a_mode & VWRITE) &&
403 (vp->v_mount->mnt_flag & MNT_RDONLY)) {
404 switch (vp->v_type) {
405 case VREG:
406 case VDIR:
407 case VLNK:
408 error = EROFS;
409 default:
410 break;
411 }
412 }
413
414 if (!error || error == EACCES) {
415 /*
416 * If we got the same result as for a previous,
417 * different request, OR it in. Don't update
418 * the timestamp in that case.
419 */
420 if (cachevalid && np->n_accstamp != -1 &&
421 error == np->n_accerror) {
422 if (!error)
423 np->n_accmode |= ap->a_mode;
424 else if ((np->n_accmode & ap->a_mode) == ap->a_mode)
425 np->n_accmode = ap->a_mode;
426 } else {
427 np->n_accstamp = time_uptime;
428 np->n_accuid = kauth_cred_geteuid(ap->a_cred);
429 np->n_accmode = ap->a_mode;
430 np->n_accerror = error;
431 }
432 }
433
434 return (error);
435 #endif
436 }
437
438 /*
439 * nfs open vnode op
440 * Check to see if the type is ok
441 * and that deletion is not in progress.
442 * For paged in text files, you will need to flush the page cache
443 * if consistency is lost.
444 */
445 /* ARGSUSED */
446 int
447 nfs_open(v)
448 void *v;
449 {
450 struct vop_open_args /* {
451 struct vnode *a_vp;
452 int a_mode;
453 kauth_cred_t a_cred;
454 struct lwp *a_l;
455 } */ *ap = v;
456 struct vnode *vp = ap->a_vp;
457 struct nfsnode *np = VTONFS(vp);
458 int error;
459
460 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
461 return (EACCES);
462 }
463
464 if (ap->a_mode & FREAD) {
465 if (np->n_rcred != NULL)
466 kauth_cred_free(np->n_rcred);
467 np->n_rcred = ap->a_cred;
468 kauth_cred_hold(np->n_rcred);
469 }
470 if (ap->a_mode & FWRITE) {
471 if (np->n_wcred != NULL)
472 kauth_cred_free(np->n_wcred);
473 np->n_wcred = ap->a_cred;
474 kauth_cred_hold(np->n_wcred);
475 }
476
477 error = nfs_flushstalebuf(vp, ap->a_cred, ap->a_l, 0);
478 if (error)
479 return error;
480
481 NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */
482
483 return (0);
484 }
485
486 /*
487 * nfs close vnode op
488 * What an NFS client should do upon close after writing is a debatable issue.
489 * Most NFS clients push delayed writes to the server upon close, basically for
490 * two reasons:
491 * 1 - So that any write errors may be reported back to the client process
492 * doing the close system call. By far the two most likely errors are
493 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
494 * 2 - To put a worst case upper bound on cache inconsistency between
495 * multiple clients for the file.
496 * There is also a consistency problem for Version 2 of the protocol w.r.t.
497 * not being able to tell if other clients are writing a file concurrently,
498 * since there is no way of knowing if the changed modify time in the reply
499 * is only due to the write for this client.
500 * (NFS Version 3 provides weak cache consistency data in the reply that
501 * should be sufficient to detect and handle this case.)
502 *
503 * The current code does the following:
504 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
505 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
506 * or commit them (this satisfies 1 and 2 except for the
507 * case where the server crashes after this close but
508 * before the commit RPC, which is felt to be "good
509 * enough". Changing the last argument to nfs_flush() to
510 * a 1 would force a commit operation, if it is felt a
 511  *			     commit is necessary now.)
512 */
513 /* ARGSUSED */
514 int
515 nfs_close(v)
516 void *v;
517 {
518 struct vop_close_args /* {
519 struct vnodeop_desc *a_desc;
520 struct vnode *a_vp;
521 int a_fflag;
522 kauth_cred_t a_cred;
523 struct lwp *a_l;
524 } */ *ap = v;
525 struct vnode *vp = ap->a_vp;
526 struct nfsnode *np = VTONFS(vp);
527 int error = 0;
528 UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);
529
530 if (vp->v_type == VREG) {
531 if (np->n_flag & NMODIFIED) {
532 #ifndef NFS_V2_ONLY
533 if (NFS_ISV3(vp)) {
534 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_l, 0);
535 np->n_flag &= ~NMODIFIED;
536 } else
537 #endif
538 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_l, 1);
539 NFS_INVALIDATE_ATTRCACHE(np);
540 }
541 if (np->n_flag & NWRITEERR) {
542 np->n_flag &= ~NWRITEERR;
543 error = np->n_error;
544 }
545 }
546 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
547 return (error);
548 }
549
550 /*
551 * nfs getattr call from vfs.
552 */
553 int
554 nfs_getattr(v)
555 void *v;
556 {
557 struct vop_getattr_args /* {
558 struct vnode *a_vp;
559 struct vattr *a_vap;
560 kauth_cred_t a_cred;
561 struct lwp *a_l;
562 } */ *ap = v;
563 struct vnode *vp = ap->a_vp;
564 struct nfsnode *np = VTONFS(vp);
565 caddr_t cp;
566 u_int32_t *tl;
567 int32_t t1, t2;
568 caddr_t bpos, dpos;
569 int error = 0;
570 struct mbuf *mreq, *mrep, *md, *mb;
571 const int v3 = NFS_ISV3(vp);
572
573 /*
574 * Update local times for special files.
575 */
576 if (np->n_flag & (NACC | NUPD))
577 np->n_flag |= NCHG;
578
579 /*
580 * if we have delayed truncation, do it now.
581 */
582 nfs_delayedtruncate(vp);
583
584 /*
585 * First look in the cache.
586 */
587 if (nfs_getattrcache(vp, ap->a_vap) == 0)
588 return (0);
589 nfsstats.rpccnt[NFSPROC_GETATTR]++;
590 nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
591 nfsm_fhtom(np, v3);
592 nfsm_request(np, NFSPROC_GETATTR, ap->a_l, ap->a_cred);
593 if (!error) {
594 nfsm_loadattr(vp, ap->a_vap, 0);
595 if (vp->v_type == VDIR &&
596 ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
597 ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
598 }
599 nfsm_reqdone;
600 return (error);
601 }
602
603 /*
604 * nfs setattr call.
605 */
606 int
607 nfs_setattr(v)
608 void *v;
609 {
610 struct vop_setattr_args /* {
611 struct vnodeop_desc *a_desc;
612 struct vnode *a_vp;
613 struct vattr *a_vap;
614 kauth_cred_t a_cred;
615 struct lwp *a_l;
616 } */ *ap = v;
617 struct vnode *vp = ap->a_vp;
618 struct nfsnode *np = VTONFS(vp);
619 struct vattr *vap = ap->a_vap;
620 int error = 0;
621 u_quad_t tsize = 0;
622
623 /*
624 * Setting of flags is not supported.
625 */
626 if (vap->va_flags != VNOVAL)
627 return (EOPNOTSUPP);
628
629 /*
630 * Disallow write attempts if the filesystem is mounted read-only.
631 */
632 if ((vap->va_uid != (uid_t)VNOVAL ||
633 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
634 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
635 (vp->v_mount->mnt_flag & MNT_RDONLY))
636 return (EROFS);
637 if (vap->va_size != VNOVAL) {
638 switch (vp->v_type) {
639 case VDIR:
640 return (EISDIR);
641 case VCHR:
642 case VBLK:
643 case VSOCK:
644 case VFIFO:
645 if (vap->va_mtime.tv_sec == VNOVAL &&
646 vap->va_atime.tv_sec == VNOVAL &&
647 vap->va_mode == (mode_t)VNOVAL &&
648 vap->va_uid == (uid_t)VNOVAL &&
649 vap->va_gid == (gid_t)VNOVAL)
650 return (0);
651 vap->va_size = VNOVAL;
652 break;
653 default:
654 /*
655 * Disallow write attempts if the filesystem is
656 * mounted read-only.
657 */
658 if (vp->v_mount->mnt_flag & MNT_RDONLY)
659 return (EROFS);
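			/*
			 * Update our notion of the file size and flush the
			 * buffer cache now; remember the old size so it can
			 * be restored if the setattr RPC fails below.
			 */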
660 genfs_node_wrlock(vp);
661 uvm_vnp_setsize(vp, vap->va_size);
662 tsize = np->n_size;
663 np->n_size = vap->va_size;
664 if (vap->va_size == 0)
665 error = nfs_vinvalbuf(vp, 0,
666 ap->a_cred, ap->a_l, 1);
667 else
668 error = nfs_vinvalbuf(vp, V_SAVE,
669 ap->a_cred, ap->a_l, 1);
670 if (error) {
671 uvm_vnp_setsize(vp, tsize);
672 genfs_node_unlock(vp);
673 return (error);
674 }
675 np->n_vattr->va_size = vap->va_size;
676 }
677 } else {
678 /*
679 * flush files before setattr because a later write of
680 * cached data might change timestamps or reset sugid bits
681 */
682 if ((vap->va_mtime.tv_sec != VNOVAL ||
683 vap->va_atime.tv_sec != VNOVAL ||
684 vap->va_mode != VNOVAL) &&
685 vp->v_type == VREG &&
686 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
687 ap->a_l, 1)) == EINTR)
688 return (error);
689 }
690 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_l);
691 if (vap->va_size != VNOVAL) {
692 if (error) {
693 np->n_size = np->n_vattr->va_size = tsize;
694 uvm_vnp_setsize(vp, np->n_size);
695 }
696 genfs_node_unlock(vp);
697 }
698 VN_KNOTE(vp, NOTE_ATTRIB);
699 return (error);
700 }
701
702 /*
703 * Do an nfs setattr rpc.
704 */
705 int
706 nfs_setattrrpc(vp, vap, cred, l)
707 struct vnode *vp;
708 struct vattr *vap;
709 kauth_cred_t cred;
710 struct lwp *l;
711 {
712 struct nfsv2_sattr *sp;
713 caddr_t cp;
714 int32_t t1, t2;
715 caddr_t bpos, dpos;
716 u_int32_t *tl;
717 int error = 0;
718 struct mbuf *mreq, *mrep, *md, *mb;
719 const int v3 = NFS_ISV3(vp);
720 struct nfsnode *np = VTONFS(vp);
721 #ifndef NFS_V2_ONLY
722 int wccflag = NFSV3_WCCRATTR;
723 caddr_t cp2;
724 #endif
725
726 nfsstats.rpccnt[NFSPROC_SETATTR]++;
727 nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
728 nfsm_fhtom(np, v3);
729 #ifndef NFS_V2_ONLY
730 if (v3) {
731 nfsm_v3attrbuild(vap, TRUE);
732 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
733 *tl = nfs_false;
734 } else {
735 #endif
736 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
737 if (vap->va_mode == (mode_t)VNOVAL)
738 sp->sa_mode = nfs_xdrneg1;
739 else
740 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
741 if (vap->va_uid == (uid_t)VNOVAL)
742 sp->sa_uid = nfs_xdrneg1;
743 else
744 sp->sa_uid = txdr_unsigned(vap->va_uid);
745 if (vap->va_gid == (gid_t)VNOVAL)
746 sp->sa_gid = nfs_xdrneg1;
747 else
748 sp->sa_gid = txdr_unsigned(vap->va_gid);
749 sp->sa_size = txdr_unsigned(vap->va_size);
750 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
751 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
752 #ifndef NFS_V2_ONLY
753 }
754 #endif
755 nfsm_request(np, NFSPROC_SETATTR, l, cred);
756 #ifndef NFS_V2_ONLY
757 if (v3) {
758 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, FALSE);
759 } else
760 #endif
761 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
762 nfsm_reqdone;
763 return (error);
764 }
765
766 /*
767 * nfs lookup call, one step at a time...
768 * First look in cache
769 * If not found, unlock the directory nfsnode and do the rpc
770 *
771 * This code is full of lock/unlock statements and checks, because
772 * we continue after cache_lookup has finished (we need to check
773 * with the attr cache and do an rpc if it has timed out). This means
774 * that the locking effects of cache_lookup have to be taken into
775 * account.
776 */
777 int
778 nfs_lookup(v)
779 void *v;
780 {
781 struct vop_lookup_args /* {
782 struct vnodeop_desc *a_desc;
783 struct vnode *a_dvp;
784 struct vnode **a_vpp;
785 struct componentname *a_cnp;
786 } */ *ap = v;
787 struct componentname *cnp = ap->a_cnp;
788 struct vnode *dvp = ap->a_dvp;
789 struct vnode **vpp = ap->a_vpp;
790 int flags;
791 struct vnode *newvp;
792 u_int32_t *tl;
793 caddr_t cp;
794 int32_t t1, t2;
795 caddr_t bpos, dpos, cp2;
796 struct mbuf *mreq, *mrep, *md, *mb;
797 long len;
798 nfsfh_t *fhp;
799 struct nfsnode *np;
800 int error = 0, attrflag, fhsize;
801 const int v3 = NFS_ISV3(dvp);
802
803 flags = cnp->cn_flags;
804
805 *vpp = NULLVP;
806 newvp = NULLVP;
807 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
808 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
809 return (EROFS);
810 if (dvp->v_type != VDIR)
811 return (ENOTDIR);
812
813 /*
814 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
815 */
816 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
817 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_lwp);
818 if (error)
819 return error;
820 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
821 return EISDIR;
822 VREF(dvp);
823 *vpp = dvp;
824 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
825 cnp->cn_flags |= SAVENAME;
826 return 0;
827 }
828
829 np = VTONFS(dvp);
830
831 /*
832 * Before tediously performing a linear scan of the directory,
833 * check the name cache to see if the directory/name pair
834 * we are looking for is known already.
835 * If the directory/name pair is found in the name cache,
 836  * we have to ensure the directory has not changed since
 837  * the cache entry was created. If it has,
838 * the cache entry has to be ignored.
839 */
840 error = cache_lookup_raw(dvp, vpp, cnp);
841 KASSERT(dvp != *vpp);
842 if (error >= 0) {
843 struct vattr vattr;
844 int err2;
845
846 if (error && error != ENOENT) {
847 *vpp = NULLVP;
848 return error;
849 }
850
851 err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_lwp);
852 if (err2 != 0) {
853 if (error == 0)
854 vrele(*vpp);
855 *vpp = NULLVP;
856 return err2;
857 }
858
859 if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred,
860 cnp->cn_lwp) || timespeccmp(&vattr.va_mtime,
861 &VTONFS(dvp)->n_nctime, !=)) {
862 if (error == 0) {
863 vrele(*vpp);
864 *vpp = NULLVP;
865 }
866 cache_purge1(dvp, NULL, PURGE_CHILDREN);
867 timespecclear(&np->n_nctime);
868 goto dorpc;
869 }
870
871 if (error == ENOENT) {
872 goto noentry;
873 }
874
875 newvp = *vpp;
876 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_lwp)
877 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
878 nfsstats.lookupcache_hits++;
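			/*
			 * For "..", drop the parent's lock before locking the
			 * child to avoid a lock-order deadlock, then retake it.
			 */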
879 if ((flags & ISDOTDOT) != 0) {
880 VOP_UNLOCK(dvp, 0);
881 }
882 error = vn_lock(newvp, LK_EXCLUSIVE);
883 if ((flags & ISDOTDOT) != 0) {
884 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
885 }
886 if (error) {
887 /* newvp has been revoked. */
888 vrele(newvp);
889 *vpp = NULL;
890 return error;
891 }
892 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
893 cnp->cn_flags |= SAVENAME;
894 KASSERT(newvp->v_type != VNON);
895 return (0);
896 }
897 cache_purge1(newvp, NULL, PURGE_PARENTS);
898 vrele(newvp);
899 *vpp = NULLVP;
900 }
901 dorpc:
902 #if 0
903 /*
904 * because nfsv3 has the same CREATE semantics as ours,
905 * we don't have to perform LOOKUPs beforehand.
906 *
907 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
908 * XXX although we have no way to know if O_EXCL is requested or not.
909 */
910
911 if (v3 && cnp->cn_nameiop == CREATE &&
912 (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
913 (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
914 cnp->cn_flags |= SAVENAME;
915 return (EJUSTRETURN);
916 }
917 #endif /* 0 */
918
919 error = 0;
920 newvp = NULLVP;
921 nfsstats.lookupcache_misses++;
922 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
923 len = cnp->cn_namelen;
924 nfsm_reqhead(np, NFSPROC_LOOKUP,
925 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
926 nfsm_fhtom(np, v3);
927 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
928 nfsm_request(np, NFSPROC_LOOKUP, cnp->cn_lwp, cnp->cn_cred);
929 if (error) {
930 nfsm_postop_attr(dvp, attrflag, 0);
931 m_freem(mrep);
932 goto nfsmout;
933 }
934 nfsm_getfh(fhp, fhsize, v3);
935
936 /*
937 * Handle RENAME case...
938 */
939 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
940 if (NFS_CMPFH(np, fhp, fhsize)) {
941 m_freem(mrep);
942 return (EISDIR);
943 }
944 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
945 if (error) {
946 m_freem(mrep);
947 return error;
948 }
949 newvp = NFSTOV(np);
950 #ifndef NFS_V2_ONLY
951 if (v3) {
952 nfsm_postop_attr(newvp, attrflag, 0);
953 nfsm_postop_attr(dvp, attrflag, 0);
954 } else
955 #endif
956 nfsm_loadattr(newvp, (struct vattr *)0, 0);
957 *vpp = newvp;
958 m_freem(mrep);
959 cnp->cn_flags |= SAVENAME;
960 goto validate;
961 }
962
963 /*
964 * The postop attr handling is duplicated for each if case,
965 * because it should be done while dvp is locked (unlocking
966 * dvp is different for each case).
967 */
968
969 if (NFS_CMPFH(np, fhp, fhsize)) {
970 /*
971 * "." lookup
972 */
973 VREF(dvp);
974 newvp = dvp;
975 #ifndef NFS_V2_ONLY
976 if (v3) {
977 nfsm_postop_attr(newvp, attrflag, 0);
978 nfsm_postop_attr(dvp, attrflag, 0);
979 } else
980 #endif
981 nfsm_loadattr(newvp, (struct vattr *)0, 0);
982 } else if (flags & ISDOTDOT) {
983 /*
984 * ".." lookup
985 */
986 VOP_UNLOCK(dvp, 0);
987 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
988 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
989 if (error) {
990 m_freem(mrep);
991 return error;
992 }
993 newvp = NFSTOV(np);
994
995 #ifndef NFS_V2_ONLY
996 if (v3) {
997 nfsm_postop_attr(newvp, attrflag, 0);
998 nfsm_postop_attr(dvp, attrflag, 0);
999 } else
1000 #endif
1001 nfsm_loadattr(newvp, (struct vattr *)0, 0);
1002 } else {
1003 /*
1004 * Other lookups.
1005 */
1006 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
1007 if (error) {
1008 m_freem(mrep);
1009 return error;
1010 }
1011 newvp = NFSTOV(np);
1012 #ifndef NFS_V2_ONLY
1013 if (v3) {
1014 nfsm_postop_attr(newvp, attrflag, 0);
1015 nfsm_postop_attr(dvp, attrflag, 0);
1016 } else
1017 #endif
1018 nfsm_loadattr(newvp, (struct vattr *)0, 0);
1019 }
1020 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
1021 cnp->cn_flags |= SAVENAME;
1022 if ((cnp->cn_flags & MAKEENTRY) &&
1023 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
1024 nfs_cache_enter(dvp, newvp, cnp);
1025 }
1026 *vpp = newvp;
1027 nfsm_reqdone;
1028 if (error) {
1029 /*
1030 * We get here only because of errors returned by
1031 * the RPC. Otherwise we'll have returned above
1032 * (the nfsm_* macros will jump to nfsm_reqdone
1033 * on error).
1034 */
1035 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) &&
1036 cnp->cn_nameiop != CREATE) {
1037 nfs_cache_enter(dvp, NULL, cnp);
1038 }
1039 if (newvp != NULLVP) {
1040 if (newvp == dvp) {
1041 vrele(newvp);
1042 } else {
1043 vput(newvp);
1044 }
1045 }
1046 noentry:
1047 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
1048 (flags & ISLASTCN) && error == ENOENT) {
1049 if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
1050 error = EROFS;
1051 } else {
1052 error = EJUSTRETURN;
1053 cnp->cn_flags |= SAVENAME;
1054 }
1055 }
1056 *vpp = NULL;
1057 return error;
1058 }
1059
1060 validate:
1061 /*
1062 * make sure we have valid type and size.
1063 */
1064
1065 newvp = *vpp;
1066 if (newvp->v_type == VNON) {
1067 struct vattr vattr; /* dummy */
1068
1069 KASSERT(VTONFS(newvp)->n_attrstamp == 0);
1070 error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_lwp);
1071 if (error) {
1072 vput(newvp);
1073 *vpp = NULL;
1074 }
1075 }
1076
1077 return error;
1078 }
1079
1080 /*
1081 * nfs read call.
1082 * Just call nfs_bioread() to do the work.
1083 */
1084 int
1085 nfs_read(v)
1086 void *v;
1087 {
1088 struct vop_read_args /* {
1089 struct vnode *a_vp;
1090 struct uio *a_uio;
1091 int a_ioflag;
1092 kauth_cred_t a_cred;
1093 } */ *ap = v;
1094 struct vnode *vp = ap->a_vp;
1095
1096 if (vp->v_type != VREG)
1097 return EISDIR;
1098 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
1099 }
1100
1101 /*
1102 * nfs readlink call
1103 */
1104 int
1105 nfs_readlink(v)
1106 void *v;
1107 {
1108 struct vop_readlink_args /* {
1109 struct vnode *a_vp;
1110 struct uio *a_uio;
1111 kauth_cred_t a_cred;
1112 } */ *ap = v;
1113 struct vnode *vp = ap->a_vp;
1114 struct nfsnode *np = VTONFS(vp);
1115
1116 if (vp->v_type != VLNK)
1117 return (EPERM);
1118
1119 if (np->n_rcred != NULL) {
1120 kauth_cred_free(np->n_rcred);
1121 }
1122 np->n_rcred = ap->a_cred;
1123 kauth_cred_hold(np->n_rcred);
1124
1125 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
1126 }
1127
1128 /*
1129 * Do a readlink rpc.
1130 * Called by nfs_doio() from below the buffer cache.
1131 */
1132 int
1133 nfs_readlinkrpc(vp, uiop, cred)
1134 struct vnode *vp;
1135 struct uio *uiop;
1136 kauth_cred_t cred;
1137 {
1138 u_int32_t *tl;
1139 caddr_t cp;
1140 int32_t t1, t2;
1141 caddr_t bpos, dpos, cp2;
1142 int error = 0;
1143 uint32_t len;
1144 struct mbuf *mreq, *mrep, *md, *mb;
1145 const int v3 = NFS_ISV3(vp);
1146 struct nfsnode *np = VTONFS(vp);
1147 #ifndef NFS_V2_ONLY
1148 int attrflag;
1149 #endif
1150
1151 nfsstats.rpccnt[NFSPROC_READLINK]++;
1152 nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
1153 nfsm_fhtom(np, v3);
1154 nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
1155 #ifndef NFS_V2_ONLY
1156 if (v3)
1157 nfsm_postop_attr(vp, attrflag, 0);
1158 #endif
1159 if (!error) {
1160 #ifndef NFS_V2_ONLY
1161 if (v3) {
1162 nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
1163 len = fxdr_unsigned(uint32_t, *tl);
1164 if (len > MAXPATHLEN) {
1165 /*
1166 * this pathname is too long for us.
1167 */
1168 m_freem(mrep);
1169 /* Solaris returns EINVAL. should we follow? */
1170 error = ENAMETOOLONG;
1171 goto nfsmout;
1172 }
1173 } else
1174 #endif
1175 {
1176 nfsm_strsiz(len, NFS_MAXPATHLEN);
1177 }
1178 nfsm_mtouio(uiop, len);
1179 }
1180 nfsm_reqdone;
1181 return (error);
1182 }
1183
1184 /*
1185 * nfs read rpc call
1186 * Ditto above
1187 */
1188 int
1189 nfs_readrpc(vp, uiop)
1190 struct vnode *vp;
1191 struct uio *uiop;
1192 {
1193 u_int32_t *tl;
1194 caddr_t cp;
1195 int32_t t1, t2;
1196 caddr_t bpos, dpos, cp2;
1197 struct mbuf *mreq, *mrep, *md, *mb;
1198 struct nfsmount *nmp;
1199 int error = 0, len, retlen, tsiz, eof, byte_count;
1200 const int v3 = NFS_ISV3(vp);
1201 struct nfsnode *np = VTONFS(vp);
1202 #ifndef NFS_V2_ONLY
1203 int attrflag;
1204 #endif
1205
1206 #ifndef nolint
1207 eof = 0;
1208 #endif
1209 nmp = VFSTONFS(vp->v_mount);
1210 tsiz = uiop->uio_resid;
1211 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1212 return (EFBIG);
1213 iostat_busy(nmp->nm_stats);
1214 byte_count = 0; /* count bytes actually transferred */
1215 while (tsiz > 0) {
1216 nfsstats.rpccnt[NFSPROC_READ]++;
1217 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1218 nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1219 nfsm_fhtom(np, v3);
1220 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1221 #ifndef NFS_V2_ONLY
1222 if (v3) {
1223 txdr_hyper(uiop->uio_offset, tl);
1224 *(tl + 2) = txdr_unsigned(len);
1225 } else
1226 #endif
1227 {
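			/* NFSv2 read args: offset, count and an unused "totalcount". */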
1228 *tl++ = txdr_unsigned(uiop->uio_offset);
1229 *tl++ = txdr_unsigned(len);
1230 *tl = 0;
1231 }
1232 nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
1233 #ifndef NFS_V2_ONLY
1234 if (v3) {
1235 nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
1236 if (error) {
1237 m_freem(mrep);
1238 goto nfsmout;
1239 }
1240 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1241 eof = fxdr_unsigned(int, *(tl + 1));
1242 } else
1243 #endif
1244 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1245 nfsm_strsiz(retlen, nmp->nm_rsize);
1246 nfsm_mtouio(uiop, retlen);
1247 m_freem(mrep);
1248 tsiz -= retlen;
1249 byte_count += retlen;
1250 #ifndef NFS_V2_ONLY
1251 if (v3) {
1252 if (eof || retlen == 0)
1253 tsiz = 0;
1254 } else
1255 #endif
1256 if (retlen < len)
1257 tsiz = 0;
1258 }
1259 nfsmout:
1260 iostat_unbusy(nmp->nm_stats, byte_count, 1);
1261 return (error);
1262 }
1263
1264 struct nfs_writerpc_context {
1265 struct simplelock nwc_slock;
1266 volatile int nwc_mbufcount;
1267 };
1268
1269 /*
1270  * free an mbuf used to refer to protected pages during a write rpc call.
1271 * called at splvm.
1272 */
1273 static void
1274 nfs_writerpc_extfree(struct mbuf *m, caddr_t tbuf, size_t size, void *arg)
1275 {
1276 struct nfs_writerpc_context *ctx = arg;
1277
1278 KASSERT(m != NULL);
1279 KASSERT(ctx != NULL);
1280 pool_cache_put(&mbpool_cache, m);
1281 simple_lock(&ctx->nwc_slock);
1282 if (--ctx->nwc_mbufcount == 0) {
1283 wakeup(ctx);
1284 }
1285 simple_unlock(&ctx->nwc_slock);
1286 }
1287
1288 /*
1289 * nfs write call
1290 */
1291 int
1292 nfs_writerpc(vp, uiop, iomode, pageprotected, stalewriteverfp)
1293 struct vnode *vp;
1294 struct uio *uiop;
1295 int *iomode;
1296 boolean_t pageprotected;
1297 boolean_t *stalewriteverfp;
1298 {
1299 u_int32_t *tl;
1300 caddr_t cp;
1301 int32_t t1, t2;
1302 caddr_t bpos, dpos;
1303 struct mbuf *mreq, *mrep, *md, *mb;
1304 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1305 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
1306 const int v3 = NFS_ISV3(vp);
1307 int committed = NFSV3WRITE_FILESYNC;
1308 struct nfsnode *np = VTONFS(vp);
1309 struct nfs_writerpc_context ctx;
1310 int s, byte_count;
1311 struct lwp *l = NULL;
1312 size_t origresid;
1313 #ifndef NFS_V2_ONLY
1314 caddr_t cp2;
1315 int rlen, commit;
1316 #endif
1317
1318 simple_lock_init(&ctx.nwc_slock);
1319 ctx.nwc_mbufcount = 1;
1320
1321 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1322 panic("writerpc readonly vp %p", vp);
1323 }
1324
1325 #ifdef DIAGNOSTIC
1326 if (uiop->uio_iovcnt != 1)
1327 panic("nfs: writerpc iovcnt > 1");
1328 #endif
1329 tsiz = uiop->uio_resid;
1330 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1331 return (EFBIG);
1332 if (pageprotected) {
1333 l = curlwp;
1334 PHOLD(l);
1335 }
1336 retry:
1337 origresid = uiop->uio_resid;
1338 KASSERT(origresid == uiop->uio_iov->iov_len);
1339 iostat_busy(nmp->nm_stats);
1340 byte_count = 0; /* count of bytes actually written */
1341 while (tsiz > 0) {
1342 uint32_t datalen; /* data bytes need to be allocated in mbuf */
1343 uint32_t backup;
1344 boolean_t stalewriteverf = FALSE;
1345
1346 nfsstats.rpccnt[NFSPROC_WRITE]++;
1347 len = min(tsiz, nmp->nm_wsize);
1348 datalen = pageprotected ? 0 : nfsm_rndup(len);
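		/*
		 * If the pages are loaned read-only (pageprotected), they are
		 * attached below as external mbuf storage instead of being
		 * copied into the request, so no data space is reserved here.
		 */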
1349 nfsm_reqhead(np, NFSPROC_WRITE,
1350 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
1351 nfsm_fhtom(np, v3);
1352 #ifndef NFS_V2_ONLY
1353 if (v3) {
1354 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1355 txdr_hyper(uiop->uio_offset, tl);
1356 tl += 2;
1357 *tl++ = txdr_unsigned(len);
1358 *tl++ = txdr_unsigned(*iomode);
1359 *tl = txdr_unsigned(len);
1360 } else
1361 #endif
1362 {
1363 u_int32_t x;
1364
1365 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1366 /* Set both "begin" and "current" to non-garbage. */
1367 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1368 *tl++ = x; /* "begin offset" */
1369 *tl++ = x; /* "current offset" */
1370 x = txdr_unsigned(len);
1371 *tl++ = x; /* total to this offset */
1372 *tl = x; /* size of this write */
1373
1374 }
1375 if (pageprotected) {
1376 /*
1377 * since we know pages can't be modified during i/o,
1378 * no need to copy them for us.
1379 */
1380 struct mbuf *m;
1381 struct iovec *iovp = uiop->uio_iov;
1382
1383 m = m_get(M_WAIT, MT_DATA);
1384 MCLAIM(m, &nfs_mowner);
1385 MEXTADD(m, iovp->iov_base, len, M_MBUF,
1386 nfs_writerpc_extfree, &ctx);
1387 m->m_flags |= M_EXT_ROMAP;
1388 m->m_len = len;
1389 mb->m_next = m;
1390 /*
1391 * no need to maintain mb and bpos here
1392 			 * because nobody cares about them later.
1393 */
1394 #if 0
1395 mb = m;
1396 			bpos = mtod(mb, caddr_t) + mb->m_len;
1397 #endif
1398 UIO_ADVANCE(uiop, len);
1399 uiop->uio_offset += len;
1400 s = splvm();
1401 simple_lock(&ctx.nwc_slock);
1402 ctx.nwc_mbufcount++;
1403 simple_unlock(&ctx.nwc_slock);
1404 splx(s);
1405 nfs_zeropad(mb, 0, nfsm_padlen(len));
1406 } else {
1407 nfsm_uiotom(uiop, len);
1408 }
1409 nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
1410 #ifndef NFS_V2_ONLY
1411 if (v3) {
1412 wccflag = NFSV3_WCCCHK;
1413 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
1414 if (!error) {
1415 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1416 + NFSX_V3WRITEVERF);
1417 rlen = fxdr_unsigned(int, *tl++);
1418 if (rlen == 0) {
1419 error = NFSERR_IO;
1420 m_freem(mrep);
1421 break;
1422 } else if (rlen < len) {
1423 backup = len - rlen;
1424 UIO_ADVANCE(uiop, -backup);
1425 uiop->uio_offset -= backup;
1426 len = rlen;
1427 }
1428 commit = fxdr_unsigned(int, *tl++);
1429
1430 /*
1431 				 * Return the lowest commitment level
1432 * obtained by any of the RPCs.
1433 */
1434 if (committed == NFSV3WRITE_FILESYNC)
1435 committed = commit;
1436 else if (committed == NFSV3WRITE_DATASYNC &&
1437 commit == NFSV3WRITE_UNSTABLE)
1438 committed = commit;
1439 simple_lock(&nmp->nm_slock);
1440 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
1441 memcpy(nmp->nm_writeverf, tl,
1442 NFSX_V3WRITEVERF);
1443 nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
1444 } else if ((nmp->nm_iflag &
1445 NFSMNT_STALEWRITEVERF) ||
1446 memcmp(tl, nmp->nm_writeverf,
1447 NFSX_V3WRITEVERF)) {
1448 memcpy(nmp->nm_writeverf, tl,
1449 NFSX_V3WRITEVERF);
1450 /*
1451 * note NFSMNT_STALEWRITEVERF
1452 * if we're the first thread to
1453 * notice it.
1454 */
1455 if ((nmp->nm_iflag &
1456 NFSMNT_STALEWRITEVERF) == 0) {
1457 stalewriteverf = TRUE;
1458 nmp->nm_iflag |=
1459 NFSMNT_STALEWRITEVERF;
1460 }
1461 }
1462 simple_unlock(&nmp->nm_slock);
1463 }
1464 } else
1465 #endif
1466 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1467 if (wccflag)
1468 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
1469 m_freem(mrep);
1470 if (error)
1471 break;
1472 tsiz -= len;
1473 byte_count += len;
1474 if (stalewriteverf) {
1475 *stalewriteverfp = TRUE;
1476 stalewriteverf = FALSE;
1477 if (committed == NFSV3WRITE_UNSTABLE &&
1478 len != origresid) {
1479 /*
1480 * if our write requests weren't atomic but
1481 				 * unstable, data from previous iterations
1482 				 * might already have been lost.
1483 				 * in that case, we should resend it to nfsd.
1484 */
1485 backup = origresid - tsiz;
1486 UIO_ADVANCE(uiop, -backup);
1487 uiop->uio_offset -= backup;
1488 tsiz = origresid;
1489 goto retry;
1490 }
1491 }
1492 }
1493 nfsmout:
1494 iostat_unbusy(nmp->nm_stats, byte_count, 0);
1495 if (pageprotected) {
1496 /*
1497 * wait until mbufs go away.
1498 * retransmitted mbufs can survive longer than rpc requests
1499 * themselves.
1500 */
1501 s = splvm();
1502 simple_lock(&ctx.nwc_slock);
1503 ctx.nwc_mbufcount--;
1504 while (ctx.nwc_mbufcount > 0) {
1505 ltsleep(&ctx, PRIBIO, "nfsmblk", 0, &ctx.nwc_slock);
1506 }
1507 simple_unlock(&ctx.nwc_slock);
1508 splx(s);
1509 PRELE(l);
1510 }
1511 *iomode = committed;
1512 if (error)
1513 uiop->uio_resid = tsiz;
1514 return (error);
1515 }
1516
1517 /*
1518 * nfs mknod rpc
1519 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1520 * mode set to specify the file type and the size field for rdev.
1521 */
1522 int
1523 nfs_mknodrpc(dvp, vpp, cnp, vap)
1524 struct vnode *dvp;
1525 struct vnode **vpp;
1526 struct componentname *cnp;
1527 struct vattr *vap;
1528 {
1529 struct nfsv2_sattr *sp;
1530 u_int32_t *tl;
1531 caddr_t cp;
1532 int32_t t1, t2;
1533 struct vnode *newvp = (struct vnode *)0;
1534 struct nfsnode *dnp, *np;
1535 char *cp2;
1536 caddr_t bpos, dpos;
1537 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1538 struct mbuf *mreq, *mrep, *md, *mb;
1539 u_int32_t rdev;
1540 const int v3 = NFS_ISV3(dvp);
1541
1542 if (vap->va_type == VCHR || vap->va_type == VBLK)
1543 rdev = txdr_unsigned(vap->va_rdev);
1544 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1545 rdev = nfs_xdrneg1;
1546 else {
1547 VOP_ABORTOP(dvp, cnp);
1548 vput(dvp);
1549 return (EOPNOTSUPP);
1550 }
1551 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1552 dnp = VTONFS(dvp);
1553 nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1554 	    nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1555 nfsm_fhtom(dnp, v3);
1556 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1557 #ifndef NFS_V2_ONLY
1558 if (v3) {
1559 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1560 *tl++ = vtonfsv3_type(vap->va_type);
1561 nfsm_v3attrbuild(vap, FALSE);
1562 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1563 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1564 *tl++ = txdr_unsigned(major(vap->va_rdev));
1565 *tl = txdr_unsigned(minor(vap->va_rdev));
1566 }
1567 } else
1568 #endif
1569 {
1570 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1571 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1572 sp->sa_uid = nfs_xdrneg1;
1573 sp->sa_gid = nfs_xdrneg1;
1574 sp->sa_size = rdev;
1575 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1576 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1577 }
1578 nfsm_request(dnp, NFSPROC_MKNOD, cnp->cn_lwp, cnp->cn_cred);
1579 if (!error) {
1580 nfsm_mtofh(dvp, newvp, v3, gotvp);
1581 if (!gotvp) {
1582 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1583 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp, &np);
1584 if (!error)
1585 newvp = NFSTOV(np);
1586 }
1587 }
1588 #ifndef NFS_V2_ONLY
1589 if (v3)
1590 nfsm_wcc_data(dvp, wccflag, 0, !error);
1591 #endif
1592 nfsm_reqdone;
1593 if (error) {
1594 if (newvp)
1595 vput(newvp);
1596 } else {
1597 if (cnp->cn_flags & MAKEENTRY)
1598 nfs_cache_enter(dvp, newvp, cnp);
1599 *vpp = newvp;
1600 }
1601 PNBUF_PUT(cnp->cn_pnbuf);
1602 VTONFS(dvp)->n_flag |= NMODIFIED;
1603 if (!wccflag)
1604 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1605 vput(dvp);
1606 return (error);
1607 }
1608
1609 /*
1610 * nfs mknod vop
1611 * just call nfs_mknodrpc() to do the work.
1612 */
1613 /* ARGSUSED */
1614 int
1615 nfs_mknod(v)
1616 void *v;
1617 {
1618 struct vop_mknod_args /* {
1619 struct vnode *a_dvp;
1620 struct vnode **a_vpp;
1621 struct componentname *a_cnp;
1622 struct vattr *a_vap;
1623 } */ *ap = v;
1624 struct vnode *dvp = ap->a_dvp;
1625 struct componentname *cnp = ap->a_cnp;
1626 int error;
1627
1628 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
1629 VN_KNOTE(dvp, NOTE_WRITE);
1630 if (error == 0 || error == EEXIST)
1631 cache_purge1(dvp, cnp, 0);
1632 return (error);
1633 }
1634
1635 #ifndef NFS_V2_ONLY
1636 static u_long create_verf;
1637 #endif
1638 /*
1639 * nfs file create call
1640 */
1641 int
1642 nfs_create(v)
1643 void *v;
1644 {
1645 struct vop_create_args /* {
1646 struct vnode *a_dvp;
1647 struct vnode **a_vpp;
1648 struct componentname *a_cnp;
1649 struct vattr *a_vap;
1650 } */ *ap = v;
1651 struct vnode *dvp = ap->a_dvp;
1652 struct vattr *vap = ap->a_vap;
1653 struct componentname *cnp = ap->a_cnp;
1654 struct nfsv2_sattr *sp;
1655 u_int32_t *tl;
1656 caddr_t cp;
1657 int32_t t1, t2;
1658 struct nfsnode *dnp, *np = (struct nfsnode *)0;
1659 struct vnode *newvp = (struct vnode *)0;
1660 caddr_t bpos, dpos, cp2;
1661 int error, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1662 struct mbuf *mreq, *mrep, *md, *mb;
1663 const int v3 = NFS_ISV3(dvp);
1664
1665 /*
1666 * Oops, not for me..
1667 */
1668 if (vap->va_type == VSOCK)
1669 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1670
1671 KASSERT(vap->va_type == VREG);
1672
1673 #ifdef VA_EXCLUSIVE
1674 if (vap->va_vaflags & VA_EXCLUSIVE)
1675 fmode |= O_EXCL;
1676 #endif
1677 again:
1678 error = 0;
1679 nfsstats.rpccnt[NFSPROC_CREATE]++;
1680 dnp = VTONFS(dvp);
1681 nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1682 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1683 nfsm_fhtom(dnp, v3);
1684 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1685 #ifndef NFS_V2_ONLY
1686 if (v3) {
1687 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1688 if (fmode & O_EXCL) {
1689 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1690 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
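			/*
			 * Build the exclusive-create verifier from our primary
			 * IPv4 address (if one is configured) plus a counter.
			 */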
1691 #ifdef INET
1692 if (TAILQ_FIRST(&in_ifaddrhead))
1693 *tl++ = TAILQ_FIRST(&in_ifaddrhead)->
1694 ia_addr.sin_addr.s_addr;
1695 else
1696 *tl++ = create_verf;
1697 #else
1698 *tl++ = create_verf;
1699 #endif
1700 *tl = ++create_verf;
1701 } else {
1702 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1703 nfsm_v3attrbuild(vap, FALSE);
1704 }
1705 } else
1706 #endif
1707 {
1708 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1709 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1710 sp->sa_uid = nfs_xdrneg1;
1711 sp->sa_gid = nfs_xdrneg1;
1712 sp->sa_size = 0;
1713 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1714 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1715 }
1716 nfsm_request(dnp, NFSPROC_CREATE, cnp->cn_lwp, cnp->cn_cred);
1717 if (!error) {
1718 nfsm_mtofh(dvp, newvp, v3, gotvp);
1719 if (!gotvp) {
1720 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1721 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp, &np);
1722 if (!error)
1723 newvp = NFSTOV(np);
1724 }
1725 }
1726 #ifndef NFS_V2_ONLY
1727 if (v3)
1728 nfsm_wcc_data(dvp, wccflag, 0, !error);
1729 #endif
1730 nfsm_reqdone;
1731 if (error) {
1732 /*
1733 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
1734 */
1735 if (v3 && (fmode & O_EXCL) && error == ENOTSUP) {
1736 fmode &= ~O_EXCL;
1737 goto again;
1738 }
1739 } else if (v3 && (fmode & O_EXCL)) {
1740 struct timespec ts;
1741
1742 getnanotime(&ts);
1743
1744 /*
1745 * make sure that we'll update timestamps as
1746 * most server implementations use them to store
1747 * the create verifier.
1748 *
1749 * XXX it's better to use TOSERVER always.
1750 */
1751
1752 if (vap->va_atime.tv_sec == VNOVAL)
1753 vap->va_atime = ts;
1754 if (vap->va_mtime.tv_sec == VNOVAL)
1755 vap->va_mtime = ts;
1756
1757 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_lwp);
1758 }
1759 if (error == 0) {
1760 if (cnp->cn_flags & MAKEENTRY)
1761 nfs_cache_enter(dvp, newvp, cnp);
1762 else
1763 cache_purge1(dvp, cnp, 0);
1764 *ap->a_vpp = newvp;
1765 } else {
1766 if (newvp)
1767 vput(newvp);
1768 if (error == EEXIST)
1769 cache_purge1(dvp, cnp, 0);
1770 }
1771 PNBUF_PUT(cnp->cn_pnbuf);
1772 VTONFS(dvp)->n_flag |= NMODIFIED;
1773 if (!wccflag)
1774 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1775 VN_KNOTE(ap->a_dvp, NOTE_WRITE);
1776 vput(dvp);
1777 return (error);
1778 }
1779
1780 /*
1781 * nfs file remove call
1782 * To try and make nfs semantics closer to ufs semantics, a file that has
1783 * other processes using the vnode is renamed instead of removed and then
1784 * removed later on the last close.
1785 * - If v_usecount > 1
1786 * If a rename is not already in the works
1787 * call nfs_sillyrename() to set it up
1788 * else
1789 * do the remove rpc
1790 */
1791 int
1792 nfs_remove(v)
1793 void *v;
1794 {
1795 struct vop_remove_args /* {
1796 struct vnodeop_desc *a_desc;
1797 struct vnode * a_dvp;
1798 struct vnode * a_vp;
1799 struct componentname * a_cnp;
1800 } */ *ap = v;
1801 struct vnode *vp = ap->a_vp;
1802 struct vnode *dvp = ap->a_dvp;
1803 struct componentname *cnp = ap->a_cnp;
1804 struct nfsnode *np = VTONFS(vp);
1805 int error = 0;
1806 struct vattr vattr;
1807
1808 #ifndef DIAGNOSTIC
1809 if ((cnp->cn_flags & HASBUF) == 0)
1810 panic("nfs_remove: no name");
1811 if (vp->v_usecount < 1)
1812 panic("nfs_remove: bad v_usecount");
1813 #endif
1814 if (vp->v_type == VDIR)
1815 error = EPERM;
1816 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1817 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_lwp) == 0 &&
1818 vattr.va_nlink > 1)) {
1819 /*
1820 * Purge the name cache so that the chance of a lookup for
1821 * the name succeeding while the remove is in progress is
1822 * minimized. Without node locking it can still happen, such
1823 		 * that an I/O op returns ESTALE, but that can happen anyway
1824 		 * when another host removes the file, so it is tolerated.
1825 */
1826 cache_purge(vp);
1827 /*
1828 * throw away biocache buffers, mainly to avoid
1829 * unnecessary delayed writes later.
1830 */
1831 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_lwp, 1);
1832 /* Do the rpc */
1833 if (error != EINTR)
1834 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1835 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp);
1836 } else if (!np->n_sillyrename)
1837 error = nfs_sillyrename(dvp, vp, cnp, FALSE);
1838 PNBUF_PUT(cnp->cn_pnbuf);
1839 if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
1840 vattr.va_nlink == 1) {
1841 np->n_flag |= NREMOVED;
1842 }
1843 NFS_INVALIDATE_ATTRCACHE(np);
1844 VN_KNOTE(vp, NOTE_DELETE);
1845 VN_KNOTE(dvp, NOTE_WRITE);
1846 if (dvp == vp)
1847 vrele(vp);
1848 else
1849 vput(vp);
1850 vput(dvp);
1851 return (error);
1852 }
1853
1854 /*
1855 * nfs file remove rpc called from nfs_inactive
1856 */
1857 int
1858 nfs_removeit(sp)
1859 struct sillyrename *sp;
1860 {
1861
1862 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1863 (struct lwp *)0));
1864 }
1865
1866 /*
1867 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1868 */
1869 int
1870 nfs_removerpc(dvp, name, namelen, cred, l)
1871 struct vnode *dvp;
1872 const char *name;
1873 int namelen;
1874 kauth_cred_t cred;
1875 struct lwp *l;
1876 {
1877 u_int32_t *tl;
1878 caddr_t cp;
1879 #ifndef NFS_V2_ONLY
1880 int32_t t1;
1881 caddr_t cp2;
1882 #endif
1883 int32_t t2;
1884 caddr_t bpos, dpos;
1885 int error = 0, wccflag = NFSV3_WCCRATTR;
1886 struct mbuf *mreq, *mrep, *md, *mb;
1887 const int v3 = NFS_ISV3(dvp);
1888 int rexmit = 0;
1889 struct nfsnode *dnp = VTONFS(dvp);
1890
1891 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1892 nfsm_reqhead(dnp, NFSPROC_REMOVE,
1893 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1894 nfsm_fhtom(dnp, v3);
1895 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1896 nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
1897 #ifndef NFS_V2_ONLY
1898 if (v3)
1899 nfsm_wcc_data(dvp, wccflag, 0, !error);
1900 #endif
1901 nfsm_reqdone;
1902 VTONFS(dvp)->n_flag |= NMODIFIED;
1903 if (!wccflag)
1904 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1905 /*
1906 	 * Kludge City: If the first reply to the remove rpc is lost,
1907 	 *   the reply to the retransmitted request will be ENOENT
1908 	 *   since the file was in fact removed.
1909 	 *   Therefore, we cheat and return success.
1910 */
1911 if (rexmit && error == ENOENT)
1912 error = 0;
1913 return (error);
1914 }
1915
1916 /*
1917 * nfs file rename call
1918 */
1919 int
1920 nfs_rename(v)
1921 void *v;
1922 {
1923 struct vop_rename_args /* {
1924 struct vnode *a_fdvp;
1925 struct vnode *a_fvp;
1926 struct componentname *a_fcnp;
1927 struct vnode *a_tdvp;
1928 struct vnode *a_tvp;
1929 struct componentname *a_tcnp;
1930 } */ *ap = v;
1931 struct vnode *fvp = ap->a_fvp;
1932 struct vnode *tvp = ap->a_tvp;
1933 struct vnode *fdvp = ap->a_fdvp;
1934 struct vnode *tdvp = ap->a_tdvp;
1935 struct componentname *tcnp = ap->a_tcnp;
1936 struct componentname *fcnp = ap->a_fcnp;
1937 int error;
1938
1939 #ifndef DIAGNOSTIC
1940 if ((tcnp->cn_flags & HASBUF) == 0 ||
1941 (fcnp->cn_flags & HASBUF) == 0)
1942 panic("nfs_rename: no name");
1943 #endif
1944 /* Check for cross-device rename */
1945 if ((fvp->v_mount != tdvp->v_mount) ||
1946 (tvp && (fvp->v_mount != tvp->v_mount))) {
1947 error = EXDEV;
1948 goto out;
1949 }
1950
1951 /*
1952 * If the tvp exists and is in use, sillyrename it before doing the
1953 * rename of the new file over it.
1954 *
1955 * Have sillyrename use link instead of rename if possible,
1956 * so that we don't lose the file if the rename fails, and so
1957 * that there's no window when the "to" file doesn't exist.
1958 */
1959 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1960 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, TRUE)) {
1961 VN_KNOTE(tvp, NOTE_DELETE);
1962 vput(tvp);
1963 tvp = NULL;
1964 }
1965
1966 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1967 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1968 tcnp->cn_lwp);
1969
1970 VN_KNOTE(fdvp, NOTE_WRITE);
1971 VN_KNOTE(tdvp, NOTE_WRITE);
1972 if (error == 0 || error == EEXIST) {
1973 if (fvp->v_type == VDIR)
1974 cache_purge(fvp);
1975 else
1976 cache_purge1(fdvp, fcnp, 0);
1977 if (tvp != NULL && tvp->v_type == VDIR)
1978 cache_purge(tvp);
1979 else
1980 cache_purge1(tdvp, tcnp, 0);
1981 }
1982 out:
1983 if (tdvp == tvp)
1984 vrele(tdvp);
1985 else
1986 vput(tdvp);
1987 if (tvp)
1988 vput(tvp);
1989 vrele(fdvp);
1990 vrele(fvp);
1991 return (error);
1992 }
1993
1994 /*
1995  * nfs file rename rpc called from nfs_sillyrename() below
1996 */
1997 int
1998 nfs_renameit(sdvp, scnp, sp)
1999 struct vnode *sdvp;
2000 struct componentname *scnp;
2001 struct sillyrename *sp;
2002 {
2003 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
2004 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_lwp));
2005 }
2006
2007 /*
2008 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
2009 */
2010 int
2011 nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, l)
2012 struct vnode *fdvp;
2013 const char *fnameptr;
2014 int fnamelen;
2015 struct vnode *tdvp;
2016 const char *tnameptr;
2017 int tnamelen;
2018 kauth_cred_t cred;
2019 struct lwp *l;
2020 {
2021 u_int32_t *tl;
2022 caddr_t cp;
2023 #ifndef NFS_V2_ONLY
2024 int32_t t1;
2025 caddr_t cp2;
2026 #endif
2027 int32_t t2;
2028 caddr_t bpos, dpos;
2029 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
2030 struct mbuf *mreq, *mrep, *md, *mb;
2031 const int v3 = NFS_ISV3(fdvp);
2032 int rexmit = 0;
2033 struct nfsnode *fdnp = VTONFS(fdvp);
2034
2035 nfsstats.rpccnt[NFSPROC_RENAME]++;
2036 nfsm_reqhead(fdnp, NFSPROC_RENAME,
2037 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
2038 nfsm_rndup(tnamelen));
2039 nfsm_fhtom(fdnp, v3);
2040 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
2041 nfsm_fhtom(VTONFS(tdvp), v3);
2042 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
2043 nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
2044 #ifndef NFS_V2_ONLY
2045 if (v3) {
2046 nfsm_wcc_data(fdvp, fwccflag, 0, !error);
2047 nfsm_wcc_data(tdvp, twccflag, 0, !error);
2048 }
2049 #endif
2050 nfsm_reqdone;
2051 VTONFS(fdvp)->n_flag |= NMODIFIED;
2052 VTONFS(tdvp)->n_flag |= NMODIFIED;
2053 if (!fwccflag)
2054 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
2055 if (!twccflag)
2056 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
2057 /*
2058 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2059 */
2060 if (rexmit && error == ENOENT)
2061 error = 0;
2062 return (error);
2063 }
2064
2065 /*
2066 * NFS link RPC, called from nfs_link.
2067 * Assumes dvp and vp locked, and leaves them that way.
2068 */
2069
2070 static int
2071 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name,
2072 size_t namelen, kauth_cred_t cred, struct lwp *l)
2073 {
2074 u_int32_t *tl;
2075 caddr_t cp;
2076 #ifndef NFS_V2_ONLY
2077 int32_t t1;
2078 caddr_t cp2;
2079 #endif
2080 int32_t t2;
2081 caddr_t bpos, dpos;
2082 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
2083 struct mbuf *mreq, *mrep, *md, *mb;
2084 const int v3 = NFS_ISV3(dvp);
2085 int rexmit = 0;
2086 struct nfsnode *np = VTONFS(vp);
2087
2088 nfsstats.rpccnt[NFSPROC_LINK]++;
2089 nfsm_reqhead(np, NFSPROC_LINK,
2090 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen));
2091 nfsm_fhtom(np, v3);
2092 nfsm_fhtom(VTONFS(dvp), v3);
2093 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
2094 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit);
2095 #ifndef NFS_V2_ONLY
2096 if (v3) {
2097 nfsm_postop_attr(vp, attrflag, 0);
2098 nfsm_wcc_data(dvp, wccflag, 0, !error);
2099 }
2100 #endif
2101 nfsm_reqdone;
2102
2103 VTONFS(dvp)->n_flag |= NMODIFIED;
2104 if (!attrflag)
2105 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
2106 if (!wccflag)
2107 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2108
2109 /*
2110 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2111 */
2112 if (rexmit && error == EEXIST)
2113 error = 0;
2114
2115 return error;
2116 }
2117
2118 /*
2119 * nfs hard link create call
2120 */
2121 int
2122 nfs_link(v)
2123 void *v;
2124 {
2125 struct vop_link_args /* {
2126 struct vnode *a_dvp;
2127 struct vnode *a_vp;
2128 struct componentname *a_cnp;
2129 } */ *ap = v;
2130 struct vnode *vp = ap->a_vp;
2131 struct vnode *dvp = ap->a_dvp;
2132 struct componentname *cnp = ap->a_cnp;
2133 int error = 0;
2134
2135 if (dvp->v_mount != vp->v_mount) {
2136 VOP_ABORTOP(dvp, cnp);
2137 vput(dvp);
2138 return (EXDEV);
2139 }
2140 if (dvp != vp) {
2141 error = vn_lock(vp, LK_EXCLUSIVE);
2142 if (error != 0) {
2143 VOP_ABORTOP(dvp, cnp);
2144 vput(dvp);
2145 return error;
2146 }
2147 }
2148
2149 /*
2150 * Push all writes to the server, so that the attribute cache
2151 * doesn't get "out of sync" with the server.
2152 * XXX There should be a better way!
2153 */
2154 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0, cnp->cn_lwp);
2155
2156 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2157 cnp->cn_cred, cnp->cn_lwp);
2158
2159 if (error == 0)
2160 cache_purge1(dvp, cnp, 0);
2161 PNBUF_PUT(cnp->cn_pnbuf);
2162 if (dvp != vp)
2163 VOP_UNLOCK(vp, 0);
2164 VN_KNOTE(vp, NOTE_LINK);
2165 VN_KNOTE(dvp, NOTE_WRITE);
2166 vput(dvp);
2167 return (error);
2168 }
2169
2170 /*
2171 * nfs symbolic link create call
2172 */
2173 int
2174 nfs_symlink(v)
2175 void *v;
2176 {
2177 struct vop_symlink_args /* {
2178 struct vnode *a_dvp;
2179 struct vnode **a_vpp;
2180 struct componentname *a_cnp;
2181 struct vattr *a_vap;
2182 char *a_target;
2183 } */ *ap = v;
2184 struct vnode *dvp = ap->a_dvp;
2185 struct vattr *vap = ap->a_vap;
2186 struct componentname *cnp = ap->a_cnp;
2187 struct nfsv2_sattr *sp;
2188 u_int32_t *tl;
2189 caddr_t cp;
2190 int32_t t1, t2;
2191 caddr_t bpos, dpos, cp2;
2192 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2193 struct mbuf *mreq, *mrep, *md, *mb;
2194 struct vnode *newvp = (struct vnode *)0;
2195 const int v3 = NFS_ISV3(dvp);
2196 int rexmit = 0;
2197 struct nfsnode *dnp = VTONFS(dvp);
2198
2199 *ap->a_vpp = NULL;
2200 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2201 slen = strlen(ap->a_target);
2202 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2203 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2204 nfsm_fhtom(dnp, v3);
2205 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2206 #ifndef NFS_V2_ONLY
2207 if (v3)
2208 nfsm_v3attrbuild(vap, FALSE);
2209 #endif
2210 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2211 #ifndef NFS_V2_ONLY
2212 if (!v3) {
2213 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2214 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2215 sp->sa_uid = nfs_xdrneg1;
2216 sp->sa_gid = nfs_xdrneg1;
2217 sp->sa_size = nfs_xdrneg1;
2218 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2219 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2220 }
2221 #endif
2222 nfsm_request1(dnp, NFSPROC_SYMLINK, cnp->cn_lwp, cnp->cn_cred,
2223 &rexmit);
2224 #ifndef NFS_V2_ONLY
2225 if (v3) {
2226 if (!error)
2227 nfsm_mtofh(dvp, newvp, v3, gotvp);
2228 nfsm_wcc_data(dvp, wccflag, 0, !error);
2229 }
2230 #endif
2231 nfsm_reqdone;
2232 /*
2233 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2234 */
2235 if (rexmit && error == EEXIST)
2236 error = 0;
2237 if (error == 0 || error == EEXIST)
2238 cache_purge1(dvp, cnp, 0);
2239 if (error == 0 && newvp == NULL) {
2240 struct nfsnode *np = NULL;
2241
2242 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2243 cnp->cn_cred, cnp->cn_lwp, &np);
2244 if (error == 0)
2245 newvp = NFSTOV(np);
2246 }
2247 if (error) {
2248 if (newvp != NULL)
2249 vput(newvp);
2250 } else {
2251 *ap->a_vpp = newvp;
2252 }
2253 PNBUF_PUT(cnp->cn_pnbuf);
2254 VTONFS(dvp)->n_flag |= NMODIFIED;
2255 if (!wccflag)
2256 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2257 VN_KNOTE(dvp, NOTE_WRITE);
2258 vput(dvp);
2259 return (error);
2260 }
2261
2262 /*
2263 * nfs make dir call
2264 */
2265 int
2266 nfs_mkdir(v)
2267 void *v;
2268 {
2269 struct vop_mkdir_args /* {
2270 struct vnode *a_dvp;
2271 struct vnode **a_vpp;
2272 struct componentname *a_cnp;
2273 struct vattr *a_vap;
2274 } */ *ap = v;
2275 struct vnode *dvp = ap->a_dvp;
2276 struct vattr *vap = ap->a_vap;
2277 struct componentname *cnp = ap->a_cnp;
2278 struct nfsv2_sattr *sp;
2279 u_int32_t *tl;
2280 caddr_t cp;
2281 int32_t t1, t2;
2282 int len;
2283 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0;
2284 struct vnode *newvp = (struct vnode *)0;
2285 caddr_t bpos, dpos, cp2;
2286 int error = 0, wccflag = NFSV3_WCCRATTR;
2287 int gotvp = 0;
2288 int rexmit = 0;
2289 struct mbuf *mreq, *mrep, *md, *mb;
2290 const int v3 = NFS_ISV3(dvp);
2291
2292 len = cnp->cn_namelen;
2293 nfsstats.rpccnt[NFSPROC_MKDIR]++;
2294 nfsm_reqhead(dnp, NFSPROC_MKDIR,
2295 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2296 nfsm_fhtom(dnp, v3);
2297 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2298 #ifndef NFS_V2_ONLY
2299 if (v3) {
2300 nfsm_v3attrbuild(vap, FALSE);
2301 } else
2302 #endif
2303 {
2304 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2305 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2306 sp->sa_uid = nfs_xdrneg1;
2307 sp->sa_gid = nfs_xdrneg1;
2308 sp->sa_size = nfs_xdrneg1;
2309 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2310 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2311 }
2312 nfsm_request1(dnp, NFSPROC_MKDIR, cnp->cn_lwp, cnp->cn_cred, &rexmit);
2313 if (!error)
2314 nfsm_mtofh(dvp, newvp, v3, gotvp);
2315 if (v3)
2316 nfsm_wcc_data(dvp, wccflag, 0, !error);
2317 nfsm_reqdone;
2318 VTONFS(dvp)->n_flag |= NMODIFIED;
2319 if (!wccflag)
2320 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2321 /*
2322	 * Kludge: Map EEXIST => 0, assuming it is a reply to a retry,
2323	 * provided we can succeed in looking up the directory.
2324 */
2325 if ((rexmit && error == EEXIST) || (!error && !gotvp)) {
2326 if (newvp) {
2327 vput(newvp);
2328 newvp = (struct vnode *)0;
2329 }
2330 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
2331 cnp->cn_lwp, &np);
2332 if (!error) {
2333 newvp = NFSTOV(np);
2334 if (newvp->v_type != VDIR || newvp == dvp)
2335 error = EEXIST;
2336 }
2337 }
2338 if (error) {
2339 if (newvp) {
2340 if (dvp != newvp)
2341 vput(newvp);
2342 else
2343 vrele(newvp);
2344 }
2345 } else {
2346 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2347 if (cnp->cn_flags & MAKEENTRY)
2348 nfs_cache_enter(dvp, newvp, cnp);
2349 *ap->a_vpp = newvp;
2350 }
2351 PNBUF_PUT(cnp->cn_pnbuf);
2352 vput(dvp);
2353 return (error);
2354 }
2355
2356 /*
2357 * nfs remove directory call
2358 */
2359 int
2360 nfs_rmdir(v)
2361 void *v;
2362 {
2363 struct vop_rmdir_args /* {
2364 struct vnode *a_dvp;
2365 struct vnode *a_vp;
2366 struct componentname *a_cnp;
2367 } */ *ap = v;
2368 struct vnode *vp = ap->a_vp;
2369 struct vnode *dvp = ap->a_dvp;
2370 struct componentname *cnp = ap->a_cnp;
2371 u_int32_t *tl;
2372 caddr_t cp;
2373 #ifndef NFS_V2_ONLY
2374 int32_t t1;
2375 caddr_t cp2;
2376 #endif
2377 int32_t t2;
2378 caddr_t bpos, dpos;
2379 int error = 0, wccflag = NFSV3_WCCRATTR;
2380 int rexmit = 0;
2381 struct mbuf *mreq, *mrep, *md, *mb;
2382 const int v3 = NFS_ISV3(dvp);
2383 struct nfsnode *dnp;
2384
2385 if (dvp == vp) {
2386 vrele(dvp);
2387 vput(dvp);
2388 PNBUF_PUT(cnp->cn_pnbuf);
2389 return (EINVAL);
2390 }
2391 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2392 dnp = VTONFS(dvp);
2393 nfsm_reqhead(dnp, NFSPROC_RMDIR,
2394 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2395 nfsm_fhtom(dnp, v3);
2396 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2397 nfsm_request1(dnp, NFSPROC_RMDIR, cnp->cn_lwp, cnp->cn_cred, &rexmit);
2398 #ifndef NFS_V2_ONLY
2399 if (v3)
2400 nfsm_wcc_data(dvp, wccflag, 0, !error);
2401 #endif
2402 nfsm_reqdone;
2403 PNBUF_PUT(cnp->cn_pnbuf);
2404 VTONFS(dvp)->n_flag |= NMODIFIED;
2405 if (!wccflag)
2406 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2407 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2408 VN_KNOTE(vp, NOTE_DELETE);
2409 cache_purge(vp);
2410 vput(vp);
2411 vput(dvp);
2412 /*
2413	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2414 */
2415 if (rexmit && error == ENOENT)
2416 error = 0;
2417 return (error);
2418 }
2419
2420 /*
2421 * nfs readdir call
2422 */
2423 int
2424 nfs_readdir(v)
2425 void *v;
2426 {
2427 struct vop_readdir_args /* {
2428 struct vnode *a_vp;
2429 struct uio *a_uio;
2430 kauth_cred_t a_cred;
2431 int *a_eofflag;
2432 off_t **a_cookies;
2433 int *a_ncookies;
2434 } */ *ap = v;
2435 struct vnode *vp = ap->a_vp;
2436 struct uio *uio = ap->a_uio;
2437 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2438 char *base = uio->uio_iov->iov_base;
2439 int tresid, error;
2440 size_t count, lost;
2441 struct dirent *dp;
2442 off_t *cookies = NULL;
2443 int ncookies = 0, nc;
2444
2445 if (vp->v_type != VDIR)
2446 return (EPERM);
2447
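	/*
	 * Trim the request down to a multiple of NFS_DIRFRAGSIZ; the
	 * remainder ("lost") is added back to uio_resid before returning.
	 */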
2448 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1);
2449 count = uio->uio_resid - lost;
2450 if (count <= 0)
2451 return (EINVAL);
2452
2453 /*
2454 * Call nfs_bioread() to do the real work.
2455 */
2456 tresid = uio->uio_resid = count;
2457 error = nfs_bioread(vp, uio, 0, ap->a_cred,
2458 ap->a_cookies ? NFSBIO_CACHECOOKIES : 0);
2459
2460 if (!error && ap->a_cookies) {
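		/*
		 * Rough upper bound on the number of cookies we may need;
		 * presumably each returned dirent occupies at least 16
		 * bytes of buffer space.
		 */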
2461 ncookies = count / 16;
2462 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
2463 *ap->a_cookies = cookies;
2464 }
2465
2466 if (!error && uio->uio_resid == tresid) {
2467 uio->uio_resid += lost;
2468 nfsstats.direofcache_misses++;
2469 if (ap->a_cookies)
2470 *ap->a_ncookies = 0;
2471 *ap->a_eofflag = 1;
2472 return (0);
2473 }
2474
2475 if (!error && ap->a_cookies) {
2476 /*
2477 * Only the NFS server and emulations use cookies, and they
2478 * load the directory block into system space, so we can
2479 * just look at it directly.
2480 */
2481 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
2482 uio->uio_iovcnt != 1)
2483 panic("nfs_readdir: lost in space");
2484 for (nc = 0; ncookies-- &&
2485		     base < (char *)uio->uio_iov->iov_base; nc++) {
2486 dp = (struct dirent *) base;
2487 if (dp->d_reclen == 0)
2488 break;
2489 if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
2490 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp);
2491 else
2492 *(cookies++) = NFS_GETCOOKIE(dp);
2493 base += dp->d_reclen;
2494 }
2495 uio->uio_resid +=
2496 ((caddr_t)uio->uio_iov->iov_base - base);
2497 uio->uio_iov->iov_len +=
2498 ((caddr_t)uio->uio_iov->iov_base - base);
2499 uio->uio_iov->iov_base = base;
2500 *ap->a_ncookies = nc;
2501 }
2502
2503 uio->uio_resid += lost;
2504 *ap->a_eofflag = 0;
2505 return (error);
2506 }
2507
2508 /*
2509 * Readdir rpc call.
2510 * Called from below the buffer cache by nfs_doio().
2511 */
2512 int
2513 nfs_readdirrpc(vp, uiop, cred)
2514 struct vnode *vp;
2515 struct uio *uiop;
2516 kauth_cred_t cred;
2517 {
2518 int len, left;
2519 struct dirent *dp = NULL;
2520 u_int32_t *tl;
2521 caddr_t cp;
2522 int32_t t1, t2;
2523 caddr_t bpos, dpos, cp2;
2524 struct mbuf *mreq, *mrep, *md, *mb;
2525 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2526 struct nfsnode *dnp = VTONFS(vp);
2527 u_quad_t fileno;
2528 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1;
2529 #ifndef NFS_V2_ONLY
2530 int attrflag;
2531 #endif
2532 int nrpcs = 0, reclen;
2533 const int v3 = NFS_ISV3(vp);
2534
2535 #ifdef DIAGNOSTIC
2536 /*
2537	 * Should be called from the buffer cache, so only amounts of
2538	 * NFS_DIRBLKSIZ will be requested.
2539 */
2540 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2541 panic("nfs readdirrpc bad uio");
2542 #endif
2543
2544 /*
2545 * Loop around doing readdir rpc's of size nm_readdirsize
2546 * truncated to a multiple of NFS_DIRFRAGSIZ.
2547 * The stopping criteria is EOF or buffer full.
2548 */
2549 while (more_dirs && bigenough) {
2550 /*
2551 * Heuristic: don't bother to do another RPC to further
2552 * fill up this block if there is not much room left. (< 50%
2553 * of the readdir RPC size). This wastes some buffer space
2554 * but can save up to 50% in RPC calls.
2555 */
2556 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2557 bigenough = 0;
2558 break;
2559 }
2560 nfsstats.rpccnt[NFSPROC_READDIR]++;
2561 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) +
2562 NFSX_READDIR(v3));
2563 nfsm_fhtom(dnp, v3);
2564 #ifndef NFS_V2_ONLY
2565 if (v3) {
2566 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2567 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2568 txdr_swapcookie3(uiop->uio_offset, tl);
2569 } else {
2570 txdr_cookie3(uiop->uio_offset, tl);
2571 }
2572 tl += 2;
2573 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2574 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2575 } else
2576 #endif
2577 {
2578 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2579 *tl++ = txdr_unsigned(uiop->uio_offset);
2580 }
2581 *tl = txdr_unsigned(nmp->nm_readdirsize);
2582 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred);
2583 nrpcs++;
2584 #ifndef NFS_V2_ONLY
2585 if (v3) {
2586 nfsm_postop_attr(vp, attrflag, 0);
2587 if (!error) {
2588 nfsm_dissect(tl, u_int32_t *,
2589 2 * NFSX_UNSIGNED);
2590 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2591 dnp->n_cookieverf.nfsuquad[1] = *tl;
2592 } else {
2593 m_freem(mrep);
2594 goto nfsmout;
2595 }
2596 }
2597 #endif
2598 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2599 more_dirs = fxdr_unsigned(int, *tl);
2600
2601 /* loop thru the dir entries, doctoring them to 4bsd form */
2602 while (more_dirs && bigenough) {
2603 #ifndef NFS_V2_ONLY
2604 if (v3) {
2605 nfsm_dissect(tl, u_int32_t *,
2606 3 * NFSX_UNSIGNED);
2607 fileno = fxdr_hyper(tl);
2608 len = fxdr_unsigned(int, *(tl + 2));
2609 } else
2610 #endif
2611 {
2612 nfsm_dissect(tl, u_int32_t *,
2613 2 * NFSX_UNSIGNED);
2614 fileno = fxdr_unsigned(u_quad_t, *tl++);
2615 len = fxdr_unsigned(int, *tl);
2616 }
2617 if (len <= 0 || len > NFS_MAXNAMLEN) {
2618 error = EBADRPC;
2619 m_freem(mrep);
2620 goto nfsmout;
2621 }
2622 /* for cookie stashing */
2623 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2624 left = NFS_DIRFRAGSIZ - blksiz;
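			/*
			 * The new entry would cross a NFS_DIRFRAGSIZ
			 * boundary: pad the remainder of the fragment onto
			 * the previous entry's d_reclen and record the
			 * current cookie in that entry.
			 */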
2625 if (reclen > left) {
2626 memset(uiop->uio_iov->iov_base, 0, left);
2627 dp->d_reclen += left;
2628 UIO_ADVANCE(uiop, left);
2629 blksiz = 0;
2630 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2631 }
2632 if (reclen > uiop->uio_resid)
2633 bigenough = 0;
2634 if (bigenough) {
2635 int tlen;
2636
2637 dp = (struct dirent *)uiop->uio_iov->iov_base;
2638 dp->d_fileno = fileno;
2639 dp->d_namlen = len;
2640 dp->d_reclen = reclen;
2641 dp->d_type = DT_UNKNOWN;
2642 blksiz += reclen;
2643 if (blksiz == NFS_DIRFRAGSIZ)
2644 blksiz = 0;
2645 UIO_ADVANCE(uiop, DIRHDSIZ);
2646 nfsm_mtouio(uiop, len);
2647 tlen = reclen - (DIRHDSIZ + len);
2648 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2649 UIO_ADVANCE(uiop, tlen);
2650 } else
2651 nfsm_adv(nfsm_rndup(len));
2652 #ifndef NFS_V2_ONLY
2653 if (v3) {
2654 nfsm_dissect(tl, u_int32_t *,
2655 3 * NFSX_UNSIGNED);
2656 } else
2657 #endif
2658 {
2659 nfsm_dissect(tl, u_int32_t *,
2660 2 * NFSX_UNSIGNED);
2661 }
2662 if (bigenough) {
2663 #ifndef NFS_V2_ONLY
2664 if (v3) {
2665 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2666 uiop->uio_offset =
2667 fxdr_swapcookie3(tl);
2668 else
2669 uiop->uio_offset =
2670 fxdr_cookie3(tl);
2671 }
2672 else
2673 #endif
2674 {
2675 uiop->uio_offset =
2676 fxdr_unsigned(off_t, *tl);
2677 }
2678 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2679 }
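			/*
			 * Skip over the cookie to the word that says whether
			 * another entry follows.
			 */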
2680 if (v3)
2681 tl += 2;
2682 else
2683 tl++;
2684 more_dirs = fxdr_unsigned(int, *tl);
2685 }
2686 /*
2687 * If at end of rpc data, get the eof boolean
2688 */
2689 if (!more_dirs) {
2690 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2691 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2692
2693 /*
2694			 * kludge: if we got no entries, treat it as EOF.
2695			 * some servers sometimes send a reply without any
2696			 * entries or EOF.  although it might mean the directory
2697			 * has an entry with a very long name, we can't handle
2698			 * such entries anyway.
2699 */
2700
2701 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2702 more_dirs = 0;
2703 }
2704 m_freem(mrep);
2705 }
2706 /*
2707 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ
2708 * by increasing d_reclen for the last record.
2709 */
2710 if (blksiz > 0) {
2711 left = NFS_DIRFRAGSIZ - blksiz;
2712 memset(uiop->uio_iov->iov_base, 0, left);
2713 dp->d_reclen += left;
2714 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2715 UIO_ADVANCE(uiop, left);
2716 }
2717
2718 /*
2719 * We are now either at the end of the directory or have filled the
2720 * block.
2721 */
2722 if (bigenough) {
2723 dnp->n_direofoffset = uiop->uio_offset;
2724 dnp->n_flag |= NEOFVALID;
2725 }
2726 nfsmout:
2727 return (error);
2728 }
2729
2730 #ifndef NFS_V2_ONLY
2731 /*
2732 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2733 */
2734 int
2735 nfs_readdirplusrpc(vp, uiop, cred)
2736 struct vnode *vp;
2737 struct uio *uiop;
2738 kauth_cred_t cred;
2739 {
2740 int len, left;
2741 struct dirent *dp = NULL;
2742 u_int32_t *tl;
2743 caddr_t cp;
2744 int32_t t1, t2;
2745 struct vnode *newvp;
2746 caddr_t bpos, dpos, cp2;
2747 struct mbuf *mreq, *mrep, *md, *mb;
2748 struct nameidata nami, *ndp = &nami;
2749 struct componentname *cnp = &ndp->ni_cnd;
2750 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2751 struct nfsnode *dnp = VTONFS(vp), *np;
2752 nfsfh_t *fhp;
2753 u_quad_t fileno;
2754 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2755 int attrflag, fhsize, nrpcs = 0, reclen;
2756 struct nfs_fattr fattr, *fp;
2757
2758 #ifdef DIAGNOSTIC
2759 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2760 panic("nfs readdirplusrpc bad uio");
2761 #endif
2762 ndp->ni_dvp = vp;
2763 newvp = NULLVP;
2764
2765 /*
2766 * Loop around doing readdir rpc's of size nm_readdirsize
2767 * truncated to a multiple of NFS_DIRFRAGSIZ.
2768 * The stopping criteria is EOF or buffer full.
2769 */
2770 while (more_dirs && bigenough) {
2771 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2772 bigenough = 0;
2773 break;
2774 }
2775 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2776 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS,
2777 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2778 nfsm_fhtom(dnp, 1);
2779 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2780 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2781 txdr_swapcookie3(uiop->uio_offset, tl);
2782 } else {
2783 txdr_cookie3(uiop->uio_offset, tl);
2784 }
2785 tl += 2;
2786 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2787 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2788 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2789 *tl = txdr_unsigned(nmp->nm_rsize);
2790 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred);
2791 nfsm_postop_attr(vp, attrflag, 0);
2792 if (error) {
2793 m_freem(mrep);
2794 goto nfsmout;
2795 }
2796 nrpcs++;
2797 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2798 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2799 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2800 more_dirs = fxdr_unsigned(int, *tl);
2801
2802 /* loop thru the dir entries, doctoring them to 4bsd form */
2803 while (more_dirs && bigenough) {
2804 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2805 fileno = fxdr_hyper(tl);
2806 len = fxdr_unsigned(int, *(tl + 2));
2807 if (len <= 0 || len > NFS_MAXNAMLEN) {
2808 error = EBADRPC;
2809 m_freem(mrep);
2810 goto nfsmout;
2811 }
2812 /* for cookie stashing */
2813 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2814 left = NFS_DIRFRAGSIZ - blksiz;
2815 if (reclen > left) {
2816 /*
2817 * DIRFRAGSIZ is aligned, no need to align
2818 * again here.
2819 */
2820 memset(uiop->uio_iov->iov_base, 0, left);
2821 dp->d_reclen += left;
2822 UIO_ADVANCE(uiop, left);
2823 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2824 blksiz = 0;
2825 }
2826 if (reclen > uiop->uio_resid)
2827 bigenough = 0;
2828 if (bigenough) {
2829 int tlen;
2830
2831 dp = (struct dirent *)uiop->uio_iov->iov_base;
2832 dp->d_fileno = fileno;
2833 dp->d_namlen = len;
2834 dp->d_reclen = reclen;
2835 dp->d_type = DT_UNKNOWN;
2836 blksiz += reclen;
2837 if (blksiz == NFS_DIRFRAGSIZ)
2838 blksiz = 0;
2839 UIO_ADVANCE(uiop, DIRHDSIZ);
2840 nfsm_mtouio(uiop, len);
2841 tlen = reclen - (DIRHDSIZ + len);
2842 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2843 UIO_ADVANCE(uiop, tlen);
2844 cnp->cn_nameptr = dp->d_name;
2845 cnp->cn_namelen = dp->d_namlen;
2846 } else
2847 nfsm_adv(nfsm_rndup(len));
2848 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2849 if (bigenough) {
2850 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2851 uiop->uio_offset =
2852 fxdr_swapcookie3(tl);
2853 else
2854 uiop->uio_offset =
2855 fxdr_cookie3(tl);
2856 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2857 }
2858 tl += 2;
2859
2860 /*
2861 * Since the attributes are before the file handle
2862 * (sigh), we must skip over the attributes and then
2863 * come back and get them.
2864 */
2865 attrflag = fxdr_unsigned(int, *tl);
2866 if (attrflag) {
2867 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR);
2868 memcpy(&fattr, fp, NFSX_V3FATTR);
2869 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2870 doit = fxdr_unsigned(int, *tl);
2871 if (doit) {
2872 nfsm_getfh(fhp, fhsize, 1);
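					/*
					 * If the handle is the directory's
					 * own (typically the "." entry),
					 * reuse vp rather than fetching a
					 * new nfsnode.
					 */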
2873 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2874 VREF(vp);
2875 newvp = vp;
2876 np = dnp;
2877 } else {
2878 error = nfs_nget1(vp->v_mount, fhp,
2879 fhsize, &np, LK_NOWAIT);
2880 if (!error)
2881 newvp = NFSTOV(np);
2882 }
2883 if (!error) {
2884 const char *xcp;
2885
2886 nfs_loadattrcache(&newvp, &fattr, 0, 0);
2887 if (bigenough) {
2888 dp->d_type =
2889 IFTODT(VTTOIF(np->n_vattr->va_type));
2890 if (cnp->cn_namelen <= NCHNAMLEN) {
2891 ndp->ni_vp = newvp;
2892 xcp = cnp->cn_nameptr +
2893 cnp->cn_namelen;
2894 cnp->cn_hash =
2895 namei_hash(cnp->cn_nameptr, &xcp);
2896 nfs_cache_enter(ndp->ni_dvp,
2897 ndp->ni_vp, cnp);
2898 }
2899 }
2900 }
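				/*
				 * A failure to instantiate this entry's
				 * vnode is ignored here; the readdir as a
				 * whole can still succeed.
				 */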
2901 error = 0;
2902 }
2903 } else {
2904 /* Just skip over the file handle */
2905 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2906 i = fxdr_unsigned(int, *tl);
2907 nfsm_adv(nfsm_rndup(i));
2908 }
2909 if (newvp != NULLVP) {
2910 if (newvp == vp)
2911 vrele(newvp);
2912 else
2913 vput(newvp);
2914 newvp = NULLVP;
2915 }
2916 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2917 more_dirs = fxdr_unsigned(int, *tl);
2918 }
2919 /*
2920 * If at end of rpc data, get the eof boolean
2921 */
2922 if (!more_dirs) {
2923 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2924 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2925
2926 /*
2927 * kludge: see a comment in nfs_readdirrpc.
2928 */
2929
2930 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2931 more_dirs = 0;
2932 }
2933 m_freem(mrep);
2934 }
2935 /*
2936 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ
2937 * by increasing d_reclen for the last record.
2938 */
2939 if (blksiz > 0) {
2940 left = NFS_DIRFRAGSIZ - blksiz;
2941 memset(uiop->uio_iov->iov_base, 0, left);
2942 dp->d_reclen += left;
2943 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2944 UIO_ADVANCE(uiop, left);
2945 }
2946
2947 /*
2948 * We are now either at the end of the directory or have filled the
2949 * block.
2950 */
2951 if (bigenough) {
2952 dnp->n_direofoffset = uiop->uio_offset;
2953 dnp->n_flag |= NEOFVALID;
2954 }
2955 nfsmout:
2956 if (newvp != NULLVP) {
2957		if (newvp == vp)
2958 vrele(newvp);
2959 else
2960 vput(newvp);
2961 }
2962 return (error);
2963 }
2964 #endif
2965
2966 /*
2967  * Silly rename. To make the stateless NFS filesystem look a little
2968  * more like "ufs", a remove of an active vnode is translated into a rename
2969  * to a funny-looking filename that is removed by nfs_inactive on the
2970  * nfsnode. There is the potential for another process on a different client
2971  * to create the same funny name between the time nfs_lookitup() fails and
2972  * the nfs_rename() completes, but...
2973 */
2974 int
2975 nfs_sillyrename(dvp, vp, cnp, dolink)
2976 struct vnode *dvp, *vp;
2977 struct componentname *cnp;
2978 boolean_t dolink;
2979 {
2980 struct sillyrename *sp;
2981 struct nfsnode *np;
2982 int error;
2983 short pid;
2984
2985 cache_purge(dvp);
2986 np = VTONFS(vp);
2987 #ifdef DIAGNOSTIC
2988 if (vp->v_type == VDIR)
2989 panic("nfs: sillyrename dir");
2990 #endif
2991 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2992 M_NFSREQ, M_WAITOK);
2993 sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2994 sp->s_dvp = dvp;
2995 VREF(dvp);
2996
2997 /* Fudge together a funny name */
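	/*
	 * The name has the form ".nfsA<pid in hex>4.4": s_name[5..8] get
	 * the pid digits below, and s_name[4] ('A') is bumped until an
	 * unused name is found.
	 */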
2998 pid = cnp->cn_lwp->l_proc->p_pid;
2999 memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
3000 sp->s_namlen = 12;
3001 sp->s_name[8] = hexdigits[pid & 0xf];
3002 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
3003 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
3004 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
3005
3006 /* Try lookitups until we get one that isn't there */
3007 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
3008 cnp->cn_lwp, (struct nfsnode **)0) == 0) {
3009 sp->s_name[4]++;
3010 if (sp->s_name[4] > 'z') {
3011 error = EINVAL;
3012 goto bad;
3013 }
3014 }
3015 if (dolink) {
3016 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
3017 sp->s_cred, cnp->cn_lwp);
3018 /*
3019 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
3020 */
3021 if (error == ENOTSUP) {
3022 error = nfs_renameit(dvp, cnp, sp);
3023 }
3024 } else {
3025 error = nfs_renameit(dvp, cnp, sp);
3026 }
3027 if (error)
3028 goto bad;
3029 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
3030 cnp->cn_lwp, &np);
3031 np->n_sillyrename = sp;
3032 return (0);
3033 bad:
3034 vrele(sp->s_dvp);
3035 kauth_cred_free(sp->s_cred);
3036 free((caddr_t)sp, M_NFSREQ);
3037 return (error);
3038 }
3039
3040 /*
3041 * Look up a file name and optionally either update the file handle or
3042 * allocate an nfsnode, depending on the value of npp.
3043 * npp == NULL --> just do the lookup
3044 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
3045 * handled too
3046 * *npp != NULL --> update the file handle in the vnode
3047 */
3048 int
3049 nfs_lookitup(dvp, name, len, cred, l, npp)
3050 struct vnode *dvp;
3051 const char *name;
3052 int len;
3053 kauth_cred_t cred;
3054 struct lwp *l;
3055 struct nfsnode **npp;
3056 {
3057 u_int32_t *tl;
3058 caddr_t cp;
3059 int32_t t1, t2;
3060 struct vnode *newvp = (struct vnode *)0;
3061 struct nfsnode *np, *dnp = VTONFS(dvp);
3062 caddr_t bpos, dpos, cp2;
3063 int error = 0, fhlen;
3064 #ifndef NFS_V2_ONLY
3065 int attrflag;
3066 #endif
3067 struct mbuf *mreq, *mrep, *md, *mb;
3068 nfsfh_t *nfhp;
3069 const int v3 = NFS_ISV3(dvp);
3070
3071 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
3072 nfsm_reqhead(dnp, NFSPROC_LOOKUP,
3073 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
3074 nfsm_fhtom(dnp, v3);
3075 nfsm_strtom(name, len, NFS_MAXNAMLEN);
3076 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred);
3077 if (npp && !error) {
3078 nfsm_getfh(nfhp, fhlen, v3);
3079 if (*npp) {
3080 np = *npp;
3081 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
3082 free((caddr_t)np->n_fhp, M_NFSBIGFH);
3083 np->n_fhp = &np->n_fh;
3084 }
3085 #if NFS_SMALLFH < NFSX_V3FHMAX
3086			else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
3087				np->n_fhp = (nfsfh_t *)malloc(fhlen, M_NFSBIGFH, M_WAITOK);
3088 #endif
3089 memcpy((caddr_t)np->n_fhp, (caddr_t)nfhp, fhlen);
3090 np->n_fhsize = fhlen;
3091 newvp = NFSTOV(np);
3092 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
3093 VREF(dvp);
3094 newvp = dvp;
3095 np = dnp;
3096 } else {
3097 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
3098 if (error) {
3099 m_freem(mrep);
3100 return (error);
3101 }
3102 newvp = NFSTOV(np);
3103 }
3104 #ifndef NFS_V2_ONLY
3105 if (v3) {
3106 nfsm_postop_attr(newvp, attrflag, 0);
3107 if (!attrflag && *npp == NULL) {
3108 m_freem(mrep);
3109 vput(newvp);
3110 return (ENOENT);
3111 }
3112 } else
3113 #endif
3114 nfsm_loadattr(newvp, (struct vattr *)0, 0);
3115 }
3116 nfsm_reqdone;
3117 if (npp && *npp == NULL) {
3118 if (error) {
3119 if (newvp)
3120 vput(newvp);
3121 } else
3122 *npp = np;
3123 }
3124 return (error);
3125 }
3126
3127 #ifndef NFS_V2_ONLY
3128 /*
3129 * Nfs Version 3 commit rpc
3130 */
3131 int
3132 nfs_commit(vp, offset, cnt, l)
3133 struct vnode *vp;
3134 off_t offset;
3135 uint32_t cnt;
3136 struct lwp *l;
3137 {
3138 caddr_t cp;
3139 u_int32_t *tl;
3140 int32_t t1, t2;
3141 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
3142 caddr_t bpos, dpos, cp2;
3143 int error = 0, wccflag = NFSV3_WCCRATTR;
3144 struct mbuf *mreq, *mrep, *md, *mb;
3145 struct nfsnode *np;
3146
3147 KASSERT(NFS_ISV3(vp));
3148
3149 #ifdef NFS_DEBUG_COMMIT
3150 printf("commit %lu - %lu\n", (unsigned long)offset,
3151 (unsigned long)(offset + cnt));
3152 #endif
3153
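	/*
	 * Without a write verifier there are presumably no uncommitted
	 * writes outstanding, so there is nothing to commit.
	 */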
3154 simple_lock(&nmp->nm_slock);
3155 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) {
3156 simple_unlock(&nmp->nm_slock);
3157 return (0);
3158 }
3159 simple_unlock(&nmp->nm_slock);
3160 nfsstats.rpccnt[NFSPROC_COMMIT]++;
3161 np = VTONFS(vp);
3162 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1));
3163 nfsm_fhtom(np, 1);
3164 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3165 txdr_hyper(offset, tl);
3166 tl += 2;
3167 *tl = txdr_unsigned(cnt);
3168 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3169 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, FALSE);
3170 if (!error) {
3171 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
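		/*
		 * If the verifier returned by the server differs from the
		 * recorded one, the server has apparently rebooted and any
		 * unstable writes must be re-sent; mark the verifier stale.
		 */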
3172 simple_lock(&nmp->nm_slock);
3173 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3174 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3175 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3176 error = NFSERR_STALEWRITEVERF;
3177 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3178 }
3179 simple_unlock(&nmp->nm_slock);
3180 }
3181 nfsm_reqdone;
3182 return (error);
3183 }
3184 #endif
3185
3186 /*
3187 * Kludge City..
3188 * - make nfs_bmap() essentially a no-op that does no translation
3189 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3190 * (Maybe I could use the process's page mapping, but I was concerned that
3191 * Kernel Write might not be enabled and also figured copyout() would do
3192 * a lot more work than memcpy() and also it currently happens in the
3193  *  context of the swapper process (2).)
3194 */
3195 int
3196 nfs_bmap(v)
3197 void *v;
3198 {
3199 struct vop_bmap_args /* {
3200 struct vnode *a_vp;
3201 daddr_t a_bn;
3202 struct vnode **a_vpp;
3203 daddr_t *a_bnp;
3204 int *a_runp;
3205 } */ *ap = v;
3206 struct vnode *vp = ap->a_vp;
3207 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
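	/* bshift converts a block number in fs blocks into device blocks. */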
3208
3209 if (ap->a_vpp != NULL)
3210 *ap->a_vpp = vp;
3211 if (ap->a_bnp != NULL)
3212 *ap->a_bnp = ap->a_bn << bshift;
3213 if (ap->a_runp != NULL)
3214 *ap->a_runp = 1024 * 1024; /* XXX */
3215 return (0);
3216 }
3217
3218 /*
3219 * Strategy routine.
3220 * For async requests when nfsiod(s) are running, queue the request by
3221  * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
3222 * request.
3223 */
3224 int
3225 nfs_strategy(v)
3226 void *v;
3227 {
3228 struct vop_strategy_args *ap = v;
3229 struct buf *bp = ap->a_bp;
3230 int error = 0;
3231
3232 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3233 panic("nfs physio/async");
3234
3235 /*
3236	 * If the op is asynchronous and an i/o daemon is waiting,
3237	 * queue the request, wake it up and wait for completion;
3238	 * otherwise just do it ourselves.
3239 */
3240 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3241 error = nfs_doio(bp);
3242 return (error);
3243 }
3244
3245 /*
3246 * fsync vnode op. Just call nfs_flush() with commit == 1.
3247 */
3248 /* ARGSUSED */
3249 int
3250 nfs_fsync(v)
3251 void *v;
3252 {
3253 struct vop_fsync_args /* {
3254 struct vnodeop_desc *a_desc;
3255 struct vnode * a_vp;
3256 kauth_cred_t a_cred;
3257 int a_flags;
3258		off_t a_offlo;
3259		off_t a_offhi;
3260 struct lwp * a_l;
3261 } */ *ap = v;
3262
3263 struct vnode *vp = ap->a_vp;
3264
3265 if (vp->v_type != VREG)
3266 return 0;
3267
3268 return (nfs_flush(vp, ap->a_cred,
3269 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, ap->a_l, 1));
3270 }
3271
3272 /*
3273 * Flush all the data associated with a vnode.
3274 */
3275 int
3276 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3277 int commit)
3278 {
3279 struct nfsnode *np = VTONFS(vp);
3280 int error;
3281 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3282 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3283
3284 simple_lock(&vp->v_interlock);
3285 error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3286 if (np->n_flag & NWRITEERR) {
3287 error = np->n_error;
3288 np->n_flag &= ~NWRITEERR;
3289 }
3290 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3291 return (error);
3292 }
3293
3294 /*
3295 * Return POSIX pathconf information applicable to nfs.
3296 *
3297 * N.B. The NFS V2 protocol doesn't support this RPC.
3298 */
3299 /* ARGSUSED */
3300 int
3301 nfs_pathconf(v)
3302 void *v;
3303 {
3304 struct vop_pathconf_args /* {
3305 struct vnode *a_vp;
3306 int a_name;
3307 register_t *a_retval;
3308 } */ *ap = v;
3309 struct nfsv3_pathconf *pcp;
3310 struct vnode *vp = ap->a_vp;
3311 struct mbuf *mreq, *mrep, *md, *mb;
3312 int32_t t1, t2;
3313 u_int32_t *tl;
3314 caddr_t bpos, dpos, cp, cp2;
3315 int error = 0, attrflag;
3316 #ifndef NFS_V2_ONLY
3317 struct nfsmount *nmp;
3318 unsigned int l;
3319 u_int64_t maxsize;
3320 #endif
3321 const int v3 = NFS_ISV3(vp);
3322 struct nfsnode *np = VTONFS(vp);
3323
3324 switch (ap->a_name) {
3325 /* Names that can be resolved locally. */
3326 case _PC_PIPE_BUF:
3327 *ap->a_retval = PIPE_BUF;
3328 break;
3329 case _PC_SYNC_IO:
3330 *ap->a_retval = 1;
3331 break;
3332 /* Names that cannot be resolved locally; do an RPC, if possible. */
3333 case _PC_LINK_MAX:
3334 case _PC_NAME_MAX:
3335 case _PC_CHOWN_RESTRICTED:
3336 case _PC_NO_TRUNC:
3337 if (!v3) {
3338 error = EINVAL;
3339 break;
3340 }
3341 nfsstats.rpccnt[NFSPROC_PATHCONF]++;
3342 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1));
3343 nfsm_fhtom(np, 1);
3344 nfsm_request(np, NFSPROC_PATHCONF,
3345 curlwp, curlwp->l_cred); /* XXX */
3346 nfsm_postop_attr(vp, attrflag, 0);
3347 if (!error) {
3348 nfsm_dissect(pcp, struct nfsv3_pathconf *,
3349 NFSX_V3PATHCONF);
3350 switch (ap->a_name) {
3351 case _PC_LINK_MAX:
3352 *ap->a_retval =
3353 fxdr_unsigned(register_t, pcp->pc_linkmax);
3354 break;
3355 case _PC_NAME_MAX:
3356 *ap->a_retval =
3357 fxdr_unsigned(register_t, pcp->pc_namemax);
3358 break;
3359 case _PC_CHOWN_RESTRICTED:
3360 *ap->a_retval =
3361 (pcp->pc_chownrestricted == nfs_true);
3362 break;
3363 case _PC_NO_TRUNC:
3364 *ap->a_retval =
3365 (pcp->pc_notrunc == nfs_true);
3366 break;
3367 }
3368 }
3369 nfsm_reqdone;
3370 break;
3371 case _PC_FILESIZEBITS:
3372 #ifndef NFS_V2_ONLY
3373 if (v3) {
3374 nmp = VFSTONFS(vp->v_mount);
3375 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
3376 if ((error = nfs_fsinfo(nmp, vp,
3377 curlwp->l_cred, curlwp)) != 0) /* XXX */
3378 break;
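			/*
			 * Count the bits needed to represent the maximum
			 * file size; the extra bit added below is presumably
			 * for the sign, as FILESIZEBITS is defined in terms
			 * of a signed value.
			 */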
3379 for (l = 0, maxsize = nmp->nm_maxfilesize;
3380 (maxsize >> l) > 0; l++)
3381 ;
3382 *ap->a_retval = l + 1;
3383 } else
3384 #endif
3385 {
3386 *ap->a_retval = 32; /* NFS V2 limitation */
3387 }
3388 break;
3389 default:
3390 error = EINVAL;
3391 break;
3392 }
3393
3394 return (error);
3395 }
3396
3397 /*
3398 * NFS advisory byte-level locks.
3399 */
3400 int
3401 nfs_advlock(v)
3402 void *v;
3403 {
3404 struct vop_advlock_args /* {
3405 struct vnode *a_vp;
3406 caddr_t a_id;
3407 int a_op;
3408 struct flock *a_fl;
3409 int a_flags;
3410 } */ *ap = v;
3411 struct nfsnode *np = VTONFS(ap->a_vp);
3412
3413 return lf_advlock(ap, &np->n_lockf, np->n_size);
3414 }
3415
3416 /*
3417 * Print out the contents of an nfsnode.
3418 */
3419 int
3420 nfs_print(v)
3421 void *v;
3422 {
3423 struct vop_print_args /* {
3424 struct vnode *a_vp;
3425 } */ *ap = v;
3426 struct vnode *vp = ap->a_vp;
3427 struct nfsnode *np = VTONFS(vp);
3428
3429	printf("tag VT_NFS, fileid %llu fsid 0x%lx",
3430 (unsigned long long)np->n_vattr->va_fileid, np->n_vattr->va_fsid);
3431 if (vp->v_type == VFIFO)
3432 fifo_printinfo(vp);
3433 printf("\n");
3434 return (0);
3435 }
3436
3437 /*
3438 * nfs unlock wrapper.
3439 */
3440 int
3441 nfs_unlock(void *v)
3442 {
3443 struct vop_unlock_args /* {
3444 struct vnode *a_vp;
3445 int a_flags;
3446 } */ *ap = v;
3447 struct vnode *vp = ap->a_vp;
3448
3449 /*
3450 * VOP_UNLOCK can be called by nfs_loadattrcache
3451 * with v_data == 0.
3452 */
3453 if (VTONFS(vp)) {
3454 nfs_delayedtruncate(vp);
3455 }
3456
3457 return genfs_unlock(v);
3458 }
3459
3460 /*
3461 * nfs special file access vnode op.
3462  * Essentially just get vattr and then imitate vaccess() since the device is
3463 * local to the client.
3464 */
3465 int
3466 nfsspec_access(v)
3467 void *v;
3468 {
3469 struct vop_access_args /* {
3470 struct vnode *a_vp;
3471 int a_mode;
3472 kauth_cred_t a_cred;
3473 struct lwp *a_l;
3474 } */ *ap = v;
3475 struct vattr va;
3476 struct vnode *vp = ap->a_vp;
3477 int error;
3478
3479 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_l);
3480 if (error)
3481 return (error);
3482
3483 /*
3484 * Disallow write attempts on filesystems mounted read-only;
3485 * unless the file is a socket, fifo, or a block or character
3486 * device resident on the filesystem.
3487 */
3488 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3489 switch (vp->v_type) {
3490 case VREG:
3491 case VDIR:
3492 case VLNK:
3493 return (EROFS);
3494 default:
3495 break;
3496 }
3497 }
3498
3499 return (vaccess(va.va_type, va.va_mode,
3500 va.va_uid, va.va_gid, ap->a_mode, ap->a_cred));
3501 }
3502
3503 /*
3504 * Read wrapper for special devices.
3505 */
3506 int
3507 nfsspec_read(v)
3508 void *v;
3509 {
3510 struct vop_read_args /* {
3511 struct vnode *a_vp;
3512 struct uio *a_uio;
3513 int a_ioflag;
3514 kauth_cred_t a_cred;
3515 } */ *ap = v;
3516 struct nfsnode *np = VTONFS(ap->a_vp);
3517
3518 /*
3519 * Set access flag.
3520 */
3521 np->n_flag |= NACC;
3522 getnanotime(&np->n_atim);
3523 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3524 }
3525
3526 /*
3527 * Write wrapper for special devices.
3528 */
3529 int
3530 nfsspec_write(v)
3531 void *v;
3532 {
3533 struct vop_write_args /* {
3534 struct vnode *a_vp;
3535 struct uio *a_uio;
3536 int a_ioflag;
3537 kauth_cred_t a_cred;
3538 } */ *ap = v;
3539 struct nfsnode *np = VTONFS(ap->a_vp);
3540
3541 /*
3542 * Set update flag.
3543 */
3544 np->n_flag |= NUPD;
3545 getnanotime(&np->n_mtim);
3546 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3547 }
3548
3549 /*
3550 * Close wrapper for special devices.
3551 *
3552 * Update the times on the nfsnode then do device close.
3553 */
3554 int
3555 nfsspec_close(v)
3556 void *v;
3557 {
3558 struct vop_close_args /* {
3559 struct vnode *a_vp;
3560 int a_fflag;
3561 kauth_cred_t a_cred;
3562 struct lwp *a_l;
3563 } */ *ap = v;
3564 struct vnode *vp = ap->a_vp;
3565 struct nfsnode *np = VTONFS(vp);
3566 struct vattr vattr;
3567
3568 if (np->n_flag & (NACC | NUPD)) {
3569 np->n_flag |= NCHG;
3570 if (vp->v_usecount == 1 &&
3571 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3572 VATTR_NULL(&vattr);
3573 if (np->n_flag & NACC)
3574 vattr.va_atime = np->n_atim;
3575 if (np->n_flag & NUPD)
3576 vattr.va_mtime = np->n_mtim;
3577 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_l);
3578 }
3579 }
3580 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3581 }
3582
3583 /*
3584 * Read wrapper for fifos.
3585 */
3586 int
3587 nfsfifo_read(v)
3588 void *v;
3589 {
3590 struct vop_read_args /* {
3591 struct vnode *a_vp;
3592 struct uio *a_uio;
3593 int a_ioflag;
3594 kauth_cred_t a_cred;
3595 } */ *ap = v;
3596 struct nfsnode *np = VTONFS(ap->a_vp);
3597
3598 /*
3599 * Set access flag.
3600 */
3601 np->n_flag |= NACC;
3602 getnanotime(&np->n_atim);
3603 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3604 }
3605
3606 /*
3607 * Write wrapper for fifos.
3608 */
3609 int
3610 nfsfifo_write(v)
3611 void *v;
3612 {
3613 struct vop_write_args /* {
3614 struct vnode *a_vp;
3615 struct uio *a_uio;
3616 int a_ioflag;
3617 kauth_cred_t a_cred;
3618 } */ *ap = v;
3619 struct nfsnode *np = VTONFS(ap->a_vp);
3620
3621 /*
3622 * Set update flag.
3623 */
3624 np->n_flag |= NUPD;
3625 getnanotime(&np->n_mtim);
3626 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3627 }
3628
3629 /*
3630 * Close wrapper for fifos.
3631 *
3632 * Update the times on the nfsnode then do fifo close.
3633 */
3634 int
3635 nfsfifo_close(v)
3636 void *v;
3637 {
3638 struct vop_close_args /* {
3639 struct vnode *a_vp;
3640 int a_fflag;
3641 kauth_cred_t a_cred;
3642 struct lwp *a_l;
3643 } */ *ap = v;
3644 struct vnode *vp = ap->a_vp;
3645 struct nfsnode *np = VTONFS(vp);
3646 struct vattr vattr;
3647
3648 if (np->n_flag & (NACC | NUPD)) {
3649 struct timespec ts;
3650
3651 getnanotime(&ts);
3652 if (np->n_flag & NACC)
3653 np->n_atim = ts;
3654 if (np->n_flag & NUPD)
3655 np->n_mtim = ts;
3656 np->n_flag |= NCHG;
3657 if (vp->v_usecount == 1 &&
3658 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3659 VATTR_NULL(&vattr);
3660 if (np->n_flag & NACC)
3661 vattr.va_atime = np->n_atim;
3662 if (np->n_flag & NUPD)
3663 vattr.va_mtime = np->n_mtim;
3664 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_l);
3665 }
3666 }
3667 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3668 }
3669