1 /* $NetBSD: nfs_vnops.c,v 1.247 2006/12/27 12:10:09 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)nfs_vnops.c 8.19 (Berkeley) 7/31/95
35 */
36
37 /*
38 * vnode op calls for Sun NFS version 2 and 3
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.247 2006/12/27 12:10:09 yamt Exp $");
43
44 #include "opt_inet.h"
45 #include "opt_nfs.h"
46 #include "opt_uvmhist.h"
47
48 #include <sys/param.h>
49 #include <sys/proc.h>
50 #include <sys/kernel.h>
51 #include <sys/systm.h>
52 #include <sys/resourcevar.h>
53 #include <sys/proc.h>
54 #include <sys/mount.h>
55 #include <sys/buf.h>
56 #include <sys/disk.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/namei.h>
60 #include <sys/vnode.h>
61 #include <sys/dirent.h>
62 #include <sys/fcntl.h>
63 #include <sys/hash.h>
64 #include <sys/lockf.h>
65 #include <sys/stat.h>
66 #include <sys/unistd.h>
67 #include <sys/kauth.h>
68
69 #include <uvm/uvm_extern.h>
70 #include <uvm/uvm.h>
71
72 #include <miscfs/fifofs/fifo.h>
73 #include <miscfs/genfs/genfs.h>
74 #include <miscfs/genfs/genfs_node.h>
75 #include <miscfs/specfs/specdev.h>
76
77 #include <nfs/rpcv2.h>
78 #include <nfs/nfsproto.h>
79 #include <nfs/nfs.h>
80 #include <nfs/nfsnode.h>
81 #include <nfs/nfsmount.h>
82 #include <nfs/xdr_subs.h>
83 #include <nfs/nfsm_subs.h>
84 #include <nfs/nfs_var.h>
85
86 #include <net/if.h>
87 #include <netinet/in.h>
88 #include <netinet/in_var.h>
89
90 /*
91 * Global vfs data structures for nfs
92 */
93 int (**nfsv2_vnodeop_p) __P((void *));
94 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
95 { &vop_default_desc, vn_default_error },
96 { &vop_lookup_desc, nfs_lookup }, /* lookup */
97 { &vop_create_desc, nfs_create }, /* create */
98 { &vop_mknod_desc, nfs_mknod }, /* mknod */
99 { &vop_open_desc, nfs_open }, /* open */
100 { &vop_close_desc, nfs_close }, /* close */
101 { &vop_access_desc, nfs_access }, /* access */
102 { &vop_getattr_desc, nfs_getattr }, /* getattr */
103 { &vop_setattr_desc, nfs_setattr }, /* setattr */
104 { &vop_read_desc, nfs_read }, /* read */
105 { &vop_write_desc, nfs_write }, /* write */
106 { &vop_lease_desc, nfs_lease_check }, /* lease */
107 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
108 { &vop_ioctl_desc, nfs_ioctl }, /* ioctl */
109 { &vop_poll_desc, nfs_poll }, /* poll */
110 { &vop_kqfilter_desc, nfs_kqfilter }, /* kqfilter */
111 { &vop_revoke_desc, nfs_revoke }, /* revoke */
112 { &vop_mmap_desc, nfs_mmap }, /* mmap */
113 { &vop_fsync_desc, nfs_fsync }, /* fsync */
114 { &vop_seek_desc, nfs_seek }, /* seek */
115 { &vop_remove_desc, nfs_remove }, /* remove */
116 { &vop_link_desc, nfs_link }, /* link */
117 { &vop_rename_desc, nfs_rename }, /* rename */
118 { &vop_mkdir_desc, nfs_mkdir }, /* mkdir */
119 { &vop_rmdir_desc, nfs_rmdir }, /* rmdir */
120 { &vop_symlink_desc, nfs_symlink }, /* symlink */
121 { &vop_readdir_desc, nfs_readdir }, /* readdir */
122 { &vop_readlink_desc, nfs_readlink }, /* readlink */
123 { &vop_abortop_desc, nfs_abortop }, /* abortop */
124 { &vop_inactive_desc, nfs_inactive }, /* inactive */
125 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
126 { &vop_lock_desc, nfs_lock }, /* lock */
127 { &vop_unlock_desc, nfs_unlock }, /* unlock */
128 { &vop_bmap_desc, nfs_bmap }, /* bmap */
129 { &vop_strategy_desc, nfs_strategy }, /* strategy */
130 { &vop_print_desc, nfs_print }, /* print */
131 { &vop_islocked_desc, nfs_islocked }, /* islocked */
132 { &vop_pathconf_desc, nfs_pathconf }, /* pathconf */
133 { &vop_advlock_desc, nfs_advlock }, /* advlock */
134 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
135 { &vop_getpages_desc, nfs_getpages }, /* getpages */
136 { &vop_putpages_desc, genfs_putpages }, /* putpages */
137 { NULL, NULL }
138 };
139 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
140 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
141
142 /*
143 * Special device vnode ops
144 */
145 int (**spec_nfsv2nodeop_p) __P((void *));
146 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
147 { &vop_default_desc, vn_default_error },
148 { &vop_lookup_desc, spec_lookup }, /* lookup */
149 { &vop_create_desc, spec_create }, /* create */
150 { &vop_mknod_desc, spec_mknod }, /* mknod */
151 { &vop_open_desc, spec_open }, /* open */
152 { &vop_close_desc, nfsspec_close }, /* close */
153 { &vop_access_desc, nfsspec_access }, /* access */
154 { &vop_getattr_desc, nfs_getattr }, /* getattr */
155 { &vop_setattr_desc, nfs_setattr }, /* setattr */
156 { &vop_read_desc, nfsspec_read }, /* read */
157 { &vop_write_desc, nfsspec_write }, /* write */
158 { &vop_lease_desc, spec_lease_check }, /* lease */
159 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
160 { &vop_ioctl_desc, spec_ioctl }, /* ioctl */
161 { &vop_poll_desc, spec_poll }, /* poll */
162 { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */
163 { &vop_revoke_desc, spec_revoke }, /* revoke */
164 { &vop_mmap_desc, spec_mmap }, /* mmap */
165 { &vop_fsync_desc, spec_fsync }, /* fsync */
166 { &vop_seek_desc, spec_seek }, /* seek */
167 { &vop_remove_desc, spec_remove }, /* remove */
168 { &vop_link_desc, spec_link }, /* link */
169 { &vop_rename_desc, spec_rename }, /* rename */
170 { &vop_mkdir_desc, spec_mkdir }, /* mkdir */
171 { &vop_rmdir_desc, spec_rmdir }, /* rmdir */
172 { &vop_symlink_desc, spec_symlink }, /* symlink */
173 { &vop_readdir_desc, spec_readdir }, /* readdir */
174 { &vop_readlink_desc, spec_readlink }, /* readlink */
175 { &vop_abortop_desc, spec_abortop }, /* abortop */
176 { &vop_inactive_desc, nfs_inactive }, /* inactive */
177 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
178 { &vop_lock_desc, nfs_lock }, /* lock */
179 { &vop_unlock_desc, nfs_unlock }, /* unlock */
180 { &vop_bmap_desc, spec_bmap }, /* bmap */
181 { &vop_strategy_desc, spec_strategy }, /* strategy */
182 { &vop_print_desc, nfs_print }, /* print */
183 { &vop_islocked_desc, nfs_islocked }, /* islocked */
184 { &vop_pathconf_desc, spec_pathconf }, /* pathconf */
185 { &vop_advlock_desc, spec_advlock }, /* advlock */
186 { &vop_bwrite_desc, spec_bwrite }, /* bwrite */
187 { &vop_getpages_desc, spec_getpages }, /* getpages */
188 { &vop_putpages_desc, spec_putpages }, /* putpages */
189 { NULL, NULL }
190 };
191 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
192 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
193
194 int (**fifo_nfsv2nodeop_p) __P((void *));
195 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
196 { &vop_default_desc, vn_default_error },
197 { &vop_lookup_desc, fifo_lookup }, /* lookup */
198 { &vop_create_desc, fifo_create }, /* create */
199 { &vop_mknod_desc, fifo_mknod }, /* mknod */
200 { &vop_open_desc, fifo_open }, /* open */
201 { &vop_close_desc, nfsfifo_close }, /* close */
202 { &vop_access_desc, nfsspec_access }, /* access */
203 { &vop_getattr_desc, nfs_getattr }, /* getattr */
204 { &vop_setattr_desc, nfs_setattr }, /* setattr */
205 { &vop_read_desc, nfsfifo_read }, /* read */
206 { &vop_write_desc, nfsfifo_write }, /* write */
207 { &vop_lease_desc, fifo_lease_check }, /* lease */
208 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */
209 { &vop_ioctl_desc, fifo_ioctl }, /* ioctl */
210 { &vop_poll_desc, fifo_poll }, /* poll */
211 { &vop_kqfilter_desc, fifo_kqfilter }, /* kqfilter */
212 { &vop_revoke_desc, fifo_revoke }, /* revoke */
213 { &vop_mmap_desc, fifo_mmap }, /* mmap */
214 { &vop_fsync_desc, nfs_fsync }, /* fsync */
215 { &vop_seek_desc, fifo_seek }, /* seek */
216 { &vop_remove_desc, fifo_remove }, /* remove */
217 { &vop_link_desc, fifo_link }, /* link */
218 { &vop_rename_desc, fifo_rename }, /* rename */
219 { &vop_mkdir_desc, fifo_mkdir }, /* mkdir */
220 { &vop_rmdir_desc, fifo_rmdir }, /* rmdir */
221 { &vop_symlink_desc, fifo_symlink }, /* symlink */
222 { &vop_readdir_desc, fifo_readdir }, /* readdir */
223 { &vop_readlink_desc, fifo_readlink }, /* readlink */
224 { &vop_abortop_desc, fifo_abortop }, /* abortop */
225 { &vop_inactive_desc, nfs_inactive }, /* inactive */
226 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */
227 { &vop_lock_desc, nfs_lock }, /* lock */
228 { &vop_unlock_desc, nfs_unlock }, /* unlock */
229 { &vop_bmap_desc, fifo_bmap }, /* bmap */
230 { &vop_strategy_desc, genfs_badop }, /* strategy */
231 { &vop_print_desc, nfs_print }, /* print */
232 { &vop_islocked_desc, nfs_islocked }, /* islocked */
233 { &vop_pathconf_desc, fifo_pathconf }, /* pathconf */
234 { &vop_advlock_desc, fifo_advlock }, /* advlock */
235 { &vop_bwrite_desc, genfs_badop }, /* bwrite */
236 { &vop_putpages_desc, fifo_putpages }, /* putpages */
237 { NULL, NULL }
238 };
239 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
240 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
241
242 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
243 size_t, kauth_cred_t, struct lwp *);
244 static void nfs_writerpc_extfree(struct mbuf *, caddr_t, size_t, void *);
245
246 /*
247 * Global variables
248 */
249 extern u_int32_t nfs_true, nfs_false;
250 extern u_int32_t nfs_xdrneg1;
251 extern const nfstype nfsv3_type[9];
252
253 int nfs_numasync = 0;
254 #define DIRHDSIZ _DIRENT_NAMEOFF(dp)
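/*
 * UIO_ADVANCE consumes "siz" bytes from the front of the uio's current
 * iovec: uio_resid and iov_len shrink by siz while iov_base moves
 * forward.  A negative siz (as used by nfs_writerpc() to back out a
 * short or failed write) moves the cursor backwards instead.
 */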
255 #define UIO_ADVANCE(uio, siz) \
256 (void)((uio)->uio_resid -= (siz), \
257 (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
258 (uio)->uio_iov->iov_len -= (siz))
259
260 static void nfs_cache_enter(struct vnode *, struct vnode *,
261 struct componentname *);
262
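/*
 * Enter a (directory, name) -> vnode translation in the name cache,
 * recording vp's change time and dvp's modification time so that
 * nfs_lookup() can later tell whether the cached entry is still valid.
 */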
263 static void
264 nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
265 struct componentname *cnp)
266 {
267 struct nfsnode *dnp = VTONFS(dvp);
268
269 if (vp != NULL) {
270 struct nfsnode *np = VTONFS(vp);
271
272 np->n_ctime = np->n_vattr->va_ctime.tv_sec;
273 }
274
275 if (!timespecisset(&dnp->n_nctime))
276 dnp->n_nctime = dnp->n_vattr->va_mtime;
277
278 cache_enter(dvp, vp, cnp);
279 }
280
281 /*
282 * nfs null call from vfs.
283 */
284 int
285 nfs_null(vp, cred, l)
286 struct vnode *vp;
287 kauth_cred_t cred;
288 struct lwp *l;
289 {
290 caddr_t bpos, dpos;
291 int error = 0;
292 struct mbuf *mreq, *mrep, *md, *mb;
293 struct nfsnode *np = VTONFS(vp);
294
295 nfsm_reqhead(np, NFSPROC_NULL, 0);
296 nfsm_request(np, NFSPROC_NULL, l, cred);
297 nfsm_reqdone;
298 return (error);
299 }
300
301 /*
302 * nfs access vnode op.
303 * For nfs version 2, just return ok. File accesses may fail later.
304 * For nfs version 3, use the access rpc to check accessibility. If file modes
305 * are changed on the server, accesses might still fail later.
306 */
307 int
308 nfs_access(v)
309 void *v;
310 {
311 struct vop_access_args /* {
312 struct vnode *a_vp;
313 int a_mode;
314 kauth_cred_t a_cred;
315 struct lwp *a_l;
316 } */ *ap = v;
317 struct vnode *vp = ap->a_vp;
318 #ifndef NFS_V2_ONLY
319 u_int32_t *tl;
320 caddr_t cp;
321 int32_t t1, t2;
322 caddr_t bpos, dpos, cp2;
323 int error = 0, attrflag;
324 struct mbuf *mreq, *mrep, *md, *mb;
325 u_int32_t mode, rmode;
326 const int v3 = NFS_ISV3(vp);
327 #endif
328 int cachevalid;
329 struct nfsnode *np = VTONFS(vp);
330 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
331
332 cachevalid = (np->n_accstamp != -1 &&
333 (time_uptime - np->n_accstamp) < NFS_ATTRTIMEO(nmp, np) &&
334 np->n_accuid == kauth_cred_geteuid(ap->a_cred));
335
336 /*
337 * Check access cache first. If this request has been made for this
338 * uid shortly before, use the cached result.
339 */
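/*
 * A cached success is reused when the requested bits are a subset of
 * the bits that succeeded before; a cached failure is reused only when
 * the request includes every bit that failed, since a narrower request
 * might still succeed.
 */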
340 if (cachevalid) {
341 if (!np->n_accerror) {
342 if ((np->n_accmode & ap->a_mode) == ap->a_mode)
343 return np->n_accerror;
344 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode)
345 return np->n_accerror;
346 }
347
348 #ifndef NFS_V2_ONLY
349 /*
350 * For nfs v3, do an access rpc, otherwise you are stuck emulating
351 * ufs_access() locally using the vattr. This may not be correct,
352 * since the server may apply other access criteria such as
353 * client uid-->server uid mapping that we do not know about, but
354 * this is better than just returning anything that is lying about
355 * in the cache.
356 */
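/*
 * The VOP access bits map onto NFSv3 ACCESS bits roughly as follows
 * (directories use LOOKUP/DELETE rather than EXECUTE):
 *	VREAD  -> NFSV3ACCESS_READ
 *	VWRITE -> NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND
 *	          (plus NFSV3ACCESS_DELETE for directories)
 *	VEXEC  -> NFSV3ACCESS_EXECUTE, or NFSV3ACCESS_LOOKUP for directories
 */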
357 if (v3) {
358 nfsstats.rpccnt[NFSPROC_ACCESS]++;
359 nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
360 nfsm_fhtom(np, v3);
361 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
362 if (ap->a_mode & VREAD)
363 mode = NFSV3ACCESS_READ;
364 else
365 mode = 0;
366 if (vp->v_type != VDIR) {
367 if (ap->a_mode & VWRITE)
368 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
369 if (ap->a_mode & VEXEC)
370 mode |= NFSV3ACCESS_EXECUTE;
371 } else {
372 if (ap->a_mode & VWRITE)
373 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
374 NFSV3ACCESS_DELETE);
375 if (ap->a_mode & VEXEC)
376 mode |= NFSV3ACCESS_LOOKUP;
377 }
378 *tl = txdr_unsigned(mode);
379 nfsm_request(np, NFSPROC_ACCESS, ap->a_l, ap->a_cred);
380 nfsm_postop_attr(vp, attrflag, 0);
381 if (!error) {
382 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
383 rmode = fxdr_unsigned(u_int32_t, *tl);
384 /*
385 * The NFS V3 spec does not clarify whether or not
386 * the returned access bits can be a superset of
387 * the ones requested, so...
388 */
389 if ((rmode & mode) != mode)
390 error = EACCES;
391 }
392 nfsm_reqdone;
393 } else
394 #endif
395 return (nfsspec_access(ap));
396 #ifndef NFS_V2_ONLY
397 /*
398 * Disallow write attempts on filesystems mounted read-only,
399 * unless the file is a socket, fifo, or a block or character
400 * device resident on the filesystem.
401 */
402 if (!error && (ap->a_mode & VWRITE) &&
403 (vp->v_mount->mnt_flag & MNT_RDONLY)) {
404 switch (vp->v_type) {
405 case VREG:
406 case VDIR:
407 case VLNK:
408 error = EROFS;
409 default:
410 break;
411 }
412 }
413
414 if (!error || error == EACCES) {
415 /*
416 * If we got the same result as for a previous,
417 * different request, OR it in. Don't update
418 * the timestamp in that case.
419 */
420 if (cachevalid && np->n_accstamp != -1 &&
421 error == np->n_accerror) {
422 if (!error)
423 np->n_accmode |= ap->a_mode;
424 else if ((np->n_accmode & ap->a_mode) == ap->a_mode)
425 np->n_accmode = ap->a_mode;
426 } else {
427 np->n_accstamp = time_uptime;
428 np->n_accuid = kauth_cred_geteuid(ap->a_cred);
429 np->n_accmode = ap->a_mode;
430 np->n_accerror = error;
431 }
432 }
433
434 return (error);
435 #endif
436 }
437
438 /*
439 * nfs open vnode op
440 * Check to see if the type is ok
441 * and that deletion is not in progress.
442 * For paged-in text files, you will need to flush the page cache
443 * if consistency is lost.
444 */
445 /* ARGSUSED */
446 int
447 nfs_open(v)
448 void *v;
449 {
450 struct vop_open_args /* {
451 struct vnode *a_vp;
452 int a_mode;
453 kauth_cred_t a_cred;
454 struct lwp *a_l;
455 } */ *ap = v;
456 struct vnode *vp = ap->a_vp;
457 struct nfsnode *np = VTONFS(vp);
458 int error;
459
460 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
461 return (EACCES);
462 }
463
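/*
 * Remember the credentials the file was opened with; they are reused
 * later for read and write RPCs issued from contexts (e.g. asynchronous
 * i/o done by nfs_doio()) that have no usable credentials of their own.
 */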
464 if (ap->a_mode & FREAD) {
465 if (np->n_rcred != NULL)
466 kauth_cred_free(np->n_rcred);
467 np->n_rcred = ap->a_cred;
468 kauth_cred_hold(np->n_rcred);
469 }
470 if (ap->a_mode & FWRITE) {
471 if (np->n_wcred != NULL)
472 kauth_cred_free(np->n_wcred);
473 np->n_wcred = ap->a_cred;
474 kauth_cred_hold(np->n_wcred);
475 }
476
477 error = nfs_flushstalebuf(vp, ap->a_cred, ap->a_l, 0);
478 if (error)
479 return error;
480
481 NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */
482
483 return (0);
484 }
485
486 /*
487 * nfs close vnode op
488 * What an NFS client should do upon close after writing is a debatable issue.
489 * Most NFS clients push delayed writes to the server upon close, basically for
490 * two reasons:
491 * 1 - So that any write errors may be reported back to the client process
492 * doing the close system call. By far the two most likely errors are
493 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
494 * 2 - To put a worst case upper bound on cache inconsistency between
495 * multiple clients for the file.
496 * There is also a consistency problem for Version 2 of the protocol w.r.t.
497 * not being able to tell if other clients are writing a file concurrently,
498 * since there is no way of knowing if the changed modify time in the reply
499 * is only due to the write for this client.
500 * (NFS Version 3 provides weak cache consistency data in the reply that
501 * should be sufficient to detect and handle this case.)
502 *
503 * The current code does the following:
504 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
505 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
506 * or commit them (this satisfies 1 and 2 except for the
507 * case where the server crashes after this close but
508 * before the commit RPC, which is felt to be "good
509 * enough". Changing the last argument to nfs_flush() to
510 * a 1 would force a commit operation, if it is felt a
511 * commit is necessary now.
512 * for NQNFS - do nothing now, since 2 is dealt with via leases and
513 * 1 should be dealt with via an fsync() system call for
514 * cases where write errors are important.
515 */
516 /* ARGSUSED */
517 int
518 nfs_close(v)
519 void *v;
520 {
521 struct vop_close_args /* {
522 struct vnodeop_desc *a_desc;
523 struct vnode *a_vp;
524 int a_fflag;
525 kauth_cred_t a_cred;
526 struct lwp *a_l;
527 } */ *ap = v;
528 struct vnode *vp = ap->a_vp;
529 struct nfsnode *np = VTONFS(vp);
530 int error = 0;
531 UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);
532
533 if (vp->v_type == VREG) {
534 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 &&
535 (np->n_flag & NMODIFIED)) {
536 #ifndef NFS_V2_ONLY
537 if (NFS_ISV3(vp)) {
538 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_l, 0);
539 np->n_flag &= ~NMODIFIED;
540 } else
541 #endif
542 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_l, 1);
543 NFS_INVALIDATE_ATTRCACHE(np);
544 }
545 if (np->n_flag & NWRITEERR) {
546 np->n_flag &= ~NWRITEERR;
547 error = np->n_error;
548 }
549 }
550 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
551 return (error);
552 }
553
554 /*
555 * nfs getattr call from vfs.
556 */
557 int
558 nfs_getattr(v)
559 void *v;
560 {
561 struct vop_getattr_args /* {
562 struct vnode *a_vp;
563 struct vattr *a_vap;
564 kauth_cred_t a_cred;
565 struct lwp *a_l;
566 } */ *ap = v;
567 struct vnode *vp = ap->a_vp;
568 struct nfsnode *np = VTONFS(vp);
569 caddr_t cp;
570 u_int32_t *tl;
571 int32_t t1, t2;
572 caddr_t bpos, dpos;
573 int error = 0;
574 struct mbuf *mreq, *mrep, *md, *mb;
575 const int v3 = NFS_ISV3(vp);
576
577 /*
578 * Update local times for special files.
579 */
580 if (np->n_flag & (NACC | NUPD))
581 np->n_flag |= NCHG;
582
583 /*
584 * if we have delayed truncation, do it now.
585 */
586 nfs_delayedtruncate(vp);
587
588 /*
589 * First look in the cache.
590 */
591 if (nfs_getattrcache(vp, ap->a_vap) == 0)
592 return (0);
593 nfsstats.rpccnt[NFSPROC_GETATTR]++;
594 nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
595 nfsm_fhtom(np, v3);
596 nfsm_request(np, NFSPROC_GETATTR, ap->a_l, ap->a_cred);
597 if (!error) {
598 nfsm_loadattr(vp, ap->a_vap, 0);
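/*
 * Directories are read and cached in NFS_DIRFRAGSIZ chunks, so
 * never advertise a smaller blocksize for them.
 */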
599 if (vp->v_type == VDIR &&
600 ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
601 ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
602 }
603 nfsm_reqdone;
604 return (error);
605 }
606
607 /*
608 * nfs setattr call.
609 */
610 int
611 nfs_setattr(v)
612 void *v;
613 {
614 struct vop_setattr_args /* {
615 struct vnodeop_desc *a_desc;
616 struct vnode *a_vp;
617 struct vattr *a_vap;
618 kauth_cred_t a_cred;
619 struct lwp *a_l;
620 } */ *ap = v;
621 struct vnode *vp = ap->a_vp;
622 struct nfsnode *np = VTONFS(vp);
623 struct vattr *vap = ap->a_vap;
624 int error = 0;
625 u_quad_t tsize = 0;
626
627 /*
628 * Setting of flags is not supported.
629 */
630 if (vap->va_flags != VNOVAL)
631 return (EOPNOTSUPP);
632
633 /*
634 * Disallow write attempts if the filesystem is mounted read-only.
635 */
636 if ((vap->va_uid != (uid_t)VNOVAL ||
637 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
638 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
639 (vp->v_mount->mnt_flag & MNT_RDONLY))
640 return (EROFS);
641 if (vap->va_size != VNOVAL) {
642 switch (vp->v_type) {
643 case VDIR:
644 return (EISDIR);
645 case VCHR:
646 case VBLK:
647 case VSOCK:
648 case VFIFO:
649 if (vap->va_mtime.tv_sec == VNOVAL &&
650 vap->va_atime.tv_sec == VNOVAL &&
651 vap->va_mode == (mode_t)VNOVAL &&
652 vap->va_uid == (uid_t)VNOVAL &&
653 vap->va_gid == (gid_t)VNOVAL)
654 return (0);
655 vap->va_size = VNOVAL;
656 break;
657 default:
658 /*
659 * Disallow write attempts if the filesystem is
660 * mounted read-only.
661 */
662 if (vp->v_mount->mnt_flag & MNT_RDONLY)
663 return (EROFS);
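/*
 * Adjust the uvm object and our cached file size before talking
 * to the server, remembering the old size so a failed SETATTR
 * can be rolled back below.  The node stays write-locked across
 * the RPC so the size change appears atomic.
 */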
664 genfs_node_wrlock(vp);
665 uvm_vnp_setsize(vp, vap->va_size);
666 tsize = np->n_size;
667 np->n_size = vap->va_size;
668 if (vap->va_size == 0)
669 error = nfs_vinvalbuf(vp, 0,
670 ap->a_cred, ap->a_l, 1);
671 else
672 error = nfs_vinvalbuf(vp, V_SAVE,
673 ap->a_cred, ap->a_l, 1);
674 if (error) {
675 uvm_vnp_setsize(vp, tsize);
676 genfs_node_unlock(vp);
677 return (error);
678 }
679 np->n_vattr->va_size = vap->va_size;
680 }
681 } else {
682 /*
683 * flush files before setattr because a later write of
684 * cached data might change timestamps or reset sugid bits
685 */
686 if ((vap->va_mtime.tv_sec != VNOVAL ||
687 vap->va_atime.tv_sec != VNOVAL ||
688 vap->va_mode != VNOVAL) &&
689 vp->v_type == VREG &&
690 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
691 ap->a_l, 1)) == EINTR)
692 return (error);
693 }
694 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_l);
695 if (vap->va_size != VNOVAL) {
696 if (error) {
697 np->n_size = np->n_vattr->va_size = tsize;
698 uvm_vnp_setsize(vp, np->n_size);
699 }
700 genfs_node_unlock(vp);
701 }
702 VN_KNOTE(vp, NOTE_ATTRIB);
703 return (error);
704 }
705
706 /*
707 * Do an nfs setattr rpc.
708 */
709 int
710 nfs_setattrrpc(vp, vap, cred, l)
711 struct vnode *vp;
712 struct vattr *vap;
713 kauth_cred_t cred;
714 struct lwp *l;
715 {
716 struct nfsv2_sattr *sp;
717 caddr_t cp;
718 int32_t t1, t2;
719 caddr_t bpos, dpos;
720 u_int32_t *tl;
721 int error = 0;
722 struct mbuf *mreq, *mrep, *md, *mb;
723 const int v3 = NFS_ISV3(vp);
724 struct nfsnode *np = VTONFS(vp);
725 #ifndef NFS_V2_ONLY
726 int wccflag = NFSV3_WCCRATTR;
727 caddr_t cp2;
728 #endif
729
730 nfsstats.rpccnt[NFSPROC_SETATTR]++;
731 nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
732 nfsm_fhtom(np, v3);
733 #ifndef NFS_V2_ONLY
734 if (v3) {
735 nfsm_v3attrbuild(vap, TRUE);
736 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
737 *tl = nfs_false;
738 } else {
739 #endif
740 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
741 if (vap->va_mode == (mode_t)VNOVAL)
742 sp->sa_mode = nfs_xdrneg1;
743 else
744 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
745 if (vap->va_uid == (uid_t)VNOVAL)
746 sp->sa_uid = nfs_xdrneg1;
747 else
748 sp->sa_uid = txdr_unsigned(vap->va_uid);
749 if (vap->va_gid == (gid_t)VNOVAL)
750 sp->sa_gid = nfs_xdrneg1;
751 else
752 sp->sa_gid = txdr_unsigned(vap->va_gid);
753 sp->sa_size = txdr_unsigned(vap->va_size);
754 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
755 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
756 #ifndef NFS_V2_ONLY
757 }
758 #endif
759 nfsm_request(np, NFSPROC_SETATTR, l, cred);
760 #ifndef NFS_V2_ONLY
761 if (v3) {
762 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, FALSE);
763 } else
764 #endif
765 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
766 nfsm_reqdone;
767 return (error);
768 }
769
770 /*
771 * nfs lookup call, one step at a time...
772 * First look in cache
773 * If not found, unlock the directory nfsnode and do the rpc
774 *
775 * This code is full of lock/unlock statements and checks, because
776 * we continue after cache_lookup has finished (we need to check
777 * with the attr cache and do an rpc if it has timed out). This means
778 * that the locking effects of cache_lookup have to be taken into
779 * account.
780 */
781 int
782 nfs_lookup(v)
783 void *v;
784 {
785 struct vop_lookup_args /* {
786 struct vnodeop_desc *a_desc;
787 struct vnode *a_dvp;
788 struct vnode **a_vpp;
789 struct componentname *a_cnp;
790 } */ *ap = v;
791 struct componentname *cnp = ap->a_cnp;
792 struct vnode *dvp = ap->a_dvp;
793 struct vnode **vpp = ap->a_vpp;
794 int flags;
795 struct vnode *newvp;
796 u_int32_t *tl;
797 caddr_t cp;
798 int32_t t1, t2;
799 caddr_t bpos, dpos, cp2;
800 struct mbuf *mreq, *mrep, *md, *mb;
801 long len;
802 nfsfh_t *fhp;
803 struct nfsnode *np;
804 int error = 0, attrflag, fhsize;
805 const int v3 = NFS_ISV3(dvp);
806
807 flags = cnp->cn_flags;
808
809 *vpp = NULLVP;
810 newvp = NULLVP;
811 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
812 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
813 return (EROFS);
814 if (dvp->v_type != VDIR)
815 return (ENOTDIR);
816
817 /*
818 * RFC 1813 (NFSv3) section 3.2 says clients should handle "." by themselves.
819 */
820 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
821 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_lwp);
822 if (error)
823 return error;
824 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
825 return EISDIR;
826 VREF(dvp);
827 *vpp = dvp;
828 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
829 cnp->cn_flags |= SAVENAME;
830 return 0;
831 }
832
833 np = VTONFS(dvp);
834
835 /*
836 * Before tediously performing a linear scan of the directory,
837 * check the name cache to see if the directory/name pair
838 * we are looking for is known already.
839 * If the directory/name pair is found in the name cache,
840 * we have to ensure the directory has not changed from
841 * the time the cache entry has been created. If it has,
842 * the cache entry has to be ignored.
843 */
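/*
 * cache_lookup_raw() returns -1 on a cache miss, 0 on a positive hit
 * (with a referenced vnode in *vpp) and ENOENT for a cached negative
 * entry, so any value >= 0 means the cache had an answer that still
 * has to be validated against the directory's mtime.
 */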
844 error = cache_lookup_raw(dvp, vpp, cnp);
845 KASSERT(dvp != *vpp);
846 if (error >= 0) {
847 struct vattr vattr;
848 int err2;
849
850 if (error && error != ENOENT) {
851 *vpp = NULLVP;
852 return error;
853 }
854
855 err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_lwp);
856 if (err2 != 0) {
857 if (error == 0)
858 vrele(*vpp);
859 *vpp = NULLVP;
860 return err2;
861 }
862
863 if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred,
864 cnp->cn_lwp) || timespeccmp(&vattr.va_mtime,
865 &VTONFS(dvp)->n_nctime, !=)) {
866 if (error == 0) {
867 vrele(*vpp);
868 *vpp = NULLVP;
869 }
870 cache_purge1(dvp, NULL, PURGE_CHILDREN);
871 timespecclear(&np->n_nctime);
872 goto dorpc;
873 }
874
875 if (error == ENOENT) {
876 goto noentry;
877 }
878
879 newvp = *vpp;
880 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_lwp)
881 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
882 nfsstats.lookupcache_hits++;
883 if ((flags & ISDOTDOT) != 0) {
884 VOP_UNLOCK(dvp, 0);
885 }
886 error = vn_lock(newvp, LK_EXCLUSIVE);
887 if ((flags & ISDOTDOT) != 0) {
888 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
889 }
890 if (error) {
891 /* newvp has been revoked. */
892 vrele(newvp);
893 *vpp = NULL;
894 return error;
895 }
896 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
897 cnp->cn_flags |= SAVENAME;
898 KASSERT(newvp->v_type != VNON);
899 return (0);
900 }
901 cache_purge1(newvp, NULL, PURGE_PARENTS);
902 vrele(newvp);
903 *vpp = NULLVP;
904 }
905 dorpc:
906 #if 0
907 /*
908 * because nfsv3 has the same CREATE semantics as ours,
909 * we don't have to perform LOOKUPs beforehand.
910 *
911 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
912 * XXX although we have no way to know if O_EXCL is requested or not.
913 */
914
915 if (v3 && cnp->cn_nameiop == CREATE &&
916 (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
917 (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
918 cnp->cn_flags |= SAVENAME;
919 return (EJUSTRETURN);
920 }
921 #endif /* 0 */
922
923 error = 0;
924 newvp = NULLVP;
925 nfsstats.lookupcache_misses++;
926 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
927 len = cnp->cn_namelen;
928 nfsm_reqhead(np, NFSPROC_LOOKUP,
929 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
930 nfsm_fhtom(np, v3);
931 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
932 nfsm_request(np, NFSPROC_LOOKUP, cnp->cn_lwp, cnp->cn_cred);
933 if (error) {
934 nfsm_postop_attr(dvp, attrflag, 0);
935 m_freem(mrep);
936 goto nfsmout;
937 }
938 nfsm_getfh(fhp, fhsize, v3);
939
940 /*
941 * Handle RENAME case...
942 */
943 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
944 if (NFS_CMPFH(np, fhp, fhsize)) {
945 m_freem(mrep);
946 return (EISDIR);
947 }
948 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
949 if (error) {
950 m_freem(mrep);
951 return error;
952 }
953 newvp = NFSTOV(np);
954 #ifndef NFS_V2_ONLY
955 if (v3) {
956 nfsm_postop_attr(newvp, attrflag, 0);
957 nfsm_postop_attr(dvp, attrflag, 0);
958 } else
959 #endif
960 nfsm_loadattr(newvp, (struct vattr *)0, 0);
961 *vpp = newvp;
962 m_freem(mrep);
963 cnp->cn_flags |= SAVENAME;
964 goto validate;
965 }
966
967 /*
968 * The postop attr handling is duplicated for each if case,
969 * because it should be done while dvp is locked (unlocking
970 * dvp is different for each case).
971 */
972
973 if (NFS_CMPFH(np, fhp, fhsize)) {
974 /*
975 * "." lookup
976 */
977 VREF(dvp);
978 newvp = dvp;
979 #ifndef NFS_V2_ONLY
980 if (v3) {
981 nfsm_postop_attr(newvp, attrflag, 0);
982 nfsm_postop_attr(dvp, attrflag, 0);
983 } else
984 #endif
985 nfsm_loadattr(newvp, (struct vattr *)0, 0);
986 } else if (flags & ISDOTDOT) {
987 /*
988 * ".." lookup
989 */
990 VOP_UNLOCK(dvp, 0);
991 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
992 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
993 if (error) {
994 m_freem(mrep);
995 return error;
996 }
997 newvp = NFSTOV(np);
998
999 #ifndef NFS_V2_ONLY
1000 if (v3) {
1001 nfsm_postop_attr(newvp, attrflag, 0);
1002 nfsm_postop_attr(dvp, attrflag, 0);
1003 } else
1004 #endif
1005 nfsm_loadattr(newvp, (struct vattr *)0, 0);
1006 } else {
1007 /*
1008 * Other lookups.
1009 */
1010 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
1011 if (error) {
1012 m_freem(mrep);
1013 return error;
1014 }
1015 newvp = NFSTOV(np);
1016 #ifndef NFS_V2_ONLY
1017 if (v3) {
1018 nfsm_postop_attr(newvp, attrflag, 0);
1019 nfsm_postop_attr(dvp, attrflag, 0);
1020 } else
1021 #endif
1022 nfsm_loadattr(newvp, (struct vattr *)0, 0);
1023 }
1024 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
1025 cnp->cn_flags |= SAVENAME;
1026 if ((cnp->cn_flags & MAKEENTRY) &&
1027 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
1028 nfs_cache_enter(dvp, newvp, cnp);
1029 }
1030 *vpp = newvp;
1031 nfsm_reqdone;
1032 if (error) {
1033 /*
1034 * We get here only because of errors returned by
1035 * the RPC. Otherwise we'll have returned above
1036 * (the nfsm_* macros will jump to nfsm_reqdone
1037 * on error).
1038 */
1039 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) &&
1040 cnp->cn_nameiop != CREATE) {
1041 nfs_cache_enter(dvp, NULL, cnp);
1042 }
1043 if (newvp != NULLVP) {
1044 if (newvp == dvp) {
1045 vrele(newvp);
1046 } else {
1047 vput(newvp);
1048 }
1049 }
1050 noentry:
1051 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
1052 (flags & ISLASTCN) && error == ENOENT) {
1053 if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
1054 error = EROFS;
1055 } else {
1056 error = EJUSTRETURN;
1057 cnp->cn_flags |= SAVENAME;
1058 }
1059 }
1060 *vpp = NULL;
1061 return error;
1062 }
1063
1064 validate:
1065 /*
1066 * make sure we have valid type and size.
1067 */
1068
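/*
 * A vnode that came back from nfs_nget() without any attribute
 * information still has v_type == VNON; fetch the attributes now so
 * the caller always sees a fully typed vnode with a valid size.
 */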
1069 newvp = *vpp;
1070 if (newvp->v_type == VNON) {
1071 struct vattr vattr; /* dummy */
1072
1073 KASSERT(VTONFS(newvp)->n_attrstamp == 0);
1074 error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_lwp);
1075 if (error) {
1076 vput(newvp);
1077 *vpp = NULL;
1078 }
1079 }
1080
1081 return error;
1082 }
1083
1084 /*
1085 * nfs read call.
1086 * Just call nfs_bioread() to do the work.
1087 */
1088 int
1089 nfs_read(v)
1090 void *v;
1091 {
1092 struct vop_read_args /* {
1093 struct vnode *a_vp;
1094 struct uio *a_uio;
1095 int a_ioflag;
1096 kauth_cred_t a_cred;
1097 } */ *ap = v;
1098 struct vnode *vp = ap->a_vp;
1099
1100 if (vp->v_type != VREG)
1101 return EISDIR;
1102 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
1103 }
1104
1105 /*
1106 * nfs readlink call
1107 */
1108 int
1109 nfs_readlink(v)
1110 void *v;
1111 {
1112 struct vop_readlink_args /* {
1113 struct vnode *a_vp;
1114 struct uio *a_uio;
1115 kauth_cred_t a_cred;
1116 } */ *ap = v;
1117 struct vnode *vp = ap->a_vp;
1118 struct nfsnode *np = VTONFS(vp);
1119
1120 if (vp->v_type != VLNK)
1121 return (EPERM);
1122
1123 if (np->n_rcred != NULL) {
1124 kauth_cred_free(np->n_rcred);
1125 }
1126 np->n_rcred = ap->a_cred;
1127 kauth_cred_hold(np->n_rcred);
1128
1129 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
1130 }
1131
1132 /*
1133 * Do a readlink rpc.
1134 * Called by nfs_doio() from below the buffer cache.
1135 */
1136 int
1137 nfs_readlinkrpc(vp, uiop, cred)
1138 struct vnode *vp;
1139 struct uio *uiop;
1140 kauth_cred_t cred;
1141 {
1142 u_int32_t *tl;
1143 caddr_t cp;
1144 int32_t t1, t2;
1145 caddr_t bpos, dpos, cp2;
1146 int error = 0;
1147 uint32_t len;
1148 struct mbuf *mreq, *mrep, *md, *mb;
1149 const int v3 = NFS_ISV3(vp);
1150 struct nfsnode *np = VTONFS(vp);
1151 #ifndef NFS_V2_ONLY
1152 int attrflag;
1153 #endif
1154
1155 nfsstats.rpccnt[NFSPROC_READLINK]++;
1156 nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
1157 nfsm_fhtom(np, v3);
1158 nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
1159 #ifndef NFS_V2_ONLY
1160 if (v3)
1161 nfsm_postop_attr(vp, attrflag, 0);
1162 #endif
1163 if (!error) {
1164 #ifndef NFS_V2_ONLY
1165 if (v3) {
1166 nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
1167 len = fxdr_unsigned(uint32_t, *tl);
1168 if (len > MAXPATHLEN) {
1169 /*
1170 * this pathname is too long for us.
1171 */
1172 m_freem(mrep);
1173 /* Solaris returns EINVAL. should we follow? */
1174 error = ENAMETOOLONG;
1175 goto nfsmout;
1176 }
1177 } else
1178 #endif
1179 {
1180 nfsm_strsiz(len, NFS_MAXPATHLEN);
1181 }
1182 nfsm_mtouio(uiop, len);
1183 }
1184 nfsm_reqdone;
1185 return (error);
1186 }
1187
1188 /*
1189 * nfs read rpc call
1190 * Ditto above
1191 */
1192 int
1193 nfs_readrpc(vp, uiop)
1194 struct vnode *vp;
1195 struct uio *uiop;
1196 {
1197 u_int32_t *tl;
1198 caddr_t cp;
1199 int32_t t1, t2;
1200 caddr_t bpos, dpos, cp2;
1201 struct mbuf *mreq, *mrep, *md, *mb;
1202 struct nfsmount *nmp;
1203 int error = 0, len, retlen, tsiz, eof, byte_count;
1204 const int v3 = NFS_ISV3(vp);
1205 struct nfsnode *np = VTONFS(vp);
1206 #ifndef NFS_V2_ONLY
1207 int attrflag;
1208 #endif
1209
1210 #ifndef nolint
1211 eof = 0;
1212 #endif
1213 nmp = VFSTONFS(vp->v_mount);
1214 tsiz = uiop->uio_resid;
1215 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1216 return (EFBIG);
1217 iostat_busy(nmp->nm_stats);
1218 byte_count = 0; /* count bytes actually transferred */
1219 while (tsiz > 0) {
1220 nfsstats.rpccnt[NFSPROC_READ]++;
1221 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1222 nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1223 nfsm_fhtom(np, v3);
1224 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1225 #ifndef NFS_V2_ONLY
1226 if (v3) {
1227 txdr_hyper(uiop->uio_offset, tl);
1228 *(tl + 2) = txdr_unsigned(len);
1229 } else
1230 #endif
1231 {
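/*
 * NFSv2 read arguments: a 32-bit offset, the byte count, and the
 * unused "totalcount" field, which is always sent as zero.
 */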
1232 *tl++ = txdr_unsigned(uiop->uio_offset);
1233 *tl++ = txdr_unsigned(len);
1234 *tl = 0;
1235 }
1236 nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
1237 #ifndef NFS_V2_ONLY
1238 if (v3) {
1239 nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
1240 if (error) {
1241 m_freem(mrep);
1242 goto nfsmout;
1243 }
1244 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1245 eof = fxdr_unsigned(int, *(tl + 1));
1246 } else
1247 #endif
1248 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1249 nfsm_strsiz(retlen, nmp->nm_rsize);
1250 nfsm_mtouio(uiop, retlen);
1251 m_freem(mrep);
1252 tsiz -= retlen;
1253 byte_count += retlen;
1254 #ifndef NFS_V2_ONLY
1255 if (v3) {
1256 if (eof || retlen == 0)
1257 tsiz = 0;
1258 } else
1259 #endif
1260 if (retlen < len)
1261 tsiz = 0;
1262 }
1263 nfsmout:
1264 iostat_unbusy(nmp->nm_stats, byte_count, 1);
1265 return (error);
1266 }
1267
1268 struct nfs_writerpc_context {
1269 struct simplelock nwc_slock;
1270 volatile int nwc_mbufcount;
1271 };
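/*
 * nwc_mbufcount starts at 1 (the reference held by nfs_writerpc()
 * itself) and is bumped once for every loaned-page mbuf handed to the
 * transport; nfs_writerpc_extfree() drops a reference as each mbuf is
 * freed and wakes the writer when the count reaches zero.
 */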
1272
1273 /*
1274 * Free an mbuf used to refer to protected pages during a write RPC call.
1275 * Called at splvm.
1276 */
1277 static void
1278 nfs_writerpc_extfree(struct mbuf *m, caddr_t tbuf, size_t size, void *arg)
1279 {
1280 struct nfs_writerpc_context *ctx = arg;
1281
1282 KASSERT(m != NULL);
1283 KASSERT(ctx != NULL);
1284 pool_cache_put(&mbpool_cache, m);
1285 simple_lock(&ctx->nwc_slock);
1286 if (--ctx->nwc_mbufcount == 0) {
1287 wakeup(ctx);
1288 }
1289 simple_unlock(&ctx->nwc_slock);
1290 }
1291
1292 /*
1293 * nfs write call
1294 */
1295 int
1296 nfs_writerpc(vp, uiop, iomode, pageprotected, stalewriteverfp)
1297 struct vnode *vp;
1298 struct uio *uiop;
1299 int *iomode;
1300 boolean_t pageprotected;
1301 boolean_t *stalewriteverfp;
1302 {
1303 u_int32_t *tl;
1304 caddr_t cp;
1305 int32_t t1, t2;
1306 caddr_t bpos, dpos;
1307 struct mbuf *mreq, *mrep, *md, *mb;
1308 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1309 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
1310 const int v3 = NFS_ISV3(vp);
1311 int committed = NFSV3WRITE_FILESYNC;
1312 struct nfsnode *np = VTONFS(vp);
1313 struct nfs_writerpc_context ctx;
1314 int s, byte_count;
1315 struct lwp *l = NULL;
1316 size_t origresid;
1317 #ifndef NFS_V2_ONLY
1318 caddr_t cp2;
1319 int rlen, commit;
1320 #endif
1321
1322 simple_lock_init(&ctx.nwc_slock);
1323 ctx.nwc_mbufcount = 1;
1324
1325 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1326 panic("writerpc readonly vp %p", vp);
1327 }
1328
1329 #ifdef DIAGNOSTIC
1330 if (uiop->uio_iovcnt != 1)
1331 panic("nfs: writerpc iovcnt > 1");
1332 #endif
1333 tsiz = uiop->uio_resid;
1334 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1335 return (EFBIG);
1336 if (pageprotected) {
1337 l = curlwp;
1338 PHOLD(l);
1339 }
1340 retry:
1341 origresid = uiop->uio_resid;
1342 KASSERT(origresid == uiop->uio_iov->iov_len);
1343 iostat_busy(nmp->nm_stats);
1344 byte_count = 0; /* count of bytes actually written */
1345 while (tsiz > 0) {
1346 uint32_t datalen; /* data bytes need to be allocated in mbuf */
1347 uint32_t backup;
1348 boolean_t stalewriteverf = FALSE;
1349
1350 nfsstats.rpccnt[NFSPROC_WRITE]++;
1351 len = min(tsiz, nmp->nm_wsize);
1352 datalen = pageprotected ? 0 : nfsm_rndup(len);
1353 nfsm_reqhead(np, NFSPROC_WRITE,
1354 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
1355 nfsm_fhtom(np, v3);
1356 #ifndef NFS_V2_ONLY
1357 if (v3) {
1358 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1359 txdr_hyper(uiop->uio_offset, tl);
1360 tl += 2;
1361 *tl++ = txdr_unsigned(len);
1362 *tl++ = txdr_unsigned(*iomode);
1363 *tl = txdr_unsigned(len);
1364 } else
1365 #endif
1366 {
1367 u_int32_t x;
1368
1369 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1370 /* Set both "begin" and "current" to non-garbage. */
1371 x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1372 *tl++ = x; /* "begin offset" */
1373 *tl++ = x; /* "current offset" */
1374 x = txdr_unsigned(len);
1375 *tl++ = x; /* total to this offset */
1376 *tl = x; /* size of this write */
1377
1378 }
1379 if (pageprotected) {
1380 /*
1381 * since we know the pages can't be modified during i/o,
1382 * there is no need to copy them.
1383 */
1384 struct mbuf *m;
1385 struct iovec *iovp = uiop->uio_iov;
1386
1387 m = m_get(M_WAIT, MT_DATA);
1388 MCLAIM(m, &nfs_mowner);
1389 MEXTADD(m, iovp->iov_base, len, M_MBUF,
1390 nfs_writerpc_extfree, &ctx);
1391 m->m_flags |= M_EXT_ROMAP;
1392 m->m_len = len;
1393 mb->m_next = m;
1394 /*
1395 * no need to maintain mb and bpos here
1396 * because no one cares about them later.
1397 */
1398 #if 0
1399 mb = m;
1400 bpos = mtod(mb, caddr_t) + mb->m_len;
1401 #endif
1402 UIO_ADVANCE(uiop, len);
1403 uiop->uio_offset += len;
1404 s = splvm();
1405 simple_lock(&ctx.nwc_slock);
1406 ctx.nwc_mbufcount++;
1407 simple_unlock(&ctx.nwc_slock);
1408 splx(s);
1409 nfs_zeropad(mb, 0, nfsm_padlen(len));
1410 } else {
1411 nfsm_uiotom(uiop, len);
1412 }
1413 nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
1414 #ifndef NFS_V2_ONLY
1415 if (v3) {
1416 wccflag = NFSV3_WCCCHK;
1417 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
1418 if (!error) {
1419 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1420 + NFSX_V3WRITEVERF);
1421 rlen = fxdr_unsigned(int, *tl++);
1422 if (rlen == 0) {
1423 error = NFSERR_IO;
1424 m_freem(mrep);
1425 break;
1426 } else if (rlen < len) {
1427 backup = len - rlen;
1428 UIO_ADVANCE(uiop, -backup);
1429 uiop->uio_offset -= backup;
1430 len = rlen;
1431 }
1432 commit = fxdr_unsigned(int, *tl++);
1433
1434 /*
1435 * Return the lowest commitment level
1436 * obtained by any of the RPCs.
1437 */
1438 if (committed == NFSV3WRITE_FILESYNC)
1439 committed = commit;
1440 else if (committed == NFSV3WRITE_DATASYNC &&
1441 commit == NFSV3WRITE_UNSTABLE)
1442 committed = commit;
1443 simple_lock(&nmp->nm_slock);
1444 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
1445 memcpy(nmp->nm_writeverf, tl,
1446 NFSX_V3WRITEVERF);
1447 nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
1448 } else if ((nmp->nm_iflag &
1449 NFSMNT_STALEWRITEVERF) ||
1450 memcmp(tl, nmp->nm_writeverf,
1451 NFSX_V3WRITEVERF)) {
1452 memcpy(nmp->nm_writeverf, tl,
1453 NFSX_V3WRITEVERF);
1454 /*
1455 * note NFSMNT_STALEWRITEVERF
1456 * if we're the first thread to
1457 * notice it.
1458 */
1459 if ((nmp->nm_iflag &
1460 NFSMNT_STALEWRITEVERF) == 0) {
1461 stalewriteverf = TRUE;
1462 nmp->nm_iflag |=
1463 NFSMNT_STALEWRITEVERF;
1464 }
1465 }
1466 simple_unlock(&nmp->nm_slock);
1467 }
1468 } else
1469 #endif
1470 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1471 if (wccflag)
1472 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
1473 m_freem(mrep);
1474 if (error)
1475 break;
1476 tsiz -= len;
1477 byte_count += len;
1478 if (stalewriteverf) {
1479 *stalewriteverfp = TRUE;
1480 stalewriteverf = FALSE;
1481 if (committed == NFSV3WRITE_UNSTABLE &&
1482 len != origresid) {
1483 /*
1484 * if our write requests weren't atomic but
1485 * unstable, data written in previous iterations
1486 * might already have been lost.
1487 * In that case, resend it all to the server.
1488 */
1489 backup = origresid - tsiz;
1490 UIO_ADVANCE(uiop, -backup);
1491 uiop->uio_offset -= backup;
1492 tsiz = origresid;
1493 goto retry;
1494 }
1495 }
1496 }
1497 nfsmout:
1498 iostat_unbusy(nmp->nm_stats, byte_count, 0);
1499 if (pageprotected) {
1500 /*
1501 * wait until mbufs go away.
1502 * retransmitted mbufs can survive longer than rpc requests
1503 * themselves.
1504 */
1505 s = splvm();
1506 simple_lock(&ctx.nwc_slock);
1507 ctx.nwc_mbufcount--;
1508 while (ctx.nwc_mbufcount > 0) {
1509 ltsleep(&ctx, PRIBIO, "nfsmblk", 0, &ctx.nwc_slock);
1510 }
1511 simple_unlock(&ctx.nwc_slock);
1512 splx(s);
1513 PRELE(l);
1514 }
1515 *iomode = committed;
1516 if (error)
1517 uiop->uio_resid = tsiz;
1518 return (error);
1519 }
1520
1521 /*
1522 * nfs mknod rpc
1523 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1524 * mode set to specify the file type, and with the size field carrying rdev.
1525 */
1526 int
1527 nfs_mknodrpc(dvp, vpp, cnp, vap)
1528 struct vnode *dvp;
1529 struct vnode **vpp;
1530 struct componentname *cnp;
1531 struct vattr *vap;
1532 {
1533 struct nfsv2_sattr *sp;
1534 u_int32_t *tl;
1535 caddr_t cp;
1536 int32_t t1, t2;
1537 struct vnode *newvp = (struct vnode *)0;
1538 struct nfsnode *dnp, *np;
1539 char *cp2;
1540 caddr_t bpos, dpos;
1541 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1542 struct mbuf *mreq, *mrep, *md, *mb;
1543 u_int32_t rdev;
1544 const int v3 = NFS_ISV3(dvp);
1545
1546 if (vap->va_type == VCHR || vap->va_type == VBLK)
1547 rdev = txdr_unsigned(vap->va_rdev);
1548 else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1549 rdev = nfs_xdrneg1;
1550 else {
1551 VOP_ABORTOP(dvp, cnp);
1552 vput(dvp);
1553 return (EOPNOTSUPP);
1554 }
1555 nfsstats.rpccnt[NFSPROC_MKNOD]++;
1556 dnp = VTONFS(dvp);
1557 nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1558 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1559 nfsm_fhtom(dnp, v3);
1560 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1561 #ifndef NFS_V2_ONLY
1562 if (v3) {
1563 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1564 *tl++ = vtonfsv3_type(vap->va_type);
1565 nfsm_v3attrbuild(vap, FALSE);
1566 if (vap->va_type == VCHR || vap->va_type == VBLK) {
1567 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1568 *tl++ = txdr_unsigned(major(vap->va_rdev));
1569 *tl = txdr_unsigned(minor(vap->va_rdev));
1570 }
1571 } else
1572 #endif
1573 {
1574 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1575 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1576 sp->sa_uid = nfs_xdrneg1;
1577 sp->sa_gid = nfs_xdrneg1;
1578 sp->sa_size = rdev;
1579 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1580 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1581 }
1582 nfsm_request(dnp, NFSPROC_MKNOD, cnp->cn_lwp, cnp->cn_cred);
1583 if (!error) {
1584 nfsm_mtofh(dvp, newvp, v3, gotvp);
1585 if (!gotvp) {
1586 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1587 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp, &np);
1588 if (!error)
1589 newvp = NFSTOV(np);
1590 }
1591 }
1592 #ifndef NFS_V2_ONLY
1593 if (v3)
1594 nfsm_wcc_data(dvp, wccflag, 0, !error);
1595 #endif
1596 nfsm_reqdone;
1597 if (error) {
1598 if (newvp)
1599 vput(newvp);
1600 } else {
1601 if (cnp->cn_flags & MAKEENTRY)
1602 nfs_cache_enter(dvp, newvp, cnp);
1603 *vpp = newvp;
1604 }
1605 PNBUF_PUT(cnp->cn_pnbuf);
1606 VTONFS(dvp)->n_flag |= NMODIFIED;
1607 if (!wccflag)
1608 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1609 vput(dvp);
1610 return (error);
1611 }
1612
1613 /*
1614 * nfs mknod vop
1615 * just call nfs_mknodrpc() to do the work.
1616 */
1617 /* ARGSUSED */
1618 int
1619 nfs_mknod(v)
1620 void *v;
1621 {
1622 struct vop_mknod_args /* {
1623 struct vnode *a_dvp;
1624 struct vnode **a_vpp;
1625 struct componentname *a_cnp;
1626 struct vattr *a_vap;
1627 } */ *ap = v;
1628 struct vnode *dvp = ap->a_dvp;
1629 struct componentname *cnp = ap->a_cnp;
1630 int error;
1631
1632 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
1633 VN_KNOTE(dvp, NOTE_WRITE);
1634 if (error == 0 || error == EEXIST)
1635 cache_purge1(dvp, cnp, 0);
1636 return (error);
1637 }
1638
1639 #ifndef NFS_V2_ONLY
1640 static u_long create_verf;
1641 #endif
1642 /*
1643 * nfs file create call
1644 */
1645 int
1646 nfs_create(v)
1647 void *v;
1648 {
1649 struct vop_create_args /* {
1650 struct vnode *a_dvp;
1651 struct vnode **a_vpp;
1652 struct componentname *a_cnp;
1653 struct vattr *a_vap;
1654 } */ *ap = v;
1655 struct vnode *dvp = ap->a_dvp;
1656 struct vattr *vap = ap->a_vap;
1657 struct componentname *cnp = ap->a_cnp;
1658 struct nfsv2_sattr *sp;
1659 u_int32_t *tl;
1660 caddr_t cp;
1661 int32_t t1, t2;
1662 struct nfsnode *dnp, *np = (struct nfsnode *)0;
1663 struct vnode *newvp = (struct vnode *)0;
1664 caddr_t bpos, dpos, cp2;
1665 int error, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
1666 struct mbuf *mreq, *mrep, *md, *mb;
1667 const int v3 = NFS_ISV3(dvp);
1668
1669 /*
1670 * Oops, not for me..
1671 */
1672 if (vap->va_type == VSOCK)
1673 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1674
1675 KASSERT(vap->va_type == VREG);
1676
1677 #ifdef VA_EXCLUSIVE
1678 if (vap->va_vaflags & VA_EXCLUSIVE)
1679 fmode |= O_EXCL;
1680 #endif
1681 again:
1682 error = 0;
1683 nfsstats.rpccnt[NFSPROC_CREATE]++;
1684 dnp = VTONFS(dvp);
1685 nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1686 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1687 nfsm_fhtom(dnp, v3);
1688 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1689 #ifndef NFS_V2_ONLY
1690 if (v3) {
1691 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1692 if (fmode & O_EXCL) {
1693 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1694 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1695 #ifdef INET
1696 if (TAILQ_FIRST(&in_ifaddrhead))
1697 *tl++ = TAILQ_FIRST(&in_ifaddrhead)->
1698 ia_addr.sin_addr.s_addr;
1699 else
1700 *tl++ = create_verf;
1701 #else
1702 *tl++ = create_verf;
1703 #endif
1704 *tl = ++create_verf;
1705 } else {
1706 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
1707 nfsm_v3attrbuild(vap, FALSE);
1708 }
1709 } else
1710 #endif
1711 {
1712 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1713 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1714 sp->sa_uid = nfs_xdrneg1;
1715 sp->sa_gid = nfs_xdrneg1;
1716 sp->sa_size = 0;
1717 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1718 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1719 }
1720 nfsm_request(dnp, NFSPROC_CREATE, cnp->cn_lwp, cnp->cn_cred);
1721 if (!error) {
1722 nfsm_mtofh(dvp, newvp, v3, gotvp);
1723 if (!gotvp) {
1724 error = nfs_lookitup(dvp, cnp->cn_nameptr,
1725 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp, &np);
1726 if (!error)
1727 newvp = NFSTOV(np);
1728 }
1729 }
1730 #ifndef NFS_V2_ONLY
1731 if (v3)
1732 nfsm_wcc_data(dvp, wccflag, 0, !error);
1733 #endif
1734 nfsm_reqdone;
1735 if (error) {
1736 /*
1737 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
1738 */
1739 if (v3 && (fmode & O_EXCL) && error == ENOTSUP) {
1740 fmode &= ~O_EXCL;
1741 goto again;
1742 }
1743 } else if (v3 && (fmode & O_EXCL)) {
1744 struct timespec ts;
1745
1746 getnanotime(&ts);
1747
1748 /*
1749 * make sure that we'll update timestamps as
1750 * most server implementations use them to store
1751 * the create verifier.
1752 *
1753 * XXX it's better to use TOSERVER always.
1754 */
1755
1756 if (vap->va_atime.tv_sec == VNOVAL)
1757 vap->va_atime = ts;
1758 if (vap->va_mtime.tv_sec == VNOVAL)
1759 vap->va_mtime = ts;
1760
1761 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_lwp);
1762 }
1763 if (error == 0) {
1764 if (cnp->cn_flags & MAKEENTRY)
1765 nfs_cache_enter(dvp, newvp, cnp);
1766 else
1767 cache_purge1(dvp, cnp, 0);
1768 *ap->a_vpp = newvp;
1769 } else {
1770 if (newvp)
1771 vput(newvp);
1772 if (error == EEXIST)
1773 cache_purge1(dvp, cnp, 0);
1774 }
1775 PNBUF_PUT(cnp->cn_pnbuf);
1776 VTONFS(dvp)->n_flag |= NMODIFIED;
1777 if (!wccflag)
1778 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1779 VN_KNOTE(ap->a_dvp, NOTE_WRITE);
1780 vput(dvp);
1781 return (error);
1782 }
1783
1784 /*
1785 * nfs file remove call
1786 * To try and make nfs semantics closer to ufs semantics, a file that has
1787 * other processes using the vnode is renamed instead of removed and then
1788 * removed later on the last close.
1789 * - If v_usecount > 1
1790 * If a rename is not already in the works
1791 * call nfs_sillyrename() to set it up
1792 * else
1793 * do the remove rpc
1794 */
1795 int
1796 nfs_remove(v)
1797 void *v;
1798 {
1799 struct vop_remove_args /* {
1800 struct vnodeop_desc *a_desc;
1801 struct vnode * a_dvp;
1802 struct vnode * a_vp;
1803 struct componentname * a_cnp;
1804 } */ *ap = v;
1805 struct vnode *vp = ap->a_vp;
1806 struct vnode *dvp = ap->a_dvp;
1807 struct componentname *cnp = ap->a_cnp;
1808 struct nfsnode *np = VTONFS(vp);
1809 int error = 0;
1810 struct vattr vattr;
1811
1812 #ifdef DIAGNOSTIC
1813 if ((cnp->cn_flags & HASBUF) == 0)
1814 panic("nfs_remove: no name");
1815 if (vp->v_usecount < 1)
1816 panic("nfs_remove: bad v_usecount");
1817 #endif
1818 if (vp->v_type == VDIR)
1819 error = EPERM;
1820 else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1821 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_lwp) == 0 &&
1822 vattr.va_nlink > 1)) {
1823 /*
1824 * Purge the name cache so that the chance of a lookup for
1825 * the name succeeding while the remove is in progress is
1826 * minimized. Without node locking it can still happen, such
1827 * that an I/O op returns ESTALE, but that is no worse than
1828 * what happens when another host removes the file.
1829 */
1830 cache_purge(vp);
1831 /*
1832 * throw away biocache buffers, mainly to avoid
1833 * unnecessary delayed writes later.
1834 */
1835 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_lwp, 1);
1836 /* Do the rpc */
1837 if (error != EINTR)
1838 error = nfs_removerpc(dvp, cnp->cn_nameptr,
1839 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp);
1840 } else if (!np->n_sillyrename)
1841 error = nfs_sillyrename(dvp, vp, cnp, FALSE);
1842 PNBUF_PUT(cnp->cn_pnbuf);
1843 if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
1844 vattr.va_nlink == 1) {
1845 np->n_flag |= NREMOVED;
1846 }
1847 NFS_INVALIDATE_ATTRCACHE(np);
1848 VN_KNOTE(vp, NOTE_DELETE);
1849 VN_KNOTE(dvp, NOTE_WRITE);
1850 if (dvp == vp)
1851 vrele(vp);
1852 else
1853 vput(vp);
1854 vput(dvp);
1855 return (error);
1856 }
1857
1858 /*
1859 * nfs file remove rpc called from nfs_inactive
1860 */
1861 int
1862 nfs_removeit(sp)
1863 struct sillyrename *sp;
1864 {
1865
1866 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1867 (struct lwp *)0));
1868 }
1869
1870 /*
1871 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1872 */
1873 int
1874 nfs_removerpc(dvp, name, namelen, cred, l)
1875 struct vnode *dvp;
1876 const char *name;
1877 int namelen;
1878 kauth_cred_t cred;
1879 struct lwp *l;
1880 {
1881 u_int32_t *tl;
1882 caddr_t cp;
1883 #ifndef NFS_V2_ONLY
1884 int32_t t1;
1885 caddr_t cp2;
1886 #endif
1887 int32_t t2;
1888 caddr_t bpos, dpos;
1889 int error = 0, wccflag = NFSV3_WCCRATTR;
1890 struct mbuf *mreq, *mrep, *md, *mb;
1891 const int v3 = NFS_ISV3(dvp);
1892 int rexmit = 0;
1893 struct nfsnode *dnp = VTONFS(dvp);
1894
1895 nfsstats.rpccnt[NFSPROC_REMOVE]++;
1896 nfsm_reqhead(dnp, NFSPROC_REMOVE,
1897 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1898 nfsm_fhtom(dnp, v3);
1899 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1900 nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
1901 #ifndef NFS_V2_ONLY
1902 if (v3)
1903 nfsm_wcc_data(dvp, wccflag, 0, !error);
1904 #endif
1905 nfsm_reqdone;
1906 VTONFS(dvp)->n_flag |= NMODIFIED;
1907 if (!wccflag)
1908 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1909 /*
1910 * Kludge City: If the first reply to the remove rpc is lost,
1911 * the reply to the retransmitted request will be ENOENT
1912 * since the file was in fact removed.
1913 * Therefore, we cheat and return success.
1914 */
1915 if (rexmit && error == ENOENT)
1916 error = 0;
1917 return (error);
1918 }
1919
1920 /*
1921 * nfs file rename call
1922 */
1923 int
1924 nfs_rename(v)
1925 void *v;
1926 {
1927 struct vop_rename_args /* {
1928 struct vnode *a_fdvp;
1929 struct vnode *a_fvp;
1930 struct componentname *a_fcnp;
1931 struct vnode *a_tdvp;
1932 struct vnode *a_tvp;
1933 struct componentname *a_tcnp;
1934 } */ *ap = v;
1935 struct vnode *fvp = ap->a_fvp;
1936 struct vnode *tvp = ap->a_tvp;
1937 struct vnode *fdvp = ap->a_fdvp;
1938 struct vnode *tdvp = ap->a_tdvp;
1939 struct componentname *tcnp = ap->a_tcnp;
1940 struct componentname *fcnp = ap->a_fcnp;
1941 int error;
1942
1943 #ifdef DIAGNOSTIC
1944 if ((tcnp->cn_flags & HASBUF) == 0 ||
1945 (fcnp->cn_flags & HASBUF) == 0)
1946 panic("nfs_rename: no name");
1947 #endif
1948 /* Check for cross-device rename */
1949 if ((fvp->v_mount != tdvp->v_mount) ||
1950 (tvp && (fvp->v_mount != tvp->v_mount))) {
1951 error = EXDEV;
1952 goto out;
1953 }
1954
1955 /*
1956 * If the tvp exists and is in use, sillyrename it before doing the
1957 * rename of the new file over it.
1958 *
1959 * Have sillyrename use link instead of rename if possible,
1960 * so that we don't lose the file if the rename fails, and so
1961 * that there's no window when the "to" file doesn't exist.
1962 */
1963 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1964 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, TRUE)) {
1965 VN_KNOTE(tvp, NOTE_DELETE);
1966 vput(tvp);
1967 tvp = NULL;
1968 }
1969
1970 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1971 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1972 tcnp->cn_lwp);
1973
1974 VN_KNOTE(fdvp, NOTE_WRITE);
1975 VN_KNOTE(tdvp, NOTE_WRITE);
1976 if (error == 0 || error == EEXIST) {
1977 if (fvp->v_type == VDIR)
1978 cache_purge(fvp);
1979 else
1980 cache_purge1(fdvp, fcnp, 0);
1981 if (tvp != NULL && tvp->v_type == VDIR)
1982 cache_purge(tvp);
1983 else
1984 cache_purge1(tdvp, tcnp, 0);
1985 }
1986 out:
1987 if (tdvp == tvp)
1988 vrele(tdvp);
1989 else
1990 vput(tdvp);
1991 if (tvp)
1992 vput(tvp);
1993 vrele(fdvp);
1994 vrele(fvp);
1995 return (error);
1996 }
1997
1998 /*
1999	 * nfs file rename rpc called from nfs_sillyrename() below
2000 */
2001 int
2002 nfs_renameit(sdvp, scnp, sp)
2003 struct vnode *sdvp;
2004 struct componentname *scnp;
2005 struct sillyrename *sp;
2006 {
2007 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
2008 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_lwp));
2009 }
2010
2011 /*
2012 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
2013 */
2014 int
2015 nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, l)
2016 struct vnode *fdvp;
2017 const char *fnameptr;
2018 int fnamelen;
2019 struct vnode *tdvp;
2020 const char *tnameptr;
2021 int tnamelen;
2022 kauth_cred_t cred;
2023 struct lwp *l;
2024 {
2025 u_int32_t *tl;
2026 caddr_t cp;
2027 #ifndef NFS_V2_ONLY
2028 int32_t t1;
2029 caddr_t cp2;
2030 #endif
2031 int32_t t2;
2032 caddr_t bpos, dpos;
2033 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
2034 struct mbuf *mreq, *mrep, *md, *mb;
2035 const int v3 = NFS_ISV3(fdvp);
2036 int rexmit = 0;
2037 struct nfsnode *fdnp = VTONFS(fdvp);
2038
2039 nfsstats.rpccnt[NFSPROC_RENAME]++;
2040 nfsm_reqhead(fdnp, NFSPROC_RENAME,
2041 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
2042 nfsm_rndup(tnamelen));
2043 nfsm_fhtom(fdnp, v3);
2044 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
2045 nfsm_fhtom(VTONFS(tdvp), v3);
2046 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
2047 nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
2048 #ifndef NFS_V2_ONLY
2049 if (v3) {
2050 nfsm_wcc_data(fdvp, fwccflag, 0, !error);
2051 nfsm_wcc_data(tdvp, twccflag, 0, !error);
2052 }
2053 #endif
2054 nfsm_reqdone;
2055 VTONFS(fdvp)->n_flag |= NMODIFIED;
2056 VTONFS(tdvp)->n_flag |= NMODIFIED;
2057 if (!fwccflag)
2058 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
2059 if (!twccflag)
2060 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
2061 /*
2062 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2063 */
2064 if (rexmit && error == ENOENT)
2065 error = 0;
2066 return (error);
2067 }
2068
2069 /*
2070 * NFS link RPC, called from nfs_link.
2071 * Assumes dvp and vp locked, and leaves them that way.
2072 */
2073
2074 static int
2075 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name,
2076 size_t namelen, kauth_cred_t cred, struct lwp *l)
2077 {
2078 u_int32_t *tl;
2079 caddr_t cp;
2080 #ifndef NFS_V2_ONLY
2081 int32_t t1;
2082 caddr_t cp2;
2083 #endif
2084 int32_t t2;
2085 caddr_t bpos, dpos;
2086 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
2087 struct mbuf *mreq, *mrep, *md, *mb;
2088 const int v3 = NFS_ISV3(dvp);
2089 int rexmit = 0;
2090 struct nfsnode *np = VTONFS(vp);
2091
2092 nfsstats.rpccnt[NFSPROC_LINK]++;
2093 nfsm_reqhead(np, NFSPROC_LINK,
2094 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen));
2095 nfsm_fhtom(np, v3);
2096 nfsm_fhtom(VTONFS(dvp), v3);
2097 nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
2098 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit);
2099 #ifndef NFS_V2_ONLY
2100 if (v3) {
2101 nfsm_postop_attr(vp, attrflag, 0);
2102 nfsm_wcc_data(dvp, wccflag, 0, !error);
2103 }
2104 #endif
2105 nfsm_reqdone;
2106
2107 VTONFS(dvp)->n_flag |= NMODIFIED;
2108 if (!attrflag)
2109 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
2110 if (!wccflag)
2111 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2112
2113 /*
2114 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2115 */
2116 if (rexmit && error == EEXIST)
2117 error = 0;
2118
2119 return error;
2120 }
2121
2122 /*
2123 * nfs hard link create call
2124 */
2125 int
2126 nfs_link(v)
2127 void *v;
2128 {
2129 struct vop_link_args /* {
2130 struct vnode *a_dvp;
2131 struct vnode *a_vp;
2132 struct componentname *a_cnp;
2133 } */ *ap = v;
2134 struct vnode *vp = ap->a_vp;
2135 struct vnode *dvp = ap->a_dvp;
2136 struct componentname *cnp = ap->a_cnp;
2137 int error = 0;
2138
2139 if (dvp->v_mount != vp->v_mount) {
2140 VOP_ABORTOP(dvp, cnp);
2141 vput(dvp);
2142 return (EXDEV);
2143 }
2144 if (dvp != vp) {
2145 error = vn_lock(vp, LK_EXCLUSIVE);
2146 if (error != 0) {
2147 VOP_ABORTOP(dvp, cnp);
2148 vput(dvp);
2149 return error;
2150 }
2151 }
2152
2153 /*
2154 * Push all writes to the server, so that the attribute cache
2155 * doesn't get "out of sync" with the server.
2156 * XXX There should be a better way!
2157 */
2158 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0, cnp->cn_lwp);
2159
2160 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2161 cnp->cn_cred, cnp->cn_lwp);
2162
2163 if (error == 0)
2164 cache_purge1(dvp, cnp, 0);
2165 PNBUF_PUT(cnp->cn_pnbuf);
2166 if (dvp != vp)
2167 VOP_UNLOCK(vp, 0);
2168 VN_KNOTE(vp, NOTE_LINK);
2169 VN_KNOTE(dvp, NOTE_WRITE);
2170 vput(dvp);
2171 return (error);
2172 }
2173
2174 /*
2175 * nfs symbolic link create call
2176 */
2177 int
2178 nfs_symlink(v)
2179 void *v;
2180 {
2181 struct vop_symlink_args /* {
2182 struct vnode *a_dvp;
2183 struct vnode **a_vpp;
2184 struct componentname *a_cnp;
2185 struct vattr *a_vap;
2186 char *a_target;
2187 } */ *ap = v;
2188 struct vnode *dvp = ap->a_dvp;
2189 struct vattr *vap = ap->a_vap;
2190 struct componentname *cnp = ap->a_cnp;
2191 struct nfsv2_sattr *sp;
2192 u_int32_t *tl;
2193 caddr_t cp;
2194 int32_t t1, t2;
2195 caddr_t bpos, dpos, cp2;
2196 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2197 struct mbuf *mreq, *mrep, *md, *mb;
2198 struct vnode *newvp = (struct vnode *)0;
2199 const int v3 = NFS_ISV3(dvp);
2200 int rexmit = 0;
2201 struct nfsnode *dnp = VTONFS(dvp);
2202
2203 *ap->a_vpp = NULL;
2204 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2205 slen = strlen(ap->a_target);
2206 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2207 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2208 nfsm_fhtom(dnp, v3);
2209 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2210	#ifndef NFS_V2_ONLY
2211 if (v3)
2212 nfsm_v3attrbuild(vap, FALSE);
2213 #endif
2214 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2215	#ifndef NFS_V2_ONLY
2216 if (!v3) {
2217 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2218 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2219 sp->sa_uid = nfs_xdrneg1;
2220 sp->sa_gid = nfs_xdrneg1;
2221 sp->sa_size = nfs_xdrneg1;
2222 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2223 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2224 }
2225 #endif
2226 nfsm_request1(dnp, NFSPROC_SYMLINK, cnp->cn_lwp, cnp->cn_cred,
2227 &rexmit);
2228	#ifndef NFS_V2_ONLY
2229 if (v3) {
2230 if (!error)
2231 nfsm_mtofh(dvp, newvp, v3, gotvp);
2232 nfsm_wcc_data(dvp, wccflag, 0, !error);
2233 }
2234 #endif
2235 nfsm_reqdone;
2236 /*
2237 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2238 */
2239 if (rexmit && error == EEXIST)
2240 error = 0;
2241 if (error == 0 || error == EEXIST)
2242 cache_purge1(dvp, cnp, 0);
2243 if (error == 0 && newvp == NULL) {
2244 struct nfsnode *np = NULL;
2245
2246 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2247 cnp->cn_cred, cnp->cn_lwp, &np);
2248 if (error == 0)
2249 newvp = NFSTOV(np);
2250 }
2251 if (error) {
2252 if (newvp != NULL)
2253 vput(newvp);
2254 } else {
2255 *ap->a_vpp = newvp;
2256 }
2257 PNBUF_PUT(cnp->cn_pnbuf);
2258 VTONFS(dvp)->n_flag |= NMODIFIED;
2259 if (!wccflag)
2260 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2261 VN_KNOTE(dvp, NOTE_WRITE);
2262 vput(dvp);
2263 return (error);
2264 }
2265
2266 /*
2267 * nfs make dir call
2268 */
2269 int
2270 nfs_mkdir(v)
2271 void *v;
2272 {
2273 struct vop_mkdir_args /* {
2274 struct vnode *a_dvp;
2275 struct vnode **a_vpp;
2276 struct componentname *a_cnp;
2277 struct vattr *a_vap;
2278 } */ *ap = v;
2279 struct vnode *dvp = ap->a_dvp;
2280 struct vattr *vap = ap->a_vap;
2281 struct componentname *cnp = ap->a_cnp;
2282 struct nfsv2_sattr *sp;
2283 u_int32_t *tl;
2284 caddr_t cp;
2285 int32_t t1, t2;
2286 int len;
2287 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0;
2288 struct vnode *newvp = (struct vnode *)0;
2289 caddr_t bpos, dpos, cp2;
2290 int error = 0, wccflag = NFSV3_WCCRATTR;
2291 int gotvp = 0;
2292 int rexmit = 0;
2293 struct mbuf *mreq, *mrep, *md, *mb;
2294 const int v3 = NFS_ISV3(dvp);
2295
2296 len = cnp->cn_namelen;
2297 nfsstats.rpccnt[NFSPROC_MKDIR]++;
2298 nfsm_reqhead(dnp, NFSPROC_MKDIR,
2299 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2300 nfsm_fhtom(dnp, v3);
2301 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2302 #ifndef NFS_V2_ONLY
2303 if (v3) {
2304 nfsm_v3attrbuild(vap, FALSE);
2305 } else
2306 #endif
2307 {
2308 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2309 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2310 sp->sa_uid = nfs_xdrneg1;
2311 sp->sa_gid = nfs_xdrneg1;
2312 sp->sa_size = nfs_xdrneg1;
2313 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2314 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2315 }
2316 nfsm_request1(dnp, NFSPROC_MKDIR, cnp->cn_lwp, cnp->cn_cred, &rexmit);
2317 if (!error)
2318 nfsm_mtofh(dvp, newvp, v3, gotvp);
2319 if (v3)
2320 nfsm_wcc_data(dvp, wccflag, 0, !error);
2321 nfsm_reqdone;
2322 VTONFS(dvp)->n_flag |= NMODIFIED;
2323 if (!wccflag)
2324 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2325 /*
2326	 * Kludge: Map EEXIST => 0, assuming that it is a reply to a retry,
2327	 * if we can succeed in looking up the directory.
2328 */
2329 if ((rexmit && error == EEXIST) || (!error && !gotvp)) {
2330 if (newvp) {
2331 vput(newvp);
2332 newvp = (struct vnode *)0;
2333 }
2334 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
2335 cnp->cn_lwp, &np);
2336 if (!error) {
2337 newvp = NFSTOV(np);
2338 if (newvp->v_type != VDIR || newvp == dvp)
2339 error = EEXIST;
2340 }
2341 }
2342 if (error) {
2343 if (newvp) {
2344 if (dvp != newvp)
2345 vput(newvp);
2346 else
2347 vrele(newvp);
2348 }
2349 } else {
2350 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2351 if (cnp->cn_flags & MAKEENTRY)
2352 nfs_cache_enter(dvp, newvp, cnp);
2353 *ap->a_vpp = newvp;
2354 }
2355 PNBUF_PUT(cnp->cn_pnbuf);
2356 vput(dvp);
2357 return (error);
2358 }
2359
2360 /*
2361 * nfs remove directory call
2362 */
2363 int
2364 nfs_rmdir(v)
2365 void *v;
2366 {
2367 struct vop_rmdir_args /* {
2368 struct vnode *a_dvp;
2369 struct vnode *a_vp;
2370 struct componentname *a_cnp;
2371 } */ *ap = v;
2372 struct vnode *vp = ap->a_vp;
2373 struct vnode *dvp = ap->a_dvp;
2374 struct componentname *cnp = ap->a_cnp;
2375 u_int32_t *tl;
2376 caddr_t cp;
2377 #ifndef NFS_V2_ONLY
2378 int32_t t1;
2379 caddr_t cp2;
2380 #endif
2381 int32_t t2;
2382 caddr_t bpos, dpos;
2383 int error = 0, wccflag = NFSV3_WCCRATTR;
2384 int rexmit = 0;
2385 struct mbuf *mreq, *mrep, *md, *mb;
2386 const int v3 = NFS_ISV3(dvp);
2387 struct nfsnode *dnp;
2388
2389 if (dvp == vp) {
2390 vrele(dvp);
2391 vput(dvp);
2392 PNBUF_PUT(cnp->cn_pnbuf);
2393 return (EINVAL);
2394 }
2395 nfsstats.rpccnt[NFSPROC_RMDIR]++;
2396 dnp = VTONFS(dvp);
2397 nfsm_reqhead(dnp, NFSPROC_RMDIR,
2398 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2399 nfsm_fhtom(dnp, v3);
2400 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2401 nfsm_request1(dnp, NFSPROC_RMDIR, cnp->cn_lwp, cnp->cn_cred, &rexmit);
2402 #ifndef NFS_V2_ONLY
2403 if (v3)
2404 nfsm_wcc_data(dvp, wccflag, 0, !error);
2405 #endif
2406 nfsm_reqdone;
2407 PNBUF_PUT(cnp->cn_pnbuf);
2408 VTONFS(dvp)->n_flag |= NMODIFIED;
2409 if (!wccflag)
2410 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2411 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2412 VN_KNOTE(vp, NOTE_DELETE);
2413 cache_purge(vp);
2414 vput(vp);
2415 vput(dvp);
2416 /*
2417	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
2418 */
2419 if (rexmit && error == ENOENT)
2420 error = 0;
2421 return (error);
2422 }
2423
2424 /*
2425 * nfs readdir call
2426 */
2427 int
2428 nfs_readdir(v)
2429 void *v;
2430 {
2431 struct vop_readdir_args /* {
2432 struct vnode *a_vp;
2433 struct uio *a_uio;
2434 kauth_cred_t a_cred;
2435 int *a_eofflag;
2436 off_t **a_cookies;
2437 int *a_ncookies;
2438 } */ *ap = v;
2439 struct vnode *vp = ap->a_vp;
2440 struct uio *uio = ap->a_uio;
2441 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2442 char *base = uio->uio_iov->iov_base;
2443 int tresid, error;
2444 size_t count, lost;
2445 struct dirent *dp;
2446 off_t *cookies = NULL;
2447 int ncookies = 0, nc;
2448
2449 if (vp->v_type != VDIR)
2450 return (EPERM);
2451
2452 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1);
2453 count = uio->uio_resid - lost;
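	/*
	 * Illustration (hypothetical numbers, assuming NFS_DIRFRAGSIZ is
	 * 512): a caller-supplied uio_resid of 1300 bytes gives
	 * lost == 276 and count == 1024, i.e. we transfer whole directory
	 * fragments only and hand the remainder back to uio_resid before
	 * returning.
	 */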
2454 if (count <= 0)
2455 return (EINVAL);
2456
2457 /*
2458 * Call nfs_bioread() to do the real work.
2459 */
2460 tresid = uio->uio_resid = count;
2461 error = nfs_bioread(vp, uio, 0, ap->a_cred,
2462 ap->a_cookies ? NFSBIO_CACHECOOKIES : 0);
2463
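	/*
	 * count / 16 is presumably a safe upper bound on the number of
	 * directory entries the buffer can hold: every record produced by
	 * the readdir code is larger than 16 bytes, so the cookie array
	 * allocated here cannot be overrun by the copy loop below.
	 */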
2464 if (!error && ap->a_cookies) {
2465 ncookies = count / 16;
2466 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
2467 *ap->a_cookies = cookies;
2468 }
2469
2470 if (!error && uio->uio_resid == tresid) {
2471 uio->uio_resid += lost;
2472 nfsstats.direofcache_misses++;
2473 if (ap->a_cookies)
2474 *ap->a_ncookies = 0;
2475 *ap->a_eofflag = 1;
2476 return (0);
2477 }
2478
2479 if (!error && ap->a_cookies) {
2480 /*
2481 * Only the NFS server and emulations use cookies, and they
2482 * load the directory block into system space, so we can
2483 * just look at it directly.
2484 */
2485 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
2486 uio->uio_iovcnt != 1)
2487 panic("nfs_readdir: lost in space");
2488 for (nc = 0; ncookies-- &&
2489		    base < (char *)uio->uio_iov->iov_base; nc++) {
2490 dp = (struct dirent *) base;
2491 if (dp->d_reclen == 0)
2492 break;
2493 if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
2494 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp);
2495 else
2496 *(cookies++) = NFS_GETCOOKIE(dp);
2497 base += dp->d_reclen;
2498 }
2499 uio->uio_resid +=
2500 ((caddr_t)uio->uio_iov->iov_base - base);
2501 uio->uio_iov->iov_len +=
2502 ((caddr_t)uio->uio_iov->iov_base - base);
2503 uio->uio_iov->iov_base = base;
2504 *ap->a_ncookies = nc;
2505 }
2506
2507 uio->uio_resid += lost;
2508 *ap->a_eofflag = 0;
2509 return (error);
2510 }
2511
2512 /*
2513 * Readdir rpc call.
2514 * Called from below the buffer cache by nfs_doio().
2515 */
2516 int
2517 nfs_readdirrpc(vp, uiop, cred)
2518 struct vnode *vp;
2519 struct uio *uiop;
2520 kauth_cred_t cred;
2521 {
2522 int len, left;
2523 struct dirent *dp = NULL;
2524 u_int32_t *tl;
2525 caddr_t cp;
2526 int32_t t1, t2;
2527 caddr_t bpos, dpos, cp2;
2528 struct mbuf *mreq, *mrep, *md, *mb;
2529 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2530 struct nfsnode *dnp = VTONFS(vp);
2531 u_quad_t fileno;
2532 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1;
2533 #ifndef NFS_V2_ONLY
2534 int attrflag;
2535 #endif
2536 int nrpcs = 0, reclen;
2537 const int v3 = NFS_ISV3(vp);
2538
2539 #ifdef DIAGNOSTIC
2540 /*
2541	 * Should be called from the buffer cache, so only
2542	 * NFS_DIRBLKSIZ worth of data will be requested.
2543 */
2544 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2545 panic("nfs readdirrpc bad uio");
2546 #endif
2547
2548 /*
2549 * Loop around doing readdir rpc's of size nm_readdirsize
2550 * truncated to a multiple of NFS_DIRFRAGSIZ.
2551	 * The stopping criterion is EOF or a full buffer.
2552 */
2553 while (more_dirs && bigenough) {
2554 /*
2555 * Heuristic: don't bother to do another RPC to further
2556 * fill up this block if there is not much room left. (< 50%
2557 * of the readdir RPC size). This wastes some buffer space
2558 * but can save up to 50% in RPC calls.
2559 */
2560 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2561 bigenough = 0;
2562 break;
2563 }
2564 nfsstats.rpccnt[NFSPROC_READDIR]++;
2565 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) +
2566 NFSX_READDIR(v3));
2567 nfsm_fhtom(dnp, v3);
2568 #ifndef NFS_V2_ONLY
2569 if (v3) {
2570 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2571 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2572 txdr_swapcookie3(uiop->uio_offset, tl);
2573 } else {
2574 txdr_cookie3(uiop->uio_offset, tl);
2575 }
2576 tl += 2;
2577 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2578 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2579 } else
2580 #endif
2581 {
2582 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2583 *tl++ = txdr_unsigned(uiop->uio_offset);
2584 }
2585 *tl = txdr_unsigned(nmp->nm_readdirsize);
2586 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred);
2587 nrpcs++;
2588 #ifndef NFS_V2_ONLY
2589 if (v3) {
2590 nfsm_postop_attr(vp, attrflag, 0);
2591 if (!error) {
2592 nfsm_dissect(tl, u_int32_t *,
2593 2 * NFSX_UNSIGNED);
2594 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2595 dnp->n_cookieverf.nfsuquad[1] = *tl;
2596 } else {
2597 m_freem(mrep);
2598 goto nfsmout;
2599 }
2600 }
2601 #endif
2602 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2603 more_dirs = fxdr_unsigned(int, *tl);
2604
2605 /* loop thru the dir entries, doctoring them to 4bsd form */
2606 while (more_dirs && bigenough) {
2607 #ifndef NFS_V2_ONLY
2608 if (v3) {
2609 nfsm_dissect(tl, u_int32_t *,
2610 3 * NFSX_UNSIGNED);
2611 fileno = fxdr_hyper(tl);
2612 len = fxdr_unsigned(int, *(tl + 2));
2613 } else
2614 #endif
2615 {
2616 nfsm_dissect(tl, u_int32_t *,
2617 2 * NFSX_UNSIGNED);
2618 fileno = fxdr_unsigned(u_quad_t, *tl++);
2619 len = fxdr_unsigned(int, *tl);
2620 }
2621 if (len <= 0 || len > NFS_MAXNAMLEN) {
2622 error = EBADRPC;
2623 m_freem(mrep);
2624 goto nfsmout;
2625 }
2626 /* for cookie stashing */
2627 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
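			/*
			 * Rough record layout: a normal struct dirent
			 * (header, name, padding) followed by space where
			 * NFS_STASHCOOKIE() stores the 64-bit NFS directory
			 * cookie, which nfs_readdir() later reads back with
			 * NFS_GETCOOKIE()/NFS_GETCOOKIE32().
			 */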
2628 left = NFS_DIRFRAGSIZ - blksiz;
2629 if (reclen > left) {
2630 memset(uiop->uio_iov->iov_base, 0, left);
2631 dp->d_reclen += left;
2632 UIO_ADVANCE(uiop, left);
2633 blksiz = 0;
2634 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2635 }
2636 if (reclen > uiop->uio_resid)
2637 bigenough = 0;
2638 if (bigenough) {
2639 int tlen;
2640
2641 dp = (struct dirent *)uiop->uio_iov->iov_base;
2642 dp->d_fileno = fileno;
2643 dp->d_namlen = len;
2644 dp->d_reclen = reclen;
2645 dp->d_type = DT_UNKNOWN;
2646 blksiz += reclen;
2647 if (blksiz == NFS_DIRFRAGSIZ)
2648 blksiz = 0;
2649 UIO_ADVANCE(uiop, DIRHDSIZ);
2650 nfsm_mtouio(uiop, len);
2651 tlen = reclen - (DIRHDSIZ + len);
2652 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2653 UIO_ADVANCE(uiop, tlen);
2654 } else
2655 nfsm_adv(nfsm_rndup(len));
2656 #ifndef NFS_V2_ONLY
2657 if (v3) {
2658 nfsm_dissect(tl, u_int32_t *,
2659 3 * NFSX_UNSIGNED);
2660 } else
2661 #endif
2662 {
2663 nfsm_dissect(tl, u_int32_t *,
2664 2 * NFSX_UNSIGNED);
2665 }
2666 if (bigenough) {
2667 #ifndef NFS_V2_ONLY
2668 if (v3) {
2669 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2670 uiop->uio_offset =
2671 fxdr_swapcookie3(tl);
2672 else
2673 uiop->uio_offset =
2674 fxdr_cookie3(tl);
2675 }
2676 else
2677 #endif
2678 {
2679 uiop->uio_offset =
2680 fxdr_unsigned(off_t, *tl);
2681 }
2682 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2683 }
2684 if (v3)
2685 tl += 2;
2686 else
2687 tl++;
2688 more_dirs = fxdr_unsigned(int, *tl);
2689 }
2690 /*
2691 * If at end of rpc data, get the eof boolean
2692 */
2693 if (!more_dirs) {
2694 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2695 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2696
2697 /*
2698 * kludge: if we got no entries, treat it as EOF.
2699			 * Some servers occasionally send a reply without any
2700			 * entries or the EOF flag.
2701			 * Although that might mean the server has a very long name,
2702			 * we can't handle such entries anyway.
2703 */
2704
2705 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2706 more_dirs = 0;
2707 }
2708 m_freem(mrep);
2709 }
2710 /*
2711	 * Fill last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2712 * by increasing d_reclen for the last record.
2713 */
2714 if (blksiz > 0) {
2715 left = NFS_DIRFRAGSIZ - blksiz;
2716 memset(uiop->uio_iov->iov_base, 0, left);
2717 dp->d_reclen += left;
2718 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2719 UIO_ADVANCE(uiop, left);
2720 }
2721
2722 /*
2723 * We are now either at the end of the directory or have filled the
2724 * block.
2725 */
2726 if (bigenough) {
2727 dnp->n_direofoffset = uiop->uio_offset;
2728 dnp->n_flag |= NEOFVALID;
2729 }
2730 nfsmout:
2731 return (error);
2732 }
2733
2734 #ifndef NFS_V2_ONLY
2735 /*
2736 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2737 */
2738 int
2739 nfs_readdirplusrpc(vp, uiop, cred)
2740 struct vnode *vp;
2741 struct uio *uiop;
2742 kauth_cred_t cred;
2743 {
2744 int len, left;
2745 struct dirent *dp = NULL;
2746 u_int32_t *tl;
2747 caddr_t cp;
2748 int32_t t1, t2;
2749 struct vnode *newvp;
2750 caddr_t bpos, dpos, cp2;
2751 struct mbuf *mreq, *mrep, *md, *mb;
2752 struct nameidata nami, *ndp = &nami;
2753 struct componentname *cnp = &ndp->ni_cnd;
2754 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2755 struct nfsnode *dnp = VTONFS(vp), *np;
2756 nfsfh_t *fhp;
2757 u_quad_t fileno;
2758 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2759 int attrflag, fhsize, nrpcs = 0, reclen;
2760 struct nfs_fattr fattr, *fp;
2761
2762 #ifdef DIAGNOSTIC
2763 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2764 panic("nfs readdirplusrpc bad uio");
2765 #endif
2766 ndp->ni_dvp = vp;
2767 newvp = NULLVP;
2768
2769 /*
2770 * Loop around doing readdir rpc's of size nm_readdirsize
2771 * truncated to a multiple of NFS_DIRFRAGSIZ.
2772	 * The stopping criterion is EOF or a full buffer.
2773 */
2774 while (more_dirs && bigenough) {
2775 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2776 bigenough = 0;
2777 break;
2778 }
2779 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2780 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS,
2781 NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2782 nfsm_fhtom(dnp, 1);
2783 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2784 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2785 txdr_swapcookie3(uiop->uio_offset, tl);
2786 } else {
2787 txdr_cookie3(uiop->uio_offset, tl);
2788 }
2789 tl += 2;
2790 *tl++ = dnp->n_cookieverf.nfsuquad[0];
2791 *tl++ = dnp->n_cookieverf.nfsuquad[1];
2792 *tl++ = txdr_unsigned(nmp->nm_readdirsize);
2793 *tl = txdr_unsigned(nmp->nm_rsize);
2794 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred);
2795 nfsm_postop_attr(vp, attrflag, 0);
2796 if (error) {
2797 m_freem(mrep);
2798 goto nfsmout;
2799 }
2800 nrpcs++;
2801 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2802 dnp->n_cookieverf.nfsuquad[0] = *tl++;
2803 dnp->n_cookieverf.nfsuquad[1] = *tl++;
2804 more_dirs = fxdr_unsigned(int, *tl);
2805
2806 /* loop thru the dir entries, doctoring them to 4bsd form */
2807 while (more_dirs && bigenough) {
2808 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2809 fileno = fxdr_hyper(tl);
2810 len = fxdr_unsigned(int, *(tl + 2));
2811 if (len <= 0 || len > NFS_MAXNAMLEN) {
2812 error = EBADRPC;
2813 m_freem(mrep);
2814 goto nfsmout;
2815 }
2816 /* for cookie stashing */
2817 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2818 left = NFS_DIRFRAGSIZ - blksiz;
2819 if (reclen > left) {
2820 /*
2821 * DIRFRAGSIZ is aligned, no need to align
2822 * again here.
2823 */
2824 memset(uiop->uio_iov->iov_base, 0, left);
2825 dp->d_reclen += left;
2826 UIO_ADVANCE(uiop, left);
2827 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2828 blksiz = 0;
2829 }
2830 if (reclen > uiop->uio_resid)
2831 bigenough = 0;
2832 if (bigenough) {
2833 int tlen;
2834
2835 dp = (struct dirent *)uiop->uio_iov->iov_base;
2836 dp->d_fileno = fileno;
2837 dp->d_namlen = len;
2838 dp->d_reclen = reclen;
2839 dp->d_type = DT_UNKNOWN;
2840 blksiz += reclen;
2841 if (blksiz == NFS_DIRFRAGSIZ)
2842 blksiz = 0;
2843 UIO_ADVANCE(uiop, DIRHDSIZ);
2844 nfsm_mtouio(uiop, len);
2845 tlen = reclen - (DIRHDSIZ + len);
2846 (void)memset(uiop->uio_iov->iov_base, 0, tlen);
2847 UIO_ADVANCE(uiop, tlen);
2848 cnp->cn_nameptr = dp->d_name;
2849 cnp->cn_namelen = dp->d_namlen;
2850 } else
2851 nfsm_adv(nfsm_rndup(len));
2852 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2853 if (bigenough) {
2854 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2855 uiop->uio_offset =
2856 fxdr_swapcookie3(tl);
2857 else
2858 uiop->uio_offset =
2859 fxdr_cookie3(tl);
2860 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2861 }
2862 tl += 2;
2863
2864 /*
2865			 * Since the attributes come before the file handle
2866			 * (sigh), we save a copy of them here and apply them
2867			 * once we have a vnode for the file handle.
2868 */
2869 attrflag = fxdr_unsigned(int, *tl);
2870 if (attrflag) {
2871 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR);
2872 memcpy(&fattr, fp, NFSX_V3FATTR);
2873 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2874 doit = fxdr_unsigned(int, *tl);
2875 if (doit) {
2876 nfsm_getfh(fhp, fhsize, 1);
2877 if (NFS_CMPFH(dnp, fhp, fhsize)) {
2878 VREF(vp);
2879 newvp = vp;
2880 np = dnp;
2881 } else {
2882 error = nfs_nget1(vp->v_mount, fhp,
2883 fhsize, &np, LK_NOWAIT);
2884 if (!error)
2885 newvp = NFSTOV(np);
2886 }
2887 if (!error) {
2888 const char *xcp;
2889
2890 nfs_loadattrcache(&newvp, &fattr, 0, 0);
2891 if (bigenough) {
2892 dp->d_type =
2893 IFTODT(VTTOIF(np->n_vattr->va_type));
2894 if (cnp->cn_namelen <= NCHNAMLEN) {
2895 ndp->ni_vp = newvp;
2896 xcp = cnp->cn_nameptr +
2897 cnp->cn_namelen;
2898 cnp->cn_hash =
2899 namei_hash(cnp->cn_nameptr, &xcp);
2900 nfs_cache_enter(ndp->ni_dvp,
2901 ndp->ni_vp, cnp);
2902 }
2903 }
2904 }
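					/*
					 * A failure above (e.g. nfs_nget1()
					 * could not get the vnode without
					 * sleeping) only costs us a name
					 * cache entry; the directory entry
					 * itself has already been copied
					 * out, so forget the error.
					 */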
2905 error = 0;
2906 }
2907 } else {
2908 /* Just skip over the file handle */
2909 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2910 i = fxdr_unsigned(int, *tl);
2911 nfsm_adv(nfsm_rndup(i));
2912 }
2913 if (newvp != NULLVP) {
2914 if (newvp == vp)
2915 vrele(newvp);
2916 else
2917 vput(newvp);
2918 newvp = NULLVP;
2919 }
2920 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2921 more_dirs = fxdr_unsigned(int, *tl);
2922 }
2923 /*
2924 * If at end of rpc data, get the eof boolean
2925 */
2926 if (!more_dirs) {
2927 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2928 more_dirs = (fxdr_unsigned(int, *tl) == 0);
2929
2930 /*
2931 * kludge: see a comment in nfs_readdirrpc.
2932 */
2933
2934 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2935 more_dirs = 0;
2936 }
2937 m_freem(mrep);
2938 }
2939 /*
2940	 * Fill last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2941 * by increasing d_reclen for the last record.
2942 */
2943 if (blksiz > 0) {
2944 left = NFS_DIRFRAGSIZ - blksiz;
2945 memset(uiop->uio_iov->iov_base, 0, left);
2946 dp->d_reclen += left;
2947 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2948 UIO_ADVANCE(uiop, left);
2949 }
2950
2951 /*
2952 * We are now either at the end of the directory or have filled the
2953 * block.
2954 */
2955 if (bigenough) {
2956 dnp->n_direofoffset = uiop->uio_offset;
2957 dnp->n_flag |= NEOFVALID;
2958 }
2959 nfsmout:
2960 if (newvp != NULLVP) {
2961		if (newvp == vp)
2962 vrele(newvp);
2963 else
2964 vput(newvp);
2965 }
2966 return (error);
2967 }
2968 #endif
2969
2970 /*
2971	 * Silly rename. To make the stateless NFS filesystem look a little more
2972	 * like "ufs", a remove of an active vnode is translated into a rename to
2973	 * a funny looking filename that is removed by nfs_inactive on the
2974	 * nfsnode. There is the potential for another process on a different client
2975	 * to create the same funny name between the time the nfs_lookitup() fails
2976	 * and the nfs_rename() completes, but...
2977 */
2978 int
2979 nfs_sillyrename(dvp, vp, cnp, dolink)
2980 struct vnode *dvp, *vp;
2981 struct componentname *cnp;
2982 boolean_t dolink;
2983 {
2984 struct sillyrename *sp;
2985 struct nfsnode *np;
2986 int error;
2987 short pid;
2988
2989 cache_purge(dvp);
2990 np = VTONFS(vp);
2991	#ifdef DIAGNOSTIC
2992 if (vp->v_type == VDIR)
2993 panic("nfs: sillyrename dir");
2994 #endif
2995 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename),
2996 M_NFSREQ, M_WAITOK);
2997 sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2998 sp->s_dvp = dvp;
2999 VREF(dvp);
3000
3001 /* Fudge together a funny name */
3002 pid = cnp->cn_lwp->l_proc->p_pid;
3003 memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
3004 sp->s_namlen = 12;
3005 sp->s_name[8] = hexdigits[pid & 0xf];
3006 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
3007 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
3008 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
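	/*
	 * e.g. a pid of 0x1a2b yields ".nfsA1a2b4.4"; if that name is
	 * already taken, the character at s_name[4] is bumped below until
	 * an unused name is found.
	 */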
3009
3010 /* Try lookitups until we get one that isn't there */
3011 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
3012 cnp->cn_lwp, (struct nfsnode **)0) == 0) {
3013 sp->s_name[4]++;
3014 if (sp->s_name[4] > 'z') {
3015 error = EINVAL;
3016 goto bad;
3017 }
3018 }
3019 if (dolink) {
3020 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
3021 sp->s_cred, cnp->cn_lwp);
3022 /*
3023 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
3024 */
3025 if (error == ENOTSUP) {
3026 error = nfs_renameit(dvp, cnp, sp);
3027 }
3028 } else {
3029 error = nfs_renameit(dvp, cnp, sp);
3030 }
3031 if (error)
3032 goto bad;
3033 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
3034 cnp->cn_lwp, &np);
3035 np->n_sillyrename = sp;
3036 return (0);
3037 bad:
3038 vrele(sp->s_dvp);
3039 kauth_cred_free(sp->s_cred);
3040 free((caddr_t)sp, M_NFSREQ);
3041 return (error);
3042 }
3043
3044 /*
3045 * Look up a file name and optionally either update the file handle or
3046 * allocate an nfsnode, depending on the value of npp.
3047 * npp == NULL --> just do the lookup
3048 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
3049 * handled too
3050 * *npp != NULL --> update the file handle in the vnode
3051 */
3052 int
3053 nfs_lookitup(dvp, name, len, cred, l, npp)
3054 struct vnode *dvp;
3055 const char *name;
3056 int len;
3057 kauth_cred_t cred;
3058 struct lwp *l;
3059 struct nfsnode **npp;
3060 {
3061 u_int32_t *tl;
3062 caddr_t cp;
3063 int32_t t1, t2;
3064 struct vnode *newvp = (struct vnode *)0;
3065 struct nfsnode *np, *dnp = VTONFS(dvp);
3066 caddr_t bpos, dpos, cp2;
3067 int error = 0, fhlen;
3068 #ifndef NFS_V2_ONLY
3069 int attrflag;
3070 #endif
3071 struct mbuf *mreq, *mrep, *md, *mb;
3072 nfsfh_t *nfhp;
3073 const int v3 = NFS_ISV3(dvp);
3074
3075 nfsstats.rpccnt[NFSPROC_LOOKUP]++;
3076 nfsm_reqhead(dnp, NFSPROC_LOOKUP,
3077 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
3078 nfsm_fhtom(dnp, v3);
3079 nfsm_strtom(name, len, NFS_MAXNAMLEN);
3080 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred);
3081 if (npp && !error) {
3082 nfsm_getfh(nfhp, fhlen, v3);
3083 if (*npp) {
3084 np = *npp;
3085 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
3086 free((caddr_t)np->n_fhp, M_NFSBIGFH);
3087 np->n_fhp = &np->n_fh;
3088 }
3089 #if NFS_SMALLFH < NFSX_V3FHMAX
3090			else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
3091				np->n_fhp = (nfsfh_t *)malloc(fhlen, M_NFSBIGFH, M_WAITOK);
3092 #endif
3093 memcpy((caddr_t)np->n_fhp, (caddr_t)nfhp, fhlen);
3094 np->n_fhsize = fhlen;
3095 newvp = NFSTOV(np);
3096 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
3097 VREF(dvp);
3098 newvp = dvp;
3099 np = dnp;
3100 } else {
3101 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
3102 if (error) {
3103 m_freem(mrep);
3104 return (error);
3105 }
3106 newvp = NFSTOV(np);
3107 }
3108 #ifndef NFS_V2_ONLY
3109 if (v3) {
3110 nfsm_postop_attr(newvp, attrflag, 0);
3111 if (!attrflag && *npp == NULL) {
3112 m_freem(mrep);
3113 vput(newvp);
3114 return (ENOENT);
3115 }
3116 } else
3117 #endif
3118 nfsm_loadattr(newvp, (struct vattr *)0, 0);
3119 }
3120 nfsm_reqdone;
3121 if (npp && *npp == NULL) {
3122 if (error) {
3123 if (newvp)
3124 vput(newvp);
3125 } else
3126 *npp = np;
3127 }
3128 return (error);
3129 }
3130
3131 #ifndef NFS_V2_ONLY
3132 /*
3133 * Nfs Version 3 commit rpc
3134 */
3135 int
3136 nfs_commit(vp, offset, cnt, l)
3137 struct vnode *vp;
3138 off_t offset;
3139 uint32_t cnt;
3140 struct lwp *l;
3141 {
3142 caddr_t cp;
3143 u_int32_t *tl;
3144 int32_t t1, t2;
3145 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
3146 caddr_t bpos, dpos, cp2;
3147 int error = 0, wccflag = NFSV3_WCCRATTR;
3148 struct mbuf *mreq, *mrep, *md, *mb;
3149 struct nfsnode *np;
3150
3151 KASSERT(NFS_ISV3(vp));
3152
3153 #ifdef NFS_DEBUG_COMMIT
3154 printf("commit %lu - %lu\n", (unsigned long)offset,
3155 (unsigned long)(offset + cnt));
3156 #endif
3157
3158 simple_lock(&nmp->nm_slock);
3159 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) {
3160 simple_unlock(&nmp->nm_slock);
3161 return (0);
3162 }
3163 simple_unlock(&nmp->nm_slock);
3164 nfsstats.rpccnt[NFSPROC_COMMIT]++;
3165 np = VTONFS(vp);
3166 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1));
3167 nfsm_fhtom(np, 1);
3168 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3169 txdr_hyper(offset, tl);
3170 tl += 2;
3171 *tl = txdr_unsigned(cnt);
3172 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3173 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, FALSE);
3174 if (!error) {
3175 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
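		/*
		 * Compare the verifier returned by COMMIT with the one
		 * remembered from earlier WRITEs.  If the server changed it
		 * (e.g. it rebooted), data written UNSTABLE may have been
		 * lost, so mark the mount stale and let the write code
		 * re-send the affected pages.
		 */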
3176 simple_lock(&nmp->nm_slock);
3177 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3178 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3179 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3180 error = NFSERR_STALEWRITEVERF;
3181 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3182 }
3183 simple_unlock(&nmp->nm_slock);
3184 }
3185 nfsm_reqdone;
3186 return (error);
3187 }
3188 #endif
3189
3190 /*
3191 * Kludge City..
3192 * - make nfs_bmap() essentially a no-op that does no translation
3193 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3194 * (Maybe I could use the process's page mapping, but I was concerned that
3195 * Kernel Write might not be enabled and also figured copyout() would do
3196 * a lot more work than memcpy() and also it currently happens in the
3197	 * context of the swapper process (2).)
3198 */
3199 int
3200 nfs_bmap(v)
3201 void *v;
3202 {
3203 struct vop_bmap_args /* {
3204 struct vnode *a_vp;
3205 daddr_t a_bn;
3206 struct vnode **a_vpp;
3207 daddr_t *a_bnp;
3208 int *a_runp;
3209 } */ *ap = v;
3210 struct vnode *vp = ap->a_vp;
3211 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
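	/*
	 * Illustration (hypothetical values): with an 8k filesystem block
	 * size (mnt_fs_bshift == 13) and DEV_BSIZE device blocks
	 * (mnt_dev_bshift == 9), bshift is 4, so logical block 3 maps to
	 * "device" block 48 below; no real translation is done.
	 */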
3212
3213 if (ap->a_vpp != NULL)
3214 *ap->a_vpp = vp;
3215 if (ap->a_bnp != NULL)
3216 *ap->a_bnp = ap->a_bn << bshift;
3217 if (ap->a_runp != NULL)
3218 *ap->a_runp = 1024 * 1024; /* XXX */
3219 return (0);
3220 }
3221
3222 /*
3223 * Strategy routine.
3224 * For async requests when nfsiod(s) are running, queue the request by
3225	 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
3226 * request.
3227 */
3228 int
3229 nfs_strategy(v)
3230 void *v;
3231 {
3232 struct vop_strategy_args *ap = v;
3233 struct buf *bp = ap->a_bp;
3234 int error = 0;
3235
3236 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3237 panic("nfs physio/async");
3238
3239 /*
3240	 * If the op is asynchronous and an i/o daemon is waiting,
3241	 * queue the request, wake it up and wait for completion;
3242	 * otherwise just do it ourselves.
3243 */
3244 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3245 error = nfs_doio(bp);
3246 return (error);
3247 }
3248
3249 /*
3250 * fsync vnode op. Just call nfs_flush() with commit == 1.
3251 */
3252 /* ARGSUSED */
3253 int
3254 nfs_fsync(v)
3255 void *v;
3256 {
3257 struct vop_fsync_args /* {
3258 struct vnodeop_desc *a_desc;
3259 struct vnode * a_vp;
3260 kauth_cred_t a_cred;
3261 int a_flags;
3262 off_t offlo;
3263 off_t offhi;
3264 struct lwp * a_l;
3265 } */ *ap = v;
3266
3267 struct vnode *vp = ap->a_vp;
3268
3269 if (vp->v_type != VREG)
3270 return 0;
3271
3272 return (nfs_flush(vp, ap->a_cred,
3273 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, ap->a_l, 1));
3274 }
3275
3276 /*
3277 * Flush all the data associated with a vnode.
3278 */
3279 int
3280 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3281 int commit)
3282 {
3283 struct nfsnode *np = VTONFS(vp);
3284 int error;
3285 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3286 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3287
3288 simple_lock(&vp->v_interlock);
3289 error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3290 if (np->n_flag & NWRITEERR) {
3291 error = np->n_error;
3292 np->n_flag &= ~NWRITEERR;
3293 }
3294 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3295 return (error);
3296 }
3297
3298 /*
3299 * Return POSIX pathconf information applicable to nfs.
3300 *
3301 * N.B. The NFS V2 protocol doesn't support this RPC.
3302 */
3303 /* ARGSUSED */
3304 int
3305 nfs_pathconf(v)
3306 void *v;
3307 {
3308 struct vop_pathconf_args /* {
3309 struct vnode *a_vp;
3310 int a_name;
3311 register_t *a_retval;
3312 } */ *ap = v;
3313 struct nfsv3_pathconf *pcp;
3314 struct vnode *vp = ap->a_vp;
3315 struct mbuf *mreq, *mrep, *md, *mb;
3316 int32_t t1, t2;
3317 u_int32_t *tl;
3318 caddr_t bpos, dpos, cp, cp2;
3319 int error = 0, attrflag;
3320 #ifndef NFS_V2_ONLY
3321 struct nfsmount *nmp;
3322 unsigned int l;
3323 u_int64_t maxsize;
3324 #endif
3325 const int v3 = NFS_ISV3(vp);
3326 struct nfsnode *np = VTONFS(vp);
3327
3328 switch (ap->a_name) {
3329 /* Names that can be resolved locally. */
3330 case _PC_PIPE_BUF:
3331 *ap->a_retval = PIPE_BUF;
3332 break;
3333 case _PC_SYNC_IO:
3334 *ap->a_retval = 1;
3335 break;
3336 /* Names that cannot be resolved locally; do an RPC, if possible. */
3337 case _PC_LINK_MAX:
3338 case _PC_NAME_MAX:
3339 case _PC_CHOWN_RESTRICTED:
3340 case _PC_NO_TRUNC:
3341 if (!v3) {
3342 error = EINVAL;
3343 break;
3344 }
3345 nfsstats.rpccnt[NFSPROC_PATHCONF]++;
3346 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1));
3347 nfsm_fhtom(np, 1);
3348 nfsm_request(np, NFSPROC_PATHCONF,
3349 curlwp, curlwp->l_cred); /* XXX */
3350 nfsm_postop_attr(vp, attrflag, 0);
3351 if (!error) {
3352 nfsm_dissect(pcp, struct nfsv3_pathconf *,
3353 NFSX_V3PATHCONF);
3354 switch (ap->a_name) {
3355 case _PC_LINK_MAX:
3356 *ap->a_retval =
3357 fxdr_unsigned(register_t, pcp->pc_linkmax);
3358 break;
3359 case _PC_NAME_MAX:
3360 *ap->a_retval =
3361 fxdr_unsigned(register_t, pcp->pc_namemax);
3362 break;
3363 case _PC_CHOWN_RESTRICTED:
3364 *ap->a_retval =
3365 (pcp->pc_chownrestricted == nfs_true);
3366 break;
3367 case _PC_NO_TRUNC:
3368 *ap->a_retval =
3369 (pcp->pc_notrunc == nfs_true);
3370 break;
3371 }
3372 }
3373 nfsm_reqdone;
3374 break;
3375 case _PC_FILESIZEBITS:
3376 #ifndef NFS_V2_ONLY
3377 if (v3) {
3378 nmp = VFSTONFS(vp->v_mount);
3379 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
3380 if ((error = nfs_fsinfo(nmp, vp,
3381 curlwp->l_cred, curlwp)) != 0) /* XXX */
3382 break;
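		/*
		 * Count the significant bits in nm_maxfilesize; e.g. a
		 * maximum file size of 2^40 - 1 leaves the loop with
		 * l == 40, and the + 1 below presumably accounts for the
		 * sign bit, giving a _PC_FILESIZEBITS of 41.
		 */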
3383 for (l = 0, maxsize = nmp->nm_maxfilesize;
3384 (maxsize >> l) > 0; l++)
3385 ;
3386 *ap->a_retval = l + 1;
3387 } else
3388 #endif
3389 {
3390 *ap->a_retval = 32; /* NFS V2 limitation */
3391 }
3392 break;
3393 default:
3394 error = EINVAL;
3395 break;
3396 }
3397
3398 return (error);
3399 }
3400
3401 /*
3402 * NFS advisory byte-level locks.
3403 */
3404 int
3405 nfs_advlock(v)
3406 void *v;
3407 {
3408 struct vop_advlock_args /* {
3409 struct vnode *a_vp;
3410 caddr_t a_id;
3411 int a_op;
3412 struct flock *a_fl;
3413 int a_flags;
3414 } */ *ap = v;
3415 struct nfsnode *np = VTONFS(ap->a_vp);
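	/*
	 * Note that locking here is purely local to this client: the
	 * request is handed to the generic lf_advlock() code and no lock
	 * RPC is sent to the server, so these locks are not visible to
	 * other NFS clients.
	 */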
3416
3417 return lf_advlock(ap, &np->n_lockf, np->n_size);
3418 }
3419
3420 /*
3421 * Print out the contents of an nfsnode.
3422 */
3423 int
3424 nfs_print(v)
3425 void *v;
3426 {
3427 struct vop_print_args /* {
3428 struct vnode *a_vp;
3429 } */ *ap = v;
3430 struct vnode *vp = ap->a_vp;
3431 struct nfsnode *np = VTONFS(vp);
3432
3433 printf("tag VT_NFS, fileid %lld fsid 0x%lx",
3434 (unsigned long long)np->n_vattr->va_fileid, np->n_vattr->va_fsid);
3435 if (vp->v_type == VFIFO)
3436 fifo_printinfo(vp);
3437 printf("\n");
3438 return (0);
3439 }
3440
3441 /*
3442 * nfs unlock wrapper.
3443 */
3444 int
3445 nfs_unlock(void *v)
3446 {
3447 struct vop_unlock_args /* {
3448 struct vnode *a_vp;
3449 int a_flags;
3450 } */ *ap = v;
3451 struct vnode *vp = ap->a_vp;
3452
3453 /*
3454 * VOP_UNLOCK can be called by nfs_loadattrcache
3455 * with v_data == 0.
3456 */
3457 if (VTONFS(vp)) {
3458 nfs_delayedtruncate(vp);
3459 }
3460
3461 return genfs_unlock(v);
3462 }
3463
3464 /*
3465 * nfs special file access vnode op.
3466	 * Essentially just get vattr and then do the access check locally, since the device is
3467 * local to the client.
3468 */
3469 int
3470 nfsspec_access(v)
3471 void *v;
3472 {
3473 struct vop_access_args /* {
3474 struct vnode *a_vp;
3475 int a_mode;
3476 kauth_cred_t a_cred;
3477 struct lwp *a_l;
3478 } */ *ap = v;
3479 struct vattr va;
3480 struct vnode *vp = ap->a_vp;
3481 int error;
3482
3483 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_l);
3484 if (error)
3485 return (error);
3486
3487 /*
3488 * Disallow write attempts on filesystems mounted read-only;
3489 * unless the file is a socket, fifo, or a block or character
3490 * device resident on the filesystem.
3491 */
3492 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3493 switch (vp->v_type) {
3494 case VREG:
3495 case VDIR:
3496 case VLNK:
3497 return (EROFS);
3498 default:
3499 break;
3500 }
3501 }
3502
3503 return (vaccess(va.va_type, va.va_mode,
3504 va.va_uid, va.va_gid, ap->a_mode, ap->a_cred));
3505 }
3506
3507 /*
3508 * Read wrapper for special devices.
3509 */
3510 int
3511 nfsspec_read(v)
3512 void *v;
3513 {
3514 struct vop_read_args /* {
3515 struct vnode *a_vp;
3516 struct uio *a_uio;
3517 int a_ioflag;
3518 kauth_cred_t a_cred;
3519 } */ *ap = v;
3520 struct nfsnode *np = VTONFS(ap->a_vp);
3521
3522 /*
3523 * Set access flag.
3524 */
3525 np->n_flag |= NACC;
3526 getnanotime(&np->n_atim);
3527 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3528 }
3529
3530 /*
3531 * Write wrapper for special devices.
3532 */
3533 int
3534 nfsspec_write(v)
3535 void *v;
3536 {
3537 struct vop_write_args /* {
3538 struct vnode *a_vp;
3539 struct uio *a_uio;
3540 int a_ioflag;
3541 kauth_cred_t a_cred;
3542 } */ *ap = v;
3543 struct nfsnode *np = VTONFS(ap->a_vp);
3544
3545 /*
3546 * Set update flag.
3547 */
3548 np->n_flag |= NUPD;
3549 getnanotime(&np->n_mtim);
3550 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3551 }
3552
3553 /*
3554 * Close wrapper for special devices.
3555 *
3556 * Update the times on the nfsnode then do device close.
3557 */
3558 int
3559 nfsspec_close(v)
3560 void *v;
3561 {
3562 struct vop_close_args /* {
3563 struct vnode *a_vp;
3564 int a_fflag;
3565 kauth_cred_t a_cred;
3566 struct lwp *a_l;
3567 } */ *ap = v;
3568 struct vnode *vp = ap->a_vp;
3569 struct nfsnode *np = VTONFS(vp);
3570 struct vattr vattr;
3571
3572 if (np->n_flag & (NACC | NUPD)) {
3573 np->n_flag |= NCHG;
3574 if (vp->v_usecount == 1 &&
3575 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3576 VATTR_NULL(&vattr);
3577 if (np->n_flag & NACC)
3578 vattr.va_atime = np->n_atim;
3579 if (np->n_flag & NUPD)
3580 vattr.va_mtime = np->n_mtim;
3581 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_l);
3582 }
3583 }
3584 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3585 }
3586
3587 /*
3588 * Read wrapper for fifos.
3589 */
3590 int
3591 nfsfifo_read(v)
3592 void *v;
3593 {
3594 struct vop_read_args /* {
3595 struct vnode *a_vp;
3596 struct uio *a_uio;
3597 int a_ioflag;
3598 kauth_cred_t a_cred;
3599 } */ *ap = v;
3600 struct nfsnode *np = VTONFS(ap->a_vp);
3601
3602 /*
3603 * Set access flag.
3604 */
3605 np->n_flag |= NACC;
3606 getnanotime(&np->n_atim);
3607 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3608 }
3609
3610 /*
3611 * Write wrapper for fifos.
3612 */
3613 int
3614 nfsfifo_write(v)
3615 void *v;
3616 {
3617 struct vop_write_args /* {
3618 struct vnode *a_vp;
3619 struct uio *a_uio;
3620 int a_ioflag;
3621 kauth_cred_t a_cred;
3622 } */ *ap = v;
3623 struct nfsnode *np = VTONFS(ap->a_vp);
3624
3625 /*
3626 * Set update flag.
3627 */
3628 np->n_flag |= NUPD;
3629 getnanotime(&np->n_mtim);
3630 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3631 }
3632
3633 /*
3634 * Close wrapper for fifos.
3635 *
3636 * Update the times on the nfsnode then do fifo close.
3637 */
3638 int
3639 nfsfifo_close(v)
3640 void *v;
3641 {
3642 struct vop_close_args /* {
3643 struct vnode *a_vp;
3644 int a_fflag;
3645 kauth_cred_t a_cred;
3646 struct lwp *a_l;
3647 } */ *ap = v;
3648 struct vnode *vp = ap->a_vp;
3649 struct nfsnode *np = VTONFS(vp);
3650 struct vattr vattr;
3651
3652 if (np->n_flag & (NACC | NUPD)) {
3653 struct timespec ts;
3654
3655 getnanotime(&ts);
3656 if (np->n_flag & NACC)
3657 np->n_atim = ts;
3658 if (np->n_flag & NUPD)
3659 np->n_mtim = ts;
3660 np->n_flag |= NCHG;
3661 if (vp->v_usecount == 1 &&
3662 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3663 VATTR_NULL(&vattr);
3664 if (np->n_flag & NACC)
3665 vattr.va_atime = np->n_atim;
3666 if (np->n_flag & NUPD)
3667 vattr.va_mtime = np->n_mtim;
3668 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_l);
3669 }
3670 }
3671 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3672 }
3673