/*	$NetBSD: nfs_commonkrpc.c,v 1.1.1.1.12.1 2016/12/05 10:55:25 skrll Exp $	*/
/*-
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
/* __FBSDID("FreeBSD: head/sys/fs/nfs/nfs_commonkrpc.c 304026 2016-08-12 22:44:59Z rmacklem "); */
__RCSID("$NetBSD: nfs_commonkrpc.c,v 1.1.1.1.12.1 2016/12/05 10:55:25 skrll Exp $");

/*
 * Socket operations for use by nfs
 */

#include "opt_kgssapi.h"
#include "opt_nfs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>

#include <rpc/rpc.h>
#include <rpc/krpc.h>

#include <kgssapi/krb5/kcrypto.h>

#include <fs/nfs/nfsport.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_nfsclient_nfs23_start_probe_func_t
		dtrace_nfscl_nfs234_start_probe;

dtrace_nfsclient_nfs23_done_probe_func_t
		dtrace_nfscl_nfs234_done_probe;

/*
 * Registered probes by RPC type.
 */
uint32_t	nfscl_nfs2_start_probes[NFSV41_NPROCS + 1];
uint32_t	nfscl_nfs2_done_probes[NFSV41_NPROCS + 1];

uint32_t	nfscl_nfs3_start_probes[NFSV41_NPROCS + 1];
uint32_t	nfscl_nfs3_done_probes[NFSV41_NPROCS + 1];

uint32_t	nfscl_nfs4_start_probes[NFSV41_NPROCS + 1];
uint32_t	nfscl_nfs4_done_probes[NFSV41_NPROCS + 1];
#endif

NFSSTATESPINLOCK;
NFSREQSPINLOCK;
NFSDLOCKMUTEX;
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern int nfscl_ticks;
extern void (*ncl_call_invalcaches)(struct vnode *);
extern int nfs_numnfscbd;
extern int nfscl_debuglevel;

SVCPOOL		*nfscbd_pool;
static int	nfsrv_gsscallbackson = 0;
static int	nfs_bufpackets = 4;
static int	nfs_reconnects;
static int	nfs3_jukebox_delay = 10;
static int	nfs_skip_wcc_data_onerr = 1;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
    "Buffer reservation size 2 < x < 64");
SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
    "Number of times the nfs client has had to reconnect");
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
    "Number of seconds to delay a retry after receiving EJUKEBOX");
SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0,
    "Disable weak cache consistency checking when server returns an error");

static void	nfs_down(struct nfsmount *, struct thread *, const char *,
    int, int);
static void	nfs_up(struct nfsmount *, struct thread *, const char *,
    int, int);
static int	nfs_msg(struct thread *, const char *, const char *, int);

struct nfs_cached_auth {
	int		ca_refs;	/* refcount, including 1 from the cache */
	uid_t		ca_uid;		/* uid that corresponds to this auth */
	AUTH		*ca_auth;	/* RPC auth handle */
};

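/*
 * Mapping of NFSv3 procedure numbers to the corresponding NFSv2 procedure
 * numbers, used by newnfs_request() when the mount is NFSv2 (ND_NFSV2).
 * Procedures with no NFSv2 equivalent map to NFSV2PROC_NOOP.
 */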
static int nfsv2_procid[NFS_V3NPROCS] = {
	NFSV2PROC_NULL,
	NFSV2PROC_GETATTR,
	NFSV2PROC_SETATTR,
	NFSV2PROC_LOOKUP,
	NFSV2PROC_NOOP,
	NFSV2PROC_READLINK,
	NFSV2PROC_READ,
	NFSV2PROC_WRITE,
	NFSV2PROC_CREATE,
	NFSV2PROC_MKDIR,
	NFSV2PROC_SYMLINK,
	NFSV2PROC_CREATE,
	NFSV2PROC_REMOVE,
	NFSV2PROC_RMDIR,
	NFSV2PROC_RENAME,
	NFSV2PROC_LINK,
	NFSV2PROC_READDIR,
	NFSV2PROC_NOOP,
	NFSV2PROC_STATFS,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
};

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if an error occurs.
 */
int
newnfs_connect(struct nfsmount *nmp, struct nfssockreq *nrp,
    struct ucred *cred, NFSPROC_T *p, int callback_retry_mult)
{
	int rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct ucred *origcred;
	CLIENT *client;
	struct netconfig *nconf;
	struct socket *so;
	int one = 1, retries, error = 0;
	struct thread *td = curthread;
	SVCXPRT *xprt;
	struct timeval timo;

	/*
	 * We need to establish the socket using the credentials of
	 * the mountpoint.  Some parts of this process (such as
	 * sobind() and soconnect()) will use the current thread's
	 * credential instead of the socket credential.  To work
	 * around this, temporarily change the current thread's
	 * credential to that of the mountpoint.
	 *
	 * XXX: It would be better to explicitly pass the correct
	 * credential to sobind() and soconnect().
	 */
	origcred = td->td_ucred;

	/*
	 * Use the credential in nr_cred, if not NULL.
	 */
	if (nrp->nr_cred != NULL)
		td->td_ucred = nrp->nr_cred;
	else
		td->td_ucred = cred;
	saddr = nrp->nr_nam;

	if (saddr->sa_family == AF_INET)
		if (nrp->nr_sotype == SOCK_DGRAM)
			nconf = getnetconfigent("udp");
		else
			nconf = getnetconfigent("tcp");
	else
		if (nrp->nr_sotype == SOCK_DGRAM)
			nconf = getnetconfigent("udp6");
		else
			nconf = getnetconfigent("tcp6");

	pktscale = nfs_bufpackets;
	if (pktscale < 2)
		pktscale = 2;
	if (pktscale > 64)
		pktscale = 64;
	/*
	 * soreserve() can fail if sb_max is too small, so shrink pktscale
	 * and try again if there is an error.
	 * Print a log message suggesting increasing sb_max.
	 * Creating a socket and doing this is necessary since, if the
	 * reservation sizes are too large and will make soreserve() fail,
	 * the connection will work until a large send is attempted and
	 * then it will loop in the krpc code.
	 */
	so = NULL;
	saddr = NFSSOCKADDR(nrp->nr_nam, struct sockaddr *);
	error = socreate(saddr->sa_family, &so, nrp->nr_sotype,
	    nrp->nr_soproto, td->td_ucred, td);
	if (error) {
		td->td_ucred = origcred;
		goto out;
	}
	do {
		if (error != 0 && pktscale > 2)
			pktscale--;
		if (nrp->nr_sotype == SOCK_DGRAM) {
			if (nmp != NULL) {
				sndreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
				    pktscale;
				rcvreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
				    pktscale;
			} else {
				sndreserve = rcvreserve = 1024 * pktscale;
			}
		} else {
			if (nrp->nr_sotype != SOCK_STREAM)
				panic("nfscon sotype");
			if (nmp != NULL) {
				sndreserve = (NFS_MAXBSIZE + NFS_MAXPKTHDR +
				    sizeof (u_int32_t)) * pktscale;
				rcvreserve = (NFS_MAXBSIZE + NFS_MAXPKTHDR +
				    sizeof (u_int32_t)) * pktscale;
			} else {
				sndreserve = rcvreserve = 1024 * pktscale;
			}
		}
		error = soreserve(so, sndreserve, rcvreserve);
	} while (error != 0 && pktscale > 2);
	soclose(so);
	if (error) {
		td->td_ucred = origcred;
		goto out;
	}

	client = clnt_reconnect_create(nconf, saddr, nrp->nr_prog,
	    nrp->nr_vers, sndreserve, rcvreserve);
	CLNT_CONTROL(client, CLSET_WAITCHAN, "nfsreq");
	if (nmp != NULL) {
		if ((nmp->nm_flag & NFSMNT_INT))
			CLNT_CONTROL(client, CLSET_INTERRUPTIBLE, &one);
		if ((nmp->nm_flag & NFSMNT_RESVPORT))
			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
		if (NFSHASSOFT(nmp)) {
			if (nmp->nm_sotype == SOCK_DGRAM)
				/*
				 * For UDP, the large timeout for a reconnect
				 * will be set to "nm_retry * nm_timeo / 2", so
				 * we only want to do 2 reconnect timeout
				 * retries.
				 */
				retries = 2;
			else
				retries = nmp->nm_retry;
		} else
			retries = INT_MAX;
		if (NFSHASNFSV4N(nmp)) {
			/*
			 * Make sure the nfscbd_pool doesn't get destroyed
			 * while doing this.
			 */
			NFSD_LOCK();
			if (nfs_numnfscbd > 0) {
				nfs_numnfscbd++;
				NFSD_UNLOCK();
				xprt = svc_vc_create_backchannel(nfscbd_pool);
				CLNT_CONTROL(client, CLSET_BACKCHANNEL, xprt);
				NFSD_LOCK();
				nfs_numnfscbd--;
				if (nfs_numnfscbd == 0)
					wakeup(&nfs_numnfscbd);
			}
			NFSD_UNLOCK();
		}
	} else {
		/*
		 * Three cases:
		 * - Null RPC callback to client
		 * - Non-Null RPC callback to client, wait a little longer
		 * - upcalls to nfsuserd and gssd (clp == NULL)
		 */
		if (callback_retry_mult == 0) {
			retries = NFSV4_UPCALLRETRY;
			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
		} else {
			retries = NFSV4_CALLBACKRETRY * callback_retry_mult;
		}
	}
	CLNT_CONTROL(client, CLSET_RETRIES, &retries);

	if (nmp != NULL) {
		/*
		 * For UDP, there are 2 timeouts:
		 * - CLSET_RETRY_TIMEOUT sets the initial timeout for the timer
		 *   that does a retransmit of an RPC request using the same
		 *   socket and xid.  This is what you normally want to do,
		 *   since NFS servers depend on "same xid" for their
		 *   Duplicate Request Cache.
		 * - the timeout passed to CLNT_CALL_MBUF(), which specifies
		 *   when retransmits on the same socket should fail and a
		 *   fresh socket be created.  Each of these timeouts counts
		 *   as one CLSET_RETRIES as set above.
		 * Set the initial retransmit timeout for UDP.  This timeout
		 * doesn't exist for TCP and the following call just fails,
		 * which is ok.
		 */
		timo.tv_sec = nmp->nm_timeo / NFS_HZ;
		timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
		CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, &timo);
	}

	mtx_lock(&nrp->nr_mtx);
	if (nrp->nr_client != NULL) {
		mtx_unlock(&nrp->nr_mtx);
		/*
		 * Someone else already connected.
		 */
		CLNT_RELEASE(client);
	} else {
		nrp->nr_client = client;
		/*
		 * Protocols that do not require connections may be optionally
		 * left unconnected for servers that reply from a port other
		 * than NFS_PORT.
		 */
		if (nmp == NULL || (nmp->nm_flag & NFSMNT_NOCONN) == 0) {
			mtx_unlock(&nrp->nr_mtx);
			CLNT_CONTROL(client, CLSET_CONNECT, &one);
		} else
			mtx_unlock(&nrp->nr_mtx);
	}


	/* Restore current thread's credentials. */
	td->td_ucred = origcred;

out:
	NFSEXITCODE(error);
	return (error);
}

/*
 * NFS disconnect. Clean up and unlink.
 */
void
newnfs_disconnect(struct nfssockreq *nrp)
{
	CLIENT *client;

	mtx_lock(&nrp->nr_mtx);
	if (nrp->nr_client != NULL) {
		client = nrp->nr_client;
		nrp->nr_client = NULL;
		mtx_unlock(&nrp->nr_mtx);
		rpc_gss_secpurge_call(client);
		CLNT_CLOSE(client);
		CLNT_RELEASE(client);
	} else {
		mtx_unlock(&nrp->nr_mtx);
	}
}

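/*
 * Acquire an RPC authentication handle for the given security flavour.
 * For the RPCSEC_GSS Kerberos flavours, look up or create a GSS credential;
 * all other flavours, and a failed credential lookup, fall back to AUTH_SYS
 * via authunix_create().
 */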
static AUTH *
nfs_getauth(struct nfssockreq *nrp, int secflavour, char *clnt_principal,
    char *srv_principal, gss_OID mech_oid, struct ucred *cred)
{
	rpc_gss_service_t svc;
	AUTH *auth;

	switch (secflavour) {
	case RPCSEC_GSS_KRB5:
	case RPCSEC_GSS_KRB5I:
	case RPCSEC_GSS_KRB5P:
		if (!mech_oid) {
			if (!rpc_gss_mech_to_oid_call("kerberosv5", &mech_oid))
				return (NULL);
		}
		if (secflavour == RPCSEC_GSS_KRB5)
			svc = rpc_gss_svc_none;
		else if (secflavour == RPCSEC_GSS_KRB5I)
			svc = rpc_gss_svc_integrity;
		else
			svc = rpc_gss_svc_privacy;

		if (clnt_principal == NULL)
			auth = rpc_gss_secfind_call(nrp->nr_client, cred,
			    srv_principal, mech_oid, svc);
		else {
			auth = rpc_gss_seccreate_call(nrp->nr_client, cred,
			    clnt_principal, srv_principal, "kerberosv5",
			    svc, NULL, NULL, NULL);
			return (auth);
		}
		if (auth != NULL)
			return (auth);
		/* fallthrough */
	case AUTH_SYS:
	default:
		return (authunix_create(cred));

	}
}

/*
 * Callback from the RPC code to generate up/down notifications.
 */

struct nfs_feedback_arg {
	struct nfsmount *nf_mount;
	int		nf_lastmsg;	/* last tprintf */
	int		nf_tprintfmsg;
	struct thread	*nf_td;
};

static void
nfs_feedback(int type, int proc, void *arg)
{
	struct nfs_feedback_arg *nf = (struct nfs_feedback_arg *) arg;
	struct nfsmount *nmp = nf->nf_mount;
	time_t now;

	switch (type) {
	case FEEDBACK_REXMIT2:
	case FEEDBACK_RECONNECT:
		now = NFSD_MONOSEC;
		if (nf->nf_lastmsg + nmp->nm_tprintf_delay < now) {
			nfs_down(nmp, nf->nf_td,
			    "not responding", 0, NFSSTA_TIMEO);
			nf->nf_tprintfmsg = TRUE;
			nf->nf_lastmsg = now;
		}
		break;

	case FEEDBACK_OK:
		nfs_up(nf->nf_mount, nf->nf_td,
		    "is alive again", NFSSTA_TIMEO, nf->nf_tprintfmsg);
		break;
	}
}

/*
 * newnfs_request - goes something like this
 *	- does the RPC by calling the krpc layer
 *	- breaks down the RPC header and returns with the NFS reply
 * nb: always frees up the nd_mreq mbuf list
 */
int
newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
    struct nfsclient *clp, struct nfssockreq *nrp, vnode_t vp,
    struct thread *td, struct ucred *cred, u_int32_t prog, u_int32_t vers,
    u_char *retsum, int toplevel, u_int64_t *xidp, struct nfsclsession *sep)
{
	u_int32_t retseq, retval, *tl;
	time_t waituntil;
	int i = 0, j = 0, opcnt, set_sigset = 0, slot;
	int trycnt, error = 0, usegssname = 0, secflavour = AUTH_SYS;
	int freeslot, timeo;
	u_int16_t procnum;
	u_int trylater_delay = 1;
	struct nfs_feedback_arg nf;
	struct timeval timo;
	AUTH *auth;
	struct rpc_callextra ext;
	enum clnt_stat stat;
	struct nfsreq *rep = NULL;
	char *srv_principal = NULL, *clnt_principal = NULL;
	sigset_t oldset;
	struct ucred *authcred;

	if (xidp != NULL)
		*xidp = 0;
	/* Reject requests while attempting a forced unmount. */
	if (nmp != NULL && (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)) {
		m_freem(nd->nd_mreq);
		return (ESTALE);
	}

	/*
	 * Set authcred, which is used to acquire RPC credentials, to
	 * the cred argument by default.  The crhold() should not be
	 * necessary, but ensures that a future code change doesn't
	 * result in the credential being free'd prematurely.
	 */
	authcred = crhold(cred);

	/* For client side interruptible mounts, mask off the signals. */
	if (nmp != NULL && td != NULL && NFSHASINT(nmp)) {
		newnfs_set_sigmask(td, &oldset);
		set_sigset = 1;
	}

	/*
	 * XXX if not already connected call nfs_connect now.  Longer
	 * term, change nfs_mount to call nfs_connect unconditionally
	 * and let clnt_reconnect_create handle reconnects.
	 */
	if (nrp->nr_client == NULL)
		newnfs_connect(nmp, nrp, cred, td, 0);

	/*
	 * For a client side mount, nmp is != NULL and clp == NULL. For
	 * server calls (callbacks or upcalls), nmp == NULL.
	 */
	if (clp != NULL) {
		NFSLOCKSTATE();
		if ((clp->lc_flags & LCL_GSS) && nfsrv_gsscallbackson) {
			secflavour = RPCSEC_GSS_KRB5;
			if (nd->nd_procnum != NFSPROC_NULL) {
				if (clp->lc_flags & LCL_GSSINTEGRITY)
					secflavour = RPCSEC_GSS_KRB5I;
				else if (clp->lc_flags & LCL_GSSPRIVACY)
					secflavour = RPCSEC_GSS_KRB5P;
			}
		}
		NFSUNLOCKSTATE();
	} else if (nmp != NULL && NFSHASKERB(nmp) &&
	     nd->nd_procnum != NFSPROC_NULL) {
		if (NFSHASALLGSSNAME(nmp) && nmp->nm_krbnamelen > 0)
			nd->nd_flag |= ND_USEGSSNAME;
		if ((nd->nd_flag & ND_USEGSSNAME) != 0) {
			/*
			 * If there is a client side host based credential,
			 * use that, otherwise use the system uid, if set.
			 * The system uid is in the nmp->nm_sockreq.nr_cred
			 * credentials.
			 */
			if (nmp->nm_krbnamelen > 0) {
				usegssname = 1;
				clnt_principal = nmp->nm_krbname;
			} else if (nmp->nm_uid != (uid_t)-1) {
				KASSERT(nmp->nm_sockreq.nr_cred != NULL,
				    ("newnfs_request: NULL nr_cred"));
				crfree(authcred);
				authcred = crhold(nmp->nm_sockreq.nr_cred);
			}
		} else if (nmp->nm_krbnamelen == 0 &&
		    nmp->nm_uid != (uid_t)-1 && cred->cr_uid == (uid_t)0) {
			/*
			 * If there is no host based principal name and
			 * the system uid is set and this is root, use the
			 * system uid, since root won't have user
			 * credentials in a credentials cache file.
			 * The system uid is in the nmp->nm_sockreq.nr_cred
			 * credentials.
			 */
			KASSERT(nmp->nm_sockreq.nr_cred != NULL,
			    ("newnfs_request: NULL nr_cred"));
			crfree(authcred);
			authcred = crhold(nmp->nm_sockreq.nr_cred);
		}
		if (NFSHASINTEGRITY(nmp))
			secflavour = RPCSEC_GSS_KRB5I;
		else if (NFSHASPRIVACY(nmp))
			secflavour = RPCSEC_GSS_KRB5P;
		else
			secflavour = RPCSEC_GSS_KRB5;
		srv_principal = NFSMNT_SRVKRBNAME(nmp);
	} else if (nmp != NULL && !NFSHASKERB(nmp) &&
	    nd->nd_procnum != NFSPROC_NULL &&
	    (nd->nd_flag & ND_USEGSSNAME) != 0) {
		/*
		 * Use the uid that did the mount when the RPC is doing
		 * NFSv4 system operations, as indicated by the
		 * ND_USEGSSNAME flag, for the AUTH_SYS case.
		 * The credentials in nm_sockreq.nr_cred were used for the
		 * mount.
		 */
		KASSERT(nmp->nm_sockreq.nr_cred != NULL,
		    ("newnfs_request: NULL nr_cred"));
		crfree(authcred);
		authcred = crhold(nmp->nm_sockreq.nr_cred);
	}

	if (nmp != NULL) {
		bzero(&nf, sizeof(struct nfs_feedback_arg));
		nf.nf_mount = nmp;
		nf.nf_td = td;
		nf.nf_lastmsg = NFSD_MONOSEC -
		    ((nmp->nm_tprintf_delay)-(nmp->nm_tprintf_initial_delay));
	}

	if (nd->nd_procnum == NFSPROC_NULL)
		auth = authnone_create();
	else if (usegssname) {
		/*
		 * For this case, the authenticator is held in the
		 * nfssockreq structure, so don't release the reference count
		 * held on it. --> Don't AUTH_DESTROY() it in this function.
		 */
		if (nrp->nr_auth == NULL)
			nrp->nr_auth = nfs_getauth(nrp, secflavour,
			    clnt_principal, srv_principal, NULL, authcred);
		else
			rpc_gss_refresh_auth_call(nrp->nr_auth);
		auth = nrp->nr_auth;
	} else
		auth = nfs_getauth(nrp, secflavour, NULL,
		    srv_principal, NULL, authcred);
	crfree(authcred);
	if (auth == NULL) {
		m_freem(nd->nd_mreq);
		if (set_sigset)
			newnfs_restore_sigmask(td, &oldset);
		return (EACCES);
	}
	bzero(&ext, sizeof(ext));
	ext.rc_auth = auth;
	if (nmp != NULL) {
		ext.rc_feedback = nfs_feedback;
		ext.rc_feedback_arg = &nf;
	}

	procnum = nd->nd_procnum;
	if ((nd->nd_flag & ND_NFSV4) &&
	    nd->nd_procnum != NFSPROC_NULL &&
	    nd->nd_procnum != NFSV4PROC_CBCOMPOUND)
		procnum = NFSV4PROC_COMPOUND;

	if (nmp != NULL) {
		NFSINCRGLOBAL(nfsstatsv1.rpcrequests);

		/* Map the procnum to the old NFSv2 one, as required. */
		if ((nd->nd_flag & ND_NFSV2) != 0) {
			if (nd->nd_procnum < NFS_V3NPROCS)
				procnum = nfsv2_procid[nd->nd_procnum];
			else
				procnum = NFSV2PROC_NOOP;
		}

		/*
		 * Now only used for the R_DONTRECOVER case, but until that is
		 * supported within the krpc code, I need to keep a queue of
		 * outstanding RPCs for nfsv4 client requests.
		 */
		if ((nd->nd_flag & ND_NFSV4) && procnum == NFSV4PROC_COMPOUND)
			MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq),
			    M_NFSDREQ, M_WAITOK);
#ifdef KDTRACE_HOOKS
		if (dtrace_nfscl_nfs234_start_probe != NULL) {
			uint32_t probe_id;
			int probe_procnum;

			if (nd->nd_flag & ND_NFSV4) {
				probe_id =
				    nfscl_nfs4_start_probes[nd->nd_procnum];
				probe_procnum = nd->nd_procnum;
			} else if (nd->nd_flag & ND_NFSV3) {
				probe_id = nfscl_nfs3_start_probes[procnum];
				probe_procnum = procnum;
			} else {
				probe_id =
				    nfscl_nfs2_start_probes[nd->nd_procnum];
				probe_procnum = procnum;
			}
			if (probe_id != 0)
				(dtrace_nfscl_nfs234_start_probe)
				    (probe_id, vp, nd->nd_mreq, cred,
				     probe_procnum);
		}
#endif
	}
	trycnt = 0;
	freeslot = -1;		/* Set to slot that needs to be free'd */
tryagain:
	slot = -1;		/* Slot that needs a sequence# increment. */
	/*
	 * This timeout specifies when a new socket should be created,
	 * along with new xid values. For UDP, this should be done
	 * infrequently, since retransmits of RPC requests should normally
	 * use the same xid.
	 */
	if (nmp == NULL) {
		timo.tv_usec = 0;
		if (clp == NULL)
			timo.tv_sec = NFSV4_UPCALLTIMEO;
		else
			timo.tv_sec = NFSV4_CALLBACKTIMEO;
	} else {
		if (nrp->nr_sotype != SOCK_DGRAM) {
			timo.tv_usec = 0;
			if ((nmp->nm_flag & NFSMNT_NFSV4))
				timo.tv_sec = INT_MAX;
			else
				timo.tv_sec = NFS_TCPTIMEO;
		} else {
			if (NFSHASSOFT(nmp)) {
				/*
				 * CLSET_RETRIES is set to 2, so this should be
				 * half of the total timeout required.
				 */
				timeo = nmp->nm_retry * nmp->nm_timeo / 2;
				if (timeo < 1)
					timeo = 1;
				timo.tv_sec = timeo / NFS_HZ;
				timo.tv_usec = (timeo % NFS_HZ) * 1000000 /
				    NFS_HZ;
			} else {
				/* For UDP hard mounts, use a large value. */
				timo.tv_sec = NFS_MAXTIMEO / NFS_HZ;
				timo.tv_usec = 0;
			}
		}

		if (rep != NULL) {
			rep->r_flags = 0;
			rep->r_nmp = nmp;
			/*
			 * Chain request into list of outstanding requests.
			 */
			NFSLOCKREQ();
			TAILQ_INSERT_TAIL(&nfsd_reqq, rep, r_chain);
			NFSUNLOCKREQ();
		}
	}

	nd->nd_mrep = NULL;
	if (clp != NULL && sep != NULL)
		stat = clnt_bck_call(nrp->nr_client, &ext, procnum,
		    nd->nd_mreq, &nd->nd_mrep, timo, sep->nfsess_xprt);
	else
		stat = CLNT_CALL_MBUF(nrp->nr_client, &ext, procnum,
		    nd->nd_mreq, &nd->nd_mrep, timo);

	if (rep != NULL) {
		/*
		 * RPC done, unlink the request.
		 */
		NFSLOCKREQ();
		TAILQ_REMOVE(&nfsd_reqq, rep, r_chain);
		NFSUNLOCKREQ();
	}

	/*
	 * If there was a successful reply and a tprintf msg,
	 * tprintf a response.
	 */
	if (stat == RPC_SUCCESS) {
		error = 0;
	} else if (stat == RPC_TIMEDOUT) {
		NFSINCRGLOBAL(nfsstatsv1.rpctimeouts);
		error = ETIMEDOUT;
	} else if (stat == RPC_VERSMISMATCH) {
		NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
		error = EOPNOTSUPP;
	} else if (stat == RPC_PROGVERSMISMATCH) {
		NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
		error = EPROTONOSUPPORT;
	} else if (stat == RPC_INTR) {
		error = EINTR;
	} else {
		NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
		error = EACCES;
	}
	if (error) {
		m_freem(nd->nd_mreq);
		if (usegssname == 0)
			AUTH_DESTROY(auth);
		if (rep != NULL)
			FREE((caddr_t)rep, M_NFSDREQ);
		if (set_sigset)
			newnfs_restore_sigmask(td, &oldset);
		return (error);
	}

	KASSERT(nd->nd_mrep != NULL, ("mrep shouldn't be NULL if no error\n"));

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	newnfs_realign(&nd->nd_mrep, M_WAITOK);
	nd->nd_md = nd->nd_mrep;
	nd->nd_dpos = NFSMTOD(nd->nd_md, caddr_t);
	nd->nd_repstat = 0;
	if (nd->nd_procnum != NFSPROC_NULL &&
	    nd->nd_procnum != NFSV4PROC_CBNULL) {
		/* If sep == NULL, set it to the default in nmp. */
		if (sep == NULL && nmp != NULL)
			sep = NFSMNT_MDSSESSION(nmp);
		/*
		 * and now the actual NFS xdr.
		 */
		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
		nd->nd_repstat = fxdr_unsigned(u_int32_t, *tl);
		if (nd->nd_repstat >= 10000)
			NFSCL_DEBUG(1, "proc=%d reps=%d\n", (int)nd->nd_procnum,
			    (int)nd->nd_repstat);

		/*
		 * Get rid of the tag, return count and SEQUENCE result for
		 * NFSv4.
		 */
		if ((nd->nd_flag & ND_NFSV4) != 0) {
			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
			i = fxdr_unsigned(int, *tl);
			error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
			if (error)
				goto nfsmout;
			NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
			opcnt = fxdr_unsigned(int, *tl++);
			i = fxdr_unsigned(int, *tl++);
			j = fxdr_unsigned(int, *tl);
			if (j >= 10000)
				NFSCL_DEBUG(1, "fop=%d fst=%d\n", i, j);
			/*
			 * If the first op is Sequence, free up the slot.
			 */
			if ((nmp != NULL && i == NFSV4OP_SEQUENCE && j != 0) ||
			    (clp != NULL && i == NFSV4OP_CBSEQUENCE && j != 0))
				NFSCL_DEBUG(1, "failed seq=%d\n", j);
			if ((nmp != NULL && i == NFSV4OP_SEQUENCE && j == 0) ||
			    (clp != NULL && i == NFSV4OP_CBSEQUENCE && j == 0)
			    ) {
				if (i == NFSV4OP_SEQUENCE)
					NFSM_DISSECT(tl, uint32_t *,
					    NFSX_V4SESSIONID +
					    5 * NFSX_UNSIGNED);
				else
					NFSM_DISSECT(tl, uint32_t *,
					    NFSX_V4SESSIONID +
					    4 * NFSX_UNSIGNED);
				mtx_lock(&sep->nfsess_mtx);
				tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
				retseq = fxdr_unsigned(uint32_t, *tl++);
				slot = fxdr_unsigned(int, *tl++);
				freeslot = slot;
				if (retseq != sep->nfsess_slotseq[slot])
					printf("retseq diff 0x%x\n", retseq);
				retval = fxdr_unsigned(uint32_t, *++tl);
				if ((retval + 1) < sep->nfsess_foreslots)
					sep->nfsess_foreslots = (retval + 1);
				else if ((retval + 1) > sep->nfsess_foreslots)
					sep->nfsess_foreslots = (retval < 64) ?
					    (retval + 1) : 64;
				mtx_unlock(&sep->nfsess_mtx);

				/* Grab the op and status for the next one. */
				if (opcnt > 1) {
					NFSM_DISSECT(tl, uint32_t *,
					    2 * NFSX_UNSIGNED);
					i = fxdr_unsigned(int, *tl++);
					j = fxdr_unsigned(int, *tl);
				}
			}
		}
		if (nd->nd_repstat != 0) {
			if (((nd->nd_repstat == NFSERR_DELAY ||
			      nd->nd_repstat == NFSERR_GRACE) &&
			     (nd->nd_flag & ND_NFSV4) &&
			     nd->nd_procnum != NFSPROC_DELEGRETURN &&
			     nd->nd_procnum != NFSPROC_SETATTR &&
			     nd->nd_procnum != NFSPROC_READ &&
			     nd->nd_procnum != NFSPROC_READDS &&
			     nd->nd_procnum != NFSPROC_WRITE &&
			     nd->nd_procnum != NFSPROC_WRITEDS &&
			     nd->nd_procnum != NFSPROC_OPEN &&
			     nd->nd_procnum != NFSPROC_CREATE &&
			     nd->nd_procnum != NFSPROC_OPENCONFIRM &&
			     nd->nd_procnum != NFSPROC_OPENDOWNGRADE &&
			     nd->nd_procnum != NFSPROC_CLOSE &&
			     nd->nd_procnum != NFSPROC_LOCK &&
			     nd->nd_procnum != NFSPROC_LOCKU) ||
			    (nd->nd_repstat == NFSERR_DELAY &&
			     (nd->nd_flag & ND_NFSV4) == 0) ||
			    nd->nd_repstat == NFSERR_RESOURCE) {
				if (trylater_delay > NFS_TRYLATERDEL)
					trylater_delay = NFS_TRYLATERDEL;
				waituntil = NFSD_MONOSEC + trylater_delay;
				while (NFSD_MONOSEC < waituntil)
					(void) nfs_catnap(PZERO, 0, "nfstry");
				trylater_delay *= 2;
				if (slot != -1) {
					mtx_lock(&sep->nfsess_mtx);
					sep->nfsess_slotseq[slot]++;
					*nd->nd_slotseq = txdr_unsigned(
					    sep->nfsess_slotseq[slot]);
					mtx_unlock(&sep->nfsess_mtx);
				}
				m_freem(nd->nd_mrep);
				nd->nd_mrep = NULL;
				goto tryagain;
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 * (vp != NULL implies a client side call)
			 */
			if (nd->nd_repstat == ESTALE && vp != NULL) {
				cache_purge(vp);
				if (ncl_call_invalcaches != NULL)
					(*ncl_call_invalcaches)(vp);
			}
		}
		if ((nd->nd_flag & ND_NFSV4) != 0) {
			/* Free the slot, as required. */
			if (freeslot != -1)
				nfsv4_freeslot(sep, freeslot);
			/*
			 * If this op is Putfh, throw its results away.
			 */
			if (j >= 10000)
				NFSCL_DEBUG(1, "nop=%d nst=%d\n", i, j);
			if (nmp != NULL && i == NFSV4OP_PUTFH && j == 0) {
				NFSM_DISSECT(tl,u_int32_t *,2 * NFSX_UNSIGNED);
				i = fxdr_unsigned(int, *tl++);
				j = fxdr_unsigned(int, *tl);
				if (j >= 10000)
					NFSCL_DEBUG(1, "n2op=%d n2st=%d\n", i,
					    j);
				/*
				 * All Compounds that do an Op that must
				 * be in sequence consist of NFSV4OP_PUTFH
				 * followed by one of these. As such, we
				 * can determine if the seqid# should be
				 * incremented, here.
				 */
				if ((i == NFSV4OP_OPEN ||
				     i == NFSV4OP_OPENCONFIRM ||
				     i == NFSV4OP_OPENDOWNGRADE ||
				     i == NFSV4OP_CLOSE ||
				     i == NFSV4OP_LOCK ||
				     i == NFSV4OP_LOCKU) &&
				    (j == 0 ||
				     (j != NFSERR_STALECLIENTID &&
				      j != NFSERR_STALESTATEID &&
				      j != NFSERR_BADSTATEID &&
				      j != NFSERR_BADSEQID &&
				      j != NFSERR_BADXDR &&
				      j != NFSERR_RESOURCE &&
				      j != NFSERR_NOFILEHANDLE)))
					nd->nd_flag |= ND_INCRSEQID;
			}
			/*
			 * If this op's status is non-zero, mark
			 * that there is no more data to process.
			 */
			if (j)
				nd->nd_flag |= ND_NOMOREDATA;

			/*
			 * If R_DONTRECOVER is set, replace the stale error
			 * reply, so that recovery isn't initiated.
			 */
			if ((nd->nd_repstat == NFSERR_STALECLIENTID ||
			     nd->nd_repstat == NFSERR_BADSESSION ||
			     nd->nd_repstat == NFSERR_STALESTATEID) &&
			    rep != NULL && (rep->r_flags & R_DONTRECOVER))
				nd->nd_repstat = NFSERR_STALEDONTRECOVER;
		}
	}

#ifdef KDTRACE_HOOKS
	if (nmp != NULL && dtrace_nfscl_nfs234_done_probe != NULL) {
		uint32_t probe_id;
		int probe_procnum;

		if (nd->nd_flag & ND_NFSV4) {
			probe_id = nfscl_nfs4_done_probes[nd->nd_procnum];
			probe_procnum = nd->nd_procnum;
		} else if (nd->nd_flag & ND_NFSV3) {
			probe_id = nfscl_nfs3_done_probes[procnum];
			probe_procnum = procnum;
		} else {
			probe_id = nfscl_nfs2_done_probes[nd->nd_procnum];
			probe_procnum = procnum;
		}
		if (probe_id != 0)
			(dtrace_nfscl_nfs234_done_probe)(probe_id, vp,
			    nd->nd_mreq, cred, probe_procnum, 0);
	}
#endif

	m_freem(nd->nd_mreq);
	if (usegssname == 0)
		AUTH_DESTROY(auth);
	if (rep != NULL)
		FREE((caddr_t)rep, M_NFSDREQ);
	if (set_sigset)
		newnfs_restore_sigmask(td, &oldset);
	return (0);
nfsmout:
	mbuf_freem(nd->nd_mrep);
	mbuf_freem(nd->nd_mreq);
	if (usegssname == 0)
		AUTH_DESTROY(auth);
	if (rep != NULL)
		FREE((caddr_t)rep, M_NFSDREQ);
	if (set_sigset)
		newnfs_restore_sigmask(td, &oldset);
	return (error);
}

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete. This is used by forced unmounts
 * to terminate any outstanding RPCs.
 */
int
newnfs_nmcancelreqs(struct nfsmount *nmp)
{

	if (nmp->nm_sockreq.nr_client != NULL)
		CLNT_CLOSE(nmp->nm_sockreq.nr_client);
	return (0);
}

/*
 * Any signal that can interrupt an NFS operation in an intr mount
 * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
 */
int newnfs_sig_set[] = {
	SIGINT,
	SIGTERM,
	SIGHUP,
	SIGKILL,
	SIGQUIT
};

/*
 * Check to see if one of the signals in our subset is pending on
 * the process (in an intr mount).
 */
static int
nfs_sig_pending(sigset_t set)
{
	int i;

	for (i = 0 ; i < nitems(newnfs_sig_set); i++)
		if (SIGISMEMBER(set, newnfs_sig_set[i]))
			return (1);
	return (0);
}

/*
 * The set/restore sigmask functions are used to (temporarily) overwrite
 * the thread td_sigmask during an RPC call (for example). These are also
 * used in other places in the NFS client that might tsleep().
 */
void
newnfs_set_sigmask(struct thread *td, sigset_t *oldset)
{
	sigset_t newset;
	int i;
	struct proc *p;

	SIGFILLSET(newset);
	if (td == NULL)
		td = curthread; /* XXX */
	p = td->td_proc;
	/* Remove the NFS set of signals from newset */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	for (i = 0 ; i < nitems(newnfs_sig_set); i++) {
		/*
		 * But make sure we leave the ones already masked
		 * by the process, ie. remove the signal from the
		 * temporary signalmask only if it wasn't already
		 * in p_sigmask.
		 */
		if (!SIGISMEMBER(td->td_sigmask, newnfs_sig_set[i]) &&
		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, newnfs_sig_set[i]))
			SIGDELSET(newset, newnfs_sig_set[i]);
	}
	mtx_unlock(&p->p_sigacts->ps_mtx);
	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset,
	    SIGPROCMASK_PROC_LOCKED);
	PROC_UNLOCK(p);
}

void
newnfs_restore_sigmask(struct thread *td, sigset_t *set)
{
	if (td == NULL)
		td = curthread; /* XXX */
	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
}

/*
 * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the
 * old one after msleep() returns.
 */
int
newnfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
{
	sigset_t oldset;
	int error;
	struct proc *p;

	if ((priority & PCATCH) == 0)
		return msleep(ident, mtx, priority, wmesg, timo);
	if (td == NULL)
		td = curthread; /* XXX */
	newnfs_set_sigmask(td, &oldset);
	error = msleep(ident, mtx, priority, wmesg, timo);
	newnfs_restore_sigmask(td, &oldset);
	p = td->td_proc;
	return (error);
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
newnfs_sigintr(struct nfsmount *nmp, struct thread *td)
{
	struct proc *p;
	sigset_t tmpset;

	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EIO);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (td == NULL)
		return (0);
	p = td->td_proc;
	PROC_LOCK(p);
	tmpset = p->p_siglist;
	SIGSETOR(tmpset, td->td_siglist);
	SIGSETNAND(tmpset, td->td_sigmask);
	mtx_lock(&p->p_sigacts->ps_mtx);
	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if ((SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist))
	    && nfs_sig_pending(tmpset)) {
		PROC_UNLOCK(p);
		return (EINTR);
	}
	PROC_UNLOCK(p);
	return (0);
}

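/*
 * Log an informational message about the given NFS server via tprintf(),
 * directed at the calling process's controlling terminal when td != NULL.
 */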
static int
nfs_msg(struct thread *td, const char *server, const char *msg, int error)
{
	struct proc *p;

	p = td ? td->td_proc : NULL;
	if (error) {
		tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n",
		    server, msg, error);
	} else {
		tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
	}
	return (0);
}

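/*
 * Mark the mount as not responding: set NFSSTA_TIMEO and/or
 * NFSSTA_LOCKTIMEO as requested by "flags", post the corresponding
 * VQ_NOTRESP/VQ_NOTRESPLOCK VFS events and log a message via nfs_msg().
 */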
static void
nfs_down(struct nfsmount *nmp, struct thread *td, const char *msg,
    int error, int flags)
{
	if (nmp == NULL)
		return;
	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state |= NFSSTA_TIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESP, 0);
	} else
		mtx_unlock(&nmp->nm_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESPLOCK, 0);
	} else
		mtx_unlock(&nmp->nm_mtx);
	nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
}

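/*
 * Clear the "not responding" state set by nfs_down() for the flags given,
 * post the matching VFS events with the "resolved" argument and, if a
 * tprintf message was previously issued, log that the server is alive again.
 */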
static void
nfs_up(struct nfsmount *nmp, struct thread *td, const char *msg,
    int flags, int tprintfmsg)
{
	if (nmp == NULL)
		return;
	if (tprintfmsg) {
		nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state &= ~NFSSTA_TIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESP, 1);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESPLOCK, 1);
	} else
		mtx_unlock(&nmp->nm_mtx);
}
