1 /* $NetBSD: nfs_syscalls.c,v 1.130.6.1 2008/04/03 12:43:10 mjf Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)nfs_syscalls.c 8.5 (Berkeley) 3/30/95
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: nfs_syscalls.c,v 1.130.6.1 2008/04/03 12:43:10 mjf Exp $");
39
40 #include "fs_nfs.h"
41 #include "opt_nfs.h"
42 #include "opt_nfsserver.h"
43 #include "opt_iso.h"
44 #include "opt_inet.h"
45 #include "opt_compat_netbsd.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/file.h>
51 #include <sys/stat.h>
52 #include <sys/vnode.h>
53 #include <sys/mount.h>
54 #include <sys/proc.h>
55 #include <sys/uio.h>
56 #include <sys/malloc.h>
57 #include <sys/kmem.h>
58 #include <sys/buf.h>
59 #include <sys/mbuf.h>
60 #include <sys/socket.h>
61 #include <sys/socketvar.h>
62 #include <sys/signalvar.h>
63 #include <sys/domain.h>
64 #include <sys/protosw.h>
65 #include <sys/namei.h>
66 #include <sys/syslog.h>
67 #include <sys/filedesc.h>
68 #include <sys/kthread.h>
69 #include <sys/kauth.h>
70 #include <sys/syscallargs.h>
71
72 #include <netinet/in.h>
73 #include <netinet/tcp.h>
74 #ifdef ISO
75 #include <netiso/iso.h>
76 #endif
77 #include <nfs/xdr_subs.h>
78 #include <nfs/rpcv2.h>
79 #include <nfs/nfsproto.h>
80 #include <nfs/nfs.h>
81 #include <nfs/nfsm_subs.h>
82 #include <nfs/nfsrvcache.h>
83 #include <nfs/nfsmount.h>
84 #include <nfs/nfsnode.h>
85 #include <nfs/nfsrtt.h>
86 #include <nfs/nfs_var.h>
87
88 /* Global defs. */
89 extern int32_t (*nfsrv3_procs[NFS_NPROCS]) __P((struct nfsrv_descript *,
90 struct nfssvc_sock *,
91 struct lwp *, struct mbuf **));
92 extern int nfsrvw_procrastinate;
93
94 struct nfssvc_sock *nfs_udpsock;
95 #ifdef ISO
96 struct nfssvc_sock *nfs_cltpsock;
97 #endif
98 #ifdef INET6
99 struct nfssvc_sock *nfs_udp6sock;
100 #endif
101 int nuidhash_max = NFS_MAXUIDHASH;
102 #ifdef NFSSERVER
103 static int nfs_numnfsd = 0;
104 static struct nfsdrt nfsdrt;
105 #endif
106
107 #ifdef NFSSERVER
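/*
 * Server-side global state.  nfsd_lock protects the socket lists
 * (nfssvc_sockhead, nfssvc_sockpending), the nfsd lists (nfsd_head,
 * nfsd_idle_head), each socket's write-gather queue (ns_tq) and
 * reference count (ns_sref), and the SLP_INIT/NFSD_CHECKSLP flags
 * below.  nfsd_initcv is broadcast once nfsrv_init() has finished
 * (re)initializing this state.
 */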
108 kmutex_t nfsd_lock;
109 struct nfssvc_sockhead nfssvc_sockhead;
110 kcondvar_t nfsd_initcv;
111 struct nfssvc_sockhead nfssvc_sockpending;
112 struct nfsdhead nfsd_head;
113 struct nfsdidlehead nfsd_idle_head;
114
115 int nfssvc_sockhead_flag;
116 int nfsd_head_flag;
117 #endif
118
119 #ifdef NFS
120 /*
121 * locking order:
122 * nfs_iodlist_lock -> nid_lock -> nm_lock
123 */
124 kmutex_t nfs_iodlist_lock;
125 struct nfs_iodlist nfs_iodlist_idle;
126 struct nfs_iodlist nfs_iodlist_all;
127 int nfs_niothreads = -1; /* == "0, and has never been set" */
128 #endif
129
130 #ifdef NFSSERVER
131 static struct nfssvc_sock *nfsrv_sockalloc __P((void));
132 static void nfsrv_sockfree __P((struct nfssvc_sock *));
133 static void nfsd_rt __P((int, struct nfsrv_descript *, int));
134 #endif
135
136 /*
137 * NFS server system calls
138 */
139
140
141 /*
142 * NFS server pseudo system call for the nfsds.
143 * Based on the flag value it either:
144 * - adds a socket to the selection list
145 * - remains in the kernel as an nfsd
146 * - remains in the kernel as an nfsiod
147 */
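/*
 * Usage sketch (userland side, not part of this file): an rpc.nfsd-style
 * daemon would typically hand the kernel a bound socket and then re-enter
 * the kernel as a server thread, roughly:
 *
 *	struct nfsd_args nfsdargs = { .sock = sockfd, .name = NULL, .namelen = 0 };
 *	if (nfssvc(NFSSVC_ADDSOCK, &nfsdargs) < 0)
 *		err(1, "nfssvc");
 *
 * "sockfd" and the error handling are illustrative only; see nfssvc(2)
 * and the flag dispatch below for the authoritative interface.
 */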
148 int
149 sys_nfssvc(struct lwp *l, const struct sys_nfssvc_args *uap, register_t *retval)
150 {
151 /* {
152 syscallarg(int) flag;
153 syscallarg(void *) argp;
154 } */
155 int error;
156 #ifdef NFSSERVER
157 file_t *fp;
158 struct mbuf *nam;
159 struct nfsd_args nfsdarg;
160 struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
161 struct nfsd *nfsd;
162 struct nfssvc_sock *slp;
163 struct nfsuid *nuidp;
164 #endif
165
166 /*
167 * Must be super user
168 */
169 error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_NFS,
170 KAUTH_REQ_NETWORK_NFS_SVC, NULL, NULL, NULL);
171 if (error)
172 return (error);
173
174 /* Initialize NFS server / client shared data. */
175 nfs_init();
176
177 #ifdef NFSSERVER
178 mutex_enter(&nfsd_lock);
179 while (nfssvc_sockhead_flag & SLP_INIT) {
180 cv_wait(&nfsd_initcv, &nfsd_lock);
181 }
182 mutex_exit(&nfsd_lock);
183 #endif
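/*
 * Dispatch on the flag.  NFSSVC_BIOD is only a COMPAT_14 stub
 * (asynchronous I/O is now handled by in-kernel nfsiod threads, see
 * nfssvc_iod() below) and NFSSVC_MNTD is not supported.
 */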
184 if (SCARG(uap, flag) & NFSSVC_BIOD) {
185 #if defined(NFS) && defined(COMPAT_14)
186 error = kpause("nfsbiod", true, 0, NULL); /* dummy impl */
187 #else
188 error = ENOSYS;
189 #endif
190 } else if (SCARG(uap, flag) & NFSSVC_MNTD) {
191 error = ENOSYS;
192 } else if (SCARG(uap, flag) & NFSSVC_ADDSOCK) {
193 #ifndef NFSSERVER
194 error = ENOSYS;
195 #else
196 error = copyin(SCARG(uap, argp), (void *)&nfsdarg,
197 sizeof(nfsdarg));
198 if (error)
199 return (error);
200 /* getsock() will use the descriptor for us */
201 error = getsock(nfsdarg.sock, &fp);
202 if (error)
203 return (error);
204 /*
205 * Get the client address for connected sockets.
206 */
207 if (nfsdarg.name == NULL || nfsdarg.namelen == 0)
208 nam = (struct mbuf *)0;
209 else {
210 error = sockargs(&nam, nfsdarg.name, nfsdarg.namelen,
211 MT_SONAME);
212 if (error) {
213 fd_putfile(nfsdarg.sock);
214 return (error);
215 }
216 }
217 error = nfssvc_addsock(fp, nam);
218 fd_putfile(nfsdarg.sock);
219 #endif /* !NFSSERVER */
220 } else if (SCARG(uap, flag) & NFSSVC_SETEXPORTSLIST) {
221 #ifndef NFSSERVER
222 error = ENOSYS;
223 #else
224 struct export_args *args;
225 struct mountd_exports_list mel;
226
227 error = copyin(SCARG(uap, argp), &mel, sizeof(mel));
228 if (error != 0)
229 return error;
230
231 args = (struct export_args *)malloc(mel.mel_nexports *
232 sizeof(struct export_args), M_TEMP, M_WAITOK);
233 error = copyin(mel.mel_exports, args, mel.mel_nexports *
234 sizeof(struct export_args));
235 if (error != 0) {
236 free(args, M_TEMP);
237 return error;
238 }
239 mel.mel_exports = args;
240
241 error = mountd_set_exports_list(&mel, l);
242
243 free(args, M_TEMP);
244 #endif /* !NFSSERVER */
245 } else {
246 #ifndef NFSSERVER
247 error = ENOSYS;
248 #else
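/*
 * Default case: the calling process becomes an nfsd.  A Kerberos
 * credential passed in with NFSSVC_AUTHIN is installed in the
 * per-socket uid hash before we enter the service loop in
 * nfssvc_nfsd().
 */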
249 error = copyin(SCARG(uap, argp), (void *)nsd, sizeof (*nsd));
250 if (error)
251 return (error);
252 if ((SCARG(uap, flag) & NFSSVC_AUTHIN) &&
253 ((nfsd = nsd->nsd_nfsd)) != NULL &&
254 (nfsd->nfsd_slp->ns_flags & SLP_VALID)) {
255 slp = nfsd->nfsd_slp;
256
257 /*
258 * First check to see if another nfsd has already
259 * added this credential.
260 */
261 LIST_FOREACH(nuidp, NUIDHASH(slp, nsd->nsd_cr.cr_uid),
262 nu_hash) {
263 if (kauth_cred_geteuid(nuidp->nu_cr) ==
264 nsd->nsd_cr.cr_uid &&
265 (!nfsd->nfsd_nd->nd_nam2 ||
266 netaddr_match(NU_NETFAM(nuidp),
267 &nuidp->nu_haddr, nfsd->nfsd_nd->nd_nam2)))
268 break;
269 }
270 if (nuidp) {
271 kauth_cred_hold(nuidp->nu_cr);
272 nfsd->nfsd_nd->nd_cr = nuidp->nu_cr;
273 nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
274 } else {
275 /*
276 * Nope, so we will.
277 */
278 if (slp->ns_numuids < nuidhash_max) {
279 slp->ns_numuids++;
280 nuidp = kmem_alloc(sizeof(*nuidp), KM_SLEEP);
281 } else
282 nuidp = (struct nfsuid *)0;
283 if ((slp->ns_flags & SLP_VALID) == 0) {
284 if (nuidp)
285 kmem_free(nuidp, sizeof(*nuidp));
286 } else {
287 if (nuidp == (struct nfsuid *)0) {
288 nuidp = TAILQ_FIRST(&slp->ns_uidlruhead);
289 LIST_REMOVE(nuidp, nu_hash);
290 TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp,
291 nu_lru);
292 if (nuidp->nu_flag & NU_NAM)
293 m_freem(nuidp->nu_nam);
294 }
295 nuidp->nu_flag = 0;
296 kauth_uucred_to_cred(nuidp->nu_cr,
297 &nsd->nsd_cr);
298 nuidp->nu_timestamp = nsd->nsd_timestamp;
299 nuidp->nu_expire = time_second + nsd->nsd_ttl;
300 /*
301 * and save the session key in nu_key.
302 */
303 memcpy(nuidp->nu_key, nsd->nsd_key,
304 sizeof(nsd->nsd_key));
305 if (nfsd->nfsd_nd->nd_nam2) {
306 struct sockaddr_in *saddr;
307
308 saddr = mtod(nfsd->nfsd_nd->nd_nam2,
309 struct sockaddr_in *);
310 switch (saddr->sin_family) {
311 case AF_INET:
312 nuidp->nu_flag |= NU_INETADDR;
313 nuidp->nu_inetaddr =
314 saddr->sin_addr.s_addr;
315 break;
316 case AF_ISO:
317 default:
318 nuidp->nu_flag |= NU_NAM;
319 nuidp->nu_nam = m_copym(
320 nfsd->nfsd_nd->nd_nam2, 0,
321 M_COPYALL, M_WAIT);
322 break;
323 }
324 }
325 TAILQ_INSERT_TAIL(&slp->ns_uidlruhead, nuidp,
326 nu_lru);
327 LIST_INSERT_HEAD(NUIDHASH(slp, nsd->nsd_uid),
328 nuidp, nu_hash);
329 kauth_cred_hold(nuidp->nu_cr);
330 nfsd->nfsd_nd->nd_cr = nuidp->nu_cr;
331 nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
332 }
333 }
334 }
335 if ((SCARG(uap, flag) & NFSSVC_AUTHINFAIL) &&
336 (nfsd = nsd->nsd_nfsd))
337 nfsd->nfsd_flag |= NFSD_AUTHFAIL;
338 error = nfssvc_nfsd(nsd, SCARG(uap, argp), l);
339 #endif /* !NFSSERVER */
340 }
341 if (error == EINTR || error == ERESTART)
342 error = 0;
343 return (error);
344 }
345
346 #ifdef NFSSERVER
347 MALLOC_DEFINE(M_NFSD, "NFS daemon", "Nfs server daemon structure");
348
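/*
 * Allocate an nfssvc_sock, initialize its locks and queues and put it
 * on the global nfssvc_sockhead list.  The entry is not yet usable;
 * nfssvc_addsock() later fills in ns_so/ns_fp/ns_nam and sets
 * SLP_VALID.
 */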
349 static struct nfssvc_sock *
350 nfsrv_sockalloc()
351 {
352 struct nfssvc_sock *slp;
353
354 slp = kmem_alloc(sizeof(*slp), KM_SLEEP);
355 memset(slp, 0, sizeof (struct nfssvc_sock));
356 /* XXX could be IPL_SOFTNET */
357 mutex_init(&slp->ns_lock, MUTEX_DRIVER, IPL_VM);
358 mutex_init(&slp->ns_alock, MUTEX_DRIVER, IPL_VM);
359 cv_init(&slp->ns_cv, "nfsdsock");
360 TAILQ_INIT(&slp->ns_uidlruhead);
361 LIST_INIT(&slp->ns_tq);
362 SIMPLEQ_INIT(&slp->ns_sendq);
363 mutex_enter(&nfsd_lock);
364 TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain);
365 mutex_exit(&nfsd_lock);
366
367 return slp;
368 }
369
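/*
 * Free an nfssvc_sock once its socket and file reference have been
 * released and it has been removed from the global list; the
 * counterpart of nfsrv_sockalloc().
 */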
370 static void
371 nfsrv_sockfree(struct nfssvc_sock *slp)
372 {
373
374 KASSERT(slp->ns_so == NULL);
375 KASSERT(slp->ns_fp == NULL);
376 KASSERT((slp->ns_flags & SLP_VALID) == 0);
377 mutex_destroy(&slp->ns_lock);
378 mutex_destroy(&slp->ns_alock);
379 cv_destroy(&slp->ns_cv);
380 kmem_free(slp, sizeof(*slp));
381 }
382
383 /*
384 * Adds a socket to the list for servicing by nfsds.
385 */
386 int
387 nfssvc_addsock(fp, mynam)
388 file_t *fp;
389 struct mbuf *mynam;
390 {
391 struct mbuf *m;
392 int siz;
393 struct nfssvc_sock *slp;
394 struct socket *so;
395 struct nfssvc_sock *tslp;
396 int error, s;
397
398 so = (struct socket *)fp->f_data;
399 tslp = (struct nfssvc_sock *)0;
400 /*
401 * Add it to the list, as required.
402 */
403 if (so->so_proto->pr_protocol == IPPROTO_UDP) {
404 #ifdef INET6
405 if (so->so_proto->pr_domain->dom_family == AF_INET6)
406 tslp = nfs_udp6sock;
407 else
408 #endif
409 tslp = nfs_udpsock;
410 if (tslp->ns_flags & SLP_VALID) {
411 m_freem(mynam);
412 return (EPERM);
413 }
414 #ifdef ISO
415 } else if (so->so_proto->pr_protocol == ISOPROTO_CLTP) {
416 tslp = nfs_cltpsock;
417 if (tslp->ns_flags & SLP_VALID) {
418 m_freem(mynam);
419 return (EPERM);
420 }
421 #endif /* ISO */
422 }
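/* Stream sockets need room for the RPC record mark in front of each reply. */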
423 if (so->so_type == SOCK_STREAM)
424 siz = NFS_MAXPACKET + sizeof (u_long);
425 else
426 siz = NFS_MAXPACKET;
427 error = soreserve(so, siz, siz);
428 if (error) {
429 m_freem(mynam);
430 return (error);
431 }
432
433 /*
434 * Set protocol specific options { for now TCP only } and
435 * reserve some space. For datagram sockets, this can get called
436 * repeatedly for the same socket, but that isn't harmful.
437 */
438 if (so->so_type == SOCK_STREAM) {
439 m = m_get(M_WAIT, MT_SOOPTS);
440 MCLAIM(m, &nfs_mowner);
441 *mtod(m, int32_t *) = 1;
442 m->m_len = sizeof(int32_t);
443 sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
444 }
445 if ((so->so_proto->pr_domain->dom_family == AF_INET
446 #ifdef INET6
447 || so->so_proto->pr_domain->dom_family == AF_INET6
448 #endif
449 ) &&
450 so->so_proto->pr_protocol == IPPROTO_TCP) {
451 m = m_get(M_WAIT, MT_SOOPTS);
452 MCLAIM(m, &nfs_mowner);
453 *mtod(m, int32_t *) = 1;
454 m->m_len = sizeof(int32_t);
455 sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
456 }
457 so->so_rcv.sb_flags &= ~SB_NOINTR;
458 so->so_rcv.sb_timeo = 0;
459 so->so_snd.sb_flags &= ~SB_NOINTR;
460 so->so_snd.sb_timeo = 0;
461 if (tslp) {
462 slp = tslp;
463 } else {
464 slp = nfsrv_sockalloc();
465 }
466 slp->ns_so = so;
467 slp->ns_nam = mynam;
468 mutex_enter(&fp->f_lock);
469 fp->f_count++;
470 mutex_exit(&fp->f_lock);
471 slp->ns_fp = fp;
472 slp->ns_flags = SLP_VALID;
473 slp->ns_aflags = SLP_A_NEEDQ;
474 slp->ns_gflags = 0;
475 slp->ns_sflags = 0;
476 KERNEL_LOCK(1, curlwp);
477 s = splsoftnet();
478 so->so_upcallarg = (void *)slp;
479 so->so_upcall = nfsrv_soupcall;
480 so->so_rcv.sb_flags |= SB_UPCALL;
481 splx(s);
482 KERNEL_UNLOCK_ONE(curlwp);
483 nfsrv_wakenfsd(slp);
484 return (0);
485 }
486
487 /*
488 * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
489 * until it is killed by a signal.
490 */
491 int
492 nfssvc_nfsd(nsd, argp, l)
493 struct nfsd_srvargs *nsd;
494 void *argp;
495 struct lwp *l;
496 {
497 struct timeval tv;
498 struct mbuf *m;
499 struct nfssvc_sock *slp;
500 struct nfsd *nfsd = nsd->nsd_nfsd;
501 struct nfsrv_descript *nd = NULL;
502 struct mbuf *mreq;
503 u_quad_t cur_usec;
504 int error = 0, cacherep, siz, sotype, writes_todo;
505 struct proc *p = l->l_proc;
506 int s;
507 bool doreinit;
508
509 #ifndef nolint
510 cacherep = RC_DOIT;
511 writes_todo = 0;
512 #endif
513 uvm_lwp_hold(l);
514 if (nfsd == NULL) {
515 nsd->nsd_nfsd = nfsd = kmem_alloc(sizeof(*nfsd), KM_SLEEP);
516 memset(nfsd, 0, sizeof (struct nfsd));
517 cv_init(&nfsd->nfsd_cv, "nfsd");
518 nfsd->nfsd_procp = p;
519 mutex_enter(&nfsd_lock);
520 while ((nfssvc_sockhead_flag & SLP_INIT) != 0) {
521 KASSERT(nfs_numnfsd == 0);
522 cv_wait(&nfsd_initcv, &nfsd_lock);
523 }
524 TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain);
525 nfs_numnfsd++;
526 mutex_exit(&nfsd_lock);
527 }
528 /*
529 * Loop getting rpc requests until SIGKILL.
530 */
531 for (;;) {
532 bool dummy;
533
534 if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
535 != 0) {
536 preempt();
537 }
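/*
 * No socket assigned: park on the idle list and sleep until
 * nfsrv_wakenfsd() hands us one, or grab a pending socket from
 * nfssvc_sockpending ourselves if NFSD_CHECKSLP is set.
 */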
538 if (nfsd->nfsd_slp == NULL) {
539 mutex_enter(&nfsd_lock);
540 while (nfsd->nfsd_slp == NULL &&
541 (nfsd_head_flag & NFSD_CHECKSLP) == 0) {
542 SLIST_INSERT_HEAD(&nfsd_idle_head, nfsd,
543 nfsd_idle);
544 error = cv_wait_sig(&nfsd->nfsd_cv, &nfsd_lock);
545 if (error) {
546 slp = nfsd->nfsd_slp;
547 nfsd->nfsd_slp = NULL;
548 if (!slp)
549 SLIST_REMOVE(&nfsd_idle_head,
550 nfsd, nfsd, nfsd_idle);
551 mutex_exit(&nfsd_lock);
552 if (slp) {
553 nfsrv_wakenfsd(slp);
554 nfsrv_slpderef(slp);
555 }
556 goto done;
557 }
558 }
559 if (nfsd->nfsd_slp == NULL &&
560 (nfsd_head_flag & NFSD_CHECKSLP) != 0) {
561 slp = TAILQ_FIRST(&nfssvc_sockpending);
562 if (slp) {
563 KASSERT((slp->ns_gflags & SLP_G_DOREC)
564 != 0);
565 TAILQ_REMOVE(&nfssvc_sockpending, slp,
566 ns_pending);
567 slp->ns_gflags &= ~SLP_G_DOREC;
568 slp->ns_sref++;
569 nfsd->nfsd_slp = slp;
570 } else
571 nfsd_head_flag &= ~NFSD_CHECKSLP;
572 }
573 KASSERT(nfsd->nfsd_slp == NULL ||
574 nfsd->nfsd_slp->ns_sref > 0);
575 mutex_exit(&nfsd_lock);
576 if ((slp = nfsd->nfsd_slp) == NULL)
577 continue;
578 if (slp->ns_flags & SLP_VALID) {
579 bool more;
580
581 if (nfsdsock_testbits(slp, SLP_A_NEEDQ)) {
582 nfsrv_rcv(slp);
583 }
584 if (nfsdsock_testbits(slp, SLP_A_DISCONN)) {
585 nfsrv_zapsock(slp);
586 }
587 error = nfsrv_dorec(slp, nfsd, &nd, &more);
588 getmicrotime(&tv);
589 cur_usec = (u_quad_t)tv.tv_sec * 1000000 +
590 (u_quad_t)tv.tv_usec;
591 writes_todo = 0;
592 if (error) {
593 struct nfsrv_descript *nd2;
594
595 mutex_enter(&nfsd_lock);
596 nd2 = LIST_FIRST(&slp->ns_tq);
597 if (nd2 != NULL &&
598 nd2->nd_time <= cur_usec) {
599 error = 0;
600 cacherep = RC_DOIT;
601 writes_todo = 1;
602 }
603 mutex_exit(&nfsd_lock);
604 }
605 if (error == 0 && more) {
606 nfsrv_wakenfsd(slp);
607 }
608 }
609 } else {
610 error = 0;
611 slp = nfsd->nfsd_slp;
612 }
613 KASSERT(slp != NULL);
614 KASSERT(nfsd->nfsd_slp == slp);
615 if (error || (slp->ns_flags & SLP_VALID) == 0) {
616 if (nd) {
617 nfsdreq_free(nd);
618 nd = NULL;
619 }
620 nfsd->nfsd_slp = NULL;
621 nfsrv_slpderef(slp);
622 continue;
623 }
624 sotype = slp->ns_so->so_type;
625 if (nd) {
626 getmicrotime(&nd->nd_starttime);
627 if (nd->nd_nam2)
628 nd->nd_nam = nd->nd_nam2;
629 else
630 nd->nd_nam = slp->ns_nam;
631
632 /*
633 * Check to see if authorization is needed.
634 */
635 if (nfsd->nfsd_flag & NFSD_NEEDAUTH) {
636 nfsd->nfsd_flag &= ~NFSD_NEEDAUTH;
637 nsd->nsd_haddr = mtod(nd->nd_nam,
638 struct sockaddr_in *)->sin_addr.s_addr;
639 nsd->nsd_authlen = nfsd->nfsd_authlen;
640 nsd->nsd_verflen = nfsd->nfsd_verflen;
641 if (!copyout(nfsd->nfsd_authstr,
642 nsd->nsd_authstr, nfsd->nfsd_authlen) &&
643 !copyout(nfsd->nfsd_verfstr,
644 nsd->nsd_verfstr, nfsd->nfsd_verflen) &&
645 !copyout(nsd, argp, sizeof (*nsd))) {
646 uvm_lwp_rele(l);
647 return (ENEEDAUTH);
648 }
649 cacherep = RC_DROPIT;
650 } else
651 cacherep = nfsrv_getcache(nd, slp, &mreq);
652
653 if (nfsd->nfsd_flag & NFSD_AUTHFAIL) {
654 nfsd->nfsd_flag &= ~NFSD_AUTHFAIL;
655 nd->nd_procnum = NFSPROC_NOOP;
656 nd->nd_repstat =
657 (NFSERR_AUTHERR | AUTH_TOOWEAK);
658 cacherep = RC_DOIT;
659 }
660 }
661
662 /*
663 * Loop to get all the write rpc replies that have been
664 * gathered together.
665 */
666 do {
667 switch (cacherep) {
668 case RC_DOIT:
669 mreq = NULL;
670 netexport_rdlock();
671 if (writes_todo || nd == NULL ||
672 (!(nd->nd_flag & ND_NFSV3) &&
673 nd->nd_procnum == NFSPROC_WRITE &&
674 nfsrvw_procrastinate > 0))
675 error = nfsrv_writegather(&nd, slp,
676 l, &mreq);
677 else
678 error =
679 (*(nfsrv3_procs[nd->nd_procnum]))
680 (nd, slp, l, &mreq);
681 netexport_rdunlock();
682 if (mreq == NULL) {
683 if (nd != NULL) {
684 if (nd->nd_nam2)
685 m_free(nd->nd_nam2);
686 if (nd->nd_mrep)
687 m_freem(nd->nd_mrep);
688 }
689 break;
690 }
691 if (error) {
692 nfsstats.srv_errs++;
693 nfsrv_updatecache(nd, false, mreq);
694 if (nd->nd_nam2)
695 m_freem(nd->nd_nam2);
696 break;
697 }
698 nfsstats.srvrpccnt[nd->nd_procnum]++;
699 nfsrv_updatecache(nd, true, mreq);
700 nd->nd_mrep = (struct mbuf *)0;
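/* FALLTHROUGH */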
701 case RC_REPLY:
702 m = mreq;
703 siz = 0;
704 while (m) {
705 siz += m->m_len;
706 m = m->m_next;
707 }
708 if (siz <= 0 || siz > NFS_MAXPACKET) {
709 printf("mbuf siz=%d\n",siz);
710 panic("Bad nfs svc reply");
711 }
712 m = mreq;
713 m->m_pkthdr.len = siz;
714 m->m_pkthdr.rcvif = (struct ifnet *)0;
715 /*
716 * For stream protocols, prepend a Sun RPC
717 * Record Mark.
718 */
719 if (sotype == SOCK_STREAM) {
720 M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
721 *mtod(m, u_int32_t *) =
722 htonl(0x80000000 | siz);
723 }
724 nd->nd_mreq = m;
725 if (nfsrtton) {
726 nfsd_rt(slp->ns_so->so_type, nd,
727 cacherep);
728 }
729 error = nfsdsock_sendreply(slp, nd);
730 nd = NULL;
731 if (error == EPIPE)
732 nfsrv_zapsock(slp);
733 if (error == EINTR || error == ERESTART) {
734 nfsd->nfsd_slp = NULL;
735 nfsrv_slpderef(slp);
736 goto done;
737 }
738 break;
739 case RC_DROPIT:
740 if (nfsrtton)
741 nfsd_rt(sotype, nd, cacherep);
742 m_freem(nd->nd_mrep);
743 m_freem(nd->nd_nam2);
744 break;
745 }
746 if (nd) {
747 nfsdreq_free(nd);
748 nd = NULL;
749 }
750
751 /*
752 * Check to see if there are outstanding writes that
753 * need to be serviced.
754 */
755 getmicrotime(&tv);
756 cur_usec = (u_quad_t)tv.tv_sec * 1000000 +
757 (u_quad_t)tv.tv_usec;
758 s = splsoftclock();
759 if (LIST_FIRST(&slp->ns_tq) &&
760 LIST_FIRST(&slp->ns_tq)->nd_time <= cur_usec) {
761 cacherep = RC_DOIT;
762 writes_todo = 1;
763 } else
764 writes_todo = 0;
765 splx(s);
766 } while (writes_todo);
767 if (nfsrv_dorec(slp, nfsd, &nd, &dummy)) {
768 nfsd->nfsd_slp = NULL;
769 nfsrv_slpderef(slp);
770 }
771 }
772 done:
773 mutex_enter(&nfsd_lock);
774 TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain);
775 doreinit = --nfs_numnfsd == 0;
776 if (doreinit)
777 nfssvc_sockhead_flag |= SLP_INIT;
778 mutex_exit(&nfsd_lock);
779 cv_destroy(&nfsd->nfsd_cv);
780 kmem_free(nfsd, sizeof(*nfsd));
781 nsd->nsd_nfsd = NULL;
782 if (doreinit)
783 nfsrv_init(true); /* Reinitialize everything */
784 uvm_lwp_rele(l);
785 return (error);
786 }
787
788 /*
789 * Shut down a socket associated with an nfssvc_sock structure.
790 * Should be called with the send lock set, if required.
791 * The trick here is to increment the sref at the start, so that the nfsds
792 * will stop using it and clear ns_flag at the end so that it will not be
793 * reassigned during cleanup.
794 *
795 * called at splsoftnet.
796 */
797 void
798 nfsrv_zapsock(slp)
799 struct nfssvc_sock *slp;
800 {
801 struct nfsuid *nuidp, *nnuidp;
802 struct nfsrv_descript *nwp;
803 struct socket *so;
804 struct mbuf *m;
805 int s;
806
807 if (nfsdsock_drain(slp)) {
808 return;
809 }
810 mutex_enter(&nfsd_lock);
811 if (slp->ns_gflags & SLP_G_DOREC) {
812 TAILQ_REMOVE(&nfssvc_sockpending, slp, ns_pending);
813 slp->ns_gflags &= ~SLP_G_DOREC;
814 }
815 mutex_exit(&nfsd_lock);
816
817 so = slp->ns_so;
818 KASSERT(so != NULL);
819 KERNEL_LOCK(1, curlwp);
820 s = splsoftnet();
821 so->so_upcall = NULL;
822 so->so_upcallarg = NULL;
823 so->so_rcv.sb_flags &= ~SB_UPCALL;
824 splx(s);
825 soshutdown(so, SHUT_RDWR);
826 KERNEL_UNLOCK_ONE(curlwp);
827
828 if (slp->ns_nam)
829 m_free(slp->ns_nam);
830 m_freem(slp->ns_raw);
831 m = slp->ns_rec;
832 while (m != NULL) {
833 struct mbuf *n;
834
835 n = m->m_nextpkt;
836 m_freem(m);
837 m = n;
838 }
839 for (nuidp = TAILQ_FIRST(&slp->ns_uidlruhead); nuidp != 0;
840 nuidp = nnuidp) {
841 nnuidp = TAILQ_NEXT(nuidp, nu_lru);
842 LIST_REMOVE(nuidp, nu_hash);
843 TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, nu_lru);
844 if (nuidp->nu_flag & NU_NAM)
845 m_freem(nuidp->nu_nam);
846 kmem_free(nuidp, sizeof(*nuidp));
847 }
848 mutex_enter(&nfsd_lock);
849 while ((nwp = LIST_FIRST(&slp->ns_tq)) != NULL) {
850 LIST_REMOVE(nwp, nd_tq);
851 mutex_exit(&nfsd_lock);
852 nfsdreq_free(nwp);
853 mutex_enter(&nfsd_lock);
854 }
855 mutex_exit(&nfsd_lock);
856 }
857
858 /*
859 * Dereference a server socket structure. If it has no more references and
860 * is no longer valid, you can throw it away.
861 */
862 void
863 nfsrv_slpderef(slp)
864 struct nfssvc_sock *slp;
865 {
866 uint32_t ref;
867
868 mutex_enter(&nfsd_lock);
869 KASSERT(slp->ns_sref > 0);
870 ref = --slp->ns_sref;
871 mutex_exit(&nfsd_lock);
872 if (ref == 0 && (slp->ns_flags & SLP_VALID) == 0) {
873 file_t *fp;
874
875 mutex_enter(&nfsd_lock);
876 KASSERT((slp->ns_gflags & SLP_G_DOREC) == 0);
877 TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
878 mutex_exit(&nfsd_lock);
879
880 fp = slp->ns_fp;
881 if (fp != NULL) {
882 slp->ns_fp = NULL;
883 KASSERT(fp != NULL);
884 KASSERT(fp->f_data == slp->ns_so);
885 KASSERT(fp->f_count > 0);
886 closef(fp);
887 slp->ns_so = NULL;
888 }
889
890 nfsrv_sockfree(slp);
891 }
892 }
893
894 /*
895 * Initialize the data structures for the server.
896 * Handshake with any new nfsds starting up to avoid any chance of
897 * corruption.
898 */
899 void
900 nfsrv_init(terminating)
901 int terminating;
902 {
903 struct nfssvc_sock *slp;
904
905 if (!terminating) {
906 /* XXX could be IPL_SOFTNET */
907 mutex_init(&nfsd_lock, MUTEX_DRIVER, IPL_VM);
908 cv_init(&nfsd_initcv, "nfsdinit");
909 }
910
911 mutex_enter(&nfsd_lock);
912 if (!terminating && (nfssvc_sockhead_flag & SLP_INIT) != 0)
913 panic("nfsd init");
914 nfssvc_sockhead_flag |= SLP_INIT;
915
916 if (terminating) {
917 KASSERT(SLIST_EMPTY(&nfsd_idle_head));
918 KASSERT(TAILQ_EMPTY(&nfsd_head));
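/*
 * Tear down any remaining sockets.  Take a temporary reference so
 * that the final nfsrv_slpderef() frees each one.
 */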
919 while ((slp = TAILQ_FIRST(&nfssvc_sockhead)) != NULL) {
920 mutex_exit(&nfsd_lock);
921 KASSERT(slp->ns_sref == 0);
922 slp->ns_sref++;
923 nfsrv_zapsock(slp);
924 nfsrv_slpderef(slp);
925 mutex_enter(&nfsd_lock);
926 }
927 KASSERT(TAILQ_EMPTY(&nfssvc_sockpending));
928 mutex_exit(&nfsd_lock);
929 nfsrv_cleancache(); /* And clear out server cache */
930 } else {
931 mutex_exit(&nfsd_lock);
932 nfs_pub.np_valid = 0;
933 }
934
935 TAILQ_INIT(&nfssvc_sockhead);
936 TAILQ_INIT(&nfssvc_sockpending);
937
938 TAILQ_INIT(&nfsd_head);
939 SLIST_INIT(&nfsd_idle_head);
940 nfsd_head_flag &= ~NFSD_CHECKSLP;
941
942 nfs_udpsock = nfsrv_sockalloc();
943
944 #ifdef INET6
945 nfs_udp6sock = nfsrv_sockalloc();
946 #endif
947
948 #ifdef ISO
949 nfs_cltpsock = nfsrv_sockalloc();
950 #endif
951
952 mutex_enter(&nfsd_lock);
953 nfssvc_sockhead_flag &= ~SLP_INIT;
954 cv_broadcast(&nfsd_initcv);
955 mutex_exit(&nfsd_lock);
956 }
957
958 /*
959 * Add entries to the server monitor log.
960 */
961 static void
962 nfsd_rt(sotype, nd, cacherep)
963 int sotype;
964 struct nfsrv_descript *nd;
965 int cacherep;
966 {
967 struct timeval tv;
968 struct drt *rt;
969
970 rt = &nfsdrt.drt[nfsdrt.pos];
971 if (cacherep == RC_DOIT)
972 rt->flag = 0;
973 else if (cacherep == RC_REPLY)
974 rt->flag = DRT_CACHEREPLY;
975 else
976 rt->flag = DRT_CACHEDROP;
977 if (sotype == SOCK_STREAM)
978 rt->flag |= DRT_TCP;
979 if (nd->nd_flag & ND_NFSV3)
980 rt->flag |= DRT_NFSV3;
981 rt->proc = nd->nd_procnum;
982 if (mtod(nd->nd_nam, struct sockaddr *)->sa_family == AF_INET)
983 rt->ipadr = mtod(nd->nd_nam, struct sockaddr_in *)->sin_addr.s_addr;
984 else
985 rt->ipadr = INADDR_ANY;
986 getmicrotime(&tv);
987 rt->resptime = ((tv.tv_sec - nd->nd_starttime.tv_sec) * 1000000) +
988 (tv.tv_usec - nd->nd_starttime.tv_usec);
989 rt->tstamp = tv;
990 nfsdrt.pos = (nfsdrt.pos + 1) % NFSRTTLOGSIZ;
991 }
992 #endif /* NFSSERVER */
993
994 #ifdef NFS
995
996 int nfs_defect = 0;
997 /*
998 * Asynchronous I/O threads for client nfs.
999 * They do read-ahead and write-behind operations on the block I/O cache.
1000 * They never return unless they fail or get killed.
1001 */
1002
1003 static void
1004 nfssvc_iod(void *arg)
1005 {
1006 struct buf *bp;
1007 struct nfs_iod *myiod;
1008 struct nfsmount *nmp;
1009
1010 myiod = kmem_alloc(sizeof(*myiod), KM_SLEEP);
1011 mutex_init(&myiod->nid_lock, MUTEX_DEFAULT, IPL_NONE);
1012 cv_init(&myiod->nid_cv, "nfsiod");
1013 myiod->nid_exiting = false;
1014 myiod->nid_mount = NULL;
1015 mutex_enter(&nfs_iodlist_lock);
1016 LIST_INSERT_HEAD(&nfs_iodlist_all, myiod, nid_all);
1017 mutex_exit(&nfs_iodlist_lock);
1018
1019 for (;;) {
1020 mutex_enter(&nfs_iodlist_lock);
1021 LIST_INSERT_HEAD(&nfs_iodlist_idle, myiod, nid_idle);
1022 mutex_exit(&nfs_iodlist_lock);
1023
1024 mutex_enter(&myiod->nid_lock);
1025 while (/*CONSTCOND*/ true) {
1026 nmp = myiod->nid_mount;
1027 if (nmp) {
1028 myiod->nid_mount = NULL;
1029 break;
1030 }
1031 if (__predict_false(myiod->nid_exiting)) {
1032 /*
1033 * drop nid_lock to preserve locking order.
1034 */
1035 mutex_exit(&myiod->nid_lock);
1036 mutex_enter(&nfs_iodlist_lock);
1037 mutex_enter(&myiod->nid_lock);
1038 /*
1039 * recheck nid_mount because nfs_asyncio can
1040 * pick us in the meantime as we are still on
1041 * nfs_iodlist_idle.
1042 */
1043 if (myiod->nid_mount != NULL) {
1044 mutex_exit(&nfs_iodlist_lock);
1045 continue;
1046 }
1047 LIST_REMOVE(myiod, nid_idle);
1048 mutex_exit(&nfs_iodlist_lock);
1049 goto quit;
1050 }
1051 cv_wait(&myiod->nid_cv, &myiod->nid_lock);
1052 }
1053 mutex_exit(&myiod->nid_lock);
1054
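/*
 * We have been handed a mount: drain its buffer queue.  The broadcast
 * on nm_aiocv lets throttled queuers (nfs_asyncio) continue once the
 * backlog drops below twice the number of iods serving this mount.
 */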
1055 mutex_enter(&nmp->nm_lock);
1056 while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
1057 /* Take one off the front of the list */
1058 TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
1059 nmp->nm_bufqlen--;
1060 if (nmp->nm_bufqlen < 2 * nmp->nm_bufqiods) {
1061 cv_broadcast(&nmp->nm_aiocv);
1062 }
1063 mutex_exit(&nmp->nm_lock);
1064 KERNEL_LOCK(1, curlwp);
1065 (void)nfs_doio(bp);
1066 KERNEL_UNLOCK_LAST(curlwp);
1067 mutex_enter(&nmp->nm_lock);
1068 /*
1069 * If there is more than one iod on this mount,
1070 * then defect so that the iods can be shared out
1071 * fairly between the mounts.
1072 */
1073 if (nfs_defect && nmp->nm_bufqiods > 1) {
1074 break;
1075 }
1076 }
1077 KASSERT(nmp->nm_bufqiods > 0);
1078 nmp->nm_bufqiods--;
1079 mutex_exit(&nmp->nm_lock);
1080 }
1081 quit:
1082 KASSERT(myiod->nid_mount == NULL);
1083 mutex_exit(&myiod->nid_lock);
1084
1085 cv_destroy(&myiod->nid_cv);
1086 mutex_destroy(&myiod->nid_lock);
1087 kmem_free(myiod, sizeof(*myiod));
1088
1089 kthread_exit(0);
1090 }
1091
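/*
 * One-time initialization of the client iod lists and the lock that
 * protects them.
 */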
1092 void
1093 nfs_iodinit()
1094 {
1095
1096 mutex_init(&nfs_iodlist_lock, MUTEX_DEFAULT, IPL_NONE);
1097 LIST_INIT(&nfs_iodlist_all);
1098 LIST_INIT(&nfs_iodlist_idle);
1099 }
1100
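/*
 * Bring the number of "nfsio" kthreads in line with newval: create
 * new threads, or flag existing ones to exit, until nfs_numasync
 * matches the clamped nfs_niothreads.
 */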
1101 int
1102 nfs_set_niothreads(int newval)
1103 {
1104 struct nfs_iod *nid;
1105 int error = 0;
1106
1107 #if defined(MULTIPROCESSOR)
1108 int hold_count;
1109 #endif /* defined(MULTIPROCESSOR) */
1110
1111 KERNEL_UNLOCK_ALL(curlwp, &hold_count);
1112
1113 mutex_enter(&nfs_iodlist_lock);
1114 /* clamp to sane range */
1115 nfs_niothreads = max(0, min(newval, NFS_MAXASYNCDAEMON));
1116
1117 while (nfs_numasync != nfs_niothreads && error == 0) {
1118 while (nfs_numasync < nfs_niothreads) {
1119
1120 /*
1121 * kthread_create can wait for the pagedaemon, and the pagedaemon
1122 * can wait for an nfsiod that needs nfs_iodlist_lock, so drop
1123 * the lock across the call to avoid a deadlock.
1124 */
1125
1126 mutex_exit(&nfs_iodlist_lock);
1127 error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
1128 nfssvc_iod, NULL, NULL, "nfsio");
1129 mutex_enter(&nfs_iodlist_lock);
1130 if (error) {
1131 /* give up */
1132 nfs_niothreads = nfs_numasync;
1133 break;
1134 }
1135 nfs_numasync++;
1136 }
1137 while (nfs_numasync > nfs_niothreads) {
1138 nid = LIST_FIRST(&nfs_iodlist_all);
1139 if (nid == NULL) {
1140 /* iod has not started yet. */
1141 kpause("nfsiorm", false, hz, &nfs_iodlist_lock);
1142 continue;
1143 }
1144 LIST_REMOVE(nid, nid_all);
1145 mutex_enter(&nid->nid_lock);
1146 KASSERT(!nid->nid_exiting);
1147 nid->nid_exiting = true;
1148 cv_signal(&nid->nid_cv);
1149 mutex_exit(&nid->nid_lock);
1150 nfs_numasync--;
1151 }
1152 }
1153 mutex_exit(&nfs_iodlist_lock);
1154
1155 KERNEL_LOCK(hold_count, curlwp);
1156 return error;
1157 }
1158
1159 /*
1160 * Get an authorization string for the uid by having the mount_nfs sitting
1161 * on this mount point porpoise out of the kernel and do it.
1162 */
1163 int
1164 nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key)
1165 struct nfsmount *nmp;
1166 struct nfsreq *rep;
1167 kauth_cred_t cred;
1168 char **auth_str;
1169 int *auth_len;
1170 char *verf_str;
1171 int *verf_len;
1172 NFSKERBKEY_T key; /* return session key */
1173 {
1174 int error = 0;
1175
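/*
 * Handshake with the user-level mount_nfs: wait for it to be parked in
 * NFSMNT_WAITAUTH, post the request (nm_authstr, nm_authuid, etc.),
 * wake it via nm_authstr, then sleep until it sets NFSMNT_HASAUTH or
 * the wait is interrupted.
 */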
1176 while ((nmp->nm_iflag & NFSMNT_WAITAUTH) == 0) {
1177 nmp->nm_iflag |= NFSMNT_WANTAUTH;
1178 (void) tsleep((void *)&nmp->nm_authtype, PSOCK,
1179 "nfsauth1", 2 * hz);
1180 error = nfs_sigintr(nmp, rep, rep->r_lwp);
1181 if (error) {
1182 nmp->nm_iflag &= ~NFSMNT_WANTAUTH;
1183 return (error);
1184 }
1185 }
1186 nmp->nm_iflag &= ~(NFSMNT_WAITAUTH | NFSMNT_WANTAUTH);
1187 nmp->nm_authstr = *auth_str = (char *)malloc(RPCAUTH_MAXSIZ, M_TEMP, M_WAITOK);
1188 nmp->nm_authlen = RPCAUTH_MAXSIZ;
1189 nmp->nm_verfstr = verf_str;
1190 nmp->nm_verflen = *verf_len;
1191 nmp->nm_authuid = kauth_cred_geteuid(cred);
1192 wakeup((void *)&nmp->nm_authstr);
1193
1194 /*
1195 * And wait for mount_nfs to do its stuff.
1196 */
1197 while ((nmp->nm_iflag & NFSMNT_HASAUTH) == 0 && error == 0) {
1198 (void) tsleep((void *)&nmp->nm_authlen, PSOCK,
1199 "nfsauth2", 2 * hz);
1200 error = nfs_sigintr(nmp, rep, rep->r_lwp);
1201 }
1202 if (nmp->nm_iflag & NFSMNT_AUTHERR) {
1203 nmp->nm_iflag &= ~NFSMNT_AUTHERR;
1204 error = EAUTH;
1205 }
1206 if (error)
1207 free((void *)*auth_str, M_TEMP);
1208 else {
1209 *auth_len = nmp->nm_authlen;
1210 *verf_len = nmp->nm_verflen;
1211 memcpy(key, nmp->nm_key, sizeof (NFSKERBKEY_T));
1212 }
1213 nmp->nm_iflag &= ~NFSMNT_HASAUTH;
1214 nmp->nm_iflag |= NFSMNT_WAITAUTH;
1215 if (nmp->nm_iflag & NFSMNT_WANTAUTH) {
1216 nmp->nm_iflag &= ~NFSMNT_WANTAUTH;
1217 wakeup((void *)&nmp->nm_authtype);
1218 }
1219 return (error);
1220 }
1221
1222 /*
1223 * Get a nickname authenticator and verifier.
1224 */
1225 int
1226 nfs_getnickauth(struct nfsmount *nmp, kauth_cred_t cred, char **auth_str,
1227 int *auth_len, char *verf_str, int verf_len)
1228 {
1229 struct timeval ktvin, ktvout, tv;
1230 struct nfsuid *nuidp;
1231 u_int32_t *nickp, *verfp;
1232
1233 memset(&ktvout, 0, sizeof ktvout); /* XXX gcc */
1234
1235 #ifdef DIAGNOSTIC
1236 if (verf_len < (4 * NFSX_UNSIGNED))
1237 panic("nfs_getnickauth verf too small");
1238 #endif
1239 LIST_FOREACH(nuidp, NMUIDHASH(nmp, kauth_cred_geteuid(cred)), nu_hash) {
1240 if (kauth_cred_geteuid(nuidp->nu_cr) == kauth_cred_geteuid(cred))
1241 break;
1242 }
1243 if (!nuidp || nuidp->nu_expire < time_second)
1244 return (EACCES);
1245
1246 /*
1247 * Move to the end of the lru list (end of lru == most recently used).
1248 */
1249 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
1250 TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);
1251
1252 nickp = (u_int32_t *)malloc(2 * NFSX_UNSIGNED, M_TEMP, M_WAITOK);
1253 *nickp++ = txdr_unsigned(RPCAKN_NICKNAME);
1254 *nickp = txdr_unsigned(nuidp->nu_nickname);
1255 *auth_str = (char *)nickp;
1256 *auth_len = 2 * NFSX_UNSIGNED;
1257
1258 /*
1259 * Now we must encrypt the verifier and package it up.
1260 */
1261 verfp = (u_int32_t *)verf_str;
1262 *verfp++ = txdr_unsigned(RPCAKN_NICKNAME);
1263 getmicrotime(&tv);
1264 if (tv.tv_sec > nuidp->nu_timestamp.tv_sec ||
1265 (tv.tv_sec == nuidp->nu_timestamp.tv_sec &&
1266 tv.tv_usec > nuidp->nu_timestamp.tv_usec))
1267 nuidp->nu_timestamp = tv;
1268 else
1269 nuidp->nu_timestamp.tv_usec++;
1270 ktvin.tv_sec = txdr_unsigned(nuidp->nu_timestamp.tv_sec);
1271 ktvin.tv_usec = txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1272
1273 /*
1274 * Now encrypt the timestamp verifier in ecb mode using the session
1275 * key.
1276 */
1277 #ifdef NFSKERB
1278 XXX
1279 #endif
1280
1281 *verfp++ = ktvout.tv_sec;
1282 *verfp++ = ktvout.tv_usec;
1283 *verfp = 0;
1284 return (0);
1285 }
1286
1287 /*
1288 * Save the current nickname in a hash list entry on the mount point.
1289 */
1290 int
1291 nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep)
1292 struct nfsmount *nmp;
1293 kauth_cred_t cred;
1294 int len;
1295 NFSKERBKEY_T key;
1296 struct mbuf **mdp;
1297 char **dposp;
1298 struct mbuf *mrep;
1299 {
1300 struct nfsuid *nuidp;
1301 u_int32_t *tl;
1302 int32_t t1;
1303 struct mbuf *md = *mdp;
1304 struct timeval ktvin, ktvout;
1305 u_int32_t nick;
1306 char *dpos = *dposp, *cp2;
1307 int deltasec, error = 0;
1308
1309 memset(&ktvout, 0, sizeof ktvout); /* XXX gcc */
1310
1311 if (len == (3 * NFSX_UNSIGNED)) {
1312 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
1313 ktvin.tv_sec = *tl++;
1314 ktvin.tv_usec = *tl++;
1315 nick = fxdr_unsigned(u_int32_t, *tl);
1316
1317 /*
1318 * Decrypt the timestamp in ecb mode.
1319 */
1320 #ifdef NFSKERB
1321 XXX
1322 #endif
1323 ktvout.tv_sec = fxdr_unsigned(long, ktvout.tv_sec);
1324 ktvout.tv_usec = fxdr_unsigned(long, ktvout.tv_usec);
1325 deltasec = time_second - ktvout.tv_sec;
1326 if (deltasec < 0)
1327 deltasec = -deltasec;
1328 /*
1329 * If ok, add it to the hash list for the mount point.
1330 */
1331 if (deltasec <= NFS_KERBCLOCKSKEW) {
1332 if (nmp->nm_numuids < nuidhash_max) {
1333 nmp->nm_numuids++;
1334 nuidp = kmem_alloc(sizeof(*nuidp), KM_SLEEP);
1335 } else {
1336 nuidp = TAILQ_FIRST(&nmp->nm_uidlruhead);
1337 LIST_REMOVE(nuidp, nu_hash);
1338 TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp,
1339 nu_lru);
1340 }
1341 nuidp->nu_flag = 0;
1342 kauth_cred_seteuid(nuidp->nu_cr, kauth_cred_geteuid(cred));
1343 nuidp->nu_expire = time_second + NFS_KERBTTL;
1344 nuidp->nu_timestamp = ktvout;
1345 nuidp->nu_nickname = nick;
1346 memcpy(nuidp->nu_key, key, sizeof (NFSKERBKEY_T));
1347 TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp,
1348 nu_lru);
1349 LIST_INSERT_HEAD(NMUIDHASH(nmp, kauth_cred_geteuid(cred)),
1350 nuidp, nu_hash);
1351 }
1352 } else
1353 nfsm_adv(nfsm_rndup(len));
1354 nfsmout:
1355 *mdp = md;
1356 *dposp = dpos;
1357 return (error);
1358 }
1359 #endif /* NFS */
1360