/*	$NetBSD: uipc_usrreq.c,v 1.146 2013/10/08 17:21:24 christos Exp $	*/

/*-
 * Copyright (c) 1998, 2000, 2004, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_usrreq.c	8.9 (Berkeley) 5/14/95
 */

/*
 * Copyright (c) 1997 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_usrreq.c	8.9 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_usrreq.c,v 1.146 2013/10/08 17:21:24 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/unpcb.h>
#include <sys/un.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/mbuf.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/uidinfo.h>
#include <sys/kernel.h>
#include <sys/kthread.h>

/*
 * Unix communications domain.
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 *
 * Notes on locking:
 *
 * The generic rules noted in uipc_socket2.c apply.  In addition:
 *
 * o We have a global lock, uipc_lock.
 *
 * o All datagram sockets are locked by uipc_lock.
 *
 * o For stream socketpairs, the two endpoints are created sharing the same
 *   independent lock.  Sockets presented to PRU_CONNECT2 must already have
 *   matching locks.
 *
 * o Stream sockets created via socket() start life with their own
 *   independent lock.
 *
 * o Stream connections to a named endpoint are slightly more complicated.
 *   Sockets that have called listen() have their lock pointer mutated to
 *   the global uipc_lock.  When establishing a connection, the connecting
 *   socket also has its lock mutated to uipc_lock, which matches the head
 *   (listening socket).  We create a new socket for accept() to return, and
 *   that also shares the head's lock.  Until the connection is completely
 *   done on both ends, all three sockets are locked by uipc_lock.  Once the
 *   connection is complete, the association with the head's lock is broken.
 *   The connecting socket and the socket returned from accept() have their
 *   lock pointers mutated away from uipc_lock, and back to the connecting
 *   socket's original, independent lock.  The head continues to be locked
 *   by uipc_lock.
 *
 * o If uipc_lock is determined to be a significant source of contention,
 *   it could easily be hashed out.  It is difficult to simply make it an
 *   independent lock because of visibility / garbage collection issues:
 *   if a socket has been associated with a lock at any point, that lock
 *   must remain valid until the socket is no longer visible in the system.
 *   The lock must not be freed or otherwise destroyed until any sockets
 *   that had referenced it have also been destroyed.
 */
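
/*
 * Illustrative sketch (not compiled): the lock hand-off described above,
 * condensed in terms of the helpers defined later in this file.  The
 * names "head", "connector" and "so3" are placeholders for this sketch
 * only, and the real hand-off is deferred until accept() has dequeued
 * the new socket.
 */
#if 0
	unp_resetlock(head);		/* listen(2): so_lock -> uipc_lock */
	unp_resetlock(connector);	/* connect(2): join the head's lock */
	unp_connect2(connector, so3, PRU_CONNECT);
	/*
	 * accept(2): once so3 has left the head's queue, the pair is
	 * moved to a private shared lock; the head keeps uipc_lock.
	 */
	unp_setpeerlocks(so3, connector);
#endif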
const struct sockaddr_un sun_noname = {
	.sun_len = offsetof(struct sockaddr_un, sun_path),
	.sun_family = AF_LOCAL,
};
ino_t	unp_ino;			/* prototype for fake inode numbers */

struct mbuf *unp_addsockcred(struct lwp *, struct mbuf *);
static void unp_mark(file_t *);
static void unp_scan(struct mbuf *, void (*)(file_t *), int);
static void unp_discard_now(file_t *);
static void unp_discard_later(file_t *);
static void unp_thread(void *);
static void unp_thread_kick(void);
static kmutex_t *uipc_lock;

static kcondvar_t unp_thread_cv;
static lwp_t *unp_thread_lwp;
static SLIST_HEAD(,file) unp_thread_discard;
static int unp_defer;

/*
 * Initialize Unix protocols.
 */
void
uipc_init(void)
{
	int error;

	uipc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	cv_init(&unp_thread_cv, "unpgc");

	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, unp_thread,
	    NULL, &unp_thread_lwp, "unpgc");
	if (error != 0)
		panic("uipc_init %d", error);
}

/*
 * A connection succeeded: disassociate both endpoints from the head's
 * lock, and make them share their own lock.  There is a race here: for
 * a very brief time one endpoint will be locked by a different lock
 * than the other end.  However, since the current thread holds the old
 * lock (the listening socket's lock, the head) access can still only be
 * made to one side of the connection.
 */
static void
unp_setpeerlocks(struct socket *so, struct socket *so2)
{
	struct unpcb *unp;
	kmutex_t *lock;

	KASSERT(solocked2(so, so2));

	/*
	 * Bail out if either end of the socket is not yet fully
	 * connected or accepted.  We only break the lock association
	 * with the head when the pair of sockets stand completely
	 * on their own.
	 */
	KASSERT(so->so_head == NULL);
	if (so2->so_head != NULL)
		return;

	/*
	 * Drop references to old lock.  A third reference (from the
	 * queue head) must be held as we still hold its lock.  Bonus:
	 * we don't need to worry about garbage collecting the lock.
	 */
	lock = so->so_lock;
	KASSERT(lock == uipc_lock);
	mutex_obj_free(lock);
	mutex_obj_free(lock);

	/*
	 * Grab stream lock from the initiator and share between the two
	 * endpoints.  Issue memory barrier to ensure all modifications
	 * become globally visible before the lock change.  so2 is
	 * assumed not to have a stream lock, because it was created
	 * purely for the server side to accept this connection and
	 * started out life using the domain-wide lock.
	 */
	unp = sotounpcb(so);
	KASSERT(unp->unp_streamlock != NULL);
	KASSERT(sotounpcb(so2)->unp_streamlock == NULL);
	lock = unp->unp_streamlock;
	unp->unp_streamlock = NULL;
	mutex_obj_hold(lock);
	membar_exit();
	/*
	 * possible race if lock is not held - see comment in
	 * uipc_usrreq(PRU_ACCEPT).
	 */
	KASSERT(mutex_owned(lock));
	solockreset(so, lock);
	solockreset(so2, lock);
}

/*
 * Reset a socket's lock back to the domain-wide lock.
 */
static void
unp_resetlock(struct socket *so)
{
	kmutex_t *olock, *nlock;
	struct unpcb *unp;

	KASSERT(solocked(so));

	olock = so->so_lock;
	nlock = uipc_lock;
	if (olock == nlock)
		return;
	unp = sotounpcb(so);
	KASSERT(unp->unp_streamlock == NULL);
	unp->unp_streamlock = olock;
	mutex_obj_hold(nlock);
	mutex_enter(nlock);
	solockreset(so, nlock);
	mutex_exit(olock);
}

static void
unp_free(struct unpcb *unp)
{

	if (unp->unp_addr)
		free(unp->unp_addr, M_SONAME);
	if (unp->unp_streamlock != NULL)
		mutex_obj_free(unp->unp_streamlock);
	free(unp, M_PCB);
}

int
unp_output(struct mbuf *m, struct mbuf *control, struct unpcb *unp,
	struct lwp *l)
{
	struct socket *so2;
	const struct sockaddr_un *sun;

	so2 = unp->unp_conn->unp_socket;

	KASSERT(solocked(so2));

	if (unp->unp_addr)
		sun = unp->unp_addr;
	else
		sun = &sun_noname;
	if (unp->unp_conn->unp_flags & UNP_WANTCRED)
		control = unp_addsockcred(l, control);
	if (sbappendaddr(&so2->so_rcv, (const struct sockaddr *)sun, m,
	    control) == 0) {
		so2->so_rcv.sb_overflowed++;
		unp_dispose(control);
		m_freem(control);
		m_freem(m);
		return (ENOBUFS);
	} else {
		sorwakeup(so2);
		return (0);
	}
}

void
unp_setaddr(struct socket *so, struct mbuf *nam, bool peeraddr)
{
	const struct sockaddr_un *sun;
	struct unpcb *unp;
	bool ext;

	KASSERT(solocked(so));
	unp = sotounpcb(so);
	ext = false;

	for (;;) {
		sun = NULL;
		if (peeraddr) {
			if (unp->unp_conn && unp->unp_conn->unp_addr)
				sun = unp->unp_conn->unp_addr;
		} else {
			if (unp->unp_addr)
				sun = unp->unp_addr;
		}
		if (sun == NULL)
			sun = &sun_noname;
		nam->m_len = sun->sun_len;
		if (nam->m_len > MLEN && !ext) {
			sounlock(so);
			MEXTMALLOC(nam, MAXPATHLEN * 2, M_WAITOK);
			solock(so);
			ext = true;
		} else {
			KASSERT(nam->m_len <= MAXPATHLEN * 2);
			memcpy(mtod(nam, void *), sun, (size_t)nam->m_len);
			break;
		}
	}
}

/*ARGSUSED*/
int
uipc_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
	struct mbuf *control, struct lwp *l)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;
	struct proc *p;
	u_int newhiwat;
	int error = 0;

	if (req == PRU_CONTROL)
		return (EOPNOTSUPP);

#ifdef DIAGNOSTIC
	if (req != PRU_SEND && req != PRU_SENDOOB && control)
		panic("uipc_usrreq: unexpected control mbuf");
#endif
	p = l ? l->l_proc : NULL;
	if (req != PRU_ATTACH) {
		if (unp == NULL) {
			error = EINVAL;
			goto release;
		}
		KASSERT(solocked(so));
	}

	switch (req) {

	case PRU_ATTACH:
		if (unp != NULL) {
			error = EISCONN;
			break;
		}
		error = unp_attach(so);
		break;

	case PRU_DETACH:
		unp_detach(unp);
		break;

	case PRU_BIND:
		KASSERT(l != NULL);
		error = unp_bind(so, nam, l);
		break;

	case PRU_LISTEN:
		/*
		 * If the socket can accept a connection, it must be
		 * locked by uipc_lock.
		 */
		unp_resetlock(so);
		if (unp->unp_vnode == NULL)
			error = EINVAL;
		break;

	case PRU_CONNECT:
		KASSERT(l != NULL);
		error = unp_connect(so, nam, l);
		break;

	case PRU_CONNECT2:
		error = unp_connect2(so, (struct socket *)nam, PRU_CONNECT2);
		break;

	case PRU_DISCONNECT:
		unp_disconnect(unp);
		break;

	case PRU_ACCEPT:
		KASSERT(so->so_lock == uipc_lock);
		/*
		 * Mark the initiating STREAM socket as connected *ONLY*
		 * after it's been accepted.  This prevents a client from
		 * overrunning a server and receiving ECONNREFUSED.
		 */
		if (unp->unp_conn == NULL) {
			/*
			 * This will use the empty socket and will not
			 * allocate.
			 */
			unp_setaddr(so, nam, true);
			break;
		}
		so2 = unp->unp_conn->unp_socket;
		if (so2->so_state & SS_ISCONNECTING) {
			KASSERT(solocked2(so, so->so_head));
			KASSERT(solocked2(so2, so->so_head));
			soisconnected(so2);
		}
		/*
		 * If the connection is fully established, break the
		 * association with uipc_lock and give the connected
		 * pair a separate lock to share.
		 * There is a race here: sotounpcb(so2)->unp_streamlock
		 * is not locked, so when changing so2->so_lock
		 * another thread can grab it while so->so_lock is still
		 * pointing to the (locked) uipc_lock.
		 * This should be harmless, except that it makes
		 * solocked2() and solocked() unreliable.
		 * Another problem is that unp_setaddr() expects the
		 * socket locked.  Grabbing sotounpcb(so2)->unp_streamlock
		 * fixes both issues.
		 */
		mutex_enter(sotounpcb(so2)->unp_streamlock);
		unp_setpeerlocks(so2, so);
		/*
		 * Only now return peer's address, as we may need to
		 * block in order to allocate memory.
		 *
		 * XXX Minor race: connection can be broken while
		 * lock is dropped in unp_setaddr().  We will return
		 * error == 0 and sun_noname as the peer address.
		 */
		unp_setaddr(so, nam, true);
		/* so_lock now points to unp_streamlock */
		mutex_exit(so2->so_lock);
		break;

	case PRU_SHUTDOWN:
		socantsendmore(so);
		unp_shutdown(unp);
		break;

	case PRU_RCVD:
		switch (so->so_type) {

		case SOCK_DGRAM:
			panic("uipc 1");
			/*NOTREACHED*/

		case SOCK_SEQPACKET: /* FALLTHROUGH */
		case SOCK_STREAM:
#define	rcv (&so->so_rcv)
#define	snd (&so2->so_snd)
			if (unp->unp_conn == NULL)
				break;
			so2 = unp->unp_conn->unp_socket;
			KASSERT(solocked2(so, so2));
			/*
			 * Adjust backpressure on sender
			 * and wakeup any waiting to write.
			 */
			snd->sb_mbmax += unp->unp_mbcnt - rcv->sb_mbcnt;
			unp->unp_mbcnt = rcv->sb_mbcnt;
			newhiwat = snd->sb_hiwat + unp->unp_cc - rcv->sb_cc;
			(void)chgsbsize(so2->so_uidinfo,
			    &snd->sb_hiwat, newhiwat, RLIM_INFINITY);
			unp->unp_cc = rcv->sb_cc;
			sowwakeup(so2);
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 2");
		}
		break;

	case PRU_SEND:
		/*
		 * Note: unp_internalize() rejects any control message
		 * other than SCM_RIGHTS, and only allows one.  This
		 * has the side-effect of preventing a caller from
		 * forging SCM_CREDS.
		 */
		if (control) {
			sounlock(so);
			error = unp_internalize(&control);
			solock(so);
			if (error != 0) {
				m_freem(control);
				m_freem(m);
				break;
			}
		}
		switch (so->so_type) {

		case SOCK_DGRAM: {
			KASSERT(so->so_lock == uipc_lock);
			if (nam) {
				if ((so->so_state & SS_ISCONNECTED) != 0)
					error = EISCONN;
				else {
					/*
					 * Note: once connected, the
					 * socket's lock must not be
					 * dropped until we have sent
					 * the message and disconnected.
					 * This is necessary to prevent
					 * intervening control ops, like
					 * another connection.
					 */
					error = unp_connect(so, nam, l);
				}
			} else {
				if ((so->so_state & SS_ISCONNECTED) == 0)
					error = ENOTCONN;
			}
			if (error) {
				unp_dispose(control);
				m_freem(control);
				m_freem(m);
				break;
			}
			KASSERT(p != NULL);
			error = unp_output(m, control, unp, l);
			if (nam)
				unp_disconnect(unp);
			break;
		}

		case SOCK_SEQPACKET: /* FALLTHROUGH */
		case SOCK_STREAM:
#define	rcv (&so2->so_rcv)
#define	snd (&so->so_snd)
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
			so2 = unp->unp_conn->unp_socket;
			KASSERT(solocked2(so, so2));
			if (unp->unp_conn->unp_flags & UNP_WANTCRED) {
				/*
				 * Credentials are passed only once on
				 * SOCK_STREAM and SOCK_SEQPACKET.
				 */
				unp->unp_conn->unp_flags &= ~UNP_WANTCRED;
				control = unp_addsockcred(l, control);
			}
			/*
			 * Send to paired receive port, and then reduce
			 * send buffer hiwater marks to maintain backpressure.
			 * Wake up readers.
			 */
			if (control) {
				if (sbappendcontrol(rcv, m, control) != 0)
					control = NULL;
			} else {
				switch (so->so_type) {
				case SOCK_SEQPACKET:
					sbappendrecord(rcv, m);
					break;
				case SOCK_STREAM:
					sbappend(rcv, m);
					break;
				default:
					panic("uipc_usrreq");
					break;
				}
			}
			snd->sb_mbmax -=
			    rcv->sb_mbcnt - unp->unp_conn->unp_mbcnt;
			unp->unp_conn->unp_mbcnt = rcv->sb_mbcnt;
			newhiwat = snd->sb_hiwat -
			    (rcv->sb_cc - unp->unp_conn->unp_cc);
			(void)chgsbsize(so->so_uidinfo,
			    &snd->sb_hiwat, newhiwat, RLIM_INFINITY);
			unp->unp_conn->unp_cc = rcv->sb_cc;
			sorwakeup(so2);
#undef snd
#undef rcv
			if (control != NULL) {
				unp_dispose(control);
				m_freem(control);
			}
			break;

		default:
			panic("uipc 4");
		}
		break;

	case PRU_ABORT:
		(void)unp_drop(unp, ECONNABORTED);

		KASSERT(so->so_head == NULL);
#ifdef DIAGNOSTIC
		if (so->so_pcb == NULL)
			panic("uipc 5: drop killed pcb");
#endif
		unp_detach(unp);
		break;

	case PRU_SENSE:
		((struct stat *) m)->st_blksize = so->so_snd.sb_hiwat;
		switch (so->so_type) {
		case SOCK_SEQPACKET: /* FALLTHROUGH */
		case SOCK_STREAM:
			if (unp->unp_conn == NULL)
				break;

			so2 = unp->unp_conn->unp_socket;
			KASSERT(solocked2(so, so2));
			((struct stat *) m)->st_blksize += so2->so_rcv.sb_cc;
			break;
		default:
			break;
		}
		((struct stat *) m)->st_dev = NODEV;
		if (unp->unp_ino == 0)
			unp->unp_ino = unp_ino++;
		((struct stat *) m)->st_atimespec =
		    ((struct stat *) m)->st_mtimespec =
		    ((struct stat *) m)->st_ctimespec = unp->unp_ctime;
		((struct stat *) m)->st_ino = unp->unp_ino;
		return (0);

	case PRU_RCVOOB:
		error = EOPNOTSUPP;
		break;

	case PRU_SENDOOB:
		m_freem(control);
		m_freem(m);
		error = EOPNOTSUPP;
		break;

	case PRU_SOCKADDR:
		unp_setaddr(so, nam, false);
		break;

	case PRU_PEERADDR:
		unp_setaddr(so, nam, true);
		break;

	default:
		panic("uipc_usrreq");
	}

release:
	return (error);
}

/*
 * Unix domain socket option processing.
 */
int
uipc_ctloutput(int op, struct socket *so, struct sockopt *sopt)
{
	struct unpcb *unp = sotounpcb(so);
	int optval = 0, error = 0;

	KASSERT(solocked(so));

	if (sopt->sopt_level != 0) {
		error = ENOPROTOOPT;
	} else switch (op) {

	case PRCO_SETOPT:
		switch (sopt->sopt_name) {
		case LOCAL_CREDS:
		case LOCAL_CONNWAIT:
			error = sockopt_getint(sopt, &optval);
			if (error)
				break;
			switch (sopt->sopt_name) {
#define	OPTSET(bit) \
	if (optval) \
		unp->unp_flags |= (bit); \
	else \
		unp->unp_flags &= ~(bit);

			case LOCAL_CREDS:
				OPTSET(UNP_WANTCRED);
				break;
			case LOCAL_CONNWAIT:
				OPTSET(UNP_CONNWAIT);
				break;
			}
			break;
#undef OPTSET

		default:
			error = ENOPROTOOPT;
			break;
		}
		break;

	case PRCO_GETOPT:
		sounlock(so);
		switch (sopt->sopt_name) {
		case LOCAL_PEEREID:
			if (unp->unp_flags & UNP_EIDSVALID) {
				error = sockopt_set(sopt,
				    &unp->unp_connid, sizeof(unp->unp_connid));
			} else {
				error = EINVAL;
			}
			break;
		case LOCAL_CREDS:
#define	OPTBIT(bit)	(unp->unp_flags & (bit) ? 1 : 0)

			optval = OPTBIT(UNP_WANTCRED);
			error = sockopt_setint(sopt, optval);
			break;
#undef OPTBIT

		default:
			error = ENOPROTOOPT;
			break;
		}
		solock(so);
		break;
	}
	return (error);
}
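
/*
 * Illustrative userland sketch (not compiled): fetching the peer
 * credentials published above via LOCAL_PEEREID.  Assumes the
 * struct unpcbid layout from <sys/un.h>; option level 0 is the
 * local domain, matching the sopt_level check above.
 */
#if 0
	struct unpcbid cred;
	socklen_t len = sizeof(cred);

	if (getsockopt(sd, 0, LOCAL_PEEREID, &cred, &len) == 0)
		printf("peer pid %d euid %d egid %d\n",
		    (int)cred.unp_pid, (int)cred.unp_euid, (int)cred.unp_egid);
#endif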

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 */
#define	PIPSIZ	4096
u_long	unpst_sendspace = PIPSIZ;
u_long	unpst_recvspace = PIPSIZ;
u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
u_long	unpdg_recvspace = 4*1024;
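
/*
 * Worked example (illustrative, not compiled): with the defaults above,
 * a single send(2) of more than unpdg_sendspace (2048) bytes on a
 * SOCK_DGRAM local socket should fail, since the send space bounds the
 * maximum datagram size rather than reserving real buffering.
 */
#if 0
	char big[4096];

	/* sd is assumed to be a connected AF_LOCAL/SOCK_DGRAM socket. */
	if (send(sd, big, sizeof(big), 0) == -1)
		warn("send");	/* expect EMSGSIZE */
#endif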

u_int	unp_rights;			/* files in flight */
u_int	unp_rights_ratio = 2;		/* limit, fraction of maxfiles */

int
unp_attach(struct socket *so)
{
	struct unpcb *unp;
	int error;

	switch (so->so_type) {
	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
		if (so->so_lock == NULL) {
			/*
			 * XXX Assuming that no socket locks are held,
			 * as this call may sleep.
			 */
			so->so_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
			solock(so);
		}
		if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
			error = soreserve(so, unpst_sendspace, unpst_recvspace);
			if (error != 0)
				return (error);
		}
		break;

	case SOCK_DGRAM:
		if (so->so_lock == NULL) {
			mutex_obj_hold(uipc_lock);
			so->so_lock = uipc_lock;
			solock(so);
		}
		if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
			if (error != 0)
				return (error);
		}
		break;

	default:
		panic("unp_attach");
	}
	KASSERT(solocked(so));
	unp = malloc(sizeof(*unp), M_PCB, M_NOWAIT);
	if (unp == NULL)
		return (ENOBUFS);
	memset(unp, 0, sizeof(*unp));
	unp->unp_socket = so;
	so->so_pcb = unp;
	nanotime(&unp->unp_ctime);
	return (0);
}

void
unp_detach(struct unpcb *unp)
{
	struct socket *so;
	vnode_t *vp;

	so = unp->unp_socket;

 retry:
	if ((vp = unp->unp_vnode) != NULL) {
		sounlock(so);
		/* Acquire v_interlock to protect against unp_connect(). */
		/* XXXAD racy */
		mutex_enter(vp->v_interlock);
		vp->v_socket = NULL;
		vrelel(vp, 0);
		solock(so);
		unp->unp_vnode = NULL;
	}
	if (unp->unp_conn)
		unp_disconnect(unp);
	while (unp->unp_refs) {
		KASSERT(solocked2(so, unp->unp_refs->unp_socket));
		if (unp_drop(unp->unp_refs, ECONNRESET)) {
			solock(so);
			goto retry;
		}
	}
	soisdisconnected(so);
	so->so_pcb = NULL;
	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later, in sofree,
		 * but if our receive buffer holds references to files that
		 * are now garbage, we will enqueue those file references to
		 * the garbage collector and kick it into action.
		 */
		sorflush(so);
		unp_free(unp);
		unp_thread_kick();
	} else
		unp_free(unp);
}

/*
 * Allocate the new sockaddr.  We have to allocate one
 * extra byte so that we can ensure the pathname is
 * NUL-terminated.  Note that unlike Linux, we don't
 * include the NUL of the path component in the address
 * length, because doing so would exceed sizeof(sockaddr_un)
 * for fully occupied pathnames.  Linux is also inconsistent,
 * because it does not include the NUL in the length of
 * what it calls "abstract" unix sockets.
 */
static struct sockaddr_un *
makeun(struct mbuf *nam, size_t *addrlen)
{
	struct sockaddr_un *sun;

	*addrlen = nam->m_len + 1;
	sun = malloc(*addrlen, M_SONAME, M_WAITOK);
	m_copydata(nam, 0, nam->m_len, (void *)sun);
	*(((char *)sun) + nam->m_len) = '\0';
	sun->sun_len = strlen(sun->sun_path) +
	    offsetof(struct sockaddr_un, sun_path);
	return sun;
}
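
/*
 * Worked example (illustrative): for the path "/tmp/demo.sock" the
 * copied name is forcibly NUL-terminated, and sun_len is recomputed
 * as strlen("/tmp/demo.sock") (14) plus
 * offsetof(struct sockaddr_un, sun_path) (2 on the usual BSD layout),
 * i.e. 16; the terminating NUL is deliberately not counted.
 */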

int
unp_bind(struct socket *so, struct mbuf *nam, struct lwp *l)
{
	struct sockaddr_un *sun;
	struct unpcb *unp;
	vnode_t *vp;
	struct vattr vattr;
	size_t addrlen;
	int error;
	struct pathbuf *pb;
	struct nameidata nd;
	proc_t *p;

	unp = sotounpcb(so);
	if (unp->unp_vnode != NULL)
		return (EINVAL);
	if ((unp->unp_flags & UNP_BUSY) != 0) {
		/*
		 * EALREADY may not be strictly accurate, but since this
		 * is a major application error it's hardly a big deal.
		 */
		return (EALREADY);
	}
	unp->unp_flags |= UNP_BUSY;
	sounlock(so);

	p = l->l_proc;
	sun = makeun(nam, &addrlen);

	pb = pathbuf_create(sun->sun_path);
	if (pb == NULL) {
		error = ENOMEM;
		goto bad;
	}
	NDINIT(&nd, CREATE, FOLLOW | LOCKPARENT | TRYEMULROOT, pb);

/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	if ((error = namei(&nd)) != 0) {
		pathbuf_destroy(pb);
		goto bad;
	}
	vp = nd.ni_vp;
	if (vp != NULL) {
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		vrele(vp);
		pathbuf_destroy(pb);
		error = EADDRINUSE;
		goto bad;
	}
	vattr_null(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = ACCESSPERMS & ~(p->p_cwdi->cwdi_cmask);
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	if (error) {
		pathbuf_destroy(pb);
		goto bad;
	}
	vp = nd.ni_vp;
	solock(so);
	vp->v_socket = unp->unp_socket;
	unp->unp_vnode = vp;
	unp->unp_addrlen = addrlen;
	unp->unp_addr = sun;
	unp->unp_connid.unp_pid = p->p_pid;
	unp->unp_connid.unp_euid = kauth_cred_geteuid(l->l_cred);
	unp->unp_connid.unp_egid = kauth_cred_getegid(l->l_cred);
	unp->unp_flags |= UNP_EIDSBIND;
	VOP_UNLOCK(vp);
	unp->unp_flags &= ~UNP_BUSY;
	pathbuf_destroy(pb);
	return (0);

 bad:
	free(sun, M_SONAME);
	solock(so);
	unp->unp_flags &= ~UNP_BUSY;
	return (error);
}

int
unp_connect(struct socket *so, struct mbuf *nam, struct lwp *l)
{
	struct sockaddr_un *sun;
	vnode_t *vp;
	struct socket *so2, *so3;
	struct unpcb *unp, *unp2, *unp3;
	size_t addrlen;
	int error;
	struct pathbuf *pb;
	struct nameidata nd;

	unp = sotounpcb(so);
	if ((unp->unp_flags & UNP_BUSY) != 0) {
		/*
		 * EALREADY may not be strictly accurate, but since this
		 * is a major application error it's hardly a big deal.
		 */
		return (EALREADY);
	}
	unp->unp_flags |= UNP_BUSY;
	sounlock(so);

	sun = makeun(nam, &addrlen);
	pb = pathbuf_create(sun->sun_path);
	if (pb == NULL) {
		error = ENOMEM;
		goto bad2;
	}

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb);

	if ((error = namei(&nd)) != 0) {
		pathbuf_destroy(pb);
		goto bad2;
	}
	vp = nd.ni_vp;
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto bad;
	}
	pathbuf_destroy(pb);
	if ((error = VOP_ACCESS(vp, VWRITE, l->l_cred)) != 0)
		goto bad;
	/* Acquire v_interlock to protect against unp_detach(). */
	mutex_enter(vp->v_interlock);
	so2 = vp->v_socket;
	if (so2 == NULL) {
		mutex_exit(vp->v_interlock);
		error = ECONNREFUSED;
		goto bad;
	}
	if (so->so_type != so2->so_type) {
		mutex_exit(vp->v_interlock);
		error = EPROTOTYPE;
		goto bad;
	}
	solock(so);
	unp_resetlock(so);
	mutex_exit(vp->v_interlock);
	if ((so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) {
		/*
		 * This may seem somewhat fragile but is OK: if we can
		 * see SO_ACCEPTCONN set on the endpoint, then it must
		 * be locked by the domain-wide uipc_lock.
		 */
		KASSERT((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    so2->so_lock == uipc_lock);
		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn(so2, false)) == NULL) {
			error = ECONNREFUSED;
			sounlock(so);
			goto bad;
		}
		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);
		if (unp2->unp_addr) {
			unp3->unp_addr = malloc(unp2->unp_addrlen,
			    M_SONAME, M_WAITOK);
			memcpy(unp3->unp_addr, unp2->unp_addr,
			    unp2->unp_addrlen);
			unp3->unp_addrlen = unp2->unp_addrlen;
		}
		unp3->unp_flags = unp2->unp_flags;
		unp3->unp_connid.unp_pid = l->l_proc->p_pid;
		unp3->unp_connid.unp_euid = kauth_cred_geteuid(l->l_cred);
		unp3->unp_connid.unp_egid = kauth_cred_getegid(l->l_cred);
		unp3->unp_flags |= UNP_EIDSVALID;
		if (unp2->unp_flags & UNP_EIDSBIND) {
			unp->unp_connid = unp2->unp_connid;
			unp->unp_flags |= UNP_EIDSVALID;
		}
		so2 = so3;
	}
	error = unp_connect2(so, so2, PRU_CONNECT);
	sounlock(so);
 bad:
	vput(vp);
 bad2:
	free(sun, M_SONAME);
	solock(so);
	unp->unp_flags &= ~UNP_BUSY;
	return (error);
}

int
unp_connect2(struct socket *so, struct socket *so2, int req)
{
	struct unpcb *unp = sotounpcb(so);
	struct unpcb *unp2;

	if (so2->so_type != so->so_type)
		return (EPROTOTYPE);

	/*
	 * All three sockets involved must be locked by same lock:
	 *
	 * local endpoint (so)
	 * remote endpoint (so2)
	 * queue head (so2->so_head, only if PR_CONNREQUIRED)
	 */
	KASSERT(solocked2(so, so2));
	KASSERT(so->so_head == NULL);
	if (so2->so_head != NULL) {
		KASSERT(so2->so_lock == uipc_lock);
		KASSERT(solocked2(so2, so2->so_head));
	}

	unp2 = sotounpcb(so2);
	unp->unp_conn = unp2;
	switch (so->so_type) {

	case SOCK_DGRAM:
		unp->unp_nextref = unp2->unp_refs;
		unp2->unp_refs = unp;
		soisconnected(so);
		break;

	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
		unp2->unp_conn = unp;
		if (req == PRU_CONNECT &&
		    ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT))
			soisconnecting(so);
		else
			soisconnected(so);
		soisconnected(so2);
		/*
		 * If the connection is fully established, break the
		 * association with uipc_lock and give the connected
		 * pair a separate lock to share.  For CONNECT2, we
		 * require that the locks already match (the sockets
		 * are created that way).
		 */
		if (req == PRU_CONNECT) {
			KASSERT(so2->so_head != NULL);
			unp_setpeerlocks(so, so2);
		}
		break;

	default:
		panic("unp_connect2");
	}
	return (0);
}
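
/*
 * Illustrative userland sketch (not compiled): socketpair(2) is the
 * PRU_CONNECT2 path above; both endpoints are created already sharing
 * one lock, so no lock hand-off is needed.
 */
#if 0
	int sv[2];

	if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sv) == -1)
		err(1, "socketpair");
	/* sv[0] and sv[1] are now a connected pair. */
#endif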

void
unp_disconnect(struct unpcb *unp)
{
	struct unpcb *unp2 = unp->unp_conn;
	struct socket *so;

	if (unp2 == NULL)
		return;
	unp->unp_conn = NULL;
	so = unp->unp_socket;
	switch (so->so_type) {
	case SOCK_DGRAM:
		if (unp2->unp_refs == unp)
			unp2->unp_refs = unp->unp_nextref;
		else {
			unp2 = unp2->unp_refs;
			for (;;) {
				KASSERT(solocked2(so, unp2->unp_socket));
				if (unp2 == NULL)
					panic("unp_disconnect");
				if (unp2->unp_nextref == unp)
					break;
				unp2 = unp2->unp_nextref;
			}
			unp2->unp_nextref = unp->unp_nextref;
		}
		unp->unp_nextref = NULL;
		so->so_state &= ~SS_ISCONNECTED;
		break;

	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
		KASSERT(solocked2(so, unp2->unp_socket));
		soisdisconnected(so);
		unp2->unp_conn = NULL;
		soisdisconnected(unp2->unp_socket);
		break;
	}
}

#ifdef notdef
void
unp_abort(struct unpcb *unp)
{
	unp_detach(unp);
}
#endif

void
unp_shutdown(struct unpcb *unp)
{
	struct socket *so;

	switch (unp->unp_socket->so_type) {
	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
		if (unp->unp_conn && (so = unp->unp_conn->unp_socket))
			socantrcvmore(so);
		break;
	default:
		break;
	}
}

bool
unp_drop(struct unpcb *unp, int errno)
{
	struct socket *so = unp->unp_socket;

	KASSERT(solocked(so));

	so->so_error = errno;
	unp_disconnect(unp);
	if (so->so_head) {
		so->so_pcb = NULL;
		/* sofree() drops the socket lock */
		sofree(so);
		unp_free(unp);
		return true;
	}
	return false;
}

#ifdef notdef
void
unp_drain(void)
{

}
#endif

int
unp_externalize(struct mbuf *rights, struct lwp *l, int flags)
{
	struct cmsghdr * const cm = mtod(rights, struct cmsghdr *);
	struct proc * const p = l->l_proc;
	file_t **rp;
	int error = 0;

	const size_t nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) /
	    sizeof(file_t *);
	if (nfds == 0)
		goto noop;

	int * const fdp = kmem_alloc(nfds * sizeof(int), KM_SLEEP);
	rw_enter(&p->p_cwdi->cwdi_lock, RW_READER);

	/* Make sure the recipient will be able to see the files. */
	rp = (file_t **)CMSG_DATA(cm);
	for (size_t i = 0; i < nfds; i++) {
		file_t * const fp = *rp++;
		if (fp == NULL) {
			error = EINVAL;
			goto out;
		}
		/*
		 * If we are in a chroot'ed directory, and
		 * someone wants to pass us a directory, make
		 * sure it's inside the subtree we're allowed
		 * to access.
		 */
		if (p->p_cwdi->cwdi_rdir != NULL && fp->f_type == DTYPE_VNODE) {
			vnode_t *vp = (vnode_t *)fp->f_data;
			if ((vp->v_type == VDIR) &&
			    !vn_isunder(vp, p->p_cwdi->cwdi_rdir, l)) {
				error = EPERM;
				goto out;
			}
		}
	}

 restart:
	/*
	 * First loop -- allocate file descriptor table slots for the
	 * new files.
	 */
	for (size_t i = 0; i < nfds; i++) {
		if ((error = fd_alloc(p, 0, &fdp[i])) != 0) {
			/*
			 * Back out what we've done so far.
			 */
			while (i-- > 0) {
				fd_abort(p, NULL, fdp[i]);
			}
			if (error == ENOSPC) {
				fd_tryexpand(p);
				error = 0;
				goto restart;
			}
			/*
			 * This is the error that has historically
			 * been returned, and some callers may
			 * expect it.
			 */
			error = EMSGSIZE;
			goto out;
		}
	}

	/*
	 * Now that adding them has succeeded, update all of the
	 * file passing state and affix the descriptors.
	 */
	rp = (file_t **)CMSG_DATA(cm);
	int *ofdp = (int *)CMSG_DATA(cm);
	for (size_t i = 0; i < nfds; i++) {
		file_t * const fp = *rp++;
		const int fd = fdp[i];
		atomic_dec_uint(&unp_rights);
		fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0);
		fd_affix(p, fp, fd);
		/*
		 * Done with this file pointer, replace it with an fd.
		 */
		*ofdp++ = fd;
		mutex_enter(&fp->f_lock);
		fp->f_msgcount--;
		mutex_exit(&fp->f_lock);
		/*
		 * Note that fd_affix() adds a reference to the file.
		 * The file may already have been closed by another
		 * LWP in the process, so we must drop the reference
		 * added by unp_internalize() with closef().
		 */
		closef(fp);
	}

	/*
	 * Adjust length, in case of transition from large file_t
	 * pointers to ints.
	 */
	if (sizeof(file_t *) != sizeof(int)) {
		cm->cmsg_len = CMSG_LEN(nfds * sizeof(int));
		rights->m_len = CMSG_SPACE(nfds * sizeof(int));
	}
 out:
	if (__predict_false(error != 0)) {
		file_t **const fpp = (file_t **)CMSG_DATA(cm);
		for (size_t i = 0; i < nfds; i++)
			unp_discard_now(fpp[i]);
		/*
		 * Truncate the array so that nobody will try to interpret
		 * what is now garbage in it.
		 */
		cm->cmsg_len = CMSG_LEN(0);
		rights->m_len = CMSG_SPACE(0);
	}
	rw_exit(&p->p_cwdi->cwdi_lock);
	kmem_free(fdp, nfds * sizeof(int));

 noop:
	/*
	 * Don't disclose kernel memory in the alignment space.
	 */
	KASSERT(cm->cmsg_len <= rights->m_len);
	memset(&mtod(rights, char *)[cm->cmsg_len], 0, rights->m_len -
	    cm->cmsg_len);
	return error;
}
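
/*
 * Illustrative userland sketch (not compiled): the receive side of
 * descriptor passing.  By the time recvmsg(2) returns, the kernel has
 * run unp_externalize() and the control data holds plain ints.
 */
#if 0
	char buf[1], cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmp;
	int fd;

	if (recvmsg(sd, &msg, 0) == -1)
		err(1, "recvmsg");
	cmp = CMSG_FIRSTHDR(&msg);
	if (cmp != NULL && cmp->cmsg_level == SOL_SOCKET &&
	    cmp->cmsg_type == SCM_RIGHTS) {
		memcpy(&fd, CMSG_DATA(cmp), sizeof(fd));
		/* fd now refers to the passed file in this process. */
	}
#endif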

int
unp_internalize(struct mbuf **controlp)
{
	filedesc_t *fdescp = curlwp->l_fd;
	struct mbuf *control = *controlp;
	struct cmsghdr *newcm, *cm = mtod(control, struct cmsghdr *);
	file_t **rp, **files;
	file_t *fp;
	int i, fd, *fdp;
	int nfds, error;
	u_int maxmsg;

	error = 0;
	newcm = NULL;

	/* Sanity check the control message header. */
	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET ||
	    cm->cmsg_len > control->m_len ||
	    cm->cmsg_len < CMSG_ALIGN(sizeof(*cm)))
		return (EINVAL);

	/*
	 * Verify that the file descriptors are valid, and acquire
	 * a reference to each.
	 */
	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / sizeof(int);
	fdp = (int *)CMSG_DATA(cm);
	maxmsg = maxfiles / unp_rights_ratio;
	for (i = 0; i < nfds; i++) {
		fd = *fdp++;
		if (atomic_inc_uint_nv(&unp_rights) > maxmsg) {
			atomic_dec_uint(&unp_rights);
			nfds = i;
			error = EAGAIN;
			goto out;
		}
		if ((fp = fd_getfile(fd)) == NULL
		    || fp->f_type == DTYPE_KQUEUE) {
			if (fp)
				fd_putfile(fd);
			atomic_dec_uint(&unp_rights);
			nfds = i;
			error = EBADF;
			goto out;
		}
	}

	/* Allocate new space and copy header into it. */
	newcm = malloc(CMSG_SPACE(nfds * sizeof(file_t *)), M_MBUF, M_WAITOK);
	if (newcm == NULL) {
		error = E2BIG;
		goto out;
	}
	memcpy(newcm, cm, sizeof(struct cmsghdr));
	files = (file_t **)CMSG_DATA(newcm);

	/*
	 * Transform the file descriptors into file_t pointers, in
	 * reverse order so that if pointers are bigger than ints, the
	 * int won't get trampled until we're done.  No need to lock, as we have
	 * already validated the descriptors with fd_getfile().
	 */
	fdp = (int *)CMSG_DATA(cm) + nfds;
	rp = files + nfds;
	for (i = 0; i < nfds; i++) {
		fp = fdescp->fd_dt->dt_ff[*--fdp]->ff_file;
		KASSERT(fp != NULL);
		mutex_enter(&fp->f_lock);
		*--rp = fp;
		fp->f_count++;
		fp->f_msgcount++;
		mutex_exit(&fp->f_lock);
	}

 out:
	/* Release descriptor references. */
	fdp = (int *)CMSG_DATA(cm);
	for (i = 0; i < nfds; i++) {
		fd_putfile(*fdp++);
		if (error != 0) {
			atomic_dec_uint(&unp_rights);
		}
	}

	if (error == 0) {
		if (control->m_flags & M_EXT) {
			m_freem(control);
			*controlp = control = m_get(M_WAIT, MT_CONTROL);
		}
		MEXTADD(control, newcm, CMSG_SPACE(nfds * sizeof(file_t *)),
		    M_MBUF, NULL, NULL);
		cm = newcm;
		/*
		 * Adjust message & mbuf to note amount of space
		 * actually used.
		 */
		cm->cmsg_len = CMSG_LEN(nfds * sizeof(file_t *));
		control->m_len = CMSG_SPACE(nfds * sizeof(file_t *));
	}

	return error;
}
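
/*
 * Illustrative userland sketch (not compiled): the send side that
 * unp_internalize() services; one SCM_RIGHTS message carrying one
 * descriptor (fd_to_pass is a placeholder name).
 */
#if 0
	char buf[1] = { 0 }, cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmp = CMSG_FIRSTHDR(&msg);

	cmp->cmsg_len = CMSG_LEN(sizeof(int));
	cmp->cmsg_level = SOL_SOCKET;
	cmp->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cmp), &fd_to_pass, sizeof(int));
	if (sendmsg(sd, &msg, 0) == -1)
		err(1, "sendmsg");
#endif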

struct mbuf *
unp_addsockcred(struct lwp *l, struct mbuf *control)
{
	struct sockcred *sc;
	struct mbuf *m;
	void *p;

	m = sbcreatecontrol1(&p, SOCKCREDSIZE(kauth_cred_ngroups(l->l_cred)),
		SCM_CREDS, SOL_SOCKET, M_WAITOK);
	if (m == NULL)
		return control;

	sc = p;
	sc->sc_uid = kauth_cred_getuid(l->l_cred);
	sc->sc_euid = kauth_cred_geteuid(l->l_cred);
	sc->sc_gid = kauth_cred_getgid(l->l_cred);
	sc->sc_egid = kauth_cred_getegid(l->l_cred);
	sc->sc_ngroups = kauth_cred_ngroups(l->l_cred);

	for (int i = 0; i < sc->sc_ngroups; i++)
		sc->sc_groups[i] = kauth_cred_group(l->l_cred, i);

	return m_add(control, m);
}
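
/*
 * Illustrative userland sketch (not compiled): a receiver that enables
 * LOCAL_CREDS and then finds a struct sockcred, as filled in above, in
 * the control data of the next message ("msg" is assumed to come from
 * a recvmsg(2) call like the SCM_RIGHTS sketch earlier).
 */
#if 0
	int on = 1;
	struct sockcred *sc;
	struct cmsghdr *cmp;

	setsockopt(sd, 0, LOCAL_CREDS, &on, sizeof(on));
	cmp = CMSG_FIRSTHDR(&msg);
	if (cmp != NULL && cmp->cmsg_level == SOL_SOCKET &&
	    cmp->cmsg_type == SCM_CREDS) {
		sc = (struct sockcred *)CMSG_DATA(cmp);
		printf("uid %d euid %d ngroups %d\n",
		    (int)sc->sc_uid, (int)sc->sc_euid, sc->sc_ngroups);
	}
#endif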

/*
 * Do a mark-sweep GC of files in the system, to free up any which are
 * caught in flight to an about-to-be-closed socket.  Additionally,
 * process deferred file closures.
 */
static void
unp_gc(file_t *dp)
{
	extern	struct domain unixdomain;
	file_t *fp, *np;
	struct socket *so, *so1;
	u_int i, old, new;
	bool didwork;

	KASSERT(curlwp == unp_thread_lwp);
	KASSERT(mutex_owned(&filelist_lock));

	/*
	 * First, process deferred file closures.
	 */
	while (!SLIST_EMPTY(&unp_thread_discard)) {
		fp = SLIST_FIRST(&unp_thread_discard);
		KASSERT(fp->f_unpcount > 0);
		KASSERT(fp->f_count > 0);
		KASSERT(fp->f_msgcount > 0);
		KASSERT(fp->f_count >= fp->f_unpcount);
		KASSERT(fp->f_count >= fp->f_msgcount);
		KASSERT(fp->f_msgcount >= fp->f_unpcount);
		SLIST_REMOVE_HEAD(&unp_thread_discard, f_unplist);
		i = fp->f_unpcount;
		fp->f_unpcount = 0;
		mutex_exit(&filelist_lock);
		for (; i != 0; i--) {
			unp_discard_now(fp);
		}
		mutex_enter(&filelist_lock);
	}

	/*
	 * Clear mark bits.  Ensure that we don't consider new files
	 * entering the file table during this loop (they will not have
	 * FSCAN set).
	 */
	unp_defer = 0;
	LIST_FOREACH(fp, &filehead, f_list) {
		for (old = fp->f_flag;; old = new) {
			new = atomic_cas_uint(&fp->f_flag, old,
			    (old | FSCAN) & ~(FMARK|FDEFER));
			if (__predict_true(old == new)) {
				break;
			}
		}
	}

	/*
	 * Iterate over the set of sockets, marking ones believed (based on
	 * refcount) to be referenced from a process, and marking for rescan
	 * sockets which are queued on a socket.  Rescan continues descending
	 * and searching for sockets referenced by sockets (FDEFER), until
	 * there are no more socket->socket references to be discovered.
	 */
	do {
		didwork = false;
		for (fp = LIST_FIRST(&filehead); fp != NULL; fp = np) {
			KASSERT(mutex_owned(&filelist_lock));
			np = LIST_NEXT(fp, f_list);
			mutex_enter(&fp->f_lock);
			if ((fp->f_flag & FDEFER) != 0) {
				atomic_and_uint(&fp->f_flag, ~FDEFER);
				unp_defer--;
				KASSERT(fp->f_count != 0);
			} else {
				if (fp->f_count == 0 ||
				    (fp->f_flag & FMARK) != 0 ||
				    fp->f_count == fp->f_msgcount ||
				    fp->f_unpcount != 0) {
					mutex_exit(&fp->f_lock);
					continue;
				}
			}
			atomic_or_uint(&fp->f_flag, FMARK);

			if (fp->f_type != DTYPE_SOCKET ||
			    (so = fp->f_data) == NULL ||
			    so->so_proto->pr_domain != &unixdomain ||
			    (so->so_proto->pr_flags & PR_RIGHTS) == 0) {
				mutex_exit(&fp->f_lock);
				continue;
			}

			/* Gain file ref, mark our position, and unlock. */
			didwork = true;
			LIST_INSERT_AFTER(fp, dp, f_list);
			fp->f_count++;
			mutex_exit(&fp->f_lock);
			mutex_exit(&filelist_lock);

			/*
			 * Mark files referenced from sockets queued on the
			 * accept queue as well.
			 */
			solock(so);
			unp_scan(so->so_rcv.sb_mb, unp_mark, 0);
			if ((so->so_options & SO_ACCEPTCONN) != 0) {
				TAILQ_FOREACH(so1, &so->so_q0, so_qe) {
					unp_scan(so1->so_rcv.sb_mb, unp_mark, 0);
				}
				TAILQ_FOREACH(so1, &so->so_q, so_qe) {
					unp_scan(so1->so_rcv.sb_mb, unp_mark, 0);
				}
			}
			sounlock(so);

			/* Re-lock and restart from where we left off. */
			closef(fp);
			mutex_enter(&filelist_lock);
			np = LIST_NEXT(dp, f_list);
			LIST_REMOVE(dp, f_list);
		}
		/*
		 * Bail early if we did nothing in the loop above.  Could
		 * happen because of concurrent activity causing unp_defer
		 * to get out of sync.
		 */
	} while (unp_defer != 0 && didwork);

	/*
	 * Sweep pass.
	 *
	 * We grab an extra reference to each of the files that are
	 * not otherwise accessible and then free the rights that are
	 * stored in messages on them.
	 */
	for (fp = LIST_FIRST(&filehead); fp != NULL; fp = np) {
		KASSERT(mutex_owned(&filelist_lock));
		np = LIST_NEXT(fp, f_list);
		mutex_enter(&fp->f_lock);

		/*
		 * Ignore non-sockets.
		 * Ignore dead sockets, or sockets with pending close.
		 * Ignore sockets obviously referenced elsewhere.
		 * Ignore sockets marked as referenced by our scan.
		 * Ignore new sockets that did not exist during the scan.
		 */
		if (fp->f_type != DTYPE_SOCKET ||
		    fp->f_count == 0 || fp->f_unpcount != 0 ||
		    fp->f_count != fp->f_msgcount ||
		    (fp->f_flag & (FMARK | FSCAN)) != FSCAN) {
			mutex_exit(&fp->f_lock);
			continue;
		}

		/* Gain file ref, mark our position, and unlock. */
		LIST_INSERT_AFTER(fp, dp, f_list);
		fp->f_count++;
		mutex_exit(&fp->f_lock);
		mutex_exit(&filelist_lock);

		/*
		 * Flush all data from the socket's receive buffer.
		 * This will cause files referenced only by the
		 * socket to be queued for close.
		 */
		so = fp->f_data;
		solock(so);
		sorflush(so);
		sounlock(so);

		/* Re-lock and restart from where we left off. */
		closef(fp);
		mutex_enter(&filelist_lock);
		np = LIST_NEXT(dp, f_list);
		LIST_REMOVE(dp, f_list);
	}
}
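
/*
 * Illustrative userland sketch (not compiled): how such garbage comes
 * to exist.  Each end of a pair is sent in flight over the other and
 * both descriptors are closed; the only remaining references then sit
 * in unread SCM_RIGHTS messages, and only the sweep above can reclaim
 * them (send_fd is an assumed helper wrapping sendmsg(2)).
 */
#if 0
	int sv[2];

	socketpair(AF_LOCAL, SOCK_STREAM, 0, sv);
	send_fd(sv[0], sv[1]);
	send_fd(sv[1], sv[0]);
	close(sv[0]);
	close(sv[1]);
	/* The in-flight files form a cycle invisible from any fd table. */
#endif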

/*
 * Garbage collector thread.  While SCM_RIGHTS messages are in transit,
 * wake once per second to garbage collect.  Run continually while we
 * have deferred closes to process.
 */
static void
unp_thread(void *cookie)
{
	file_t *dp;

	/* Allocate a dummy file for our scans. */
	if ((dp = fgetdummy()) == NULL) {
		panic("unp_thread");
	}

	mutex_enter(&filelist_lock);
	for (;;) {
		KASSERT(mutex_owned(&filelist_lock));
		if (SLIST_EMPTY(&unp_thread_discard)) {
			if (unp_rights != 0) {
				(void)cv_timedwait(&unp_thread_cv,
				    &filelist_lock, hz);
			} else {
				cv_wait(&unp_thread_cv, &filelist_lock);
			}
		}
		unp_gc(dp);
	}
	/* NOTREACHED */
}

/*
 * Kick the garbage collector into action if there is something for
 * it to process.
 */
static void
unp_thread_kick(void)
{

	if (!SLIST_EMPTY(&unp_thread_discard) || unp_rights != 0) {
		mutex_enter(&filelist_lock);
		cv_signal(&unp_thread_cv);
		mutex_exit(&filelist_lock);
	}
}

void
unp_dispose(struct mbuf *m)
{

	if (m)
		unp_scan(m, unp_discard_later, 1);
}

void
unp_scan(struct mbuf *m0, void (*op)(file_t *), int discard)
{
	struct mbuf *m;
	file_t **rp, *fp;
	struct cmsghdr *cm;
	int i, qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type != MT_CONTROL ||
			    m->m_len < sizeof(*cm)) {
				continue;
			}
			cm = mtod(m, struct cmsghdr *);
			if (cm->cmsg_level != SOL_SOCKET ||
			    cm->cmsg_type != SCM_RIGHTS)
				continue;
			qfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm)))
			    / sizeof(file_t *);
			rp = (file_t **)CMSG_DATA(cm);
			for (i = 0; i < qfds; i++) {
				fp = *rp;
				if (discard) {
					*rp = NULL;
				}
				(*op)(fp);
				rp++;
			}
		}
		m0 = m0->m_nextpkt;
	}
}

void
unp_mark(file_t *fp)
{

	if (fp == NULL)
		return;

	/* If we're already deferred, don't screw up the defer count */
	mutex_enter(&fp->f_lock);
	if (fp->f_flag & (FMARK | FDEFER)) {
		mutex_exit(&fp->f_lock);
		return;
	}

	/*
	 * Minimize the number of deferrals...  Sockets are the only type of
	 * file which can hold references to another file, so just mark
	 * other files, and defer unmarked sockets for the next pass.
	 */
	if (fp->f_type == DTYPE_SOCKET) {
		unp_defer++;
		KASSERT(fp->f_count != 0);
		atomic_or_uint(&fp->f_flag, FDEFER);
	} else {
		atomic_or_uint(&fp->f_flag, FMARK);
	}
	mutex_exit(&fp->f_lock);
}

static void
unp_discard_now(file_t *fp)
{

	if (fp == NULL)
		return;

	KASSERT(fp->f_count > 0);
	KASSERT(fp->f_msgcount > 0);

	mutex_enter(&fp->f_lock);
	fp->f_msgcount--;
	mutex_exit(&fp->f_lock);
	atomic_dec_uint(&unp_rights);
	(void)closef(fp);
}

static void
unp_discard_later(file_t *fp)
{

	if (fp == NULL)
		return;

	KASSERT(fp->f_count > 0);
	KASSERT(fp->f_msgcount > 0);

	mutex_enter(&filelist_lock);
	if (fp->f_unpcount++ == 0) {
		SLIST_INSERT_HEAD(&unp_thread_discard, fp, f_unplist);
	}
	mutex_exit(&filelist_lock);
}