      1 /*	$NetBSD: uipc_socket2.c,v 1.144 2024/12/06 18:36:31 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     17  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     18  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     19  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     26  * POSSIBILITY OF SUCH DAMAGE.
     27  */
     28 
     29 /*
     30  * Copyright (c) 1982, 1986, 1988, 1990, 1993
     31  *	The Regents of the University of California.  All rights reserved.
     32  *
     33  * Redistribution and use in source and binary forms, with or without
     34  * modification, are permitted provided that the following conditions
     35  * are met:
     36  * 1. Redistributions of source code must retain the above copyright
     37  *    notice, this list of conditions and the following disclaimer.
     38  * 2. Redistributions in binary form must reproduce the above copyright
     39  *    notice, this list of conditions and the following disclaimer in the
     40  *    documentation and/or other materials provided with the distribution.
     41  * 3. Neither the name of the University nor the names of its contributors
     42  *    may be used to endorse or promote products derived from this software
     43  *    without specific prior written permission.
     44  *
     45  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     46  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     47  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     48  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     49  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     50  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     51  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     52  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     53  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     54  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     55  * SUCH DAMAGE.
     56  *
     57  *	@(#)uipc_socket2.c	8.2 (Berkeley) 2/14/95
     58  */
     59 
     60 #include <sys/cdefs.h>
     61 __KERNEL_RCSID(0, "$NetBSD: uipc_socket2.c,v 1.144 2024/12/06 18:36:31 riastradh Exp $");
     62 
     63 #ifdef _KERNEL_OPT
     64 #include "opt_ddb.h"
     65 #include "opt_inet.h"
     66 #include "opt_mbuftrace.h"
     67 #include "opt_sb_max.h"
     68 #endif
     69 
     70 #include <sys/param.h>
     71 #include <sys/types.h>
     72 
     73 #include <sys/buf.h>
     74 #include <sys/domain.h>
     75 #include <sys/file.h>
     76 #include <sys/kauth.h>
     77 #include <sys/mbuf.h>
     78 #include <sys/poll.h>
     79 #include <sys/pool.h>
     80 #include <sys/proc.h>
     81 #include <sys/protosw.h>
     82 #include <sys/signalvar.h>
     83 #include <sys/socket.h>
     84 #include <sys/socketvar.h>
     85 #include <sys/systm.h>
     86 #include <sys/uidinfo.h>
     87 
     88 #ifdef DDB
     89 #include <sys/filedesc.h>
     90 #include <ddb/db_active.h>
     91 #endif
     92 
     93 /*
     94  * Primitive routines for operating on sockets and socket buffers.
     95  *
     96  * Connection life-cycle:
     97  *
     98  *	Normal sequence from the active (originating) side:
     99  *
    100  *	- soisconnecting() is called during processing of connect() call,
    101  *	- resulting in an eventual call to soisconnected() if/when the
    102  *	  connection is established.
    103  *
    104  *	When the connection is torn down during processing of disconnect():
    105  *
    106  *	- soisdisconnecting() is called and,
    107  *	- soisdisconnected() is called when the connection to the peer
    108  *	  is totally severed.
    109  *
    110  *	The semantics of these routines are such that connectionless protocols
    111  *	can call soisconnected() and soisdisconnected() only, bypassing the
    112  *	in-progress calls when setting up a ``connection'' takes no time.
    113  *
    114  *	From the passive side, a socket is created with two queues of sockets:
    115  *
    116  *	- so_q0 (0) for partial connections (i.e. connections in progress)
    117  *	- so_q (1) for connections already made and awaiting user acceptance.
    118  *
    119  *	As a protocol is preparing incoming connections, it creates a socket
    120  *	structure queued on so_q0 by calling sonewconn().  When the connection
    121  *	is established, soisconnected() is called, and transfers the
    122  *	socket structure to so_q, making it available to accept().
    123  *
    124  *	If a socket is closed with sockets on either so_q0 or so_q, these
    125  *	sockets are dropped.
    126  *
    127  * Locking rules and assumptions:
    128  *
    129  * o socket::so_lock can change on the fly.  The low level routines used
    130  *   to lock sockets are aware of this.  When so_lock is acquired, the
    131  *   routine locking must check to see if so_lock still points to the
    132  *   lock that was acquired.  If so_lock has changed in the meantime, the
    133  *   now irrelevant lock that was acquired must be dropped and the lock
    134  *   operation retried.  Although not proven here, this is completely safe
    135  *   on a multiprocessor system, even with relaxed memory ordering, given
    136  *   the next two rules:
    137  *
    138  * o In order to mutate so_lock, the lock pointed to by the current value
    139  *   of so_lock must be held: i.e., the socket must be held locked by the
    140  *   changing thread.  The thread must issue membar_release() to prevent
    141  *   memory accesses being reordered, and can set so_lock to the desired
    142  *   value.  If the lock pointed to by the new value of so_lock is not
    143  *   held by the changing thread, the socket must then be considered
    144  *   unlocked.
    145  *
    146  * o If so_lock is mutated, and the previous lock referred to by so_lock
    147  *   could still be visible to other threads in the system (e.g. via file
    148  *   descriptor or protocol-internal reference), then the old lock must
    149  *   remain valid until the socket and/or protocol control block has been
    150  *   torn down.
    151  *
    152  * o If a socket has a non-NULL so_head value (i.e. is in the process of
    153  *   connecting), then locking the socket must also lock the socket pointed
    154  *   to by so_head: their lock pointers must match.
    155  *
    156  * o If a socket has connections in progress (so_q, so_q0 not empty) then
    157  *   locking the socket must also lock the sockets attached to both queues.
    158  *   Again, their lock pointers must match.
    159  *
    160  * o Beyond the initial lock assignment in socreate(), assigning locks to
    161  *   sockets is the responsibility of the individual protocols / protocol
    162  *   domains.
    163  */
    164 
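/*
 * Illustrative sketch of the life-cycle above, as a connection-oriented
 * protocol would drive it.  The example_*() helpers are hypothetical;
 * the so*() calls are the real routines defined below.
 */
#if 0
/* Active side: called while processing connect(2). */
static int
example_pr_connect(struct socket *so, struct sockaddr *nam, struct lwp *l)
{

	KASSERT(solocked(so));

	soisconnecting(so);		/* sets SS_ISCONNECTING */
	return example_start_handshake(so, nam);
}

/* Protocol input path: the handshake with the peer has completed. */
static void
example_handshake_done(struct socket *so)
{

	KASSERT(solocked(so));

	soisconnected(so);		/* wakes connect()/accept() waiters */
}

/*
 * Teardown mirrors this: pr_disconnect calls soisdisconnecting(), and
 * soisdisconnected() is called once the peer is completely gone.  A
 * connectionless protocol may skip the *ing() steps entirely.
 */
#endif
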
    165 static pool_cache_t	socket_cache;
    166 u_long			sb_max = SB_MAX;/* maximum socket buffer size */
    167 static u_long		sb_max_adj;	/* adjusted sb_max */
    168 
    169 void
    170 soisconnecting(struct socket *so)
    171 {
    172 
    173 	KASSERT(solocked(so));
    174 
    175 	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
    176 	so->so_state |= SS_ISCONNECTING;
    177 }
    178 
    179 void
    180 soisconnected(struct socket *so)
    181 {
    182 	struct socket	*head;
    183 
    184 	head = so->so_head;
    185 
    186 	KASSERT(solocked(so));
    187 	KASSERT(head == NULL || solocked2(so, head));
    188 
    189 	so->so_state &= ~(SS_ISCONNECTING | SS_ISDISCONNECTING);
    190 	so->so_state |= SS_ISCONNECTED;
    191 	if (head && so->so_onq == &head->so_q0) {
    192 		if ((so->so_options & SO_ACCEPTFILTER) == 0) {
    193 			/*
    194 			 * Re-enqueue and wake up any waiters, e.g.
    195 			 * processes blocking on accept().
    196 			 */
    197 			soqremque(so, 0);
    198 			soqinsque(head, so, 1);
    199 			sorwakeup(head);
    200 			cv_broadcast(&head->so_cv);
    201 		} else {
    202 			so->so_upcall =
    203 			    head->so_accf->so_accept_filter->accf_callback;
    204 			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
    205 			so->so_rcv.sb_flags |= SB_UPCALL;
    206 			so->so_options &= ~SO_ACCEPTFILTER;
    207 			(*so->so_upcall)(so, so->so_upcallarg,
    208 					 POLLIN|POLLRDNORM, M_DONTWAIT);
    209 		}
    210 	} else {
    211 		cv_broadcast(&so->so_cv);
    212 		sorwakeup(so);
    213 		sowwakeup(so);
    214 	}
    215 }
    216 
    217 void
    218 soisdisconnecting(struct socket *so)
    219 {
    220 
    221 	KASSERT(solocked(so));
    222 
    223 	so->so_state &= ~SS_ISCONNECTING;
    224 	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
    225 	cv_broadcast(&so->so_cv);
    226 	sowwakeup(so);
    227 	sorwakeup(so);
    228 }
    229 
    230 void
    231 soisdisconnected(struct socket *so)
    232 {
    233 
    234 	KASSERT(solocked(so));
    235 
    236 	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
    237 	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
    238 	cv_broadcast(&so->so_cv);
    239 	sowwakeup(so);
    240 	sorwakeup(so);
    241 }
    242 
    243 void
    244 soinit2(void)
    245 {
    246 
    247 	socket_cache = pool_cache_init(sizeof(struct socket), 0, 0, 0,
    248 	    "socket", NULL, IPL_SOFTNET, NULL, NULL, NULL);
    249 }
    250 
    251 /*
    252  * sonewconn: accept a new connection.
    253  *
    254  * When an attempt at a new connection is noted on a socket which accepts
    255  * connections, sonewconn(9) is called.  If the connection is possible
     256  * (subject to space constraints, etc.) then we allocate a new structure,
    257  * properly linked into the data structure of the original socket.
    258  *
     259  * => If 'soready' is true, then the socket will become ready for accept(), i.e.
    260  *    inserted into the so_q queue, SS_ISCONNECTED set and waiters awoken.
    261  * => May be called from soft-interrupt context.
    262  * => Listening socket should be locked.
    263  * => Returns the new socket locked.
    264  */
    265 struct socket *
    266 sonewconn(struct socket *head, bool soready)
    267 {
    268 	struct socket *so;
    269 	int soqueue, error;
    270 
    271 	KASSERT(solocked(head));
    272 
    273 	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2) {
    274 		/*
    275 		 * Listen queue overflow.  If there is an accept filter
     276 		 * active, pass through the oldest connection it's handling.
    277 		 */
    278 		if (head->so_accf == NULL) {
    279 			return NULL;
    280 		} else {
    281 			struct socket *so2, *next;
    282 
    283 			/* Pass the oldest connection waiting in the
    284 			   accept filter */
    285 			for (so2 = TAILQ_FIRST(&head->so_q0);
    286 			     so2 != NULL; so2 = next) {
    287 				next = TAILQ_NEXT(so2, so_qe);
    288 				if (so2->so_upcall == NULL) {
    289 					continue;
    290 				}
    291 				so2->so_upcall = NULL;
    292 				so2->so_upcallarg = NULL;
    293 				so2->so_options &= ~SO_ACCEPTFILTER;
    294 				so2->so_rcv.sb_flags &= ~SB_UPCALL;
    295 				soisconnected(so2);
    296 				break;
    297 			}
    298 
     299 			/* If nothing was nudged out of the accept filter, bail
     300 			 * out; otherwise proceed with allocating the socket. */
    301 			if (so2 == NULL) {
    302 				return NULL;
    303 			}
    304 		}
    305 	}
    306 	if ((head->so_options & SO_ACCEPTFILTER) != 0) {
    307 		soready = false;
    308 	}
    309 	soqueue = soready ? 1 : 0;
    310 
    311 	if ((so = soget(false)) == NULL) {
    312 		return NULL;
    313 	}
    314 	so->so_type = head->so_type;
    315 	so->so_options = head->so_options & ~SO_ACCEPTCONN;
    316 	so->so_linger = head->so_linger;
    317 	so->so_state = head->so_state | SS_NOFDREF;
    318 	so->so_proto = head->so_proto;
    319 	so->so_timeo = head->so_timeo;
    320 	so->so_pgid = head->so_pgid;
    321 	so->so_send = head->so_send;
    322 	so->so_receive = head->so_receive;
    323 	so->so_uidinfo = head->so_uidinfo;
    324 	so->so_egid = head->so_egid;
    325 	so->so_cpid = head->so_cpid;
    326 
    327 	/*
     328 	 * Share the lock with the listening socket; it may get unshared
    329 	 * once the connection is complete.
    330 	 *
    331 	 * so_lock is stable while we hold the socket locked, so no
    332 	 * need for atomic_load_* here.
    333 	 */
    334 	mutex_obj_hold(head->so_lock);
    335 	so->so_lock = head->so_lock;
    336 
    337 	/*
    338 	 * Reserve the space for socket buffers.
    339 	 */
    340 #ifdef MBUFTRACE
    341 	so->so_mowner = head->so_mowner;
    342 	so->so_rcv.sb_mowner = head->so_rcv.sb_mowner;
    343 	so->so_snd.sb_mowner = head->so_snd.sb_mowner;
    344 #endif
    345 	if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
    346 		goto out;
    347 	}
    348 	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
    349 	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
    350 	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
    351 	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
    352 	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & (SB_AUTOSIZE | SB_ASYNC);
    353 	so->so_snd.sb_flags |= head->so_snd.sb_flags & (SB_AUTOSIZE | SB_ASYNC);
    354 
    355 	/*
    356 	 * Finally, perform the protocol attach.  Note: a new socket
    357 	 * lock may be assigned at this point (if so, it will be held).
    358 	 */
    359 	error = (*so->so_proto->pr_usrreqs->pr_attach)(so, 0);
    360 	if (error) {
    361 out:
    362 		KASSERT(solocked(so));
    363 		KASSERT(so->so_accf == NULL);
    364 		soput(so);
    365 
    366 		/* Note: the listening socket shall stay locked. */
    367 		KASSERT(solocked(head));
    368 		return NULL;
    369 	}
    370 	KASSERT(solocked2(head, so));
    371 
    372 	/*
    373 	 * Insert into the queue.  If ready, update the connection status
    374 	 * and wake up any waiters, e.g. processes blocking on accept().
    375 	 */
    376 	soqinsque(head, so, soqueue);
    377 	if (soready) {
    378 		so->so_state |= SS_ISCONNECTED;
    379 		sorwakeup(head);
    380 		cv_broadcast(&head->so_cv);
    381 	}
    382 	return so;
    383 }
    384 
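/*
 * Illustrative sketch of the sonewconn() contract: a listening
 * protocol's input path queues a new, not-yet-ready socket on so_q0
 * and later promotes it with soisconnected().  example_input_syn()
 * is hypothetical.
 */
#if 0
static void
example_input_syn(struct socket *head)
{
	struct socket *so;

	KASSERT(solocked(head));

	so = sonewconn(head, false);	/* queued on head->so_q0, locked */
	if (so == NULL)
		return;			/* listen queue full or no memory */

	/* ... set up protocol-specific handshake state on 'so' ... */

	/*
	 * Once the handshake completes (possibly much later),
	 * soisconnected(so) moves it to so_q and wakes accept().
	 */
}
#endif
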
    385 struct socket *
    386 soget(bool waitok)
    387 {
    388 	struct socket *so;
    389 
    390 	so = pool_cache_get(socket_cache, (waitok ? PR_WAITOK : PR_NOWAIT));
    391 	if (__predict_false(so == NULL))
    392 		return (NULL);
    393 	memset(so, 0, sizeof(*so));
    394 	TAILQ_INIT(&so->so_q0);
    395 	TAILQ_INIT(&so->so_q);
    396 	cv_init(&so->so_cv, "socket");
    397 	cv_init(&so->so_rcv.sb_cv, "netio");
    398 	cv_init(&so->so_snd.sb_cv, "netio");
    399 	selinit(&so->so_rcv.sb_sel);
    400 	selinit(&so->so_snd.sb_sel);
    401 	so->so_rcv.sb_so = so;
    402 	so->so_snd.sb_so = so;
    403 	return so;
    404 }
    405 
    406 void
    407 soput(struct socket *so)
    408 {
    409 
    410 	KASSERT(!cv_has_waiters(&so->so_cv));
    411 	KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv));
    412 	KASSERT(!cv_has_waiters(&so->so_snd.sb_cv));
    413 	seldestroy(&so->so_rcv.sb_sel);
    414 	seldestroy(&so->so_snd.sb_sel);
    415 	mutex_obj_free(so->so_lock);
    416 	cv_destroy(&so->so_cv);
    417 	cv_destroy(&so->so_rcv.sb_cv);
    418 	cv_destroy(&so->so_snd.sb_cv);
    419 	pool_cache_put(socket_cache, so);
    420 }
    421 
    422 /*
    423  * soqinsque: insert socket of a new connection into the specified
    424  * accept queue of the listening socket (head).
    425  *
    426  *	q = 0: queue of partial connections
    427  *	q = 1: queue of incoming connections
    428  */
    429 void
    430 soqinsque(struct socket *head, struct socket *so, int q)
    431 {
    432 	KASSERT(q == 0 || q == 1);
    433 	KASSERT(solocked2(head, so));
    434 	KASSERT(so->so_onq == NULL);
    435 	KASSERT(so->so_head == NULL);
    436 
    437 	so->so_head = head;
    438 	if (q == 0) {
    439 		head->so_q0len++;
    440 		so->so_onq = &head->so_q0;
    441 	} else {
    442 		head->so_qlen++;
    443 		so->so_onq = &head->so_q;
    444 	}
    445 	TAILQ_INSERT_TAIL(so->so_onq, so, so_qe);
    446 }
    447 
    448 /*
    449  * soqremque: remove socket from the specified queue.
    450  *
    451  * => Returns true if socket was removed from the specified queue.
    452  * => False if socket was not removed (because it was in other queue).
    453  */
    454 bool
    455 soqremque(struct socket *so, int q)
    456 {
    457 	struct socket *head = so->so_head;
    458 
    459 	KASSERT(q == 0 || q == 1);
    460 	KASSERT(solocked(so));
    461 	KASSERT(so->so_onq != NULL);
    462 	KASSERT(head != NULL);
    463 
    464 	if (q == 0) {
    465 		if (so->so_onq != &head->so_q0)
    466 			return false;
    467 		head->so_q0len--;
    468 	} else {
    469 		if (so->so_onq != &head->so_q)
    470 			return false;
    471 		head->so_qlen--;
    472 	}
    473 	KASSERT(solocked2(so, head));
    474 	TAILQ_REMOVE(so->so_onq, so, so_qe);
    475 	so->so_onq = NULL;
    476 	so->so_head = NULL;
    477 	return true;
    478 }
    479 
    480 /*
    481  * socantsendmore: indicates that no more data will be sent on the
     482  * socket; it is normally applied to the socket by the protocol code
     483  * when the user informs the system that no more data is to be sent,
     484  * i.e. during pr_shutdown().
    485  */
    486 void
    487 socantsendmore(struct socket *so)
    488 {
    489 	KASSERT(solocked(so));
    490 
    491 	so->so_state |= SS_CANTSENDMORE;
    492 	sowwakeup(so);
    493 }
    494 
    495 /*
    496  * socantrcvmore(): indicates that no more data will be received and
    497  * will normally be applied to the socket by a protocol when it detects
    498  * that the peer will send no more data.  Data queued for reading in
    499  * the socket may yet be read.
    500  */
    501 void
    502 socantrcvmore(struct socket *so)
    503 {
    504 	KASSERT(solocked(so));
    505 
    506 	so->so_state |= SS_CANTRCVMORE;
    507 	sorwakeup(so);
    508 }
    509 
    510 /*
     511  * soroverflow(): indicates that an attempt was made to queue data,
     512  * but the receive buffer overflowed.
    513  */
    514 void
    515 soroverflow(struct socket *so)
    516 {
    517 	KASSERT(solocked(so));
    518 
    519 	so->so_rcv.sb_overflowed++;
    520 	if (so->so_options & SO_RERROR)  {
    521 		so->so_rerror = ENOBUFS;
    522 		sorwakeup(so);
    523 	}
    524 }
    525 
    526 /*
    527  * Wait for data to arrive at/drain from a socket buffer.
    528  */
    529 int
    530 sbwait(struct sockbuf *sb)
    531 {
    532 	struct socket *so;
    533 	kmutex_t *lock;
    534 	int error;
    535 
    536 	so = sb->sb_so;
    537 
    538 	KASSERT(solocked(so));
    539 
    540 	sb->sb_flags |= SB_NOTIFY;
    541 	lock = so->so_lock;
    542 	if ((sb->sb_flags & SB_NOINTR) != 0)
    543 		error = cv_timedwait(&sb->sb_cv, lock, sb->sb_timeo);
    544 	else
    545 		error = cv_timedwait_sig(&sb->sb_cv, lock, sb->sb_timeo);
    546 	if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
    547 		solockretry(so, lock);
    548 	return error;
    549 }
    550 
    551 /*
    552  * Wakeup processes waiting on a socket buffer.
    553  * Do asynchronous notification via SIGIO
    554  * if the socket buffer has the SB_ASYNC flag set.
    555  */
    556 void
    557 sowakeup(struct socket *so, struct sockbuf *sb, int code)
    558 {
    559 	int band;
    560 
    561 	KASSERT(solocked(so));
    562 	KASSERT(sb->sb_so == so);
    563 
    564 	switch (code) {
    565 	case POLL_IN:
    566 		band = POLLIN|POLLRDNORM;
    567 		break;
    568 
    569 	case POLL_OUT:
    570 		band = POLLOUT|POLLWRNORM;
    571 		break;
    572 
    573 	case POLL_HUP:
    574 		band = POLLHUP;
    575 		break;
    576 
    577 	default:
    578 		band = 0;
    579 #ifdef DIAGNOSTIC
    580 		printf("bad siginfo code %d in socket notification.\n", code);
    581 #endif
    582 		break;
    583 	}
    584 
    585 	sb->sb_flags &= ~SB_NOTIFY;
    586 	selnotify(&sb->sb_sel, band, NOTE_SUBMIT);
    587 	cv_broadcast(&sb->sb_cv);
    588 	if (sb->sb_flags & SB_ASYNC)
    589 		fownsignal(so->so_pgid, SIGIO, code, band, so);
    590 	if (sb->sb_flags & SB_UPCALL)
    591 		(*so->so_upcall)(so, so->so_upcallarg, band, M_DONTWAIT);
    592 }
    593 
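/*
 * Illustrative sketch of the SB_UPCALL hook that sowakeup() invokes:
 * an in-kernel consumer registers so_upcall and is called back with
 * the socket locked.  The softint cookie use and the example_*()
 * names are hypothetical.
 */
#if 0
static void
example_upcall(struct socket *so, void *arg, int events, int waitflag)
{

	KASSERT(solocked(so));
	softint_schedule(arg);		/* defer real work out of the upcall */
}

static void
example_register_upcall(struct socket *so, void *cookie)
{

	solock(so);
	so->so_upcall = example_upcall;
	so->so_upcallarg = cookie;
	so->so_rcv.sb_flags |= SB_UPCALL;
	sounlock(so);
}
#endif
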
    594 /*
    595  * Reset a socket's lock pointer.  Wake all threads waiting on the
    596  * socket's condition variables so that they can restart their waits
    597  * using the new lock.  The existing lock must be held.
    598  *
    599  * Caller must have issued membar_release before this.
    600  */
    601 void
    602 solockreset(struct socket *so, kmutex_t *lock)
    603 {
    604 
    605 	KASSERT(solocked(so));
    606 
    607 	so->so_lock = lock;
    608 	cv_broadcast(&so->so_snd.sb_cv);
    609 	cv_broadcast(&so->so_rcv.sb_cv);
    610 	cv_broadcast(&so->so_cv);
    611 }
    612 
    613 /*
    614  * Socket buffer (struct sockbuf) utility routines.
    615  *
    616  * Each socket contains two socket buffers: one for sending data and
    617  * one for receiving data.  Each buffer contains a queue of mbufs,
    618  * information about the number of mbufs and amount of data in the
    619  * queue, and other fields allowing poll() statements and notification
    620  * on data availability to be implemented.
    621  *
    622  * Data stored in a socket buffer is maintained as a list of records.
    623  * Each record is a list of mbufs chained together with the m_next
    624  * field.  Records are chained together with the m_nextpkt field. The upper
    625  * level routine soreceive() expects the following conventions to be
    626  * observed when placing information in the receive buffer:
    627  *
    628  * 1. If the protocol requires each message be preceded by the sender's
    629  *    name, then a record containing that name must be present before
    630  *    any associated data (mbuf's must be of type MT_SONAME).
    631  * 2. If the protocol supports the exchange of ``access rights'' (really
    632  *    just additional data associated with the message), and there are
    633  *    ``rights'' to be received, then a record containing this data
    634  *    should be present (mbuf's must be of type MT_CONTROL).
    635  * 3. If a name or rights record exists, then it must be followed by
    636  *    a data record, perhaps of zero length.
    637  *
    638  * Before using a new socket structure it is first necessary to reserve
     639  * buffer space for the socket by calling sbreserve().  This should commit
    640  * some of the available buffer space in the system buffer pool for the
    641  * socket (currently, it does nothing but enforce limits).  The space
    642  * should be released by calling sbrelease() when the socket is destroyed.
    643  */
    644 
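/*
 * Illustrative sketch of conventions 1-3 above: a datagram protocol
 * delivers an incoming packet together with the sender's name via
 * sbappendaddr(), which builds the MT_SONAME record for soreceive().
 * example_datagram_input() is hypothetical.
 */
#if 0
static void
example_datagram_input(struct socket *so, const struct sockaddr *from,
    struct mbuf *m)
{

	KASSERT(solocked(so));

	if (sbappendaddr(&so->so_rcv, from, m, NULL) == 0) {
		soroverflow(so);	/* no space: count the drop */
		m_freem(m);
		return;
	}
	sorwakeup(so);			/* wake readers / poll / SIGIO */
}
#endif
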
    645 int
    646 sb_max_set(u_long new_sbmax)
    647 {
    648 	int s;
    649 
    650 	if (new_sbmax < (16 * 1024))
    651 		return (EINVAL);
    652 
    653 	s = splsoftnet();
    654 	sb_max = new_sbmax;
    655 	sb_max_adj = (u_quad_t)new_sbmax * MCLBYTES / (MSIZE + MCLBYTES);
    656 	splx(s);
    657 
    658 	return (0);
    659 }
    660 
    661 int
    662 soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
    663 {
    664 	KASSERT(so->so_pcb == NULL || solocked(so));
    665 
    666 	/*
     667 	 * There's at least one application (the configure script of
     668 	 * screen(1)) which expects a fifo to be writable even if it
     669 	 * already has "some" bytes in its buffer, so we want to make
     670 	 * sure that (hiwat - lowat) >= (some bytes).
     671 	 *
     672 	 * PIPE_BUF here is an arbitrary value chosen as (some bytes)
     673 	 * above; we expect it to be large enough for such applications.
    674 	 */
    675 	u_long  lowat = MAX(sock_loan_thresh, MCLBYTES);
    676 	u_long  hiwat = lowat + PIPE_BUF;
    677 
    678 	if (sndcc < hiwat)
    679 		sndcc = hiwat;
    680 	if (sbreserve(&so->so_snd, sndcc, so) == 0)
    681 		goto bad;
    682 	if (sbreserve(&so->so_rcv, rcvcc, so) == 0)
    683 		goto bad2;
    684 	if (so->so_rcv.sb_lowat == 0)
    685 		so->so_rcv.sb_lowat = 1;
    686 	if (so->so_snd.sb_lowat == 0)
    687 		so->so_snd.sb_lowat = lowat;
    688 	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
    689 		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
    690 	return (0);
    691  bad2:
    692 	sbrelease(&so->so_snd, so);
    693  bad:
    694 	return (ENOBUFS);
    695 }
    696 
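/*
 * Illustrative sketch: a protocol's pr_attach typically assigns a lock
 * with sosetlock() and reserves its default buffer space here.  The
 * EXAMPLE_* sizes and example_pr_attach() are hypothetical.
 */
#if 0
#define	EXAMPLE_SENDSPACE	(8 * 1024)
#define	EXAMPLE_RECVSPACE	(16 * 1024)

static int
example_pr_attach(struct socket *so, int proto)
{
	int error = 0;

	sosetlock(so);
	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0)
		error = soreserve(so, EXAMPLE_SENDSPACE, EXAMPLE_RECVSPACE);
	return error;
}
#endif
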
    697 /*
    698  * Allot mbufs to a sockbuf.
    699  * Attempt to scale mbmax so that mbcnt doesn't become limiting
    700  * if buffering efficiency is near the normal case.
    701  */
    702 int
    703 sbreserve(struct sockbuf *sb, u_long cc, struct socket *so)
    704 {
    705 	struct lwp *l = curlwp; /* XXX */
    706 	rlim_t maxcc;
    707 	struct uidinfo *uidinfo;
    708 
    709 	KASSERT(so->so_pcb == NULL || solocked(so));
    710 	KASSERT(sb->sb_so == so);
    711 	KASSERT(sb_max_adj != 0);
    712 
    713 	if (cc == 0 || cc > sb_max_adj)
    714 		return (0);
    715 
    716 	maxcc = l->l_proc->p_rlimit[RLIMIT_SBSIZE].rlim_cur;
    717 
    718 	uidinfo = so->so_uidinfo;
    719 	if (!chgsbsize(uidinfo, &sb->sb_hiwat, cc, maxcc))
    720 		return 0;
    721 	sb->sb_mbmax = uimin(cc * 2, sb_max);
    722 	if (sb->sb_lowat > sb->sb_hiwat)
    723 		sb->sb_lowat = sb->sb_hiwat;
    724 
    725 	return (1);
    726 }
    727 
    728 /*
    729  * Free mbufs held by a socket, and reserved mbuf space.  We do not assert
    730  * that the socket is held locked here: see sorflush().
    731  */
    732 void
    733 sbrelease(struct sockbuf *sb, struct socket *so)
    734 {
    735 
    736 	KASSERT(sb->sb_so == so);
    737 
    738 	sbflush(sb);
    739 	(void)chgsbsize(so->so_uidinfo, &sb->sb_hiwat, 0, RLIM_INFINITY);
    740 	sb->sb_mbmax = 0;
    741 }
    742 
    743 /*
    744  * Routines to add and remove
    745  * data from an mbuf queue.
    746  *
    747  * The routines sbappend() or sbappendrecord() are normally called to
    748  * append new mbufs to a socket buffer, after checking that adequate
    749  * space is available, comparing the function sbspace() with the amount
    750  * of data to be added.  sbappendrecord() differs from sbappend() in
    751  * that data supplied is treated as the beginning of a new record.
    752  * To place a sender's address, optional access rights, and data in a
    753  * socket receive buffer, sbappendaddr() should be used.  To place
    754  * access rights and data in a socket receive buffer, sbappendrights()
    755  * should be used.  In either case, the new data begins a new record.
    756  * Note that unlike sbappend() and sbappendrecord(), these routines check
    757  * for the caller that there will be enough space to store the data.
    758  * Each fails if there is not enough space, or if it cannot find mbufs
    759  * to store additional information in.
    760  *
    761  * Reliable protocols may use the socket send buffer to hold data
    762  * awaiting acknowledgement.  Data is normally copied from a socket
    763  * send buffer in a protocol with m_copym for output to a peer,
     764  * and the data is then removed from the socket buffer with sbdrop()
    765  * or sbdroprecord() when the data is acknowledged by the peer.
    766  */
    767 
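/*
 * Illustrative sketch of the send-buffer usage described above: a
 * reliable protocol keeps unacknowledged data on so_snd, copies it out
 * with m_copym() for transmission and trims it with sbdrop() when the
 * peer acknowledges.  example_output() and the example_*() wrappers
 * are hypothetical.
 */
#if 0
static void
example_send(struct socket *so, struct mbuf *m)
{
	struct mbuf *copy;

	KASSERT(solocked(so));

	sbappend(&so->so_snd, m);	/* keep the data for retransmission */
	copy = m_copym(so->so_snd.sb_mb, 0, M_COPYALL, M_DONTWAIT);
	if (copy != NULL)
		example_output(so, copy);
}

static void
example_acked(struct socket *so, int acked)
{

	KASSERT(solocked(so));

	sbdrop(&so->so_snd, acked);	/* acknowledged data is no longer needed */
	sowwakeup(so);			/* writers may find space again */
}
#endif
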
    768 #ifdef SOCKBUF_DEBUG
    769 void
    770 sblastrecordchk(struct sockbuf *sb, const char *where)
    771 {
    772 	struct mbuf *m = sb->sb_mb;
    773 
    774 	KASSERT(solocked(sb->sb_so));
    775 
    776 	while (m && m->m_nextpkt)
    777 		m = m->m_nextpkt;
    778 
    779 	if (m != sb->sb_lastrecord) {
    780 		printf("sblastrecordchk: sb_mb %p sb_lastrecord %p last %p\n",
    781 		    sb->sb_mb, sb->sb_lastrecord, m);
    782 		printf("packet chain:\n");
    783 		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
    784 			printf("\t%p\n", m);
    785 		panic("sblastrecordchk from %s", where);
    786 	}
    787 }
    788 
    789 void
    790 sblastmbufchk(struct sockbuf *sb, const char *where)
    791 {
    792 	struct mbuf *m = sb->sb_mb;
    793 	struct mbuf *n;
    794 
    795 	KASSERT(solocked(sb->sb_so));
    796 
    797 	while (m && m->m_nextpkt)
    798 		m = m->m_nextpkt;
    799 
    800 	while (m && m->m_next)
    801 		m = m->m_next;
    802 
    803 	if (m != sb->sb_mbtail) {
    804 		printf("sblastmbufchk: sb_mb %p sb_mbtail %p last %p\n",
    805 		    sb->sb_mb, sb->sb_mbtail, m);
    806 		printf("packet tree:\n");
    807 		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
    808 			printf("\t");
    809 			for (n = m; n != NULL; n = n->m_next)
    810 				printf("%p ", n);
    811 			printf("\n");
    812 		}
    813 		panic("sblastmbufchk from %s", where);
    814 	}
    815 }
    816 #endif /* SOCKBUF_DEBUG */
    817 
    818 /*
    819  * Link a chain of records onto a socket buffer
    820  */
    821 #define	SBLINKRECORDCHAIN(sb, m0, mlast)				\
    822 do {									\
    823 	if ((sb)->sb_lastrecord != NULL)				\
    824 		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
    825 	else								\
    826 		(sb)->sb_mb = (m0);					\
    827 	(sb)->sb_lastrecord = (mlast);					\
    828 } while (/*CONSTCOND*/0)
    829 
    830 
    831 #define	SBLINKRECORD(sb, m0)						\
    832     SBLINKRECORDCHAIN(sb, m0, m0)
    833 
    834 /*
    835  * Append mbuf chain m to the last record in the
     836  * socket buffer sb.  The additional space associated with
     837  * the mbuf chain is recorded in sb.  Empty mbufs are
    838  * discarded and mbufs are compacted where possible.
    839  */
    840 void
    841 sbappend(struct sockbuf *sb, struct mbuf *m)
    842 {
    843 	struct mbuf	*n;
    844 
    845 	KASSERT(solocked(sb->sb_so));
    846 
    847 	if (m == NULL)
    848 		return;
    849 
    850 #ifdef MBUFTRACE
    851 	m_claimm(m, sb->sb_mowner);
    852 #endif
    853 
    854 	SBLASTRECORDCHK(sb, "sbappend 1");
    855 
    856 	if ((n = sb->sb_lastrecord) != NULL) {
    857 		/*
    858 		 * XXX Would like to simply use sb_mbtail here, but
    859 		 * XXX I need to verify that I won't miss an EOR that
    860 		 * XXX way.
    861 		 */
    862 		do {
    863 			if (n->m_flags & M_EOR) {
    864 				sbappendrecord(sb, m); /* XXXXXX!!!! */
    865 				return;
    866 			}
    867 		} while (n->m_next && (n = n->m_next));
    868 	} else {
    869 		/*
    870 		 * If this is the first record in the socket buffer, it's
    871 		 * also the last record.
    872 		 */
    873 		sb->sb_lastrecord = m;
    874 	}
    875 	sbcompress(sb, m, n);
    876 	SBLASTRECORDCHK(sb, "sbappend 2");
    877 }
    878 
    879 /*
    880  * This version of sbappend() should only be used when the caller
    881  * absolutely knows that there will never be more than one record
    882  * in the socket buffer, that is, a stream protocol (such as TCP).
    883  */
    884 void
    885 sbappendstream(struct sockbuf *sb, struct mbuf *m)
    886 {
    887 
    888 	KASSERT(solocked(sb->sb_so));
    889 	KDASSERT(m->m_nextpkt == NULL);
    890 	KASSERT(sb->sb_mb == sb->sb_lastrecord);
    891 
    892 	SBLASTMBUFCHK(sb, __func__);
    893 
    894 #ifdef MBUFTRACE
    895 	m_claimm(m, sb->sb_mowner);
    896 #endif
    897 
    898 	sbcompress(sb, m, sb->sb_mbtail);
    899 
    900 	sb->sb_lastrecord = sb->sb_mb;
    901 	SBLASTRECORDCHK(sb, __func__);
    902 }
    903 
    904 #ifdef SOCKBUF_DEBUG
    905 void
    906 sbcheck(struct sockbuf *sb)
    907 {
    908 	struct mbuf	*m, *m2;
    909 	u_long		len, mbcnt;
    910 
    911 	KASSERT(solocked(sb->sb_so));
    912 
    913 	len = 0;
    914 	mbcnt = 0;
    915 	for (m = sb->sb_mb; m; m = m->m_nextpkt) {
    916 		for (m2 = m; m2 != NULL; m2 = m2->m_next) {
    917 			len += m2->m_len;
    918 			mbcnt += MSIZE;
    919 			if (m2->m_flags & M_EXT)
    920 				mbcnt += m2->m_ext.ext_size;
    921 			if (m2->m_nextpkt != NULL)
    922 				panic("sbcheck nextpkt");
    923 		}
    924 	}
    925 	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
    926 		printf("cc %lu != %lu || mbcnt %lu != %lu\n", len, sb->sb_cc,
    927 		    mbcnt, sb->sb_mbcnt);
    928 		panic("sbcheck");
    929 	}
    930 }
    931 #endif
    932 
    933 /*
    934  * As above, except the mbuf chain
    935  * begins a new record.
    936  */
    937 void
    938 sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
    939 {
    940 	struct mbuf	*m;
    941 
    942 	KASSERT(solocked(sb->sb_so));
    943 
    944 	if (m0 == NULL)
    945 		return;
    946 
    947 #ifdef MBUFTRACE
    948 	m_claimm(m0, sb->sb_mowner);
    949 #endif
    950 	/*
    951 	 * Put the first mbuf on the queue.
    952 	 * Note this permits zero length records.
    953 	 */
    954 	sballoc(sb, m0);
    955 	SBLASTRECORDCHK(sb, "sbappendrecord 1");
    956 	SBLINKRECORD(sb, m0);
    957 	m = m0->m_next;
    958 	m0->m_next = 0;
    959 	if (m && (m0->m_flags & M_EOR)) {
    960 		m0->m_flags &= ~M_EOR;
    961 		m->m_flags |= M_EOR;
    962 	}
    963 	sbcompress(sb, m, m0);
    964 	SBLASTRECORDCHK(sb, "sbappendrecord 2");
    965 }
    966 
    967 /*
    968  * As above except that OOB data
    969  * is inserted at the beginning of the sockbuf,
    970  * but after any other OOB data.
    971  */
    972 void
    973 sbinsertoob(struct sockbuf *sb, struct mbuf *m0)
    974 {
    975 	struct mbuf	*m, **mp;
    976 
    977 	KASSERT(solocked(sb->sb_so));
    978 
    979 	if (m0 == NULL)
    980 		return;
    981 
    982 	SBLASTRECORDCHK(sb, "sbinsertoob 1");
    983 
    984 	for (mp = &sb->sb_mb; (m = *mp) != NULL; mp = &((*mp)->m_nextpkt)) {
    985 	    again:
    986 		switch (m->m_type) {
    987 
    988 		case MT_OOBDATA:
    989 			continue;		/* WANT next train */
    990 
    991 		case MT_CONTROL:
    992 			if ((m = m->m_next) != NULL)
    993 				goto again;	/* inspect THIS train further */
    994 		}
    995 		break;
    996 	}
    997 	/*
    998 	 * Put the first mbuf on the queue.
    999 	 * Note this permits zero length records.
   1000 	 */
   1001 	sballoc(sb, m0);
   1002 	m0->m_nextpkt = *mp;
   1003 	if (*mp == NULL) {
   1004 		/* m0 is actually the new tail */
   1005 		sb->sb_lastrecord = m0;
   1006 	}
   1007 	*mp = m0;
   1008 	m = m0->m_next;
   1009 	m0->m_next = 0;
   1010 	if (m && (m0->m_flags & M_EOR)) {
   1011 		m0->m_flags &= ~M_EOR;
   1012 		m->m_flags |= M_EOR;
   1013 	}
   1014 	sbcompress(sb, m, m0);
   1015 	SBLASTRECORDCHK(sb, "sbinsertoob 2");
   1016 }
   1017 
   1018 /*
   1019  * Append address and data, and optionally, control (ancillary) data
   1020  * to the receive queue of a socket.  If present,
   1021  * m0 must include a packet header with total length.
   1022  * Returns 0 if no space in sockbuf or insufficient mbufs.
   1023  */
   1024 int
   1025 sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, struct mbuf *m0,
   1026 	struct mbuf *control)
   1027 {
   1028 	struct mbuf	*m, *n, *nlast;
   1029 	int		space, len;
   1030 
   1031 	KASSERT(solocked(sb->sb_so));
   1032 
   1033 	space = asa->sa_len;
   1034 
   1035 	if (m0 != NULL) {
   1036 		if ((m0->m_flags & M_PKTHDR) == 0)
   1037 			panic("sbappendaddr");
   1038 		space += m0->m_pkthdr.len;
   1039 #ifdef MBUFTRACE
   1040 		m_claimm(m0, sb->sb_mowner);
   1041 #endif
   1042 	}
   1043 	for (n = control; n; n = n->m_next) {
   1044 		space += n->m_len;
   1045 		MCLAIM(n, sb->sb_mowner);
   1046 		if (n->m_next == NULL)	/* keep pointer to last control buf */
   1047 			break;
   1048 	}
   1049 	if (space > sbspace(sb))
   1050 		return (0);
   1051 	m = m_get(M_DONTWAIT, MT_SONAME);
   1052 	if (m == NULL)
   1053 		return (0);
   1054 	MCLAIM(m, sb->sb_mowner);
   1055 	/*
   1056 	 * XXX avoid 'comparison always true' warning which isn't easily
   1057 	 * avoided.
   1058 	 */
   1059 	len = asa->sa_len;
   1060 	if (len > MLEN) {
   1061 		MEXTMALLOC(m, asa->sa_len, M_NOWAIT);
   1062 		if ((m->m_flags & M_EXT) == 0) {
   1063 			m_free(m);
   1064 			return (0);
   1065 		}
   1066 	}
   1067 	m->m_len = asa->sa_len;
   1068 	memcpy(mtod(m, void *), asa, asa->sa_len);
   1069 	if (n)
   1070 		n->m_next = m0;		/* concatenate data to control */
   1071 	else
   1072 		control = m0;
   1073 	m->m_next = control;
   1074 
   1075 	SBLASTRECORDCHK(sb, "sbappendaddr 1");
   1076 
   1077 	for (n = m; n->m_next != NULL; n = n->m_next)
   1078 		sballoc(sb, n);
   1079 	sballoc(sb, n);
   1080 	nlast = n;
   1081 	SBLINKRECORD(sb, m);
   1082 
   1083 	sb->sb_mbtail = nlast;
   1084 	SBLASTMBUFCHK(sb, "sbappendaddr");
   1085 	SBLASTRECORDCHK(sb, "sbappendaddr 2");
   1086 
   1087 	return (1);
   1088 }
   1089 
   1090 /*
    1091  * Helper for sbappendaddrchain(): prepend a struct sockaddr* to
   1092  * an mbuf chain.
   1093  */
   1094 static inline struct mbuf *
   1095 m_prepend_sockaddr(struct sockbuf *sb, struct mbuf *m0,
   1096 		   const struct sockaddr *asa)
   1097 {
   1098 	struct mbuf *m;
   1099 	const int salen = asa->sa_len;
   1100 
   1101 	KASSERT(solocked(sb->sb_so));
   1102 
   1103 	/* only the first in each chain need be a pkthdr */
   1104 	m = m_gethdr(M_DONTWAIT, MT_SONAME);
   1105 	if (m == NULL)
   1106 		return NULL;
   1107 	MCLAIM(m, sb->sb_mowner);
   1108 #ifdef notyet
   1109 	if (salen > MHLEN) {
   1110 		MEXTMALLOC(m, salen, M_NOWAIT);
   1111 		if ((m->m_flags & M_EXT) == 0) {
   1112 			m_free(m);
   1113 			return NULL;
   1114 		}
   1115 	}
   1116 #else
   1117 	KASSERT(salen <= MHLEN);
   1118 #endif
   1119 	m->m_len = salen;
   1120 	memcpy(mtod(m, void *), asa, salen);
   1121 	m->m_next = m0;
   1122 	m->m_pkthdr.len = salen + m0->m_pkthdr.len;
   1123 
   1124 	return m;
   1125 }
   1126 
   1127 int
   1128 sbappendaddrchain(struct sockbuf *sb, const struct sockaddr *asa,
   1129 		  struct mbuf *m0, int sbprio)
   1130 {
   1131 	struct mbuf *m, *n, *n0, *nlast;
   1132 	int error;
   1133 
   1134 	KASSERT(solocked(sb->sb_so));
   1135 
   1136 	/*
    1137 	 * XXX sbprio reserved for encoding priority of this request:
    1138 	 *  SB_PRIO_NONE --> honour normal sb limits
    1139 	 *  SB_PRIO_ONESHOT_OVERFLOW --> if socket has any space,
    1140 	 *	take whole chain.  Intended for large requests
    1141 	 *	that should be delivered atomically (all, or none).
    1142 	 *  SB_PRIO_OVERDRAFT --> allow a small (2*MLEN) overflow
    1143 	 *	over normal socket limits, for messages indicating
    1144 	 *	buffer overflow in earlier normal/lower-priority messages.
    1145 	 *  SB_PRIO_BESTEFFORT --> ignore limits entirely.
    1146 	 *	Intended for kernel-generated messages only.
    1147 	 *	Up to the generator to avoid total mbuf resource exhaustion.
   1148 	 */
   1149 	(void)sbprio;
   1150 
   1151 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
   1152 		panic("sbappendaddrchain");
   1153 
   1154 #ifdef notyet
   1155 	space = sbspace(sb);
   1156 
   1157 	/*
   1158 	 * Enforce SB_PRIO_* limits as described above.
   1159 	 */
   1160 #endif
   1161 
   1162 	n0 = NULL;
   1163 	nlast = NULL;
   1164 	for (m = m0; m; m = m->m_nextpkt) {
   1165 		struct mbuf *np;
   1166 
   1167 #ifdef MBUFTRACE
   1168 		m_claimm(m, sb->sb_mowner);
   1169 #endif
   1170 
   1171 		/* Prepend sockaddr to this record (m) of input chain m0 */
   1172 	  	n = m_prepend_sockaddr(sb, m, asa);
   1173 		if (n == NULL) {
   1174 			error = ENOBUFS;
   1175 			goto bad;
   1176 		}
   1177 
   1178 		/* Append record (asa+m) to end of new chain n0 */
   1179 		if (n0 == NULL) {
   1180 			n0 = n;
   1181 		} else {
   1182 			nlast->m_nextpkt = n;
   1183 		}
   1184 		/* Keep track of last record on new chain */
   1185 		nlast = n;
   1186 
   1187 		for (np = n; np; np = np->m_next)
   1188 			sballoc(sb, np);
   1189 	}
   1190 
   1191 	SBLASTRECORDCHK(sb, "sbappendaddrchain 1");
   1192 
   1193 	/* Drop the entire chain of (asa+m) records onto the socket */
   1194 	SBLINKRECORDCHAIN(sb, n0, nlast);
   1195 
   1196 	SBLASTRECORDCHK(sb, "sbappendaddrchain 2");
   1197 
   1198 	for (m = nlast; m->m_next; m = m->m_next)
   1199 		;
   1200 	sb->sb_mbtail = m;
   1201 	SBLASTMBUFCHK(sb, "sbappendaddrchain");
   1202 
   1203 	return (1);
   1204 
   1205 bad:
   1206 	/*
   1207 	 * On error, free the prepended addresses. For consistency
   1208 	 * with sbappendaddr(), leave it to our caller to free
   1209 	 * the input record chain passed to us as m0.
   1210 	 */
   1211 	while ((n = n0) != NULL) {
   1212 	  	struct mbuf *np;
   1213 
   1214 		/* Undo the sballoc() of this record */
   1215 		for (np = n; np; np = np->m_next)
   1216 			sbfree(sb, np);
   1217 
   1218 		n0 = n->m_nextpkt;	/* iterate at next prepended address */
   1219 		np = m_free(n);		/* free prepended address (not data) */
   1220 	}
   1221 	return error;
   1222 }
   1223 
   1224 
   1225 int
   1226 sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
   1227 {
   1228 	struct mbuf	*m, *mlast, *n;
   1229 	int		space;
   1230 
   1231 	KASSERT(solocked(sb->sb_so));
   1232 
   1233 	space = 0;
   1234 	if (control == NULL)
   1235 		panic("sbappendcontrol");
   1236 	for (m = control; ; m = m->m_next) {
   1237 		space += m->m_len;
   1238 		MCLAIM(m, sb->sb_mowner);
   1239 		if (m->m_next == NULL)
   1240 			break;
   1241 	}
   1242 	n = m;			/* save pointer to last control buffer */
   1243 	for (m = m0; m; m = m->m_next) {
   1244 		MCLAIM(m, sb->sb_mowner);
   1245 		space += m->m_len;
   1246 	}
   1247 	if (space > sbspace(sb))
   1248 		return (0);
   1249 	n->m_next = m0;			/* concatenate data to control */
   1250 
   1251 	SBLASTRECORDCHK(sb, "sbappendcontrol 1");
   1252 
   1253 	for (m = control; m->m_next != NULL; m = m->m_next)
   1254 		sballoc(sb, m);
   1255 	sballoc(sb, m);
   1256 	mlast = m;
   1257 	SBLINKRECORD(sb, control);
   1258 
   1259 	sb->sb_mbtail = mlast;
   1260 	SBLASTMBUFCHK(sb, "sbappendcontrol");
   1261 	SBLASTRECORDCHK(sb, "sbappendcontrol 2");
   1262 
   1263 	return (1);
   1264 }
   1265 
   1266 /*
   1267  * Compress mbuf chain m into the socket
   1268  * buffer sb following mbuf n.  If n
   1269  * is null, the buffer is presumed empty.
   1270  */
   1271 void
   1272 sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
   1273 {
   1274 	int		eor;
   1275 	struct mbuf	*o;
   1276 
   1277 	KASSERT(solocked(sb->sb_so));
   1278 
   1279 	eor = 0;
   1280 	while (m) {
   1281 		eor |= m->m_flags & M_EOR;
   1282 		if (m->m_len == 0 &&
   1283 		    (eor == 0 ||
   1284 		     (((o = m->m_next) || (o = n)) &&
   1285 		      o->m_type == m->m_type))) {
   1286 			if (sb->sb_lastrecord == m)
   1287 				sb->sb_lastrecord = m->m_next;
   1288 			m = m_free(m);
   1289 			continue;
   1290 		}
   1291 		if (n && (n->m_flags & M_EOR) == 0 &&
   1292 		    /* M_TRAILINGSPACE() checks buffer writeability */
   1293 		    m->m_len <= MCLBYTES / 4 && /* XXX Don't copy too much */
   1294 		    m->m_len <= M_TRAILINGSPACE(n) &&
   1295 		    n->m_type == m->m_type) {
   1296 			memcpy(mtod(n, char *) + n->m_len, mtod(m, void *),
   1297 			    (unsigned)m->m_len);
   1298 			n->m_len += m->m_len;
   1299 			sb->sb_cc += m->m_len;
   1300 			m = m_free(m);
   1301 			continue;
   1302 		}
   1303 		if (n)
   1304 			n->m_next = m;
   1305 		else
   1306 			sb->sb_mb = m;
   1307 		sb->sb_mbtail = m;
   1308 		sballoc(sb, m);
   1309 		n = m;
   1310 		m->m_flags &= ~M_EOR;
   1311 		m = m->m_next;
   1312 		n->m_next = 0;
   1313 	}
   1314 	if (eor) {
   1315 		if (n)
   1316 			n->m_flags |= eor;
   1317 		else
   1318 			printf("semi-panic: sbcompress\n");
   1319 	}
   1320 	SBLASTMBUFCHK(sb, __func__);
   1321 }
   1322 
   1323 /*
   1324  * Free all mbufs in a sockbuf.
   1325  * Check that all resources are reclaimed.
   1326  */
   1327 void
   1328 sbflush(struct sockbuf *sb)
   1329 {
   1330 
   1331 	KASSERT(solocked(sb->sb_so));
   1332 	KASSERT((sb->sb_flags & SB_LOCK) == 0);
   1333 
   1334 	while (sb->sb_mbcnt)
   1335 		sbdrop(sb, (int)sb->sb_cc);
   1336 
   1337 	KASSERT(sb->sb_cc == 0);
   1338 	KASSERT(sb->sb_mb == NULL);
   1339 	KASSERT(sb->sb_mbtail == NULL);
   1340 	KASSERT(sb->sb_lastrecord == NULL);
   1341 }
   1342 
   1343 /*
   1344  * Drop data from (the front of) a sockbuf.
   1345  */
   1346 void
   1347 sbdrop(struct sockbuf *sb, int len)
   1348 {
   1349 	struct mbuf	*m, *next;
   1350 
   1351 	KASSERT(solocked(sb->sb_so));
   1352 
   1353 	next = (m = sb->sb_mb) ? m->m_nextpkt : NULL;
   1354 	while (len > 0) {
   1355 		if (m == NULL) {
   1356 			if (next == NULL)
   1357 				panic("sbdrop(%p,%d): cc=%lu",
   1358 				    sb, len, sb->sb_cc);
   1359 			m = next;
   1360 			next = m->m_nextpkt;
   1361 			continue;
   1362 		}
   1363 		if (m->m_len > len) {
   1364 			m->m_len -= len;
   1365 			m->m_data += len;
   1366 			sb->sb_cc -= len;
   1367 			break;
   1368 		}
   1369 		len -= m->m_len;
   1370 		sbfree(sb, m);
   1371 		m = m_free(m);
   1372 	}
   1373 	while (m && m->m_len == 0) {
   1374 		sbfree(sb, m);
   1375 		m = m_free(m);
   1376 	}
   1377 	if (m) {
   1378 		sb->sb_mb = m;
   1379 		m->m_nextpkt = next;
   1380 	} else
   1381 		sb->sb_mb = next;
   1382 	/*
   1383 	 * First part is an inline SB_EMPTY_FIXUP().  Second part
   1384 	 * makes sure sb_lastrecord is up-to-date if we dropped
   1385 	 * part of the last record.
   1386 	 */
   1387 	m = sb->sb_mb;
   1388 	if (m == NULL) {
   1389 		sb->sb_mbtail = NULL;
   1390 		sb->sb_lastrecord = NULL;
   1391 	} else if (m->m_nextpkt == NULL)
   1392 		sb->sb_lastrecord = m;
   1393 }
   1394 
   1395 /*
   1396  * Drop a record off the front of a sockbuf
   1397  * and move the next record to the front.
   1398  */
   1399 void
   1400 sbdroprecord(struct sockbuf *sb)
   1401 {
   1402 	struct mbuf	*m, *mn;
   1403 
   1404 	KASSERT(solocked(sb->sb_so));
   1405 
   1406 	m = sb->sb_mb;
   1407 	if (m) {
   1408 		sb->sb_mb = m->m_nextpkt;
   1409 		do {
   1410 			sbfree(sb, m);
   1411 			mn = m_free(m);
   1412 		} while ((m = mn) != NULL);
   1413 	}
   1414 	SB_EMPTY_FIXUP(sb);
   1415 }
   1416 
   1417 /*
   1418  * Create a "control" mbuf containing the specified data
   1419  * with the specified type for presentation on a socket buffer.
   1420  */
   1421 struct mbuf *
   1422 sbcreatecontrol1(void **p, int size, int type, int level, int flags)
   1423 {
   1424 	struct cmsghdr	*cp;
   1425 	struct mbuf	*m;
   1426 	int space = CMSG_SPACE(size);
   1427 
   1428 	if ((flags & M_DONTWAIT) && space > MCLBYTES) {
   1429 		printf("%s: message too large %d\n", __func__, space);
   1430 		return NULL;
   1431 	}
   1432 
   1433 	if ((m = m_get(flags, MT_CONTROL)) == NULL)
   1434 		return NULL;
   1435 	if (space > MLEN) {
   1436 		if (space > MCLBYTES)
   1437 			MEXTMALLOC(m, space, M_WAITOK);
   1438 		else
   1439 			MCLGET(m, flags);
   1440 		if ((m->m_flags & M_EXT) == 0) {
   1441 			m_free(m);
   1442 			return NULL;
   1443 		}
   1444 	}
   1445 	cp = mtod(m, struct cmsghdr *);
   1446 	*p = CMSG_DATA(cp);
   1447 	m->m_len = space;
   1448 	cp->cmsg_len = CMSG_LEN(size);
   1449 	cp->cmsg_level = level;
   1450 	cp->cmsg_type = type;
   1451 
   1452 	memset(cp + 1, 0, CMSG_LEN(0) - sizeof(*cp));
   1453 	memset((uint8_t *)*p + size, 0, CMSG_ALIGN(size) - size);
   1454 
   1455 	return m;
   1456 }
   1457 
   1458 struct mbuf *
   1459 sbcreatecontrol(void *p, int size, int type, int level)
   1460 {
   1461 	struct mbuf *m;
   1462 	void *v;
   1463 
   1464 	m = sbcreatecontrol1(&v, size, type, level, M_DONTWAIT);
   1465 	if (m == NULL)
   1466 		return NULL;
   1467 	memcpy(v, p, size);
   1468 	return m;
   1469 }
   1470 
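/*
 * Illustrative sketch: building an ancillary-data record with
 * sbcreatecontrol() and delivering it alongside the data, as a protocol
 * would for SO_TIMESTAMP.  example_deliver_with_timestamp() is
 * hypothetical.
 */
#if 0
static void
example_deliver_with_timestamp(struct socket *so,
    const struct sockaddr *from, struct mbuf *m)
{
	struct timeval tv;
	struct mbuf *control;

	KASSERT(solocked(so));

	microtime(&tv);
	control = sbcreatecontrol(&tv, sizeof(tv), SCM_TIMESTAMP, SOL_SOCKET);
	/* control may be NULL on allocation failure; deliver data anyway. */
	if (sbappendaddr(&so->so_rcv, from, m, control) == 0) {
		m_freem(m);
		m_freem(control);	/* m_freem(NULL) is a no-op */
		return;
	}
	sorwakeup(so);
}
#endif
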
   1471 void
   1472 solockretry(struct socket *so, kmutex_t *lock)
   1473 {
   1474 
   1475 	while (lock != atomic_load_relaxed(&so->so_lock)) {
   1476 		mutex_exit(lock);
   1477 		lock = atomic_load_consume(&so->so_lock);
   1478 		mutex_enter(lock);
   1479 	}
   1480 }
   1481 
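/*
 * Illustrative sketch of the retry rule from the locking comments at
 * the top of this file: code that acquires so_lock from scratch must
 * re-check the pointer after mutex_enter(), because so_lock may have
 * been changed while the thread was blocked (cf. solockretry() above
 * and solock() in uipc_socket.c).  example_lock_socket() is
 * hypothetical.
 */
#if 0
static void
example_lock_socket(struct socket *so)
{
	kmutex_t *lock;

	for (;;) {
		lock = atomic_load_consume(&so->so_lock);
		mutex_enter(lock);
		if (__predict_true(lock == atomic_load_relaxed(&so->so_lock)))
			break;		/* still the right lock: done */
		mutex_exit(lock);	/* lock changed underneath us: retry */
	}
}
#endif
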
   1482 bool
   1483 solocked(const struct socket *so)
   1484 {
   1485 
   1486 	/*
   1487 	 * Used only for diagnostic assertions, so so_lock should be
    1488 	 * stable at this point, hence no need for atomic_load_*.
   1489 	 */
   1490 	return mutex_owned(so->so_lock);
   1491 }
   1492 
   1493 bool
   1494 solocked2(const struct socket *so1, const struct socket *so2)
   1495 {
   1496 	const kmutex_t *lock;
   1497 
   1498 	/*
   1499 	 * Used only for diagnostic assertions, so so_lock should be
    1500 	 * stable at this point, hence no need for atomic_load_*.
   1501 	 */
   1502 	lock = so1->so_lock;
   1503 	if (lock != so2->so_lock)
   1504 		return false;
   1505 	return mutex_owned(lock);
   1506 }
   1507 
   1508 /*
   1509  * sosetlock: assign a default lock to a new socket.
   1510  */
   1511 void
   1512 sosetlock(struct socket *so)
   1513 {
   1514 	if (so->so_lock == NULL) {
   1515 		kmutex_t *lock = softnet_lock;
   1516 
   1517 		so->so_lock = lock;
   1518 		mutex_obj_hold(lock);
   1519 		mutex_enter(lock);
   1520 	}
   1521 	KASSERT(solocked(so));
   1522 }
   1523 
   1524 /*
   1525  * Set lock on sockbuf sb; sleep if lock is already held.
   1526  * Unless SB_NOINTR is set on sockbuf, sleep is interruptible.
   1527  * Returns error without lock if sleep is interrupted.
   1528  */
   1529 int
   1530 sblock(struct sockbuf *sb, int wf)
   1531 {
   1532 	struct socket *so;
   1533 	kmutex_t *lock;
   1534 	int error;
   1535 
   1536 	KASSERT(solocked(sb->sb_so));
   1537 
   1538 	for (;;) {
   1539 		if (__predict_true((sb->sb_flags & SB_LOCK) == 0)) {
   1540 			sb->sb_flags |= SB_LOCK;
   1541 			return 0;
   1542 		}
   1543 		if (wf != M_WAITOK)
   1544 			return EWOULDBLOCK;
   1545 		so = sb->sb_so;
   1546 		lock = so->so_lock;
   1547 		if ((sb->sb_flags & SB_NOINTR) != 0) {
   1548 			cv_wait(&so->so_cv, lock);
   1549 			error = 0;
   1550 		} else
   1551 			error = cv_wait_sig(&so->so_cv, lock);
   1552 		if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
   1553 			solockretry(so, lock);
   1554 		if (error != 0)
   1555 			return error;
   1556 	}
   1557 }
   1558 
   1559 void
   1560 sbunlock(struct sockbuf *sb)
   1561 {
   1562 	struct socket *so;
   1563 
   1564 	so = sb->sb_so;
   1565 
   1566 	KASSERT(solocked(so));
   1567 	KASSERT((sb->sb_flags & SB_LOCK) != 0);
   1568 
   1569 	sb->sb_flags &= ~SB_LOCK;
   1570 	cv_broadcast(&so->so_cv);
   1571 }
   1572 
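/*
 * Illustrative sketch: typical use of sblock()/sbunlock() together with
 * sbwait() by code that must keep a socket buffer consistent across a
 * sleep, e.g. a receive path.  example_wait_for_data() is hypothetical.
 */
#if 0
static int
example_wait_for_data(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	int error;

	solock(so);
	error = sblock(sb, M_WAITOK);
	if (error) {
		sounlock(so);
		return error;		/* interrupted by a signal */
	}
	while (sb->sb_cc == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		error = sbwait(sb);	/* may drop and retake so_lock */
		if (error)
			break;
	}
	/* ... consume data from sb while holding SB_LOCK ... */
	sbunlock(sb);
	sounlock(so);
	return error;
}
#endif
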
   1573 int
   1574 sowait(struct socket *so, bool catch_p, int timo)
   1575 {
   1576 	kmutex_t *lock;
   1577 	int error;
   1578 
   1579 	KASSERT(solocked(so));
   1580 	KASSERT(catch_p || timo != 0);
   1581 
   1582 	lock = so->so_lock;
   1583 	if (catch_p)
   1584 		error = cv_timedwait_sig(&so->so_cv, lock, timo);
   1585 	else
   1586 		error = cv_timedwait(&so->so_cv, lock, timo);
   1587 	if (__predict_false(lock != atomic_load_relaxed(&so->so_lock)))
   1588 		solockretry(so, lock);
   1589 	return error;
   1590 }
   1591 
   1592 #ifdef DDB
   1593 
   1594 /*
    1595  * Currently, sofindproc() is used only from DDB.  It could be used from
    1596  * other places by using db_mutex_enter().
   1597  */
   1598 
   1599 static inline int
   1600 db_mutex_enter(kmutex_t *mtx)
   1601 {
   1602 	int rv;
   1603 
   1604 	if (!db_active) {
   1605 		mutex_enter(mtx);
   1606 		rv = 1;
   1607 	} else
   1608 		rv = mutex_tryenter(mtx);
   1609 
   1610 	return rv;
   1611 }
   1612 
   1613 int
   1614 sofindproc(struct socket *so, int all, void (*pr)(const char *, ...))
   1615 {
   1616 	proc_t *p;
   1617 	filedesc_t *fdp;
   1618 	fdtab_t *dt;
   1619 	fdfile_t *ff;
   1620 	file_t *fp = NULL;
   1621 	int found = 0;
   1622 	int i, t;
   1623 
   1624 	if (so == NULL)
   1625 		return 0;
   1626 
   1627 	t = db_mutex_enter(&proc_lock);
   1628 	if (!t) {
   1629 		pr("could not acquire proc_lock mutex\n");
   1630 		return 0;
   1631 	}
   1632 	PROCLIST_FOREACH(p, &allproc) {
   1633 		if (p->p_stat == SIDL)
   1634 			continue;
   1635 		fdp = p->p_fd;
   1636 		t = db_mutex_enter(&fdp->fd_lock);
   1637 		if (!t) {
   1638 			pr("could not acquire fd_lock mutex\n");
   1639 			continue;
   1640 		}
   1641 		dt = atomic_load_consume(&fdp->fd_dt);
   1642 		for (i = 0; i < dt->dt_nfiles; i++) {
   1643 			ff = dt->dt_ff[i];
   1644 			if (ff == NULL)
   1645 				continue;
   1646 
   1647 			fp = atomic_load_consume(&ff->ff_file);
   1648 			if (fp == NULL)
   1649 				continue;
   1650 
   1651 			t = db_mutex_enter(&fp->f_lock);
   1652 			if (!t) {
   1653 				pr("could not acquire f_lock mutex\n");
   1654 				continue;
   1655 			}
   1656 			if ((struct socket *)fp->f_data != so) {
   1657 				mutex_exit(&fp->f_lock);
   1658 				continue;
   1659 			}
   1660 			found++;
   1661 			if (pr)
   1662 				pr("socket %p: owner %s(pid=%d)\n",
   1663 				    so, p->p_comm, p->p_pid);
   1664 			mutex_exit(&fp->f_lock);
   1665 			if (all == 0)
   1666 				break;
   1667 		}
   1668 		mutex_exit(&fdp->fd_lock);
   1669 		if (all == 0 && found != 0)
   1670 			break;
   1671 	}
   1672 	mutex_exit(&proc_lock);
   1673 
   1674 	return found;
   1675 }
   1676 
   1677 void
   1678 socket_print(const char *modif, void (*pr)(const char *, ...))
   1679 {
   1680 	file_t *fp;
   1681 	struct socket *so;
   1682 	struct sockbuf *sb_snd, *sb_rcv;
   1683 	struct mbuf *m_rec, *m;
   1684 	bool opt_v = false;
   1685 	bool opt_m = false;
   1686 	bool opt_a = false;
   1687 	bool opt_p = false;
   1688 	int nrecs, nmbufs;
   1689 	char ch;
   1690 	const char *family;
   1691 
   1692 	while ( (ch = *(modif++)) != '\0') {
   1693 		switch (ch) {
   1694 		case 'v':
   1695 			opt_v = true;
   1696 			break;
   1697 		case 'm':
   1698 			opt_m = true;
   1699 			break;
   1700 		case 'a':
   1701 			opt_a = true;
   1702 			break;
   1703 		case 'p':
   1704 			opt_p = true;
   1705 			break;
   1706 		}
   1707 	}
   1708 	if (opt_v == false && pr)
   1709 		(pr)("Ignore empty sockets. use /v to print all.\n");
   1710 	if (opt_p == true && pr)
   1711 		(pr)("Don't search owner process.\n");
   1712 
   1713 	LIST_FOREACH(fp, &filehead, f_list) {
   1714 		if (fp->f_type != DTYPE_SOCKET)
   1715 			continue;
   1716 		so = (struct socket *)fp->f_data;
   1717 		if (so == NULL)
   1718 			continue;
   1719 
   1720 		if (so->so_proto->pr_domain->dom_family == AF_INET)
   1721 			family = "INET";
   1722 #ifdef INET6
   1723 		else if (so->so_proto->pr_domain->dom_family == AF_INET6)
   1724 			family = "INET6";
   1725 #endif
   1726 		else if (so->so_proto->pr_domain->dom_family == pseudo_AF_KEY)
   1727 			family = "KEY";
   1728 		else if (so->so_proto->pr_domain->dom_family == AF_ROUTE)
   1729 			family = "ROUTE";
   1730 		else
   1731 			continue;
   1732 
   1733 		sb_snd = &so->so_snd;
   1734 		sb_rcv = &so->so_rcv;
   1735 
   1736 		if (opt_v != true &&
   1737 		    sb_snd->sb_cc == 0 && sb_rcv->sb_cc == 0)
   1738 			continue;
   1739 
   1740 		pr("---SOCKET %p: type %s\n", so, family);
   1741 		if (opt_p != true)
   1742 			sofindproc(so, opt_a == true ? 1 : 0, pr);
   1743 		pr("Send Buffer Bytes: %d [bytes]\n", sb_snd->sb_cc);
   1744 		pr("Send Buffer mbufs:\n");
   1745 		m_rec = m = sb_snd->sb_mb;
   1746 		nrecs = 0;
   1747 		nmbufs = 0;
   1748 		while (m_rec) {
   1749 			nrecs++;
   1750 			if (opt_m == true)
   1751 				pr(" mbuf chain %p\n", m_rec);
   1752 			while (m) {
   1753 				nmbufs++;
   1754 				m = m->m_next;
   1755 			}
   1756 			m_rec = m = m_rec->m_nextpkt;
   1757 		}
   1758 		pr(" Total %d records, %d mbufs.\n", nrecs, nmbufs);
   1759 
   1760 		pr("Recv Buffer Usage: %d [bytes]\n", sb_rcv->sb_cc);
   1761 		pr("Recv Buffer mbufs:\n");
   1762 		m_rec = m = sb_rcv->sb_mb;
   1763 		nrecs = 0;
   1764 		nmbufs = 0;
   1765 		while (m_rec) {
   1766 			nrecs++;
   1767 			if (opt_m == true)
   1768 				pr(" mbuf chain %p\n", m_rec);
   1769 			while (m) {
   1770 				nmbufs++;
   1771 				m = m->m_next;
   1772 			}
   1773 			m_rec = m = m_rec->m_nextpkt;
   1774 		}
   1775 		pr(" Total %d records, %d mbufs.\n", nrecs, nmbufs);
   1776 	}
   1777 }
   1778 #endif /* DDB */
   1779