      1 /*	$NetBSD: uipc_socket2.c,v 1.91.2.1 2008/05/16 02:25:28 yamt Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     17  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     18  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     19  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     26  * POSSIBILITY OF SUCH DAMAGE.
     27  */
     28 
     29 /*
     30  * Copyright (c) 1982, 1986, 1988, 1990, 1993
     31  *	The Regents of the University of California.  All rights reserved.
     32  *
     33  * Redistribution and use in source and binary forms, with or without
     34  * modification, are permitted provided that the following conditions
     35  * are met:
     36  * 1. Redistributions of source code must retain the above copyright
     37  *    notice, this list of conditions and the following disclaimer.
     38  * 2. Redistributions in binary form must reproduce the above copyright
     39  *    notice, this list of conditions and the following disclaimer in the
     40  *    documentation and/or other materials provided with the distribution.
     41  * 3. Neither the name of the University nor the names of its contributors
     42  *    may be used to endorse or promote products derived from this software
     43  *    without specific prior written permission.
     44  *
     45  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     46  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     47  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     48  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     49  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     50  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     51  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     52  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     53  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     54  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     55  * SUCH DAMAGE.
     56  *
     57  *	@(#)uipc_socket2.c	8.2 (Berkeley) 2/14/95
     58  */
     59 
     60 #include <sys/cdefs.h>
     61 __KERNEL_RCSID(0, "$NetBSD: uipc_socket2.c,v 1.91.2.1 2008/05/16 02:25:28 yamt Exp $");
     62 
     63 #include "opt_mbuftrace.h"
     64 #include "opt_sb_max.h"
     65 
     66 #include <sys/param.h>
     67 #include <sys/systm.h>
     68 #include <sys/proc.h>
     69 #include <sys/file.h>
     70 #include <sys/buf.h>
     71 #include <sys/malloc.h>
     72 #include <sys/mbuf.h>
     73 #include <sys/protosw.h>
     74 #include <sys/domain.h>
     75 #include <sys/poll.h>
     76 #include <sys/socket.h>
     77 #include <sys/socketvar.h>
     78 #include <sys/signalvar.h>
     79 #include <sys/kauth.h>
     80 #include <sys/pool.h>
     81 
     82 /*
     83  * Primitive routines for operating on sockets and socket buffers.
     84  *
     85  * Locking rules and assumptions:
     86  *
     87  * o socket::so_lock can change on the fly.  The low level routines used
     88  *   to lock sockets are aware of this.  When so_lock is acquired, the
     89  *   routine locking must check to see if so_lock still points to the
     90  *   lock that was acquired.  If so_lock has changed in the meantime, the
      91  *   now irrelevant lock that was acquired must be dropped and the lock
     92  *   operation retried.  Although not proven here, this is completely safe
     93  *   on a multiprocessor system, even with relaxed memory ordering, given
     94  *   the next two rules:
     95  *
     96  * o In order to mutate so_lock, the lock pointed to by the current value
     97  *   of so_lock must be held: i.e., the socket must be held locked by the
     98  *   changing thread.  The thread must issue membar_exit() to prevent
      99  *   memory accesses from being reordered, and can set so_lock to the desired
    100  *   value.  If the lock pointed to by the new value of so_lock is not
    101  *   held by the changing thread, the socket must then be considered
    102  *   unlocked.
    103  *
    104  * o If so_lock is mutated, and the previous lock referred to by so_lock
    105  *   could still be visible to other threads in the system (e.g. via file
    106  *   descriptor or protocol-internal reference), then the old lock must
    107  *   remain valid until the socket and/or protocol control block has been
    108  *   torn down.
    109  *
    110  * o If a socket has a non-NULL so_head value (i.e. is in the process of
    111  *   connecting), then locking the socket must also lock the socket pointed
    112  *   to by so_head: their lock pointers must match.
    113  *
    114  * o If a socket has connections in progress (so_q, so_q0 not empty) then
    115  *   locking the socket must also lock the sockets attached to both queues.
    116  *   Again, their lock pointers must match.
    117  *
     118  * o Beyond the initial lock assignment in socreate(), assigning locks to
    119  *   sockets is the responsibility of the individual protocols / protocol
    120  *   domains.
    121  */
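/*
 * Illustrative sketch (editorial, not part of the compiled source) of
 * the retry pattern the first rule above implies; solockretry() near
 * the end of this file implements the same loop for sleeping callers.
 *
 *	kmutex_t *lock;
 *
 *	for (;;) {
 *		lock = so->so_lock;
 *		mutex_enter(lock);
 *		if (__predict_true(lock == so->so_lock))
 *			break;		-- still this socket's lock: done
 *		mutex_exit(lock);	-- so_lock changed under us: retry
 *	}
 */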
    122 
    123 static POOL_INIT(socket_pool, sizeof(struct socket), 0, 0, 0, "sockpl", NULL,
    124     IPL_SOFTNET);
    125 
    126 u_long	sb_max = SB_MAX;	/* maximum socket buffer size */
    127 static u_long sb_max_adj;	/* adjusted sb_max */
    128 
    129 /*
    130  * Procedures to manipulate state flags of socket
    131  * and do appropriate wakeups.  Normal sequence from the
    132  * active (originating) side is that soisconnecting() is
    133  * called during processing of connect() call,
    134  * resulting in an eventual call to soisconnected() if/when the
    135  * connection is established.  When the connection is torn down
    136  * soisdisconnecting() is called during processing of disconnect() call,
    137  * and soisdisconnected() is called when the connection to the peer
    138  * is totally severed.  The semantics of these routines are such that
    139  * connectionless protocols can call soisconnected() and soisdisconnected()
    140  * only, bypassing the in-progress calls when setting up a ``connection''
    141  * takes no time.
    142  *
    143  * From the passive side, a socket is created with
    144  * two queues of sockets: so_q0 for connections in progress
    145  * and so_q for connections already made and awaiting user acceptance.
    146  * As a protocol is preparing incoming connections, it creates a socket
    147  * structure queued on so_q0 by calling sonewconn().  When the connection
    148  * is established, soisconnected() is called, and transfers the
    149  * socket structure to so_q, making it available to accept().
    150  *
    151  * If a socket is closed with sockets on either
    152  * so_q0 or so_q, these sockets are dropped.
    153  *
    154  * If higher level protocols are implemented in
    155  * the kernel, the wakeups done here will sometimes
    156  * cause software-interrupt process scheduling.
    157  */
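/*
 * Illustrative sketch (editorial): the passive-side sequence described
 * above, as a protocol's input path might drive it; "head" is assumed
 * to be a listening socket with SO_ACCEPTCONN set.
 *
 *	so = sonewconn(head, 0);	-- queued on head->so_q0
 *	if (so == NULL)
 *		-- drop the incoming request
 *	... complete the protocol handshake ...
 *	soisconnected(so);	-- moved to head->so_q, visible to accept()
 */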
    158 
    159 void
    160 soisconnecting(struct socket *so)
    161 {
    162 
    163 	KASSERT(solocked(so));
    164 
    165 	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
    166 	so->so_state |= SS_ISCONNECTING;
    167 }
    168 
    169 void
    170 soisconnected(struct socket *so)
    171 {
    172 	struct socket	*head;
    173 
    174 	head = so->so_head;
    175 
    176 	KASSERT(solocked(so));
    177 	KASSERT(head == NULL || solocked2(so, head));
    178 
    179 	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
    180 	so->so_state |= SS_ISCONNECTED;
    181 	if (head && soqremque(so, 0)) {
    182 		soqinsque(head, so, 1);
    183 		sorwakeup(head);
    184 		cv_broadcast(&head->so_cv);
    185 	} else {
    186 		cv_broadcast(&so->so_cv);
    187 		sorwakeup(so);
    188 		sowwakeup(so);
    189 	}
    190 }
    191 
    192 void
    193 soisdisconnecting(struct socket *so)
    194 {
    195 
    196 	KASSERT(solocked(so));
    197 
    198 	so->so_state &= ~SS_ISCONNECTING;
    199 	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
    200 	cv_broadcast(&so->so_cv);
    201 	sowwakeup(so);
    202 	sorwakeup(so);
    203 }
    204 
    205 void
    206 soisdisconnected(struct socket *so)
    207 {
    208 
    209 	KASSERT(solocked(so));
    210 
    211 	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
    212 	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
    213 	cv_broadcast(&so->so_cv);
    214 	sowwakeup(so);
    215 	sorwakeup(so);
    216 }
    217 
    218 /*
    219  * When an attempt at a new connection is noted on a socket
    220  * which accepts connections, sonewconn is called.  If the
    221  * connection is possible (subject to space constraints, etc.)
     222  * then we allocate a new structure, properly linked into the
    223  * data structure of the original socket, and return this.
    224  * Connstatus may be 0, SS_ISCONFIRMING, or SS_ISCONNECTED.
    225  */
    226 struct socket *
    227 sonewconn(struct socket *head, int connstatus)
    228 {
    229 	struct socket	*so;
    230 	int		soqueue, error;
    231 
    232 	KASSERT(solocked(head));
    233 
    234 	soqueue = connstatus ? 1 : 0;
    235 	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
     236 		return (NULL);
    237 	so = soget(false);
    238 	if (so == NULL)
    239 		return (NULL);
    240 	mutex_obj_hold(head->so_lock);
    241 	so->so_lock = head->so_lock;
    242 	so->so_type = head->so_type;
    243 	so->so_options = head->so_options &~ SO_ACCEPTCONN;
    244 	so->so_linger = head->so_linger;
    245 	so->so_state = head->so_state | SS_NOFDREF;
    246 	so->so_nbio = head->so_nbio;
    247 	so->so_proto = head->so_proto;
    248 	so->so_timeo = head->so_timeo;
    249 	so->so_pgid = head->so_pgid;
    250 	so->so_send = head->so_send;
    251 	so->so_receive = head->so_receive;
    252 	so->so_uidinfo = head->so_uidinfo;
    253 #ifdef MBUFTRACE
    254 	so->so_mowner = head->so_mowner;
    255 	so->so_rcv.sb_mowner = head->so_rcv.sb_mowner;
    256 	so->so_snd.sb_mowner = head->so_snd.sb_mowner;
    257 #endif
    258 	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
    259 	so->so_snd.sb_lowat = head->so_snd.sb_lowat;
    260 	so->so_rcv.sb_lowat = head->so_rcv.sb_lowat;
    261 	so->so_rcv.sb_timeo = head->so_rcv.sb_timeo;
    262 	so->so_snd.sb_timeo = head->so_snd.sb_timeo;
    263 	so->so_rcv.sb_flags |= head->so_rcv.sb_flags & SB_AUTOSIZE;
    264 	so->so_snd.sb_flags |= head->so_snd.sb_flags & SB_AUTOSIZE;
    265 	soqinsque(head, so, soqueue);
    266 	error = (*so->so_proto->pr_usrreq)(so, PRU_ATTACH, NULL, NULL,
    267 	    NULL, NULL);
    268 	KASSERT(solocked(so));
    269 	if (error != 0) {
    270 		(void) soqremque(so, soqueue);
    271 		soput(so);
    272 		return (NULL);
    273 	}
    274 	if (connstatus) {
    275 		sorwakeup(head);
    276 		cv_broadcast(&head->so_cv);
    277 		so->so_state |= connstatus;
    278 	}
    279 	return (so);
    280 }
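/*
 * Editorial note on the backlog test above: both queues are counted
 * against 3/2 of the listen() backlog, so e.g. with so_qlimit = 4 new
 * connections are refused once so_qlen + so_q0len exceeds 6.
 */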
    281 
    282 struct socket *
    283 soget(bool waitok)
    284 {
    285 	struct socket *so;
    286 
    287 	so = pool_get(&socket_pool, (waitok ? PR_WAITOK : PR_NOWAIT));
    288 	if (__predict_false(so == NULL))
    289 		return (NULL);
    290 	memset(so, 0, sizeof(*so));
    291 	TAILQ_INIT(&so->so_q0);
    292 	TAILQ_INIT(&so->so_q);
    293 	cv_init(&so->so_cv, "socket");
    294 	cv_init(&so->so_rcv.sb_cv, "netio");
    295 	cv_init(&so->so_snd.sb_cv, "netio");
    296 	selinit(&so->so_rcv.sb_sel);
    297 	selinit(&so->so_snd.sb_sel);
    298 	so->so_rcv.sb_so = so;
    299 	so->so_snd.sb_so = so;
    300 	return so;
    301 }
    302 
    303 void
    304 soput(struct socket *so)
    305 {
    306 
    307 	KASSERT(!cv_has_waiters(&so->so_cv));
    308 	KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv));
    309 	KASSERT(!cv_has_waiters(&so->so_snd.sb_cv));
    310 	seldestroy(&so->so_rcv.sb_sel);
    311 	seldestroy(&so->so_snd.sb_sel);
    312 	mutex_obj_free(so->so_lock);
    313 	cv_destroy(&so->so_cv);
    314 	cv_destroy(&so->so_rcv.sb_cv);
    315 	cv_destroy(&so->so_snd.sb_cv);
    316 	pool_put(&socket_pool, so);
    317 }
    318 
    319 void
    320 soqinsque(struct socket *head, struct socket *so, int q)
    321 {
    322 
    323 	KASSERT(solocked2(head, so));
    324 
    325 #ifdef DIAGNOSTIC
    326 	if (so->so_onq != NULL)
    327 		panic("soqinsque");
    328 #endif
    329 
    330 	so->so_head = head;
    331 	if (q == 0) {
    332 		head->so_q0len++;
    333 		so->so_onq = &head->so_q0;
    334 	} else {
    335 		head->so_qlen++;
    336 		so->so_onq = &head->so_q;
    337 	}
    338 	TAILQ_INSERT_TAIL(so->so_onq, so, so_qe);
    339 }
    340 
    341 int
    342 soqremque(struct socket *so, int q)
    343 {
    344 	struct socket	*head;
    345 
    346 	head = so->so_head;
    347 
    348 	KASSERT(solocked(so));
    349 	if (q == 0) {
    350 		if (so->so_onq != &head->so_q0)
    351 			return (0);
    352 		head->so_q0len--;
    353 	} else {
    354 		if (so->so_onq != &head->so_q)
    355 			return (0);
    356 		head->so_qlen--;
    357 	}
    358 	KASSERT(solocked2(so, head));
    359 	TAILQ_REMOVE(so->so_onq, so, so_qe);
    360 	so->so_onq = NULL;
    361 	so->so_head = NULL;
    362 	return (1);
    363 }
    364 
    365 /*
    366  * Socantsendmore indicates that no more data will be sent on the
     367  * socket; it would normally be applied to a socket by the protocol
     368  * code when the user informs the system that no more data is to be
     369  * sent (in the case of PRU_SHUTDOWN).  Socantrcvmore indicates that no more data
    370  * will be received, and will normally be applied to the socket by a
    371  * protocol when it detects that the peer will send no more data.
    372  * Data queued for reading in the socket may yet be read.
    373  */
    374 
    375 void
    376 socantsendmore(struct socket *so)
    377 {
    378 
    379 	KASSERT(solocked(so));
    380 
    381 	so->so_state |= SS_CANTSENDMORE;
    382 	sowwakeup(so);
    383 }
    384 
    385 void
    386 socantrcvmore(struct socket *so)
    387 {
    388 
    389 	KASSERT(solocked(so));
    390 
    391 	so->so_state |= SS_CANTRCVMORE;
    392 	sorwakeup(so);
    393 }
    394 
    395 /*
    396  * Wait for data to arrive at/drain from a socket buffer.
    397  */
    398 int
    399 sbwait(struct sockbuf *sb)
    400 {
    401 	struct socket *so;
    402 	kmutex_t *lock;
    403 	int error;
    404 
    405 	so = sb->sb_so;
    406 
    407 	KASSERT(solocked(so));
    408 
    409 	sb->sb_flags |= SB_NOTIFY;
    410 	lock = so->so_lock;
    411 	if ((sb->sb_flags & SB_NOINTR) != 0)
    412 		error = cv_timedwait(&sb->sb_cv, lock, sb->sb_timeo);
    413 	else
    414 		error = cv_timedwait_sig(&sb->sb_cv, lock, sb->sb_timeo);
    415 	if (__predict_false(lock != so->so_lock))
    416 		solockretry(so, lock);
    417 	return error;
    418 }
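/*
 * Illustrative sketch (editorial): how a sending path typically uses
 * sbwait(), blocking until the send buffer drains; locking and the
 * full error handling are elided.
 *
 *	while (sbspace(&so->so_snd) < resid) {
 *		error = sbwait(&so->so_snd);
 *		if (error != 0)
 *			-- interrupted or timed out: bail out
 *	}
 */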
    419 
    420 /*
    421  * Wakeup processes waiting on a socket buffer.
    422  * Do asynchronous notification via SIGIO
    423  * if the socket buffer has the SB_ASYNC flag set.
    424  */
    425 void
    426 sowakeup(struct socket *so, struct sockbuf *sb, int code)
    427 {
    428 	int band;
    429 
    430 	KASSERT(solocked(so));
    431 	KASSERT(sb->sb_so == so);
    432 
    433 	if (code == POLL_IN)
    434 		band = POLLIN|POLLRDNORM;
    435 	else
    436 		band = POLLOUT|POLLWRNORM;
    437 	sb->sb_flags &= ~SB_NOTIFY;
    438 	selnotify(&sb->sb_sel, band, NOTE_SUBMIT);
    439 	cv_broadcast(&sb->sb_cv);
    440 	if (sb->sb_flags & SB_ASYNC)
    441 		fownsignal(so->so_pgid, SIGIO, code, band, so);
    442 	if (sb->sb_flags & SB_UPCALL)
    443 		(*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT);
    444 }
    445 
    446 /*
    447  * Socket buffer (struct sockbuf) utility routines.
    448  *
    449  * Each socket contains two socket buffers: one for sending data and
    450  * one for receiving data.  Each buffer contains a queue of mbufs,
    451  * information about the number of mbufs and amount of data in the
    452  * queue, and other fields allowing poll() statements and notification
    453  * on data availability to be implemented.
    454  *
    455  * Data stored in a socket buffer is maintained as a list of records.
    456  * Each record is a list of mbufs chained together with the m_next
    457  * field.  Records are chained together with the m_nextpkt field. The upper
    458  * level routine soreceive() expects the following conventions to be
    459  * observed when placing information in the receive buffer:
    460  *
    461  * 1. If the protocol requires each message be preceded by the sender's
    462  *    name, then a record containing that name must be present before
    463  *    any associated data (mbuf's must be of type MT_SONAME).
    464  * 2. If the protocol supports the exchange of ``access rights'' (really
    465  *    just additional data associated with the message), and there are
    466  *    ``rights'' to be received, then a record containing this data
    467  *    should be present (mbuf's must be of type MT_CONTROL).
    468  * 3. If a name or rights record exists, then it must be followed by
    469  *    a data record, perhaps of zero length.
    470  *
    471  * Before using a new socket structure it is first necessary to reserve
    472  * buffer space to the socket, by calling sbreserve().  This should commit
    473  * some of the available buffer space in the system buffer pool for the
    474  * socket (currently, it does nothing but enforce limits).  The space
    475  * should be released by calling sbrelease() when the socket is destroyed.
    476  */
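/*
 * Editorial sketch of the receive buffer conventions above, for one
 * datagram carrying a sender's name, control data and payload (the
 * shape sbappendaddr() below constructs):
 *
 *	sb_mb -> MT_SONAME -> MT_CONTROL -> MT_DATA -> ...	(m_next links)
 *	             |
 *	             v (m_nextpkt)
 *	         MT_SONAME -> ...				(next record)
 */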
    477 
    478 int
    479 sb_max_set(u_long new_sbmax)
    480 {
    481 	int s;
    482 
    483 	if (new_sbmax < (16 * 1024))
    484 		return (EINVAL);
    485 
    486 	s = splsoftnet();
    487 	sb_max = new_sbmax;
    488 	sb_max_adj = (u_quad_t)new_sbmax * MCLBYTES / (MSIZE + MCLBYTES);
    489 	splx(s);
    490 
    491 	return (0);
    492 }
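/*
 * Editorial note: sb_max_adj discounts sb_max by the best-case ratio
 * of payload to allocated storage (one cluster per mbuf).  Assuming
 * the common values MSIZE = 256 and MCLBYTES = 2048, each cluster
 * mbuf stores 2048 data bytes in 2304 bytes of storage, so e.g.
 *
 *	sb_max = 262144  =>  sb_max_adj = 262144 * 2048 / 2304 = 233016
 */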
    493 
    494 int
    495 soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
    496 {
    497 
    498 	KASSERT(so->so_lock == NULL || solocked(so));
    499 
    500 	/*
     501 	 * There's at least one application (the configure script of
     502 	 * screen) which expects a fifo to be writable even if it already
     503 	 * has "some" bytes in its buffer, so we want to make sure that
     504 	 * (hiwat - lowat) >= (some bytes).
     505 	 *
     506 	 * PIPE_BUF here is an arbitrary value chosen as (some bytes) above;
     507 	 * we expect it to be large enough for such applications.
    508 	 */
    509 	u_long  lowat = MAX(sock_loan_thresh, MCLBYTES);
    510 	u_long  hiwat = lowat + PIPE_BUF;
    511 
    512 	if (sndcc < hiwat)
    513 		sndcc = hiwat;
    514 	if (sbreserve(&so->so_snd, sndcc, so) == 0)
    515 		goto bad;
    516 	if (sbreserve(&so->so_rcv, rcvcc, so) == 0)
    517 		goto bad2;
    518 	if (so->so_rcv.sb_lowat == 0)
    519 		so->so_rcv.sb_lowat = 1;
    520 	if (so->so_snd.sb_lowat == 0)
    521 		so->so_snd.sb_lowat = lowat;
    522 	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
    523 		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
    524 	return (0);
    525  bad2:
    526 	sbrelease(&so->so_snd, so);
    527  bad:
    528 	return (ENOBUFS);
    529 }
    530 
    531 /*
    532  * Allot mbufs to a sockbuf.
    533  * Attempt to scale mbmax so that mbcnt doesn't become limiting
    534  * if buffering efficiency is near the normal case.
    535  */
    536 int
    537 sbreserve(struct sockbuf *sb, u_long cc, struct socket *so)
    538 {
    539 	struct lwp *l = curlwp; /* XXX */
    540 	rlim_t maxcc;
    541 	struct uidinfo *uidinfo;
    542 
    543 	KASSERT(so->so_lock == NULL || solocked(so));
    544 	KASSERT(sb->sb_so == so);
    545 	KASSERT(sb_max_adj != 0);
    546 
    547 	if (cc == 0 || cc > sb_max_adj)
    548 		return (0);
    549 	if (so) {
    550 		if (kauth_cred_geteuid(l->l_cred) == so->so_uidinfo->ui_uid)
    551 			maxcc = l->l_proc->p_rlimit[RLIMIT_SBSIZE].rlim_cur;
    552 		else
    553 			maxcc = RLIM_INFINITY;
    554 		uidinfo = so->so_uidinfo;
    555 	} else {
    556 		uidinfo = uid_find(0);	/* XXX: nothing better */
    557 		maxcc = RLIM_INFINITY;
    558 	}
    559 	if (!chgsbsize(uidinfo, &sb->sb_hiwat, cc, maxcc))
    560 		return 0;
    561 	sb->sb_mbmax = min(cc * 2, sb_max);
    562 	if (sb->sb_lowat > sb->sb_hiwat)
    563 		sb->sb_lowat = sb->sb_hiwat;
    564 	return (1);
    565 }
    566 
    567 /*
    568  * Free mbufs held by a socket, and reserved mbuf space.  We do not assert
    569  * that the socket is held locked here: see sorflush().
    570  */
    571 void
    572 sbrelease(struct sockbuf *sb, struct socket *so)
    573 {
    574 
    575 	KASSERT(sb->sb_so == so);
    576 
    577 	sbflush(sb);
    578 	(void)chgsbsize(so->so_uidinfo, &sb->sb_hiwat, 0, RLIM_INFINITY);
    579 	sb->sb_mbmax = 0;
    580 }
    581 
    582 /*
    583  * Routines to add and remove
    584  * data from an mbuf queue.
    585  *
    586  * The routines sbappend() or sbappendrecord() are normally called to
    587  * append new mbufs to a socket buffer, after checking that adequate
    588  * space is available, comparing the function sbspace() with the amount
    589  * of data to be added.  sbappendrecord() differs from sbappend() in
    590  * that data supplied is treated as the beginning of a new record.
    591  * To place a sender's address, optional access rights, and data in a
    592  * socket receive buffer, sbappendaddr() should be used.  To place
    593  * access rights and data in a socket receive buffer, sbappendrights()
    594  * should be used.  In either case, the new data begins a new record.
    595  * Note that unlike sbappend() and sbappendrecord(), these routines check
    596  * for the caller that there will be enough space to store the data.
    597  * Each fails if there is not enough space, or if it cannot find mbufs
    598  * to store additional information in.
    599  *
    600  * Reliable protocols may use the socket send buffer to hold data
     601  * awaiting acknowledgement.  Data is normally copied from a socket
     602  * send buffer in a protocol with m_copy for output to a peer, and
     603  * the data is then removed from the socket buffer with sbdrop() or
     604  * sbdroprecord() when it is acknowledged by the peer.
    605  */
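/*
 * Illustrative sketch (editorial): the append/drop pairing described
 * above for a reliable protocol's send buffer; space is checked with
 * sbspace() before queueing.
 *
 *	if (sbspace(&so->so_snd) >= m->m_pkthdr.len)
 *		sbappend(&so->so_snd, m);  -- hold for retransmission
 *	...
 *	sbdrop(&so->so_snd, acked);	-- peer acknowledged 'acked' bytes
 *	sowwakeup(so);			-- blocked writers may proceed
 */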
    606 
    607 #ifdef SOCKBUF_DEBUG
    608 void
    609 sblastrecordchk(struct sockbuf *sb, const char *where)
    610 {
    611 	struct mbuf *m = sb->sb_mb;
    612 
    613 	KASSERT(solocked(sb->sb_so));
    614 
    615 	while (m && m->m_nextpkt)
    616 		m = m->m_nextpkt;
    617 
    618 	if (m != sb->sb_lastrecord) {
    619 		printf("sblastrecordchk: sb_mb %p sb_lastrecord %p last %p\n",
    620 		    sb->sb_mb, sb->sb_lastrecord, m);
    621 		printf("packet chain:\n");
    622 		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
    623 			printf("\t%p\n", m);
    624 		panic("sblastrecordchk from %s", where);
    625 	}
    626 }
    627 
    628 void
    629 sblastmbufchk(struct sockbuf *sb, const char *where)
    630 {
    631 	struct mbuf *m = sb->sb_mb;
    632 	struct mbuf *n;
    633 
    634 	KASSERT(solocked(sb->sb_so));
    635 
    636 	while (m && m->m_nextpkt)
    637 		m = m->m_nextpkt;
    638 
    639 	while (m && m->m_next)
    640 		m = m->m_next;
    641 
    642 	if (m != sb->sb_mbtail) {
    643 		printf("sblastmbufchk: sb_mb %p sb_mbtail %p last %p\n",
    644 		    sb->sb_mb, sb->sb_mbtail, m);
    645 		printf("packet tree:\n");
    646 		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
    647 			printf("\t");
    648 			for (n = m; n != NULL; n = n->m_next)
    649 				printf("%p ", n);
    650 			printf("\n");
    651 		}
    652 		panic("sblastmbufchk from %s", where);
    653 	}
    654 }
    655 #endif /* SOCKBUF_DEBUG */
    656 
    657 /*
    658  * Link a chain of records onto a socket buffer
    659  */
    660 #define	SBLINKRECORDCHAIN(sb, m0, mlast)				\
    661 do {									\
    662 	if ((sb)->sb_lastrecord != NULL)				\
    663 		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
    664 	else								\
    665 		(sb)->sb_mb = (m0);					\
    666 	(sb)->sb_lastrecord = (mlast);					\
    667 } while (/*CONSTCOND*/0)
    668 
    669 
    670 #define	SBLINKRECORD(sb, m0)						\
    671     SBLINKRECORDCHAIN(sb, m0, m0)
    672 
    673 /*
    674  * Append mbuf chain m to the last record in the
    675  * socket buffer sb.  The additional space associated
    676  * the mbuf chain is recorded in sb.  Empty mbufs are
    677  * discarded and mbufs are compacted where possible.
    678  */
    679 void
    680 sbappend(struct sockbuf *sb, struct mbuf *m)
    681 {
    682 	struct mbuf	*n;
    683 
    684 	KASSERT(solocked(sb->sb_so));
    685 
    686 	if (m == 0)
    687 		return;
    688 
    689 #ifdef MBUFTRACE
    690 	m_claimm(m, sb->sb_mowner);
    691 #endif
    692 
    693 	SBLASTRECORDCHK(sb, "sbappend 1");
    694 
    695 	if ((n = sb->sb_lastrecord) != NULL) {
    696 		/*
    697 		 * XXX Would like to simply use sb_mbtail here, but
    698 		 * XXX I need to verify that I won't miss an EOR that
    699 		 * XXX way.
    700 		 */
    701 		do {
    702 			if (n->m_flags & M_EOR) {
    703 				sbappendrecord(sb, m); /* XXXXXX!!!! */
    704 				return;
    705 			}
    706 		} while (n->m_next && (n = n->m_next));
    707 	} else {
    708 		/*
    709 		 * If this is the first record in the socket buffer, it's
    710 		 * also the last record.
    711 		 */
    712 		sb->sb_lastrecord = m;
    713 	}
    714 	sbcompress(sb, m, n);
    715 	SBLASTRECORDCHK(sb, "sbappend 2");
    716 }
    717 
    718 /*
    719  * This version of sbappend() should only be used when the caller
    720  * absolutely knows that there will never be more than one record
    721  * in the socket buffer, that is, a stream protocol (such as TCP).
    722  */
    723 void
    724 sbappendstream(struct sockbuf *sb, struct mbuf *m)
    725 {
    726 
    727 	KASSERT(solocked(sb->sb_so));
    728 	KDASSERT(m->m_nextpkt == NULL);
    729 	KASSERT(sb->sb_mb == sb->sb_lastrecord);
    730 
    731 	SBLASTMBUFCHK(sb, __func__);
    732 
    733 #ifdef MBUFTRACE
    734 	m_claimm(m, sb->sb_mowner);
    735 #endif
    736 
    737 	sbcompress(sb, m, sb->sb_mbtail);
    738 
    739 	sb->sb_lastrecord = sb->sb_mb;
    740 	SBLASTRECORDCHK(sb, __func__);
    741 }
    742 
    743 #ifdef SOCKBUF_DEBUG
    744 void
    745 sbcheck(struct sockbuf *sb)
    746 {
    747 	struct mbuf	*m, *m2;
    748 	u_long		len, mbcnt;
    749 
    750 	KASSERT(solocked(sb->sb_so));
    751 
    752 	len = 0;
    753 	mbcnt = 0;
    754 	for (m = sb->sb_mb; m; m = m->m_nextpkt) {
    755 		for (m2 = m; m2 != NULL; m2 = m2->m_next) {
    756 			len += m2->m_len;
    757 			mbcnt += MSIZE;
    758 			if (m2->m_flags & M_EXT)
    759 				mbcnt += m2->m_ext.ext_size;
    760 			if (m2->m_nextpkt != NULL)
    761 				panic("sbcheck nextpkt");
    762 		}
    763 	}
    764 	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
    765 		printf("cc %lu != %lu || mbcnt %lu != %lu\n", len, sb->sb_cc,
    766 		    mbcnt, sb->sb_mbcnt);
    767 		panic("sbcheck");
    768 	}
    769 }
    770 #endif
    771 
    772 /*
    773  * As above, except the mbuf chain
    774  * begins a new record.
    775  */
    776 void
    777 sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
    778 {
    779 	struct mbuf	*m;
    780 
    781 	KASSERT(solocked(sb->sb_so));
    782 
    783 	if (m0 == 0)
    784 		return;
    785 
    786 #ifdef MBUFTRACE
    787 	m_claimm(m0, sb->sb_mowner);
    788 #endif
    789 	/*
    790 	 * Put the first mbuf on the queue.
    791 	 * Note this permits zero length records.
    792 	 */
    793 	sballoc(sb, m0);
    794 	SBLASTRECORDCHK(sb, "sbappendrecord 1");
    795 	SBLINKRECORD(sb, m0);
    796 	m = m0->m_next;
    797 	m0->m_next = 0;
    798 	if (m && (m0->m_flags & M_EOR)) {
    799 		m0->m_flags &= ~M_EOR;
    800 		m->m_flags |= M_EOR;
    801 	}
    802 	sbcompress(sb, m, m0);
    803 	SBLASTRECORDCHK(sb, "sbappendrecord 2");
    804 }
    805 
    806 /*
    807  * As above except that OOB data
    808  * is inserted at the beginning of the sockbuf,
    809  * but after any other OOB data.
    810  */
    811 void
    812 sbinsertoob(struct sockbuf *sb, struct mbuf *m0)
    813 {
    814 	struct mbuf	*m, **mp;
    815 
    816 	KASSERT(solocked(sb->sb_so));
    817 
    818 	if (m0 == 0)
    819 		return;
    820 
    821 	SBLASTRECORDCHK(sb, "sbinsertoob 1");
    822 
    823 	for (mp = &sb->sb_mb; (m = *mp) != NULL; mp = &((*mp)->m_nextpkt)) {
    824 	    again:
    825 		switch (m->m_type) {
    826 
    827 		case MT_OOBDATA:
    828 			continue;		/* WANT next train */
    829 
    830 		case MT_CONTROL:
    831 			if ((m = m->m_next) != NULL)
    832 				goto again;	/* inspect THIS train further */
    833 		}
    834 		break;
    835 	}
    836 	/*
    837 	 * Put the first mbuf on the queue.
    838 	 * Note this permits zero length records.
    839 	 */
    840 	sballoc(sb, m0);
    841 	m0->m_nextpkt = *mp;
    842 	if (*mp == NULL) {
    843 		/* m0 is actually the new tail */
    844 		sb->sb_lastrecord = m0;
    845 	}
    846 	*mp = m0;
    847 	m = m0->m_next;
    848 	m0->m_next = 0;
    849 	if (m && (m0->m_flags & M_EOR)) {
    850 		m0->m_flags &= ~M_EOR;
    851 		m->m_flags |= M_EOR;
    852 	}
    853 	sbcompress(sb, m, m0);
    854 	SBLASTRECORDCHK(sb, "sbinsertoob 2");
    855 }
    856 
    857 /*
    858  * Append address and data, and optionally, control (ancillary) data
    859  * to the receive queue of a socket.  If present,
    860  * m0 must include a packet header with total length.
    861  * Returns 0 if no space in sockbuf or insufficient mbufs.
    862  */
    863 int
    864 sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, struct mbuf *m0,
    865 	struct mbuf *control)
    866 {
    867 	struct mbuf	*m, *n, *nlast;
    868 	int		space, len;
    869 
    870 	KASSERT(solocked(sb->sb_so));
    871 
    872 	space = asa->sa_len;
    873 
    874 	if (m0 != NULL) {
    875 		if ((m0->m_flags & M_PKTHDR) == 0)
    876 			panic("sbappendaddr");
    877 		space += m0->m_pkthdr.len;
    878 #ifdef MBUFTRACE
    879 		m_claimm(m0, sb->sb_mowner);
    880 #endif
    881 	}
    882 	for (n = control; n; n = n->m_next) {
    883 		space += n->m_len;
    884 		MCLAIM(n, sb->sb_mowner);
    885 		if (n->m_next == 0)	/* keep pointer to last control buf */
    886 			break;
    887 	}
    888 	if (space > sbspace(sb))
    889 		return (0);
    890 	MGET(m, M_DONTWAIT, MT_SONAME);
    891 	if (m == 0)
    892 		return (0);
    893 	MCLAIM(m, sb->sb_mowner);
    894 	/*
    895 	 * XXX avoid 'comparison always true' warning which isn't easily
    896 	 * avoided.
    897 	 */
    898 	len = asa->sa_len;
    899 	if (len > MLEN) {
    900 		MEXTMALLOC(m, asa->sa_len, M_NOWAIT);
    901 		if ((m->m_flags & M_EXT) == 0) {
    902 			m_free(m);
    903 			return (0);
    904 		}
    905 	}
    906 	m->m_len = asa->sa_len;
    907 	memcpy(mtod(m, void *), asa, asa->sa_len);
    908 	if (n)
    909 		n->m_next = m0;		/* concatenate data to control */
    910 	else
    911 		control = m0;
    912 	m->m_next = control;
    913 
    914 	SBLASTRECORDCHK(sb, "sbappendaddr 1");
    915 
    916 	for (n = m; n->m_next != NULL; n = n->m_next)
    917 		sballoc(sb, n);
    918 	sballoc(sb, n);
    919 	nlast = n;
    920 	SBLINKRECORD(sb, m);
    921 
    922 	sb->sb_mbtail = nlast;
    923 	SBLASTMBUFCHK(sb, "sbappendaddr");
    924 	SBLASTRECORDCHK(sb, "sbappendaddr 2");
    925 
    926 	return (1);
    927 }
    928 
    929 /*
    930  * Helper for sbappendchainaddr: prepend a struct sockaddr* to
    931  * an mbuf chain.
    932  */
    933 static inline struct mbuf *
    934 m_prepend_sockaddr(struct sockbuf *sb, struct mbuf *m0,
    935 		   const struct sockaddr *asa)
    936 {
    937 	struct mbuf *m;
    938 	const int salen = asa->sa_len;
    939 
    940 	KASSERT(solocked(sb->sb_so));
    941 
    942 	/* only the first in each chain need be a pkthdr */
    943 	MGETHDR(m, M_DONTWAIT, MT_SONAME);
    944 	if (m == 0)
    945 		return (0);
    946 	MCLAIM(m, sb->sb_mowner);
    947 #ifdef notyet
    948 	if (salen > MHLEN) {
    949 		MEXTMALLOC(m, salen, M_NOWAIT);
    950 		if ((m->m_flags & M_EXT) == 0) {
    951 			m_free(m);
    952 			return (0);
    953 		}
    954 	}
    955 #else
    956 	KASSERT(salen <= MHLEN);
    957 #endif
    958 	m->m_len = salen;
    959 	memcpy(mtod(m, void *), asa, salen);
    960 	m->m_next = m0;
    961 	m->m_pkthdr.len = salen + m0->m_pkthdr.len;
    962 
    963 	return m;
    964 }
    965 
    966 int
    967 sbappendaddrchain(struct sockbuf *sb, const struct sockaddr *asa,
    968 		  struct mbuf *m0, int sbprio)
    969 {
    970 	int space;
    971 	struct mbuf *m, *n, *n0, *nlast;
    972 	int error;
    973 
    974 	KASSERT(solocked(sb->sb_so));
    975 
    976 	/*
     977 	 * XXX sbprio reserved for encoding priority of this request:
     978 	 *  SB_PRIO_NONE --> honour normal sb limits.
     979 	 *  SB_PRIO_ONESHOT_OVERFLOW --> if socket has any space, take
     980 	 *	the whole chain.  Intended for large requests that should
     981 	 *	be delivered atomically (all, or none).
     982 	 *  SB_PRIO_OVERDRAFT --> allow a small (2*MLEN) overflow over
     983 	 *	normal socket limits, for messages indicating buffer
     984 	 *	overflow in earlier normal/lower-priority messages.
     985 	 *  SB_PRIO_BESTEFFORT --> ignore limits entirely.  Intended for
     986 	 *	kernel-generated messages only; it is up to the generator
     987 	 *	to avoid total mbuf resource exhaustion.
    988 	 */
    989 	(void)sbprio;
    990 
    991 	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
    992 		panic("sbappendaddrchain");
    993 
    994 	space = sbspace(sb);
    995 
    996 #ifdef notyet
    997 	/*
    998 	 * Enforce SB_PRIO_* limits as described above.
    999 	 */
   1000 #endif
   1001 
   1002 	n0 = NULL;
   1003 	nlast = NULL;
   1004 	for (m = m0; m; m = m->m_nextpkt) {
   1005 		struct mbuf *np;
   1006 
   1007 #ifdef MBUFTRACE
   1008 		m_claimm(m, sb->sb_mowner);
   1009 #endif
   1010 
   1011 		/* Prepend sockaddr to this record (m) of input chain m0 */
   1012 	  	n = m_prepend_sockaddr(sb, m, asa);
   1013 		if (n == NULL) {
   1014 			error = ENOBUFS;
   1015 			goto bad;
   1016 		}
   1017 
   1018 		/* Append record (asa+m) to end of new chain n0 */
   1019 		if (n0 == NULL) {
   1020 			n0 = n;
   1021 		} else {
   1022 			nlast->m_nextpkt = n;
   1023 		}
   1024 		/* Keep track of last record on new chain */
   1025 		nlast = n;
   1026 
   1027 		for (np = n; np; np = np->m_next)
   1028 			sballoc(sb, np);
   1029 	}
   1030 
   1031 	SBLASTRECORDCHK(sb, "sbappendaddrchain 1");
   1032 
   1033 	/* Drop the entire chain of (asa+m) records onto the socket */
   1034 	SBLINKRECORDCHAIN(sb, n0, nlast);
   1035 
   1036 	SBLASTRECORDCHK(sb, "sbappendaddrchain 2");
   1037 
   1038 	for (m = nlast; m->m_next; m = m->m_next)
   1039 		;
   1040 	sb->sb_mbtail = m;
   1041 	SBLASTMBUFCHK(sb, "sbappendaddrchain");
   1042 
   1043 	return (1);
   1044 
   1045 bad:
   1046 	/*
    1047 	 * On error, free the prepended addresses.  For consistency
   1048 	 * with sbappendaddr(), leave it to our caller to free
   1049 	 * the input record chain passed to us as m0.
   1050 	 */
   1051 	while ((n = n0) != NULL) {
   1052 	  	struct mbuf *np;
   1053 
   1054 		/* Undo the sballoc() of this record */
   1055 		for (np = n; np; np = np->m_next)
   1056 			sbfree(sb, np);
   1057 
   1058 		n0 = n->m_nextpkt;	/* iterate at next prepended address */
   1059 		MFREE(n, np);		/* free prepended address (not data) */
   1060 	}
   1061 	return 0;
   1062 }
   1063 
   1064 
   1065 int
   1066 sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
   1067 {
   1068 	struct mbuf	*m, *mlast, *n;
   1069 	int		space;
   1070 
   1071 	KASSERT(solocked(sb->sb_so));
   1072 
   1073 	space = 0;
   1074 	if (control == 0)
   1075 		panic("sbappendcontrol");
   1076 	for (m = control; ; m = m->m_next) {
   1077 		space += m->m_len;
   1078 		MCLAIM(m, sb->sb_mowner);
   1079 		if (m->m_next == 0)
   1080 			break;
   1081 	}
   1082 	n = m;			/* save pointer to last control buffer */
   1083 	for (m = m0; m; m = m->m_next) {
   1084 		MCLAIM(m, sb->sb_mowner);
   1085 		space += m->m_len;
   1086 	}
   1087 	if (space > sbspace(sb))
   1088 		return (0);
   1089 	n->m_next = m0;			/* concatenate data to control */
   1090 
   1091 	SBLASTRECORDCHK(sb, "sbappendcontrol 1");
   1092 
   1093 	for (m = control; m->m_next != NULL; m = m->m_next)
   1094 		sballoc(sb, m);
   1095 	sballoc(sb, m);
   1096 	mlast = m;
   1097 	SBLINKRECORD(sb, control);
   1098 
   1099 	sb->sb_mbtail = mlast;
   1100 	SBLASTMBUFCHK(sb, "sbappendcontrol");
   1101 	SBLASTRECORDCHK(sb, "sbappendcontrol 2");
   1102 
   1103 	return (1);
   1104 }
   1105 
   1106 /*
   1107  * Compress mbuf chain m into the socket
   1108  * buffer sb following mbuf n.  If n
   1109  * is null, the buffer is presumed empty.
   1110  */
   1111 void
   1112 sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
   1113 {
   1114 	int		eor;
   1115 	struct mbuf	*o;
   1116 
   1117 	KASSERT(solocked(sb->sb_so));
   1118 
   1119 	eor = 0;
   1120 	while (m) {
   1121 		eor |= m->m_flags & M_EOR;
   1122 		if (m->m_len == 0 &&
   1123 		    (eor == 0 ||
   1124 		     (((o = m->m_next) || (o = n)) &&
   1125 		      o->m_type == m->m_type))) {
   1126 			if (sb->sb_lastrecord == m)
   1127 				sb->sb_lastrecord = m->m_next;
   1128 			m = m_free(m);
   1129 			continue;
   1130 		}
   1131 		if (n && (n->m_flags & M_EOR) == 0 &&
   1132 		    /* M_TRAILINGSPACE() checks buffer writeability */
   1133 		    m->m_len <= MCLBYTES / 4 && /* XXX Don't copy too much */
   1134 		    m->m_len <= M_TRAILINGSPACE(n) &&
   1135 		    n->m_type == m->m_type) {
   1136 			memcpy(mtod(n, char *) + n->m_len, mtod(m, void *),
   1137 			    (unsigned)m->m_len);
   1138 			n->m_len += m->m_len;
   1139 			sb->sb_cc += m->m_len;
   1140 			m = m_free(m);
   1141 			continue;
   1142 		}
   1143 		if (n)
   1144 			n->m_next = m;
   1145 		else
   1146 			sb->sb_mb = m;
   1147 		sb->sb_mbtail = m;
   1148 		sballoc(sb, m);
   1149 		n = m;
   1150 		m->m_flags &= ~M_EOR;
   1151 		m = m->m_next;
   1152 		n->m_next = 0;
   1153 	}
   1154 	if (eor) {
   1155 		if (n)
   1156 			n->m_flags |= eor;
   1157 		else
   1158 			printf("semi-panic: sbcompress\n");
   1159 	}
   1160 	SBLASTMBUFCHK(sb, __func__);
   1161 }
   1162 
   1163 /*
   1164  * Free all mbufs in a sockbuf.
   1165  * Check that all resources are reclaimed.
   1166  */
   1167 void
   1168 sbflush(struct sockbuf *sb)
   1169 {
   1170 
   1171 	KASSERT(solocked(sb->sb_so));
   1172 	KASSERT((sb->sb_flags & SB_LOCK) == 0);
   1173 
   1174 	while (sb->sb_mbcnt)
   1175 		sbdrop(sb, (int)sb->sb_cc);
   1176 
   1177 	KASSERT(sb->sb_cc == 0);
   1178 	KASSERT(sb->sb_mb == NULL);
   1179 	KASSERT(sb->sb_mbtail == NULL);
   1180 	KASSERT(sb->sb_lastrecord == NULL);
   1181 }
   1182 
   1183 /*
   1184  * Drop data from (the front of) a sockbuf.
   1185  */
   1186 void
   1187 sbdrop(struct sockbuf *sb, int len)
   1188 {
   1189 	struct mbuf	*m, *mn, *next;
   1190 
   1191 	KASSERT(solocked(sb->sb_so));
   1192 
   1193 	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
   1194 	while (len > 0) {
   1195 		if (m == 0) {
   1196 			if (next == 0)
   1197 				panic("sbdrop");
   1198 			m = next;
   1199 			next = m->m_nextpkt;
   1200 			continue;
   1201 		}
   1202 		if (m->m_len > len) {
   1203 			m->m_len -= len;
   1204 			m->m_data += len;
   1205 			sb->sb_cc -= len;
   1206 			break;
   1207 		}
   1208 		len -= m->m_len;
   1209 		sbfree(sb, m);
   1210 		MFREE(m, mn);
   1211 		m = mn;
   1212 	}
   1213 	while (m && m->m_len == 0) {
   1214 		sbfree(sb, m);
   1215 		MFREE(m, mn);
   1216 		m = mn;
   1217 	}
   1218 	if (m) {
   1219 		sb->sb_mb = m;
   1220 		m->m_nextpkt = next;
   1221 	} else
   1222 		sb->sb_mb = next;
   1223 	/*
   1224 	 * First part is an inline SB_EMPTY_FIXUP().  Second part
   1225 	 * makes sure sb_lastrecord is up-to-date if we dropped
   1226 	 * part of the last record.
   1227 	 */
   1228 	m = sb->sb_mb;
   1229 	if (m == NULL) {
   1230 		sb->sb_mbtail = NULL;
   1231 		sb->sb_lastrecord = NULL;
   1232 	} else if (m->m_nextpkt == NULL)
   1233 		sb->sb_lastrecord = m;
   1234 }
   1235 
   1236 /*
   1237  * Drop a record off the front of a sockbuf
   1238  * and move the next record to the front.
   1239  */
   1240 void
   1241 sbdroprecord(struct sockbuf *sb)
   1242 {
   1243 	struct mbuf	*m, *mn;
   1244 
   1245 	KASSERT(solocked(sb->sb_so));
   1246 
   1247 	m = sb->sb_mb;
   1248 	if (m) {
   1249 		sb->sb_mb = m->m_nextpkt;
   1250 		do {
   1251 			sbfree(sb, m);
   1252 			MFREE(m, mn);
   1253 		} while ((m = mn) != NULL);
   1254 	}
   1255 	SB_EMPTY_FIXUP(sb);
   1256 }
   1257 
   1258 /*
   1259  * Create a "control" mbuf containing the specified data
   1260  * with the specified type for presentation on a socket buffer.
   1261  */
   1262 struct mbuf *
   1263 sbcreatecontrol(void *p, int size, int type, int level)
   1264 {
   1265 	struct cmsghdr	*cp;
   1266 	struct mbuf	*m;
   1267 
   1268 	if (CMSG_SPACE(size) > MCLBYTES) {
   1269 		printf("sbcreatecontrol: message too large %d\n", size);
   1270 		return NULL;
   1271 	}
   1272 
   1273 	if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL)
    1274 		return (NULL);
   1275 	if (CMSG_SPACE(size) > MLEN) {
   1276 		MCLGET(m, M_DONTWAIT);
   1277 		if ((m->m_flags & M_EXT) == 0) {
   1278 			m_free(m);
   1279 			return NULL;
   1280 		}
   1281 	}
   1282 	cp = mtod(m, struct cmsghdr *);
   1283 	memcpy(CMSG_DATA(cp), p, size);
   1284 	m->m_len = CMSG_SPACE(size);
   1285 	cp->cmsg_len = CMSG_LEN(size);
   1286 	cp->cmsg_level = level;
   1287 	cp->cmsg_type = type;
   1288 	return (m);
   1289 }
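/*
 * Illustrative sketch (editorial): a protocol handing a timestamp to
 * the receive queue as ancillary data; SCM_TIMESTAMP/SOL_SOCKET are
 * one plausible type/level pair.
 *
 *	struct timeval tv;
 *
 *	microtime(&tv);
 *	control = sbcreatecontrol(&tv, sizeof(tv), SCM_TIMESTAMP,
 *	    SOL_SOCKET);
 *	if (control == NULL || sbappendaddr(&so->so_rcv, asa, m,
 *	    control) == 0)
 *		-- no space or no mbufs: caller frees m and control
 */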
   1290 
   1291 void
   1292 solockretry(struct socket *so, kmutex_t *lock)
   1293 {
   1294 
   1295 	while (lock != so->so_lock) {
   1296 		mutex_exit(lock);
   1297 		lock = so->so_lock;
   1298 		mutex_enter(lock);
   1299 	}
   1300 }
   1301 
   1302 bool
   1303 solocked(struct socket *so)
   1304 {
   1305 
   1306 	return mutex_owned(so->so_lock);
   1307 }
   1308 
   1309 bool
   1310 solocked2(struct socket *so1, struct socket *so2)
   1311 {
   1312 	kmutex_t *lock;
   1313 
   1314 	lock = so1->so_lock;
   1315 	if (lock != so2->so_lock)
   1316 		return false;
   1317 	return mutex_owned(lock);
   1318 }
   1319 
   1320 /*
   1321  * Assign a default lock to a new socket.  For PRU_ATTACH, and done by
   1322  * protocols that do not have special locking requirements.
   1323  */
   1324 void
   1325 sosetlock(struct socket *so)
   1326 {
   1327 	kmutex_t *lock;
   1328 
   1329 	if (so->so_lock == NULL) {
   1330 		lock = softnet_lock;
   1331 		so->so_lock = lock;
   1332 		mutex_obj_hold(lock);
   1333 		mutex_enter(lock);
   1334 	}
   1335 
   1336 	/* In all cases, lock must be held on return from PRU_ATTACH. */
   1337 	KASSERT(solocked(so));
   1338 }
   1339 
   1340 /*
   1341  * Set lock on sockbuf sb; sleep if lock is already held.
   1342  * Unless SB_NOINTR is set on sockbuf, sleep is interruptible.
   1343  * Returns error without lock if sleep is interrupted.
   1344  */
   1345 int
   1346 sblock(struct sockbuf *sb, int wf)
   1347 {
   1348 	struct socket *so;
   1349 	kmutex_t *lock;
   1350 	int error;
   1351 
   1352 	KASSERT(solocked(sb->sb_so));
   1353 
   1354 	for (;;) {
   1355 		if (__predict_true((sb->sb_flags & SB_LOCK) == 0)) {
   1356 			sb->sb_flags |= SB_LOCK;
   1357 			return 0;
   1358 		}
   1359 		if (wf != M_WAITOK)
   1360 			return EWOULDBLOCK;
   1361 		so = sb->sb_so;
   1362 		lock = so->so_lock;
   1363 		if ((sb->sb_flags & SB_NOINTR) != 0) {
   1364 			cv_wait(&so->so_cv, lock);
   1365 			error = 0;
   1366 		} else
   1367 			error = cv_wait_sig(&so->so_cv, lock);
   1368 		if (__predict_false(lock != so->so_lock))
   1369 			solockretry(so, lock);
   1370 		if (error != 0)
   1371 			return error;
   1372 	}
   1373 }
   1374 
   1375 void
   1376 sbunlock(struct sockbuf *sb)
   1377 {
   1378 	struct socket *so;
   1379 
   1380 	so = sb->sb_so;
   1381 
   1382 	KASSERT(solocked(so));
   1383 	KASSERT((sb->sb_flags & SB_LOCK) != 0);
   1384 
   1385 	sb->sb_flags &= ~SB_LOCK;
   1386 	cv_broadcast(&so->so_cv);
   1387 }
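/*
 * Illustrative sketch (editorial): sblock()/sbunlock() bracket a whole
 * receive operation so records are consumed atomically, even though
 * the socket lock itself may be dropped and retaken while sleeping.
 *
 *	error = sblock(&so->so_rcv, M_WAITOK);
 *	if (error != 0)
 *		return error;
 *	... consume data, possibly sleeping in sbwait() ...
 *	sbunlock(&so->so_rcv);
 */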
   1388 
   1389 int
   1390 sowait(struct socket *so, int timo)
   1391 {
   1392 	kmutex_t *lock;
   1393 	int error;
   1394 
   1395 	KASSERT(solocked(so));
   1396 
   1397 	lock = so->so_lock;
   1398 	error = cv_timedwait_sig(&so->so_cv, lock, timo);
   1399 	if (__predict_false(lock != so->so_lock))
   1400 		solockretry(so, lock);
   1401 	return error;
   1402 }
   1403