uipc_socket2.c revision 1.71 1 /* $NetBSD: uipc_socket2.c,v 1.71 2006/05/14 21:15:12 elad Exp $ */
2
3 /*
4 * Copyright (c) 1982, 1986, 1988, 1990, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)uipc_socket2.c 8.2 (Berkeley) 2/14/95
32 */
33
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: uipc_socket2.c,v 1.71 2006/05/14 21:15:12 elad Exp $");
36
37 #include "opt_mbuftrace.h"
38 #include "opt_sb_max.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/proc.h>
43 #include <sys/file.h>
44 #include <sys/buf.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/protosw.h>
48 #include <sys/poll.h>
49 #include <sys/socket.h>
50 #include <sys/socketvar.h>
51 #include <sys/signalvar.h>
52 #include <sys/kauth.h>
53
54 /*
55 * Primitive routines for operating on sockets and socket buffers
56 */
57
58 /* strings for sleep message: */
59 const char netcon[] = "netcon";
60 const char netcls[] = "netcls";
61 const char netio[] = "netio";
62 const char netlck[] = "netlck";
63
64 u_long sb_max = SB_MAX; /* maximum socket buffer size */
65 static u_long sb_max_adj; /* adjusted sb_max */
66
67 /*
68 * Procedures to manipulate state flags of socket
69 * and do appropriate wakeups. Normal sequence from the
70 * active (originating) side is that soisconnecting() is
71 * called during processing of connect() call,
72 * resulting in an eventual call to soisconnected() if/when the
73 * connection is established. When the connection is torn down
74 * soisdisconnecting() is called during processing of disconnect() call,
75 * and soisdisconnected() is called when the connection to the peer
76 * is totally severed. The semantics of these routines are such that
77 * connectionless protocols can call soisconnected() and soisdisconnected()
78 * only, bypassing the in-progress calls when setting up a ``connection''
79 * takes no time.
80 *
81 * From the passive side, a socket is created with
82 * two queues of sockets: so_q0 for connections in progress
83 * and so_q for connections already made and awaiting user acceptance.
84 * As a protocol is preparing incoming connections, it creates a socket
85 * structure queued on so_q0 by calling sonewconn(). When the connection
86 * is established, soisconnected() is called, and transfers the
87 * socket structure to so_q, making it available to accept().
88 *
89 * If a socket is closed with sockets on either
90 * so_q0 or so_q, these sockets are dropped.
91 *
92 * If higher level protocols are implemented in
93 * the kernel, the wakeups done here will sometimes
94 * cause software-interrupt process scheduling.
95 */
96
97 void
98 soisconnecting(struct socket *so)
99 {
100
101 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
102 so->so_state |= SS_ISCONNECTING;
103 }
104
/*
 * Mark the socket connected.  For an incoming connection on a listening
 * socket, also move it from the in-progress queue to the accept queue.
 */
void
soisconnected(struct socket *so)
{
	struct socket *head;

	head = so->so_head;
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && soqremque(so, 0)) {
		/*
		 * Passive side: move from so_q0 to so_q and wake anyone
		 * blocked in accept()/poll on the listening socket.
		 */
		soqinsque(head, so, 1);
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
	} else {
		/* Active connect() completing: wake the connecting thread. */
		wakeup((caddr_t)&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}
123
124 void
125 soisdisconnecting(struct socket *so)
126 {
127
128 so->so_state &= ~SS_ISCONNECTING;
129 so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
130 wakeup((caddr_t)&so->so_timeo);
131 sowwakeup(so);
132 sorwakeup(so);
133 }
134
135 void
136 soisdisconnected(struct socket *so)
137 {
138
139 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
140 so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
141 wakeup((caddr_t)&so->so_timeo);
142 sowwakeup(so);
143 sorwakeup(so);
144 }
145
146 /*
147 * When an attempt at a new connection is noted on a socket
148 * which accepts connections, sonewconn is called. If the
149 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
151 * data structure of the original socket, and return this.
152 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
153 *
154 * Currently, sonewconn() is defined as sonewconn1() in socketvar.h
155 * to catch calls that are missing the (new) second parameter.
156 */
/*
 * Create a new socket for an incoming connection on listening socket
 * `head', inheriting its type, options and buffer limits.  Returns the
 * new socket queued on so_q0 (or so_q when connstatus is set), or NULL
 * if the backlog is full, allocation fails, or the protocol refuses
 * PRU_ATTACH.
 */
struct socket *
sonewconn1(struct socket *head, int connstatus)
{
	struct socket *so;
	int soqueue;

	/* connstatus != 0 means already connected: queue on so_q. */
	soqueue = connstatus ? 1 : 0;
	/* Refuse if the listen backlog (with 50% slop) is full. */
	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	so = pool_get(&socket_pool, PR_NOWAIT);
	if (so == NULL)
		return (NULL);
	memset((caddr_t)so, 0, sizeof(*so));
	/* Inherit state from the listening socket. */
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid = head->so_pgid;
	so->so_send = head->so_send;
	so->so_receive = head->so_receive;
	so->so_uidinfo = head->so_uidinfo;
#ifdef MBUFTRACE
	so->so_mowner = head->so_mowner;
	so->so_rcv.sb_mowner = head->so_rcv.sb_mowner;
	so->so_snd.sb_mowner = head->so_snd.sb_mowner;
#endif
	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
	soqinsque(head, so, soqueue);
	/* Let the protocol attach; undo the queueing on failure. */
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0,
	    (struct lwp *)0)) {
		(void) soqremque(so, soqueue);
		pool_put(&socket_pool, so);
		return (NULL);
	}
	if (connstatus) {
		/* Connection is complete: wake accept()ers on `head'. */
		sorwakeup(head);
		wakeup((caddr_t)&head->so_timeo);
		so->so_state |= connstatus;
	}
	return (so);
}
201
202 void
203 soqinsque(struct socket *head, struct socket *so, int q)
204 {
205
206 #ifdef DIAGNOSTIC
207 if (so->so_onq != NULL)
208 panic("soqinsque");
209 #endif
210
211 so->so_head = head;
212 if (q == 0) {
213 head->so_q0len++;
214 so->so_onq = &head->so_q0;
215 } else {
216 head->so_qlen++;
217 so->so_onq = &head->so_q;
218 }
219 TAILQ_INSERT_TAIL(so->so_onq, so, so_qe);
220 }
221
222 int
223 soqremque(struct socket *so, int q)
224 {
225 struct socket *head;
226
227 head = so->so_head;
228 if (q == 0) {
229 if (so->so_onq != &head->so_q0)
230 return (0);
231 head->so_q0len--;
232 } else {
233 if (so->so_onq != &head->so_q)
234 return (0);
235 head->so_qlen--;
236 }
237 TAILQ_REMOVE(so->so_onq, so, so_qe);
238 so->so_onq = NULL;
239 so->so_head = NULL;
240 return (1);
241 }
242
243 /*
244 * Socantsendmore indicates that no more data will be sent on the
245 * socket; it would normally be applied to a socket when the user
246 * informs the system that no more data is to be sent, by the protocol
247 * code (in case PRU_SHUTDOWN). Socantrcvmore indicates that no more data
248 * will be received, and will normally be applied to the socket by a
249 * protocol when it detects that the peer will send no more data.
250 * Data queued for reading in the socket may yet be read.
251 */
252
/*
 * Mark the send side shut down, then wake writers/pollers so they
 * observe the new state.
 */
void
socantsendmore(struct socket *so)
{

	so->so_state |= SS_CANTSENDMORE;
	sowwakeup(so);
}
260
/*
 * Mark the receive side shut down (queued data may still be read),
 * then wake readers/pollers so they observe the new state.
 */
void
socantrcvmore(struct socket *so)
{

	so->so_state |= SS_CANTRCVMORE;
	sorwakeup(so);
}
268
269 /*
270 * Wait for data to arrive at/drain from a socket buffer.
271 */
int
sbwait(struct sockbuf *sb)
{

	/* Ask sowakeup() to wake us; sleep on the buffer's byte count. */
	sb->sb_flags |= SB_WAIT;
	return (tsleep((caddr_t)&sb->sb_cc,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, netio,
	    sb->sb_timeo));
}
281
/*
 * Acquire the lock on a sockbuf; if it is already held, sleep
 * (interruptibly unless SB_NOINTR is set) until it is released.
 * Return any error returned from sleep (EINTR).
 */
int
sb_lock(struct sockbuf *sb)
{
	int error;

	/* Sleep until the current holder drops SB_LOCK. */
	while (sb->sb_flags & SB_LOCK) {
		sb->sb_flags |= SB_WANT;	/* have the unlocker wake us */
		error = tsleep((caddr_t)&sb->sb_flags,
		    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH,
		    netlck, 0);
		if (error)
			return (error);
	}
	sb->sb_flags |= SB_LOCK;
	return (0);
}
302
303 /*
304 * Wakeup processes waiting on a socket buffer.
305 * Do asynchronous notification via SIGIO
306 * if the socket buffer has the SB_ASYNC flag set.
307 */
void
sowakeup(struct socket *so, struct sockbuf *sb, int code)
{
	/* Notify select()/poll() waiters registered on this buffer. */
	selnotify(&sb->sb_sel, 0);
	sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		/* Wake sleepers in sbwait(). */
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
	if (sb->sb_flags & SB_ASYNC) {
		int band;
		if (code == POLL_IN)
			band = POLLIN|POLLRDNORM;
		else
			band = POLLOUT|POLLWRNORM;
		/* Deliver SIGIO to the owning process/process group. */
		fownsignal(so->so_pgid, SIGIO, code, band, so);
	}
	/* Protocol-internal upcall (e.g. NFS), if one is registered. */
	if (sb->sb_flags & SB_UPCALL)
		(*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT);
}
328
329 /*
330 * Socket buffer (struct sockbuf) utility routines.
331 *
332 * Each socket contains two socket buffers: one for sending data and
333 * one for receiving data. Each buffer contains a queue of mbufs,
334 * information about the number of mbufs and amount of data in the
335 * queue, and other fields allowing poll() statements and notification
336 * on data availability to be implemented.
337 *
338 * Data stored in a socket buffer is maintained as a list of records.
339 * Each record is a list of mbufs chained together with the m_next
340 * field. Records are chained together with the m_nextpkt field. The upper
341 * level routine soreceive() expects the following conventions to be
342 * observed when placing information in the receive buffer:
343 *
344 * 1. If the protocol requires each message be preceded by the sender's
345 * name, then a record containing that name must be present before
346 * any associated data (mbuf's must be of type MT_SONAME).
347 * 2. If the protocol supports the exchange of ``access rights'' (really
348 * just additional data associated with the message), and there are
349 * ``rights'' to be received, then a record containing this data
350 * should be present (mbuf's must be of type MT_CONTROL).
351 * 3. If a name or rights record exists, then it must be followed by
352 * a data record, perhaps of zero length.
353 *
354 * Before using a new socket structure it is first necessary to reserve
355 * buffer space to the socket, by calling sbreserve(). This should commit
356 * some of the available buffer space in the system buffer pool for the
357 * socket (currently, it does nothing but enforce limits). The space
358 * should be released by calling sbrelease() when the socket is destroyed.
359 */
360
361 int
362 sb_max_set(u_long new_sbmax)
363 {
364 int s;
365
366 if (new_sbmax < (16 * 1024))
367 return (EINVAL);
368
369 s = splsoftnet();
370 sb_max = new_sbmax;
371 sb_max_adj = (u_quad_t)new_sbmax * MCLBYTES / (MSIZE + MCLBYTES);
372 splx(s);
373
374 return (0);
375 }
376
377 int
378 soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
379 {
380
381 if (sbreserve(&so->so_snd, sndcc, so) == 0)
382 goto bad;
383 if (sbreserve(&so->so_rcv, rcvcc, so) == 0)
384 goto bad2;
385 if (so->so_rcv.sb_lowat == 0)
386 so->so_rcv.sb_lowat = 1;
387 if (so->so_snd.sb_lowat == 0)
388 so->so_snd.sb_lowat = MCLBYTES;
389 if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
390 so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
391 return (0);
392 bad2:
393 sbrelease(&so->so_snd, so);
394 bad:
395 return (ENOBUFS);
396 }
397
398 /*
399 * Allot mbufs to a sockbuf.
400 * Attempt to scale mbmax so that mbcnt doesn't become limiting
401 * if buffering efficiency is near the normal case.
402 */
int
sbreserve(struct sockbuf *sb, u_long cc, struct socket *so)
{
	struct proc *p = curproc;	/* XXX */
	rlim_t maxcc;
	struct uidinfo *uidinfo;

	KDASSERT(sb_max_adj != 0);
	if (cc == 0 || cc > sb_max_adj)
		return (0);
	if (so) {
		/*
		 * Enforce the per-user RLIMIT_SBSIZE only when the current
		 * process's euid matches the socket owner; otherwise the
		 * request is not limited.
		 */
		if (p && kauth_cred_geteuid(p->p_cred) == so->so_uidinfo->ui_uid)
			maxcc = p->p_rlimit[RLIMIT_SBSIZE].rlim_cur;
		else
			maxcc = RLIM_INFINITY;
		uidinfo = so->so_uidinfo;
	} else {
		uidinfo = uid_find(0);	/* XXX: nothing better */
		maxcc = RLIM_INFINITY;
	}
	if (!chgsbsize(uidinfo, &sb->sb_hiwat, cc, maxcc))
		return 0;
	/* Scale mbmax so mbuf overhead doesn't become the limit. */
	sb->sb_mbmax = min(cc * 2, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}
430
431 /*
432 * Free mbufs held by a socket, and reserved mbuf space.
433 */
void
sbrelease(struct sockbuf *sb, struct socket *so)
{

	sbflush(sb);
	/* Return the reserved space to the owner's per-uid accounting. */
	(void)chgsbsize(so->so_uidinfo, &sb->sb_hiwat, 0,
	    RLIM_INFINITY);
	sb->sb_mbmax = 0;
}
443
444 /*
445 * Routines to add and remove
446 * data from an mbuf queue.
447 *
448 * The routines sbappend() or sbappendrecord() are normally called to
449 * append new mbufs to a socket buffer, after checking that adequate
450 * space is available, comparing the function sbspace() with the amount
451 * of data to be added. sbappendrecord() differs from sbappend() in
452 * that data supplied is treated as the beginning of a new record.
453 * To place a sender's address, optional access rights, and data in a
454 * socket receive buffer, sbappendaddr() should be used. To place
455 * access rights and data in a socket receive buffer, sbappendrights()
456 * should be used. In either case, the new data begins a new record.
457 * Note that unlike sbappend() and sbappendrecord(), these routines check
458 * for the caller that there will be enough space to store the data.
459 * Each fails if there is not enough space, or if it cannot find mbufs
460 * to store additional information in.
461 *
462 * Reliable protocols may use the socket send buffer to hold data
463 * awaiting acknowledgement. Data is normally copied from a socket
464 * send buffer in a protocol with m_copy for output to a peer,
465 * and then removing the data from the socket buffer with sbdrop()
466 * or sbdroprecord() when the data is acknowledged by the peer.
467 */
468
469 #ifdef SOCKBUF_DEBUG
/*
 * Debug check: verify that sb_lastrecord really points at the last
 * record on the sb_mb chain; dump the chain and panic if not.
 */
void
sblastrecordchk(struct sockbuf *sb, const char *where)
{
	struct mbuf *m = sb->sb_mb;

	/* Walk the record (m_nextpkt) chain to the true last record. */
	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("sblastrecordchk: sb_mb %p sb_lastrecord %p last %p\n",
		    sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("sblastrecordchk from %s", where);
	}
}
487
/*
 * Debug check: verify that sb_mbtail really points at the last mbuf of
 * the last record; dump every record's mbuf chain and panic if not.
 */
void
sblastmbufchk(struct sockbuf *sb, const char *where)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	/* Find the last record ... */
	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	/* ... then the last mbuf within it. */
	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("sblastmbufchk: sb_mb %p sb_mbtail %p last %p\n",
		    sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("sblastmbufchk from %s", where);
	}
}
513 #endif /* SOCKBUF_DEBUG */
514
515 /*
516 * Link a chain of records onto a socket buffer
517 */
#define	SBLINKRECORDCHAIN(sb, m0, mlast)				\
do {									\
	/* Attach after the current last record, or start the list. */	\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (mlast);					\
} while (/*CONSTCOND*/0)

/* Link a single record: m0 is both the first and last of its chain. */
#define	SBLINKRECORD(sb, m0)						\
	SBLINKRECORDCHAIN(sb, m0, m0)
530
531 /*
532 * Append mbuf chain m to the last record in the
533 * socket buffer sb. The additional space associated
534 * the mbuf chain is recorded in sb. Empty mbufs are
535 * discarded and mbufs are compacted where possible.
536 */
void
sbappend(struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf *n;

	if (m == 0)
		return;

#ifdef MBUFTRACE
	m_claimm(m, sb->sb_mowner);
#endif

	SBLASTRECORDCHK(sb, "sbappend 1");

	if ((n = sb->sb_lastrecord) != NULL) {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		do {
			if (n->m_flags & M_EOR) {
				/* Last record complete: start a new one. */
				sbappendrecord(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * If this is the first record in the socket buffer, it's
		 * also the last record.
		 */
		sb->sb_lastrecord = m;
	}
	/* n is the last mbuf of the last record here (or NULL if empty). */
	sbcompress(sb, m, n);
	SBLASTRECORDCHK(sb, "sbappend 2");
}
573
574 /*
575 * This version of sbappend() should only be used when the caller
576 * absolutely knows that there will never be more than one record
577 * in the socket buffer, that is, a stream protocol (such as TCP).
578 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m)
{

	/* Stream sockets keep exactly one record; chain must be headless. */
	KDASSERT(m->m_nextpkt == NULL);
	KASSERT(sb->sb_mb == sb->sb_lastrecord);

	SBLASTMBUFCHK(sb, __func__);

#ifdef MBUFTRACE
	m_claimm(m, sb->sb_mowner);
#endif

	/* Append directly after the current tail mbuf. */
	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb, __func__);
}
597
598 #ifdef SOCKBUF_DEBUG
/*
 * Debug check: recompute byte and mbuf counts and compare them with
 * the sockbuf's accounting; panics on mismatch.  NOTE: only walks the
 * first record and panics outright if more than one record exists.
 */
void
sbcheck(struct sockbuf *sb)
{
	struct mbuf *m;
	u_long len, mbcnt;

	len = 0;
	mbcnt = 0;
	for (m = sb->sb_mb; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		if (m->m_nextpkt)
			panic("sbcheck nextpkt");
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %lu != %lu || mbcnt %lu != %lu\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
621 #endif
622
623 /*
624 * As above, except the mbuf chain
625 * begins a new record.
626 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	if (m0 == 0)
		return;

#ifdef MBUFTRACE
	m_claimm(m0, sb->sb_mowner);
#endif
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 1");
	SBLINKRECORD(sb, m0);
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		/* Move the end-of-record mark to the rest of the chain. */
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	/* Fold the remainder of the chain in after m0. */
	sbcompress(sb, m, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 2");
}
654
655 /*
656 * As above except that OOB data
657 * is inserted at the beginning of the sockbuf,
658 * but after any other OOB data.
659 */
void
sbinsertoob(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m, **mp;

	if (m0 == 0)
		return;

	SBLASTRECORDCHK(sb, "sbinsertoob 1");

	/*
	 * Skip past any leading OOB-data records (and the control mbufs
	 * heading each record) to find the insertion point.
	 */
	for (mp = &sb->sb_mb; (m = *mp) != NULL; mp = &((*mp)->m_nextpkt)) {
	    again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;		/* WANT next train */

		case MT_CONTROL:
			if ((m = m->m_next) != NULL)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	if (*mp == NULL) {
		/* m0 is actually the new tail */
		sb->sb_lastrecord = m0;
	}
	*mp = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		/* Move the end-of-record mark to the rest of the chain. */
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
	SBLASTRECORDCHK(sb, "sbinsertoob 2");
}
703
704 /*
705 * Append address and data, and optionally, control (ancillary) data
706 * to the receive queue of a socket. If present,
707 * m0 must include a packet header with total length.
708 * Returns 0 if no space in sockbuf or insufficient mbufs.
709 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, struct mbuf *m0,
	struct mbuf *control)
{
	struct mbuf *m, *n, *nlast;
	int space, len;

	/* Total space needed: address + data + control. */
	space = asa->sa_len;

	if (m0 != NULL) {
		if ((m0->m_flags & M_PKTHDR) == 0)
			panic("sbappendaddr");
		space += m0->m_pkthdr.len;
#ifdef MBUFTRACE
		m_claimm(m0, sb->sb_mowner);
#endif
	}
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		MCLAIM(n, sb->sb_mowner);
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	/* Copy the address into an MT_SONAME mbuf heading the record. */
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	MCLAIM(m, sb->sb_mowner);
	/*
	 * XXX avoid 'comparison always true' warning which isn't easily
	 * avoided.
	 */
	len = asa->sa_len;
	if (len > MLEN) {
		/* Address too large for a plain mbuf: use external storage. */
		MEXTMALLOC(m, asa->sa_len, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (0);
		}
	}
	m->m_len = asa->sa_len;
	memcpy(mtod(m, caddr_t), asa, asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;

	SBLASTRECORDCHK(sb, "sbappendaddr 1");

	/* Charge every mbuf of the assembled record to the sockbuf. */
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb, "sbappendaddr");

	SBLASTRECORDCHK(sb, "sbappendaddr 2");

	return (1);
}
774
775 /*
776 * Helper for sbappendchainaddr: prepend a struct sockaddr* to
777 * an mbuf chain.
778 */
static inline struct mbuf *
m_prepend_sockaddr(struct sockbuf *sb, struct mbuf *m0,
	const struct sockaddr *asa)
{
	struct mbuf *m;
	const int salen = asa->sa_len;

	/* only the first in each chain need be a pkthdr */
	MGETHDR(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	MCLAIM(m, sb->sb_mowner);
#ifdef notyet
	if (salen > MHLEN) {
		MEXTMALLOC(m, salen, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (0);
		}
	}
#else
	/* Addresses larger than MHLEN are not yet supported. */
	KASSERT(salen <= MHLEN);
#endif
	m->m_len = salen;
	memcpy(mtod(m, caddr_t), asa, salen);
	m->m_next = m0;
	/* New header length covers the address plus the original packet. */
	m->m_pkthdr.len = salen + m0->m_pkthdr.len;

	return m;
}
809
810 int
811 sbappendaddrchain(struct sockbuf *sb, const struct sockaddr *asa,
812 struct mbuf *m0, int sbprio)
813 {
814 int space;
815 struct mbuf *m, *n, *n0, *nlast;
816 int error;
817
818 /*
819 * XXX sbprio reserved for encoding priority of this* request:
820 * SB_PRIO_NONE --> honour normal sb limits
821 * SB_PRIO_ONESHOT_OVERFLOW --> if socket has any space,
822 * take whole chain. Intended for large requests
823 * that should be delivered atomically (all, or none).
824 * SB_PRIO_OVERDRAFT -- allow a small (2*MLEN) overflow
825 * over normal socket limits, for messages indicating
826 * buffer overflow in earlier normal/lower-priority messages
827 * SB_PRIO_BESTEFFORT --> ignore limits entirely.
828 * Intended for kernel-generated messages only.
829 * Up to generator to avoid total mbuf resource exhaustion.
830 */
831 (void)sbprio;
832
833 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
834 panic("sbappendaddrchain");
835
836 space = sbspace(sb);
837
838 #ifdef notyet
839 /*
840 * Enforce SB_PRIO_* limits as described above.
841 */
842 #endif
843
844 n0 = NULL;
845 nlast = NULL;
846 for (m = m0; m; m = m->m_nextpkt) {
847 struct mbuf *np;
848
849 #ifdef MBUFTRACE
850 m_claimm(m, sb->sb_mowner);
851 #endif
852
853 /* Prepend sockaddr to this record (m) of input chain m0 */
854 n = m_prepend_sockaddr(sb, m, asa);
855 if (n == NULL) {
856 error = ENOBUFS;
857 goto bad;
858 }
859
860 /* Append record (asa+m) to end of new chain n0 */
861 if (n0 == NULL) {
862 n0 = n;
863 } else {
864 nlast->m_nextpkt = n;
865 }
866 /* Keep track of last record on new chain */
867 nlast = n;
868
869 for (np = n; np; np = np->m_next)
870 sballoc(sb, np);
871 }
872
873 SBLASTRECORDCHK(sb, "sbappendaddrchain 1");
874
875 /* Drop the entire chain of (asa+m) records onto the socket */
876 SBLINKRECORDCHAIN(sb, n0, nlast);
877
878 SBLASTRECORDCHK(sb, "sbappendaddrchain 2");
879
880 for (m = nlast; m->m_next; m = m->m_next)
881 ;
882 sb->sb_mbtail = m;
883 SBLASTMBUFCHK(sb, "sbappendaddrchain");
884
885 return (1);
886
887 bad:
888 /*
889 * On error, free the prepended addreseses. For consistency
890 * with sbappendaddr(), leave it to our caller to free
891 * the input record chain passed to us as m0.
892 */
893 while ((n = n0) != NULL) {
894 struct mbuf *np;
895
896 /* Undo the sballoc() of this record */
897 for (np = n; np; np = np->m_next)
898 sbfree(sb, np);
899
900 n0 = n->m_nextpkt; /* iterate at next prepended address */
901 MFREE(n, np); /* free prepended address (not data) */
902 }
903 return 0;
904 }
905
906
int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *m, *mlast, *n;
	int space;

	space = 0;
	if (control == 0)
		panic("sbappendcontrol");
	/* Sum control bytes; leave m at the last control mbuf. */
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		MCLAIM(m, sb->sb_mowner);
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next) {
		MCLAIM(m, sb->sb_mowner);
		space += m->m_len;
	}
	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */

	SBLASTRECORDCHK(sb, "sbappendcontrol 1");

	/* Charge every mbuf of the combined record to the sockbuf. */
	for (m = control; m->m_next != NULL; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb, "sbappendcontrol");

	SBLASTRECORDCHK(sb, "sbappendcontrol 2");

	return (1);
}
946
947 /*
948 * Compress mbuf chain m into the socket
949 * buffer sb following mbuf n. If n
950 * is null, the buffer is presumed empty.
951 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor;
	struct mbuf *o;

	eor = 0;
	while (m) {
		eor |= m->m_flags & M_EOR;
		/*
		 * Discard an empty mbuf, unless it carries an EOR that we
		 * could not hand on to a following mbuf of the same type.
		 */
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		/* Coalesce small mbufs into the tail mbuf when it has room. */
		if (n && (n->m_flags & M_EOR) == 0 &&
		    /* M_TRAILINGSPACE() checks buffer writeability */
		    m->m_len <= MCLBYTES / 4 && /* XXX Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			memcpy(mtod(n, caddr_t) + n->m_len, mtod(m, caddr_t),
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		/* Otherwise link this mbuf in as the new tail. */
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		if (n)
			n->m_flags |= eor;	/* restore EOR on new tail */
		else
			printf("semi-panic: sbcompress\n");
	}
	SBLASTMBUFCHK(sb, __func__);
}
1001
1002 /*
1003 * Free all mbufs in a sockbuf.
1004 * Check that all resources are reclaimed.
1005 */
void
sbflush(struct sockbuf *sb)
{

	/* Caller must not hold the sockbuf lock. */
	KASSERT((sb->sb_flags & SB_LOCK) == 0);

	while (sb->sb_mbcnt)
		sbdrop(sb, (int)sb->sb_cc);

	/* Everything must have been reclaimed. */
	KASSERT(sb->sb_cc == 0);
	KASSERT(sb->sb_mb == NULL);
	KASSERT(sb->sb_mbtail == NULL);
	KASSERT(sb->sb_lastrecord == NULL);
}
1020
1021 /*
1022 * Drop data from (the front of) a sockbuf.
1023 */
void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *m, *mn, *next;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	while (len > 0) {
		if (m == 0) {
			/* Ran off the end of a record; move to the next. */
			if (next == 0)
				panic("sbdrop");
			m = next;
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			/* Partial mbuf: trim from the front and stop. */
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* Free any now-empty mbufs left at the front. */
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part
	 * makes sure sb_lastrecord is up-to-date if we dropped
	 * part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL)
		sb->sb_lastrecord = m;
}
1071
1072 /*
1073 * Drop a record off the front of a sockbuf
1074 * and move the next record to the front.
1075 */
1076 void
1077 sbdroprecord(struct sockbuf *sb)
1078 {
1079 struct mbuf *m, *mn;
1080
1081 m = sb->sb_mb;
1082 if (m) {
1083 sb->sb_mb = m->m_nextpkt;
1084 do {
1085 sbfree(sb, m);
1086 MFREE(m, mn);
1087 } while ((m = mn) != NULL);
1088 }
1089 SB_EMPTY_FIXUP(sb);
1090 }
1091
1092 /*
1093 * Create a "control" mbuf containing the specified data
1094 * with the specified type for presentation on a socket buffer.
1095 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	/* A control message must fit in a single mbuf cluster. */
	if (CMSG_SPACE(size) > MCLBYTES) {
		printf("sbcreatecontrol: message too large %d\n", size);
		return NULL;
	}

	if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL)
		return ((struct mbuf *) NULL);
	if (CMSG_SPACE(size) > MLEN) {
		/* Too big for a plain mbuf: attach a cluster. */
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return NULL;
		}
	}
	/* Fill in the cmsghdr and copy the payload after it. */
	cp = mtod(m, struct cmsghdr *);
	memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
1124