/*	$NetBSD: sctp_indata.c,v 1.15.2.1 2025/08/02 05:57:50 perseant Exp $ */
/*	$KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	*/

/*
 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sctp_indata.c,v 1.15.2.1 2025/08/02 05:57:50 perseant Exp $");

#ifdef _KERNEL_OPT
#include "opt_ipsec.h"
#include "opt_inet.h"
#include "opt_sctp.h"
#endif /* _KERNEL_OPT */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
#include <sys/limits.h>
#else
#include <machine/limits.h>
#endif
#include <machine/cpu.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif /* INET6 */
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_hashdriver.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/key.h>
#endif /* IPSEC */

#ifdef SCTP_DEBUG
extern u_int32_t sctp_debug_on;
#endif

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

extern int sctp_strict_sacks;

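/*
 * Compute the receive window we advertise to the peer: the space left in
 * the socket receive buffer, minus everything we are still holding on the
 * delivery, reassembly and per-stream queues. The window is clamped to 1
 * (never 0) when control-mbuf overhead eats all the space, or when the
 * silly-window-syndrome threshold kicks in, so the peer stays throttled
 * but can still probe us.
 */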
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	u_int32_t calc, calc_w_oh;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
		printf("cc:%lu hiwat:%lu lowat:%lu mbcnt:%lu mbmax:%lu\n",
		       (u_long)stcb->sctp_socket->so_rcv.sb_cc,
		       (u_long)stcb->sctp_socket->so_rcv.sb_hiwat,
		       (u_long)stcb->sctp_socket->so_rcv.sb_lowat,
		       (u_long)stcb->sctp_socket->so_rcv.sb_mbcnt,
		       (u_long)stcb->sctp_socket->so_rcv.sb_mbmax);
		printf("Setting rwnd to: sb:%ld - (del:%d + reasm:%d str:%d)\n",
		       sctp_sbspace(&stcb->sctp_socket->so_rcv),
		       asoc->size_on_delivery_queue,
		       asoc->size_on_reasm_queue,
		       asoc->size_on_all_streams);
	}
#endif
	if (stcb->sctp_socket->so_rcv.sb_cc == 0 &&
	    asoc->size_on_delivery_queue == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		asoc->my_rwnd = uimax(stcb->sctp_socket->so_rcv.sb_hiwat,
		    SCTP_MINIMAL_RWND);
		return;
	}
	/* get actual space */
	calc = (u_int32_t)sctp_sbspace(&stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT yet been put on the socket queue but
	 * that we are still holding for delivery.
	 */
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_delivery_queue);
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_all_streams);

	/* what is left once we account for the control-mbuf overhead? */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);

	asoc->my_rwnd = calc;
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised
		 * rwnd, we clamp the rwnd to 1. This lets us
		 * still accept inbound segments, but hopefully will
		 * shut the sender down when he finally gets the message.
		 */
		asoc->my_rwnd = 1;
	} else {
		/* SWS threshold */
		if (asoc->my_rwnd &&
		    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
			/* SWS engaged, tell peer none left */
			asoc->my_rwnd = 1;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
				printf(" - SWS zeros\n");
			}
		} else {
			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
				printf("\n");
			}
#endif
		}
	}
}

/*
 * Build a control (cmsg) mbuf carrying an sctp_sndrcvinfo from the given
 * per-chunk fields. Hmm, should we change things so that instead we store
 * the data side in a chunk?
 */
static struct mbuf *
sctp_build_ctl_nchunk(struct sctp_tcb *stcb, uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no, uint16_t stream_seq, uint8_t flags)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}

	MGETHDR(ret, M_DONTWAIT, MT_CONTROL);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outinfo->sinfo_stream = stream_no;
	outinfo->sinfo_ssn = stream_seq;
	if (flags & SCTP_DATA_UNORDERED) {
		outinfo->sinfo_flags = SCTP_UNORDERED;
	} else {
		outinfo->sinfo_flags = 0;
	}
	outinfo->sinfo_ppid = ppid;
	outinfo->sinfo_context = context;
	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
	outinfo->sinfo_tsn = tsn;
	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
	ret->m_len = cmh->cmsg_len;
	ret->m_pkthdr.len = ret->m_len;
	/*
	 * Track how much cmsg overhead has gone onto the socket buffer
	 * so that it is not counted in the rwnd calculation.
	 */
	stcb->asoc.my_rwnd_control_len +=
	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

	return (ret);
}

/*
 * Take a chk structure and build it into an mbuf. Should we change things
 * so that instead we store the data side in a chunk?
 */
static struct mbuf *
sctp_build_ctl(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	MGET(ret, M_DONTWAIT, MT_CONTROL);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outinfo->sinfo_stream = chk->rec.data.stream_number;
	outinfo->sinfo_ssn = chk->rec.data.stream_seq;
	if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
		outinfo->sinfo_flags = SCTP_UNORDERED;
	} else {
		outinfo->sinfo_flags = 0;
	}
	outinfo->sinfo_ppid = chk->rec.data.payloadtype;
	outinfo->sinfo_context = chk->rec.data.context;
	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
	outinfo->sinfo_tsn = chk->rec.data.TSN_seq;
	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
	ret->m_len = cmh->cmsg_len;
	stcb->asoc.my_rwnd_control_len +=
	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

	return (ret);
}

int
sctp_deliver_data(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_locks)
{
	struct mbuf *control, *m;
	struct sctp_tmit_chunk *nchk;
	int free_it;
	struct sockaddr_in6 sin6;
	const struct sockaddr *to;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("I am now in Deliver data! (%p)\n", chk);
	}
#endif
	/* get a write lock on the inp if not already */
	if (hold_locks == 0) {
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
	}
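	/*
	 * Note the unlock/relock dance above: the TCB lock is dropped
	 * before the INP write lock is taken and then reacquired,
	 * presumably to preserve the INP-before-TCB lock order and
	 * avoid a deadlock with paths that lock in that order.
	 */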
	free_it = 0;
	/* We always add it to the queue */
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket above is long gone */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("gone is gone!\n");
		}
#endif
		if (chk != NULL) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		TAILQ_FOREACH_SAFE(chk, &asoc->delivery_queue, sctp_next, nchk) {
			asoc->size_on_delivery_queue -= chk->send_size;
			asoc->cnt_on_delivery_queue--;
			/*
			 * Free the data; the socket is gone, so nobody
			 * will ever read it.
			 */
			sctp_m_freem(chk->data);
			chk->data = NULL;
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}
	if (chk != NULL) {
		TAILQ_INSERT_TAIL(&asoc->delivery_queue, chk, sctp_next);
		asoc->size_on_delivery_queue += chk->send_size;
		asoc->cnt_on_delivery_queue++;
	}
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * oh oh, fragmented delivery in progress,
		 * return out of here.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Fragmented delivery in progress?\n");
		}
#endif
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}
	/* Now grab the first one */
	chk = TAILQ_FIRST(&asoc->delivery_queue);
	if (chk == NULL) {
		/* Nothing in queue */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Nothing in queue?\n");
		}
#endif
		asoc->size_on_delivery_queue = 0;
		asoc->cnt_on_delivery_queue = 0;
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}

	if (stcb->sctp_socket->so_rcv.sb_cc >= stcb->sctp_socket->so_rcv.sb_hiwat) {
		/* Boy, there really is NO room */
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("Now to the delivery with chk(%p)!\n", chk);
	}
#endif
	/* XXX need to append PKTHDR to the socket buffer first */
	if ((chk->data->m_flags & M_PKTHDR) == 0) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			/* no room! */
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return (0);
		}
		m->m_pkthdr.len = chk->send_size;
		m->m_len = 0;
		m->m_next = chk->data;
		chk->data = m;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		if (chk->data->m_next == NULL) {
			/* hopefully we hit here most of the time */
			chk->data->m_flags |= M_EOR;
		} else {
			/* Add the flag to the LAST mbuf in the chain */
			m = chk->data;
			while (m->m_next != NULL) {
				m = m->m_next;
			}
			m->m_flags |= M_EOR;
		}
	}
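	/*
	 * M_EOR marks the end of the record, so a reader doing
	 * soreceive() sees the message boundary and is handed exactly
	 * one SCTP message at a time.
	 */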

	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		struct sockaddr_in6 lsa6;

		control = sctp_build_ctl(stcb, chk);
		to = rtcache_getdst(&chk->whoTo->ro);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
		    to->sa_family == AF_INET) {
			const struct sockaddr_in *sin;

			sin = (const struct sockaddr_in *)to;
			in6_sin_2_v4mapsin6(sin, &sin6);
			to = (struct sockaddr *)&sin6;
		}
		/* check and strip embedded scope junk */
		to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
		    &lsa6);
		if (((const struct sockaddr_in *)to)->sin_port == 0) {
			printf("Huh a, port is %d not net:%p %d?\n",
			       ((const struct sockaddr_in *)to)->sin_port,
			       chk->whoTo,
			       (int)(ntohs(stcb->rport)));
			/*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
			/* XXX */
		}
		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < (long)chk->send_size) {
			/* Gak, not enough room */
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
			goto skip;
		}
		if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
		    to, chk->data, control, stcb->asoc.my_vtag,
		    stcb->sctp_ep)) {
			/* Gak, not enough room */
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
		} else {
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
					stcb->asoc.my_rwnd_control_len +=
					    sizeof(struct mbuf);
				}
			} else {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
			free_it = 1;
		}
	} else {
		/* append to an already-started message. */
		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
		    (long)chk->send_size) {
			sbappend(&stcb->sctp_socket->so_rcv, chk->data);
			free_it = 1;
		}
	}
 skip:
	if (hold_locks == 0) {
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
	}
	/* free up the one we inserted */
	if (free_it) {
		/* Pull it off the queue */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Free_it true, doing tickle wakeup\n");
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
		asoc->size_on_delivery_queue -= chk->send_size;
		asoc->cnt_on_delivery_queue--;
		/* Lose the data pointer, since it's in the socket buffer */
		chk->data = NULL;
		/* Now free the address and data */
		sctp_free_remote_addr(chk->whoTo);
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
	}
	return (free_it);
}

/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either:
 * 1) run out of space.
 * 2) run out of sequential TSN's
 * 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
{
	const struct sockaddr *to;
	struct sockaddr_in6 sin6;
	struct sctp_tmit_chunk *chk, *at;
	struct mbuf *control, *m;
	u_int16_t nxt_todel;
	u_int16_t stream_no;
	int cntDel;

	cntDel = stream_no = 0;
	if (hold_locks == 0) {
		/*
		 * you always have the TCB lock, we need
		 * to have the inp write lock as well.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
	}
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket above is long gone */
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, at) {
			asoc->size_on_reasm_queue -= chk->send_size;
			asoc->cnt_on_reasm_queue--;
			/*
			 * Free the data; the socket is gone, so nobody
			 * will ever read it.
			 */
			sctp_m_freem(chk->data);
			chk->data = NULL;
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return;
	}
	do {
		if (stcb->sctp_socket->so_rcv.sb_cc >=
		    stcb->sctp_socket->so_rcv.sb_hiwat) {
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream,
			 * and it is not unordered; stop here.
			 */
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}

		if ((chk->data->m_flags & M_PKTHDR) == 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/* no room! */
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
			m->m_pkthdr.len = chk->send_size;
			m->m_len = 0;
			m->m_next = chk->data;
			chk->data = m;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			if (chk->data->m_next == NULL) {
				/* hopefully we hit here most of the time */
				chk->data->m_flags |= M_EOR;
			} else {
				/* Add the flag to the LAST mbuf in the chain */
				m = chk->data;
				while (m->m_next != NULL) {
					m = m->m_next;
				}
				m->m_flags |= M_EOR;
			}
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			struct sockaddr_in6 lsa6;

			control = sctp_build_ctl(stcb, chk);
			to = rtcache_getdst(&chk->whoTo->ro);
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
			    to->sa_family == AF_INET) {
				const struct sockaddr_in *sin;

				sin = satocsin(to);
				in6_sin_2_v4mapsin6(sin, &sin6);
				to = (struct sockaddr *)&sin6;
			}
			/* check and strip embedded scope junk */
			to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
			    &lsa6);
			if (((const struct sockaddr_in *)to)->sin_port == 0) {
				printf("Huh b, port is %d not net:%p %d?\n",
				       ((const struct sockaddr_in *)to)->sin_port,
				       chk->whoTo,
				       (int)(ntohs(stcb->rport)));
				/*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
				/* XXX */
			}
			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) <
			    (long)chk->send_size) {
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
				}
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
			if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
			    to, chk->data, control, stcb->asoc.my_vtag,
			    stcb->sctp_ep)) {
				/* Gak, not enough room */
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
				}
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
					stcb->asoc.my_rwnd_control_len +=
					    sizeof(struct mbuf);
				}
			} else {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
			cntDel++;
		} else {
			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
			    (long)chk->send_size) {
				sbappend(&stcb->sctp_socket->so_rcv, chk->data);
				cntDel++;
			} else {
				/* out of space in the sb */
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
		}
		/* pull it off the queue; it was delivered */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
		}
		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		asoc->cnt_on_reasm_queue--;
		/* free up the chk */
		sctp_free_remote_addr(chk->whoTo);
		chk->data = NULL;
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			chk = TAILQ_FIRST(&strm->inqueue);
			if (chk && (nxt_todel == chk->rec.data.stream_seq)) {
				while (chk != NULL) {
					/* all delivered */
					if (nxt_todel ==
					    chk->rec.data.stream_seq) {
						at = TAILQ_NEXT(chk, sctp_next);
						TAILQ_REMOVE(&strm->inqueue,
						    chk, sctp_next);
						asoc->size_on_all_streams -=
						    chk->send_size;
						asoc->cnt_on_all_streams--;
						strm->last_sequence_delivered++;
						/*
						 * We ignore the return of
						 * deliver_data here since we
						 * always can hold the chunk on
						 * the d-queue. And we have a
						 * finite number that can be
						 * delivered from the strq.
						 */
						sctp_deliver_data(stcb, asoc, chk, 1);
						chk = at;
					} else {
						break;
					}
					nxt_todel =
					    strm->last_sequence_delivered + 1;
				}
			}
			if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
				/* Here if deliver_data fails, we must break */
				if (sctp_deliver_data(stcb, asoc, NULL, 1) == 0)
					break;
			}
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
	if (cntDel) {
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
	if (hold_locks == 0) {
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order.
 * One big question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *at;
	int queue_needed;
	u_int16_t nxt_todel;
	struct mbuf *oper;

	/*** FIX FIX FIX ???
	 * Need to add code to deal with 16 bit seq wrap
	 * without a TSN wrap for ordered delivery (maybe).
	 * FIX FIX FIX ???
	 */
	queue_needed = 1;
	asoc->size_on_all_streams += chk->send_size;
	asoc->cnt_on_all_streams++;
	strm = &asoc->strmin[chk->rec.data.stream_number];
	nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
	sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
		       (u_int)chk->rec.data.stream_seq,
		       (u_int)strm->last_sequence_delivered, (u_int)nxt_todel);
	}
#endif
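	/*
	 * compare_with_wrap(a, b, MAX_SEQ) is true when a is logically
	 * "newer" than b in wrapping 16-bit serial-number arithmetic, so
	 * the test below catches SSNs at or behind the last delivered
	 * point, i.e. ones a correct peer would never send again.
	 */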
	if (compare_with_wrap(strm->last_sequence_delivered,
	    chk->rec.data.stream_seq, MAX_SEQ) ||
	    (strm->last_sequence_delivered == chk->rec.data.stream_seq)) {
		/* The incoming sseq is behind where we last delivered? */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
			       chk->rec.data.stream_seq,
			       strm->last_sequence_delivered);
		}
#endif
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
		MGET(oper, M_DONTWAIT, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			u_int32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    sizeof(*ippp);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (u_int32_t *)(ph + 1);
			*ippp = htonl(0x00000001);
		}
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);

		*abort_flag = 1;
		return;
	}
	if (nxt_todel == chk->rec.data.stream_seq) {
		/* can be delivered right away */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("It's NEXT!\n");
		}
#endif
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
		queue_needed = 0;
		asoc->size_on_all_streams -= chk->send_size;
		asoc->cnt_on_all_streams--;
		strm->last_sequence_delivered++;
		sctp_deliver_data(stcb, asoc, chk, 0);
		chk = TAILQ_FIRST(&strm->inqueue);
		while (chk != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == chk->rec.data.stream_seq) {
				at = TAILQ_NEXT(chk, sctp_next);
				TAILQ_REMOVE(&strm->inqueue, chk, sctp_next);
				asoc->size_on_all_streams -= chk->send_size;
				asoc->cnt_on_all_streams--;
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
#ifdef SCTP_STR_LOGGING
				sctp_log_strm_del(chk, NULL,
				    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
				sctp_deliver_data(stcb, asoc, chk, 0);
				chk = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find
		 * the correct place to put it on the queue.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Queue Needed!\n");
		}
#endif
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
#ifdef SCTP_STR_LOGGING
			sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
			TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, sctp_next) {
				if (compare_with_wrap(at->rec.data.stream_seq,
				    chk->rec.data.stream_seq, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the new
					 * one, insert before this one
					 */
#ifdef SCTP_STR_LOGGING
					sctp_log_strm_del(chk, at,
					    SCTP_STR_LOG_FROM_INSERT_MD);
#endif
					TAILQ_INSERT_BEFORE(at, chk, sctp_next);
					break;
				} else if (at->rec.data.stream_seq ==
				    chk->rec.data.stream_seq) {
					/*
					 * Gak, the peer sent a duplicate
					 * stream seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort too?
					 * FIX ME MAYBE? Or it COULD be that
					 * the SSN's have wrapped. Maybe I
					 * should compare to TSN somehow...
					 * sigh for now just blow away the
					 * chunk!
					 */
					sctp_m_freem(chk->data);
					chk->data = NULL;
					asoc->size_on_all_streams -= chk->send_size;
					asoc->cnt_on_all_streams--;
					sctp_pegs[SCTP_DUP_SSN_RCVD]++;
					sctp_free_remote_addr(chk->whoTo);
					SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
					sctppcbinfo.ipi_count_chunk--;
					if ((int)sctppcbinfo.ipi_count_chunk < 0) {
						panic("Chunk count is negative");
					}
					sctppcbinfo.ipi_gencnt_chunk++;
					return;
				} else {
					if (TAILQ_NEXT(at, sctp_next) == NULL) {
						/*
						 * We are at the end, insert it
						 * after this one
						 */
#ifdef SCTP_STR_LOGGING
						sctp_log_strm_del(chk, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
#endif
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, chk, sctp_next);
						break;
					}
				}
			}
		}
	} else {
		/* We delivered some chunks, wake them up */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Doing WAKEUP!\n");
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
}

/*
 * Returns two things: the total size of the deliverable parts of the first
 * fragmented message on the reassembly queue (via *t_size), and 1 if all of
 * the message is ready, or 0 if the message is still incomplete.
 */
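/*
 * For example, fragments with consecutive TSNs 5 (FIRST), 6 and 7 (LAST)
 * at the head of the queue yield *t_size equal to the sum of the three
 * fragment sizes and a return of 1; if fragment 7 had not arrived yet,
 * *t_size would cover only 5 and 6 and the return would be 0.
 */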
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, int *t_size)
{
	struct sctp_tmit_chunk *chk;
	u_int32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can and
 * set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	u_int16_t nxt_todel;
	u_int32_t cum_ackp1, prev_tsn, post_tsn;
	int tsize;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;

	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when
		 * only one fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		asoc->cnt_on_reasm_queue++;
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress, we
				 * hit the next one and it does NOT have a
				 * FIRST fragment mark.
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
				}
#endif
				MGET(oper, M_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x10000001);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				}
#endif
				MGET(oper, M_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x10000002);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/* Here we are ok with a MIDDLE or LAST piece */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
						       chk->rec.data.stream_number,
						       asoc->str_of_pdapi);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000003);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
						       chk->rec.data.stream_seq,
						       asoc->ssn_of_pdapi);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000004);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place in TSN order */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			asoc->cnt_on_reasm_queue++;
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, the peer sent a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should compare
			 * to TSN somehow... sigh for now just blow away the
			 * chunk!
			 */
			sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this one
				 */
				asoc->size_on_reasm_queue += chk->send_size;
				asoc->cnt_on_reasm_queue++;
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk,
				    sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end
			 * is the NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - It can be a middle or last but not a first\n");
						printf("Gak, Evil plot, it's a FIRST!\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000005);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						       chk->rec.data.stream_number,
						       prev->rec.data.stream_number);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000006);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						       chk->rec.data.stream_seq,
						       prev->rec.data.stream_seq);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000007);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000008);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			}
		}
	}

	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of
			 * is my NEXT one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is FIRST, we must be LAST\n");
						printf("Gak, Evil plot, it's not a last!\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000009);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk CAN be MIDDLE or FIRST NOT LAST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is a MIDDLE/LAST\n");
						printf("Gak, Evil plot, new prev chunk is a LAST\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000a);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						       chk->rec.data.stream_number,
						       next->rec.data.stream_number);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000b);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						       chk->rec.data.stream_seq,
						       next->rec.data.stream_seq);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000c);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
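	/*
	 * The prev/next audits above enforce the TSN-adjacency invariants:
	 * a fragment that directly follows a FIRST or MIDDLE must be a
	 * MIDDLE or LAST of the same stream (and, if ordered, the same
	 * SSN), while a fragment that directly precedes a FIRST must be a
	 * LAST. Any violation is a protocol error and aborts the
	 * association.
	 */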
	/*
	 * Now that everything is in its place, we must check a number of
	 * things to see if we can send data to the ULP.
	 */
	/* we need to do some delivery, if we can */
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's
			 * ok to deliver, but should we?
			 */
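			/*
			 * Start the partial delivery API either when the
			 * whole message is already on the reassembly queue,
			 * or when the queue holds at least a quarter of the
			 * receive buffer (sb_hiwat >> 2) and some of it is
			 * deliverable, so a huge message cannot wedge the
			 * association.
			 */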
			if (TAILQ_EMPTY(&asoc->delivery_queue) &&
			    (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (asoc->size_on_reasm_queue >=
			    (stcb->sctp_socket->so_rcv.sb_hiwat >> 2) &&
			    tsize))) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc, 0);
			}
		}
	} else {
		sctp_service_reassembly(stcb, asoc, 0);
	}
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers sigh
 * :< more cycles.
 */
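/*
 * Returns 1 if the chunk's TSN places it adjacent to fragments already on
 * the reassembly queue in a way that makes it part of a fragmented message
 * (or it is a duplicate of one), so it may not be treated as a complete,
 * self-contained chunk; returns 0 otherwise.
 */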
static int
sctp_does_chk_belong_to_reasm(struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_tmit_chunk *at;
	u_int32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(chk->rec.data.TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == chk->rec.data.TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a guy
					 * that is NOT last; it should be a
					 * middle/last, not a complete chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully self-
					 * contained one.
					 */
					return (0);
				}
			}
		} else if (chk->rec.data.TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * the new chunk need to sit right before it?
			 */
			tsn_est = chk->rec.data.TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}

extern unsigned int sctp_max_chunks_on_queue;

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, u_int32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	u_int32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	u_int16_t strmno, strmseq;
	struct mbuf *oper;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
#ifdef SCTP_MAP_LOGGING
	sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
#endif
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		sctp_pegs[SCTP_DUPTSN_RECVD]++;
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
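	/*
	 * The wrap case counts the positions up through MAX_TSN and then
	 * on from 0. E.g. with mapping_array_base_tsn = 0xfffffffe and
	 * tsn = 1, gap = (0xffffffff - 0xfffffffe) + 1 + 1 = 3; the bit
	 * positions 0..3 then stand for TSNs 0xfffffffe, 0xffffffff, 0, 1.
	 */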
1700 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1701 /* Can't hold the bit in the mapping at max array, toss it */
1702 return (0);
1703 }
1704 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1705 if (sctp_expand_mapping_array(asoc)) {
1706 /* Can't expand, drop it */
1707 return (0);
1708 }
1709 }
1710 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1711 *high_tsn = tsn;
1712 }
1713 /* See if we have received this one already */
1714 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1715 sctp_pegs[SCTP_DUPTSN_RECVD]++;
1716 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1717 /* Record a dup for the next outbound sack */
1718 asoc->dup_tsns[asoc->numduptsns] = tsn;
1719 asoc->numduptsns++;
1720 }
1721 if (!callout_pending(&asoc->dack_timer.timer)) {
1722 /*
1723 * By starting the timer we assure that we
1724 * WILL sack at the end of the packet
1725 * when sctp_sack_check gets called.
1726 */
1727 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
1728 stcb, NULL);
1729 }
1730 return (0);
1731 }
1732 /*
1733 * Check to see about the GONE flag, duplicates would cause
1734 * a sack to be sent up above
1735 */
1736 if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1737 /*
1738 * wait a minute, this guy is gone, there is no
1739 * longer a receiver. Send peer an ABORT!
1740 */
1741 struct mbuf *op_err;
1742 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1743 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
1744 *abort_flag = 1;
1745 return (0);
1746 }
1747 /*
1748 * Now before going further we see if there is room. If NOT then
1749 * we MAY let one through only IF this TSN is the one we are
1750 * waiting for on a partial delivery API.
1751 */
1752
1753 /* now do the tests */
1754 if (((asoc->cnt_on_all_streams +
1755 asoc->cnt_on_delivery_queue +
1756 asoc->cnt_on_reasm_queue +
1757 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
1758 (((int)asoc->my_rwnd) <= 0)) {
1759 /*
1760 * When we have NO room in the rwnd we check
1761 * to make sure the reader is doing its job...
1762 */
1763 if (stcb->sctp_socket->so_rcv.sb_cc) {
1764 /* some to read, wake-up */
1765 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1766 }
1767 /* now is it in the mapping array of what we have accepted? */
1768 if (compare_with_wrap(tsn,
1769 asoc->highest_tsn_inside_map, MAX_TSN)) {
1770
1771 /* Nope not in the valid range dump it */
1772 #ifdef SCTP_DEBUG
1773 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1774 printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld delq:%d!\n",
1775 (u_long)tsn, (u_long)asoc->my_rwnd,
1776 sctp_sbspace(&stcb->sctp_socket->so_rcv),
1777 stcb->asoc.cnt_on_delivery_queue);
1778 }
1779 #endif
1780 sctp_set_rwnd(stcb, asoc);
1781 if ((asoc->cnt_on_all_streams +
1782 asoc->cnt_on_delivery_queue +
1783 asoc->cnt_on_reasm_queue +
1784 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
1785 sctp_pegs[SCTP_MSGC_DROP]++;
1786 } else {
1787 sctp_pegs[SCTP_RWND_DROPS]++;
1788 }
1789 *break_flag = 1;
1790 return (0);
1791 }
1792 }
1793 strmno = ntohs(ch->dp.stream_id);
1794 if (strmno >= asoc->streamincnt) {
1795 struct sctp_paramhdr *phdr;
1796 struct mbuf *mb;
1797
1798 MGETHDR(mb, M_DONTWAIT, MT_DATA);
1799 if (mb != NULL) {
1800 /* add some space up front so prepend will work well */
1801 mb->m_data += sizeof(struct sctp_chunkhdr);
1802 phdr = mtod(mb, struct sctp_paramhdr *);
1803 /*
1804 * Error causes are just param's and this one has
1805 * two back to back phdr, one with the error type
1806 * and size, the other with the streamid and a rsvd
1807 */
1808 mb->m_pkthdr.len = mb->m_len =
1809 (sizeof(struct sctp_paramhdr) * 2);
1810 phdr->param_type = htons(SCTP_CAUSE_INV_STRM);
1811 phdr->param_length =
1812 htons(sizeof(struct sctp_paramhdr) * 2);
1813 phdr++;
1814 /* We insert the stream in the type field */
1815 phdr->param_type = ch->dp.stream_id;
1816 /* And set the length to 0 for the rsvd field */
1817 phdr->param_length = 0;
1818 sctp_queue_op_err(stcb, mb);
1819 }
1820 sctp_pegs[SCTP_BAD_STRMNO]++;
1821 return (0);
1822 }
1823 /*
1824 * Before we continue lets validate that we are not
1825 * being fooled by an evil attacker. We can only
1826 * have 4k chunks based on our TSN spread allowed
1827 * by the mapping array 512 * 8 bits, so there is
1828 * no way our stream sequence numbers could have wrapped.
1829 * We of course only validate the FIRST fragment so the
1830 * bit must be set.
1831 */
1832 strmseq = ntohs(ch->dp.stream_sequence);
1833 if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1834 (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1835 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1836 strmseq, MAX_SEQ) ||
1837 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1838 /* The incoming sseq is behind where we last delivered? */
1839 #ifdef SCTP_DEBUG
1840 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1841 printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1842 strmseq,
1843 asoc->strmin[strmno].last_sequence_delivered);
1844 }
1845 #endif
1846 /*
1847 * throw it in the stream so it gets cleaned up in
1848 * association destruction
1849 */
1850 MGET(oper, M_DONTWAIT, MT_DATA);
1851 if (oper) {
1852 struct sctp_paramhdr *ph;
1853 u_int32_t *ippp;
1854
1855 oper->m_len = sizeof(struct sctp_paramhdr) +
1856 sizeof(*ippp);
1857 ph = mtod(oper, struct sctp_paramhdr *);
1858 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1859 ph->param_length = htons(oper->m_len);
1860 ippp = (u_int32_t *)(ph + 1);
1861 *ippp = htonl(0x20000001);
1862 }
1863 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY,
1864 oper);
1865 sctp_pegs[SCTP_BAD_SSN_WRAP]++;
1866 *abort_flag = 1;
1867 return (0);
1868 }
1869
1870 the_len = (chk_length-sizeof(struct sctp_data_chunk));
1871 if (last_chunk == 0) {
1872 dmbuf = sctp_m_copym(*m,
1873 (offset + sizeof(struct sctp_data_chunk)),
1874 the_len, M_DONTWAIT);
1875 } else {
1876 /* We can steal the last chunk */
1877 dmbuf = *m;
1878 /* lop off the top part */
1879 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1880 if (dmbuf->m_pkthdr.len > the_len) {
1881 			/* Trim the excess trailing bytes off too */
1882 m_adj(dmbuf, -(dmbuf->m_pkthdr.len-the_len));
1883 }
1884 sctp_pegs[SCTP_NO_COPY_IN]++;
1885 }
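	/*
	 * Note: stealing the chain for the last chunk (counted in
	 * SCTP_NO_COPY_IN above) avoids the sctp_m_copym() of the
	 * payload; the caller's *m is handed over here and NULLed
	 * at finish_express_del below.
	 */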
1886 if (dmbuf == NULL) {
1887 sctp_pegs[SCTP_DROP_NOMEMORY]++;
1888 return (0);
1889 }
1890 if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1891 asoc->fragmented_delivery_inprogress == 0 &&
1892 TAILQ_EMPTY(&asoc->delivery_queue) &&
1893 ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
1894 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1895 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue))) &&
1896 ((long)(stcb->sctp_socket->so_rcv.sb_hiwat -
1897 stcb->sctp_socket->so_rcv.sb_cc) >= (long)the_len)) {
1898 /* Candidate for express delivery */
1899 		/*
1900 		 * It's not fragmented,
1901 		 * no PD-API is up,
1902 		 * nothing is in the delivery queue,
1903 		 * it's un-ordered OR ordered and the next to deliver AND
1904 		 * nothing else is stuck on the stream queue,
1905 		 * and there is room for it in the socket buffer.
1906 		 * Let's just stuff it up the buffer....
1907 		 */
1908
1909 struct mbuf *control, *mmm;
1910 struct sockaddr_in6 sin6;
1911 struct sockaddr_in6 lsa6;
1912 const struct sockaddr *to;
1913
1914 /* It would be nice to avoid this copy if we could :< */
1915 control = sctp_build_ctl_nchunk(stcb, tsn,
1916 ch->dp.protocol_id, 0, strmno, strmseq,
1917 ch->ch.chunk_flags);
1918 /* XXX need to append PKTHDR to the socket buffer first */
1919
1920 if ((dmbuf->m_flags & M_PKTHDR) == 0) {
1921 struct mbuf *tmp;
1922 MGETHDR(tmp, M_DONTWAIT, MT_DATA);
1923 if (tmp == NULL) {
1924
1925 /* no room! */
1926 if (control) {
1927 sctp_m_freem(control);
1928 stcb->asoc.my_rwnd_control_len -=
1929 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
1930 }
1931
1932 goto failed_express_del;
1933 }
1934 tmp->m_pkthdr.len = the_len;
1935 tmp->m_len = 0;
1936 tmp->m_next = dmbuf;
1937 dmbuf = tmp;
1938 }
1939 to = rtcache_getdst(&net->ro);
1940 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1941 to->sa_family == AF_INET) {
1942 const struct sockaddr_in *sin;
1943
1944 sin = satocsin(to);
1945 in6_sin_2_v4mapsin6(sin, &sin6);
1946 to = (struct sockaddr *)&sin6;
1947 }
1948
1949 /* check and strip embedded scope junk */
1950 to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
1951 &lsa6);
1952 if (((const struct sockaddr_in *)to)->sin_port == 0) {
1953 printf("Huh c, port is %d not net:%p %d?\n",
1954 ((const struct sockaddr_in *)to)->sin_port,
1955 net,
1956 (int)(ntohs(stcb->rport)));
1957 /*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/
1958 /* XXX */
1959 }
1960
1961 mmm = dmbuf;
1962 /* Mark the EOR */
1963 while (mmm->m_next != NULL) {
1964 mmm = mmm->m_next;
1965 }
1966 mmm->m_flags |= M_EOR;
1967 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1968 /* we have a new high score */
1969 asoc->highest_tsn_inside_map = tsn;
1970 #ifdef SCTP_MAP_LOGGING
1971 sctp_log_map(0, 1, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1972 #endif
1973 }
1974 SCTP_TCB_UNLOCK(stcb);
1975 SCTP_INP_WLOCK(stcb->sctp_ep);
1976 SCTP_TCB_LOCK(stcb);
1977 if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to, dmbuf,
1978 control, stcb->asoc.my_vtag, stcb->sctp_ep)) {
1979 if (control) {
1980 sctp_m_freem(control);
1981 stcb->asoc.my_rwnd_control_len -=
1982 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
1983 }
1984 sctp_m_freem(dmbuf);
1985 goto failed_express_del;
1986 }
1987 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
1988 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
1989 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
1990 }
1991 } else {
1992 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
1993 }
1994 SCTP_INP_WUNLOCK(stcb->sctp_ep);
1995 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1996 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1997
1998 /* for ordered, bump what we delivered */
1999 asoc->strmin[strmno].last_sequence_delivered++;
2000 }
2001 sctp_pegs[SCTP_EXPRESS_ROUTE]++;
2002 #ifdef SCTP_STR_LOGGING
2003 sctp_log_strm_del_alt(tsn, strmseq,
2004 SCTP_STR_LOG_FROM_EXPRS_DEL);
2005 #endif
2006 #ifdef SCTP_DEBUG
2007 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2008 printf("Express Delivery succeeds\n");
2009 }
2010 #endif
2011 goto finish_express_del;
2012 }
2013
2014 failed_express_del:
2015 /* If we reach here this is a new chunk */
2016 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
2017 if (chk == NULL) {
2018 /* No memory so we drop the chunk */
2019 sctp_pegs[SCTP_DROP_NOMEMORY]++;
2020 if (last_chunk == 0) {
2021 /* we copied it, free the copy */
2022 sctp_m_freem(dmbuf);
2023 }
2024 return (0);
2025 }
2026 sctppcbinfo.ipi_count_chunk++;
2027 sctppcbinfo.ipi_gencnt_chunk++;
2028 chk->rec.data.TSN_seq = tsn;
2029 chk->rec.data.stream_seq = strmseq;
2030 chk->rec.data.stream_number = strmno;
2031 chk->rec.data.payloadtype = ch->dp.protocol_id;
2032 chk->rec.data.context = 0;
2033 chk->rec.data.doing_fast_retransmit = 0;
2034 chk->rec.data.rcv_flags = ch->ch.chunk_flags;
2035 chk->asoc = asoc;
2036 chk->send_size = the_len;
2037 chk->whoTo = net;
2038 net->ref_count++;
2039 chk->data = dmbuf;
2040
2041
2042 /* Mark it as received */
2043 /* Now queue it where it belongs */
2044 if ((chk->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
2045 SCTP_DATA_NOT_FRAG) {
2046 /* First a sanity check */
2047 if (asoc->fragmented_delivery_inprogress) {
2048 			/*
2049 			 * Ok, we have a fragmented delivery in progress.
2050 			 * If this chunk is next to deliver OR, in our
2051 			 * view, belongs in the reassembly queue, the
2052 			 * peer is evil or broken.
2053 			 */
2054 u_int32_t estimate_tsn;
2055 estimate_tsn = asoc->tsn_last_delivered + 1;
2056 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2057 (estimate_tsn == chk->rec.data.TSN_seq)) {
2058 /* Evil/Broke peer */
2059 MGET(oper, M_DONTWAIT, MT_DATA);
2060 if (oper) {
2061 struct sctp_paramhdr *ph;
2062 u_int32_t *ippp;
2063
2064 oper->m_len =
2065 sizeof(struct sctp_paramhdr) +
2066 sizeof(*ippp);
2067 ph = mtod(oper, struct sctp_paramhdr *);
2068 ph->param_type =
2069 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2070 ph->param_length = htons(oper->m_len);
2071 ippp = (u_int32_t *)(ph + 1);
2072 *ippp = htonl(0x20000002);
2073 }
2074 sctp_abort_an_association(stcb->sctp_ep, stcb,
2075 SCTP_PEER_FAULTY, oper);
2076
2077 *abort_flag = 1;
2078 sctp_pegs[SCTP_DROP_FRAG]++;
2079 return (0);
2080 } else {
2081 if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2082 MGET(oper, M_DONTWAIT, MT_DATA);
2083 if (oper) {
2084 struct sctp_paramhdr *ph;
2085 u_int32_t *ippp;
2086
2087 oper->m_len =
2088 sizeof(struct sctp_paramhdr) +
2089 sizeof(*ippp);
2090 ph = mtod(oper,
2091 struct sctp_paramhdr *);
2092 ph->param_type =
2093 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2094 ph->param_length =
2095 htons(oper->m_len);
2096 ippp = (u_int32_t *)(ph + 1);
2097 *ippp = htonl(0x20000003);
2098 }
2099 sctp_abort_an_association(stcb->sctp_ep,
2100 stcb, SCTP_PEER_FAULTY, oper);
2101
2102 *abort_flag = 1;
2103 sctp_pegs[SCTP_DROP_FRAG]++;
2104 return (0);
2105 }
2106 }
2107 } else {
2108 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2109 				/*
2110 				 * Reassembly queue is NOT empty.
2111 				 * Validate that this chunk does not need
2112 				 * to be in the reassembly queue. If it does
2113 				 * then our peer is broken or evil.
2114 				 */
2115 if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2116 MGET(oper, M_DONTWAIT, MT_DATA);
2117 if (oper) {
2118 struct sctp_paramhdr *ph;
2119 u_int32_t *ippp;
2120
2121 oper->m_len =
2122 sizeof(struct sctp_paramhdr) +
2123 sizeof(*ippp);
2124 ph = mtod(oper,
2125 struct sctp_paramhdr *);
2126 ph->param_type =
2127 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2128 ph->param_length =
2129 htons(oper->m_len);
2130 ippp = (u_int32_t *)(ph + 1);
2131 *ippp = htonl(0x20000004);
2132 }
2133 sctp_abort_an_association(stcb->sctp_ep,
2134 stcb, SCTP_PEER_FAULTY, oper);
2135
2136 *abort_flag = 1;
2137 sctp_pegs[SCTP_DROP_FRAG]++;
2138 return (0);
2139 }
2140 }
2141 }
2142 if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
2143 /* queue directly into socket buffer */
2144 sctp_deliver_data(stcb, asoc, chk, 0);
2145 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2146 } else {
2147 			/* Special check for when streams are resetting.
2148 			 * We could be smarter about this and check the
2149 			 * actual stream to see if it is not being reset;
2150 			 * that way we would not create a HOLB amongst
2151 			 * streams being reset and those not being reset.
2152 			 *
2153 			 * We take complete messages that have a stream reset
2154 			 * intervening (aka the TSN is after where our cum-ack needs
2155 			 * to be) off and put them on a pending_reply_queue. The
2156 			 * reassembly ones we do not have to worry about since
2157 			 * they are all sorted and processed by TSN order. It
2158 			 * is only the singletons we must worry about.
2159 			 */
2160 if ((asoc->pending_reply) &&
2161 ((compare_with_wrap(tsn, ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2162 (tsn == ntohl(asoc->pending_reply->reset_at_tsn)))
2163 ) {
2164 				/* Yep, it's past where we need to reset...
2165 				 * go ahead and queue it.
2166 				 */
2167 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, chk, sctp_next);
2168 } else {
2169 sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2170 }
2171 }
2172 } else {
2173 /* Into the re-assembly queue */
2174 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2175 if (*abort_flag) {
2176 sctp_pegs[SCTP_DROP_FRAG]++;
2177 return (0);
2178 }
2179 }
2180 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2181 /* we have a new high score */
2182 asoc->highest_tsn_inside_map = tsn;
2183 #ifdef SCTP_MAP_LOGGING
2184 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2185 #endif
2186 }
2187 finish_express_del:
2188 if (last_chunk) {
2189 *m = NULL;
2190 }
2191 sctp_pegs[SCTP_PEG_TSNS_RCVD]++;
2192 /* Set it present please */
2193 #ifdef SCTP_STR_LOGGING
2194 sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN);
2195 #endif
2196 #ifdef SCTP_MAP_LOGGING
2197 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2198 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2199 #endif
2200 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2201 return (1);
2202 }
2203
2204 void
2205 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2206 {
2207 /*
2208 * Now we also need to check the mapping array in a couple of ways.
2209 * 1) Did we move the cum-ack point?
2210 */
2211 struct sctp_association *asoc;
2212 int i, at;
2213 int m_size, all_ones;
2214 int slide_from, slide_end, lgap, distance;
2215 #ifdef SCTP_MAP_LOGGING
2216 uint32_t old_cumack, old_base, old_highest;
2217 unsigned char aux_array[64];
2218 #endif
2219
2220 asoc = &stcb->asoc;
2221 at = 0;
2222
2223 #ifdef SCTP_MAP_LOGGING
2224 old_cumack = asoc->cumulative_tsn;
2225 old_base = asoc->mapping_array_base_tsn;
2226 old_highest = asoc->highest_tsn_inside_map;
2227 if (asoc->mapping_array_size < 64)
2228 memcpy(aux_array, asoc->mapping_array,
2229 asoc->mapping_array_size);
2230 else
2231 memcpy(aux_array, asoc->mapping_array, 64);
2232 #endif
2233
2234 /*
2235 * We could probably improve this a small bit by calculating the
2236 * offset of the current cum-ack as the starting point.
2237 */
2238 all_ones = 1;
2239 m_size = stcb->asoc.mapping_array_size << 3;
2240 for (i = 0; i < m_size; i++) {
2241 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
2242 /*
2243 * Ok we found the first place that we are
2244 * missing a TSN.
2245 */
2246 at = i;
2247 all_ones = 0;
2248 asoc->cumulative_tsn = asoc->mapping_array_base_tsn +
2249 (i - 1);
2250 break;
2251 }
2252 }
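	/*
	 * For illustration (values hypothetical): with
	 * mapping_array_base_tsn = 100 and bits 0-3 set but bit 4
	 * clear, the loop stops at i = 4 and the cum-ack becomes
	 * 100 + (4 - 1) = 103, the last TSN of the in-order run.
	 */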
2253 if (compare_with_wrap(asoc->cumulative_tsn,
2254 asoc->highest_tsn_inside_map,
2255 MAX_TSN)) {
2256 panic("huh, cumack greater than high-tsn in map");
2257 }
2258 if (all_ones ||
2259 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2260 /* The complete array was completed by a single FR */
2261 /* highest becomes the cum-ack */
2262 int clr;
2263 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2264 /* clear the array */
2265 if (all_ones)
2266 clr = asoc->mapping_array_size;
2267 else {
2268 clr = (at >> 3) + 1;
2269 			/*
2270 			 * this should be the all-ones case,
2271 			 * but just in case :>
2272 			 */
2273 if (clr > asoc->mapping_array_size)
2274 clr = asoc->mapping_array_size;
2275 }
2276 memset(asoc->mapping_array, 0, clr);
2277 /* base becomes one ahead of the cum-ack */
2278 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2279 #ifdef SCTP_MAP_LOGGING
2280 sctp_log_map(old_base, old_cumack, old_highest,
2281 SCTP_MAP_PREPARE_SLIDE);
2282 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2283 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2284 #endif
2285 } else if (at >= 8) {
2286 /* we can slide the mapping array down */
2287 /* Calculate the new byte position we can move down */
2288 slide_from = at >> 3;
2289 /* now calculate the ceiling of the move using our highest TSN value */
2290 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2291 lgap = asoc->highest_tsn_inside_map -
2292 asoc->mapping_array_base_tsn;
2293 } else {
2294 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2295 asoc->highest_tsn_inside_map + 1;
2296 }
2297 slide_end = lgap >> 3;
2298 if (slide_end < slide_from) {
2299 panic("impossible slide");
2300 }
2301 distance = (slide_end-slide_from) + 1;
2302 #ifdef SCTP_MAP_LOGGING
2303 sctp_log_map(old_base, old_cumack, old_highest,
2304 SCTP_MAP_PREPARE_SLIDE);
2305 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2306 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2307 #endif
2308 if (distance + slide_from > asoc->mapping_array_size ||
2309 distance < 0) {
2310 #ifdef SCTP_DEBUG
2311 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2312 printf("Ugh bad addition.. you can't hrumpp!\n");
2313 }
2314 #endif
2315 /*
2316 * Here we do NOT slide forward the array so that
2317 * hopefully when more data comes in to fill it up
2318 * we will be able to slide it forward. Really
2319 * I don't think this should happen :-0
2320 */
2321
2322 #ifdef SCTP_MAP_LOGGING
2323 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2324 (uint32_t)asoc->mapping_array_size,
2325 SCTP_MAP_SLIDE_NONE);
2326 #endif
2327 } else {
2328 int ii;
2329 for (ii = 0; ii < distance; ii++) {
2330 asoc->mapping_array[ii] =
2331 asoc->mapping_array[slide_from + ii];
2332 }
2333 			for (ii = distance; ii <= slide_end; ii++) {
2334 asoc->mapping_array[ii] = 0;
2335 }
2336 asoc->mapping_array_base_tsn += (slide_from << 3);
2337 #ifdef SCTP_MAP_LOGGING
2338 sctp_log_map(asoc->mapping_array_base_tsn,
2339 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2340 SCTP_MAP_SLIDE_RESULT);
2341 #endif
2342 }
2343 }
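	/*
	 * A worked slide, for illustration (values hypothetical): if
	 * the first missing TSN sits at bit 20, then at = 20 and
	 * slide_from = 2; with the highest TSN 35 past the base,
	 * lgap = 35, slide_end = 4 and distance = 3. Bytes 2..4 are
	 * copied down to 0..2, bytes 3..4 are zeroed, and
	 * mapping_array_base_tsn advances by slide_from << 3 = 16.
	 */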
2344
2345 /* check the special flag for stream resets */
2346 if ((asoc->pending_reply) &&
2347 ((compare_with_wrap((asoc->cumulative_tsn+1), ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2348 ((asoc->cumulative_tsn+1) == ntohl(asoc->pending_reply->reset_at_tsn)))
2349 ) {
2350 		/* We have finished working through the backlogged TSNs;
2351 		 * now it is time to reset streams.
2352 		 * 1: call reset function.
2353 		 * 2: free pending_reply space.
2354 		 * 3: distribute any chunks in pending_reply_queue.
2355 		 */
2356 struct sctp_tmit_chunk *chk;
2357 sctp_handle_stream_reset_response(stcb, asoc->pending_reply);
2358 free(asoc->pending_reply, M_PCB);
2359 asoc->pending_reply = NULL;
2360 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2361 while (chk) {
2362 TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next);
2363 sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2364 if (*abort_flag) {
2365 return;
2366 }
2367 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2368 }
2369 }
2370 /*
2371 * Now we need to see if we need to queue a sack or just start
2372 * the timer (if allowed).
2373 */
2374 if (ok_to_sack) {
2375 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2376 			/*
2377 			 * Ok, special case: in the SHUTDOWN-SENT state
2378 			 * we make sure the SACK timer is off and
2379 			 * instead send a SHUTDOWN and a SACK.
2380 			 */
2381 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2382 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2383 stcb->sctp_ep, stcb, NULL);
2384 }
2385 #ifdef SCTP_DEBUG
2386 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2387 printf("%s:%d sends a shutdown\n",
2388 __FILE__,
2389 __LINE__
2390 );
2391 }
2392 #endif
2393 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2394 sctp_send_sack(stcb);
2395 } else {
2396 int is_a_gap;
2397 /* is there a gap now ? */
2398 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2399 stcb->asoc.cumulative_tsn, MAX_TSN);
2400 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a sack */
2401 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2402 (stcb->asoc.numduptsns) || /* we have dup's */
2403 (is_a_gap) || /* is still a gap */
2404 			    (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up: second packet */
2405 ) {
2406 /*
2407 * Ok we must build a SACK since the timer
2408 * is pending, we got our first packet OR
2409 * there are gaps or duplicates.
2410 */
2411 stcb->asoc.first_ack_sent = 1;
2412 sctp_send_sack(stcb);
2413 /* The sending will stop the timer */
2414 } else {
2415 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2416 stcb->sctp_ep, stcb, NULL);
2417 }
2418 }
2419 }
2420 }
2421
2422 void
2423 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
2424 {
2425 struct sctp_tmit_chunk *chk;
2426 int tsize, cntDel;
2427 u_int16_t nxt_todel;
2428
2429 cntDel = 0;
2430 if (asoc->fragmented_delivery_inprogress) {
2431 sctp_service_reassembly(stcb, asoc, hold_locks);
2432 }
2433 /* Can we proceed further, i.e. the PD-API is complete */
2434 if (asoc->fragmented_delivery_inprogress) {
2435 /* no */
2436 return;
2437 }
2438
2439 	/*
2440 	 * Yes, reassembly delivery is no longer in progress; see if
2441 	 * we have some on the sb hold queue.
2442 	 */
2443 do {
2444 if (stcb->sctp_socket->so_rcv.sb_cc >= stcb->sctp_socket->so_rcv.sb_hiwat) {
2445 if (cntDel == 0)
2446 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2447 break;
2448 }
2449 /* If deliver_data says no we must stop */
2450 if (sctp_deliver_data(stcb, asoc, (struct sctp_tmit_chunk *)NULL, hold_locks) == 0)
2451 break;
2452 cntDel++;
2453 chk = TAILQ_FIRST(&asoc->delivery_queue);
2454 } while (chk);
2455 if (cntDel) {
2456 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2457 }
2458 /*
2459 * Now is there some other chunk I can deliver
2460 * from the reassembly queue.
2461 */
2462 chk = TAILQ_FIRST(&asoc->reasmqueue);
2463 if (chk == NULL) {
2464 asoc->size_on_reasm_queue = 0;
2465 asoc->cnt_on_reasm_queue = 0;
2466 return;
2467 }
2468 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2469 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2470 ((nxt_todel == chk->rec.data.stream_seq) ||
2471 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2472 		/*
2473 		 * Yep, the first one is here. We set up to
2474 		 * start reception by backing down the TSN,
2475 		 * just in case we can't deliver.
2476 		 */
2477
2478 		/*
2479 		 * Before we start, though, the delivery queue must be
2480 		 * empty and either the whole message must be here, or
2481 		 * at least 1/4 of the socket buffer max must be queued
2482 		 * with something deliverable.
2483 		 */
2484 if (TAILQ_EMPTY(&asoc->delivery_queue) &&
2485 (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2486 (asoc->size_on_reasm_queue >=
2487 (stcb->sctp_socket->so_rcv.sb_hiwat >> 2) && tsize))) {
2488 asoc->fragmented_delivery_inprogress = 1;
2489 asoc->tsn_last_delivered = chk->rec.data.TSN_seq-1;
2490 asoc->str_of_pdapi = chk->rec.data.stream_number;
2491 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2492 asoc->fragment_flags = chk->rec.data.rcv_flags;
2493 sctp_service_reassembly(stcb, asoc, hold_locks);
2494 }
2495 }
2496 }
2497
2498 int
2499 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2500 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2501 struct sctp_nets *net, u_int32_t *high_tsn)
2502 {
2503 struct sctp_data_chunk *ch, chunk_buf;
2504 struct sctp_association *asoc;
2505 	int num_chunks = 0;	/* number of data chunks processed */
2506 int chk_length, break_flag, last_chunk;
2507 int abort_flag = 0, was_a_gap = 0;
2508 struct mbuf *m;
2509
2510 /* set the rwnd */
2511 sctp_set_rwnd(stcb, &stcb->asoc);
2512
2513 m = *mm;
2514 asoc = &stcb->asoc;
2515 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2516 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2517 /* there was a gap before this data was processed */
2518 was_a_gap = 1;
2519 }
2520 /*
2521 * setup where we got the last DATA packet from for
2522 * any SACK that may need to go out. Don't bump
2523 * the net. This is done ONLY when a chunk
2524 * is assigned.
2525 */
2526 asoc->last_data_chunk_from = net;
2527
2528 /*
2529 * Now before we proceed we must figure out if this
2530 * is a wasted cluster... i.e. it is a small packet
2531 * sent in and yet the driver underneath allocated a
2532 * full cluster for it. If so we must copy it to a
2533 * smaller mbuf and free up the cluster mbuf. This
2534 * will help with cluster starvation.
2535 */
2536 if (m->m_len < (long)MHLEN && m->m_next == NULL) {
2537 /* we only handle mbufs that are singletons.. not chains */
2538 MGET(m, M_DONTWAIT, MT_DATA);
2539 if (m) {
2540 /* ok lets see if we can copy the data up */
2541 vaddr_t *from, *to;
2542
2543 if ((*mm)->m_flags & M_PKTHDR) {
2544 /* got to copy the header first */
2545 #ifdef __APPLE__
2546 M_COPY_PKTHDR(m, (*mm));
2547 #else
2548 m_move_pkthdr(m, (*mm));
2549 #endif
2550 }
2551 /* get the pointers and copy */
2552 to = mtod(m, vaddr_t *);
2553 from = mtod((*mm), vaddr_t *);
2554 memcpy(to, from, (*mm)->m_len);
2555 /* copy the length and free up the old */
2556 m->m_len = (*mm)->m_len;
2557 sctp_m_freem(*mm);
2558 			/* success, copy the pointer back */
2559 *mm = m;
2560 } else {
2561 /* We are in trouble in the mbuf world .. yikes */
2562 m = *mm;
2563 }
2564 }
2565 /* get pointer to the first chunk header */
2566 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2567 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2568 if (ch == NULL) {
2569 		printf(" ... it's short\n");
2570 return (1);
2571 }
2572 /*
2573 * process all DATA chunks...
2574 */
2575
2576 #ifdef SCTP_DEBUG
2577 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2578 printf("In process data off:%d length:%d iphlen:%d ch->type:%d\n",
2579 *offset, length, iphlen, (int)ch->ch.chunk_type);
2580 }
2581 #endif
2582
2583 *high_tsn = asoc->cumulative_tsn;
2584 break_flag = 0;
2585 while (ch->ch.chunk_type == SCTP_DATA) {
2586 /* validate chunk length */
2587 chk_length = ntohs(ch->ch.chunk_length);
2588 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1 ||
2589 length - *offset < chk_length) {
2590 /*
2591 			 * Need to send an abort since we had an invalid
2592 * data chunk.
2593 */
2594 struct mbuf *op_err;
2595 MGET(op_err, M_DONTWAIT, MT_DATA);
2596 if (op_err) {
2597 struct sctp_paramhdr *ph;
2598 u_int32_t *ippp;
2599
2600 op_err->m_len = sizeof(struct sctp_paramhdr) +
2601 sizeof(*ippp);
2602 ph = mtod(op_err, struct sctp_paramhdr *);
2603 ph->param_type =
2604 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2605 ph->param_length = htons(op_err->m_len);
2606 ippp = (u_int32_t *)(ph + 1);
2607 *ippp = htonl(0x30000001);
2608 }
2609 sctp_abort_association(inp, stcb, m, iphlen, sh,
2610 op_err);
2611 return (2);
2612 }
2613 #ifdef SCTP_DEBUG
2614 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2615 printf("A chunk of len:%d to process (tot:%d)\n",
2616 chk_length, length - *offset);
2617 }
2618 #endif
2619
2620 #ifdef SCTP_AUDITING_ENABLED
2621 sctp_audit_log(0xB1, 0);
2622 #endif
2623 		if (SCTP_SIZE32(chk_length) == length - *offset) {
2624 last_chunk = 1;
2625 } else {
2626 last_chunk = 0;
2627 }
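		/*
		 * For illustration (numbers hypothetical): with
		 * length = 1000 and *offset = 20, a chunk whose padded
		 * size is 980 bytes fills the rest of the packet, so
		 * last_chunk = 1 and its mbuf chain can be stolen
		 * rather than copied.
		 */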
2628 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2629 chk_length, net, high_tsn, &abort_flag, &break_flag,
2630 last_chunk)) {
2631 num_chunks++;
2632 #ifdef SCTP_DEBUG
2633 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2634 printf("Now incr num_chunks to %d\n",
2635 num_chunks);
2636 }
2637 #endif
2638 }
2639 if (abort_flag)
2640 return (2);
2641
2642 if (break_flag) {
2643 /*
2644 * Set because of out of rwnd space and no drop rep
2645 * space left.
2646 */
2647 break;
2648 }
2649
2650 *offset += SCTP_SIZE32(chk_length);
2651 if (*offset >= length) {
2652 /* no more data left in the mbuf chain */
2653 break;
2654 }
2655 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2656 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2657 if (ch == NULL) {
2658 *offset = length;
2659 break;
2660 }
2661 } /* while */
2662 if (break_flag) {
2663 /*
2664 * we need to report rwnd overrun drops.
2665 */
2666 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2667 }
2668 if (num_chunks) {
2669 		/*
2670 		 * Did we get data? If so, update the time for
2671 		 * auto-close and give the peer credit for being
2672 		 * alive.
2673 		 */
2674 sctp_pegs[SCTP_DATA_DG_RECV]++;
2675 stcb->asoc.overall_error_count = 0;
2676 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2677 }
2678 /* now service all of the reassm queue and delivery queue */
2679 sctp_service_queues(stcb, asoc, 0);
2680 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2681 /*
2682 * Assure that we ack right away by making
2683 * sure that a d-ack timer is running. So the
2684 * sack_check will send a sack.
2685 */
2686 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2687 net);
2688 }
2689 /* Start a sack timer or QUEUE a SACK for sending */
2690 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2691 if (abort_flag)
2692 return (2);
2693
2694 return (0);
2695 }
2696
2697 static void
2698 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2699 struct sctp_sack_chunk *ch, u_long last_tsn, u_long *biggest_tsn_acked,
2700 u_long *biggest_newly_acked_tsn, int num_seg, int *ecn_seg_sums)
2701 {
2702 /************************************************/
2703 /* process fragments and update sendqueue */
2704 /************************************************/
2705 struct sctp_sack *sack;
2706 struct sctp_gap_ack_block *frag;
2707 struct sctp_tmit_chunk *tp1;
2708 int i;
2709 unsigned int j;
2710 #ifdef SCTP_FR_LOGGING
2711 int num_frs=0;
2712 #endif
2713 uint16_t frag_strt, frag_end, primary_flag_set;
2714 u_long last_frag_high;
2715
2716 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2717 primary_flag_set = 1;
2718 } else {
2719 primary_flag_set = 0;
2720 }
2721
2722 sack = &ch->sack;
2723 frag = (struct sctp_gap_ack_block *)((vaddr_t)sack +
2724 sizeof(struct sctp_sack));
2725 tp1 = NULL;
2726 last_frag_high = 0;
2727 for (i = 0; i < num_seg; i++) {
2728 frag_strt = ntohs(frag->start);
2729 frag_end = ntohs(frag->end);
2730 /* some sanity checks on the fragment offsets */
2731 if (frag_strt > frag_end) {
2732 /* this one is malformed, skip */
2733 frag++;
2734 continue;
2735 }
2736 if (compare_with_wrap((frag_end+last_tsn), *biggest_tsn_acked,
2737 MAX_TSN))
2738 *biggest_tsn_acked = frag_end+last_tsn;
2739
2740 /* mark acked dgs and find out the highestTSN being acked */
2741 if (tp1 == NULL) {
2742 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2743
2744 /* save the locations of the last frags */
2745 last_frag_high = frag_end + last_tsn;
2746 } else {
2747 /*
2748 			 * now let's see if we need to reset the queue
2749 			 * due to an out-of-order SACK fragment
2750 */
2751 if (compare_with_wrap(frag_strt+last_tsn,
2752 last_frag_high, MAX_TSN)) {
2753 /*
2754 * if the new frag starts after the last TSN
2755 * frag covered, we are ok
2756 * and this one is beyond the last one
2757 */
2758 ;
2759 } else {
2760 /*
2761 				 * ok, they have reset us, so we need to reset
2762 				 * the queue. This will cause extra hunting, but
2763 				 * hey, they chose the performance
2764 				 * hit when they failed to order their gaps.
2765 */
2766 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2767 }
2768 last_frag_high = frag_end + last_tsn;
2769 }
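		/*
		 * Note, for illustration (numbers hypothetical): gap ack
		 * block offsets are relative to the cumulative TSN, so
		 * with last_tsn = 1000 a block with start = 2 and
		 * end = 5 marks TSNs 1002 through 1005 in the loop
		 * below.
		 */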
2770 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2771 while (tp1) {
2772 #ifdef SCTP_FR_LOGGING
2773 if (tp1->rec.data.doing_fast_retransmit)
2774 num_frs++;
2775 #endif
2776
2777 if (tp1->rec.data.TSN_seq == j) {
2778 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2779 /* must be held until cum-ack passes */
2780 /* ECN Nonce: Add the nonce value to the sender's nonce sum */
2781 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2782 /*
2783 * If it is less than
2784 * ACKED, it is now
2785 * no-longer in flight.
2786 * Higher values may
2787 * already be set via
2788 * previous Gap Ack
2789 * Blocks...
2790 * i.e. ACKED or MARKED.
2791 */
2792 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2793 *biggest_newly_acked_tsn,
2794 MAX_TSN)) {
2795 *biggest_newly_acked_tsn =
2796 tp1->rec.data.TSN_seq;
2797 }
2798 sctp_flight_size_decrease(tp1);
2799
2800 sctp_total_flight_decrease(stcb, tp1);
2801
2802 if (tp1->snd_count < 2) {
2803 /* True non-retransmitted chunk */
2804 tp1->whoTo->net_ack2 +=
2805 tp1->send_size;
2806
2807 /* update RTO too? */
2808 if (tp1->do_rtt) {
2809 tp1->whoTo->RTO =
2810 sctp_calculate_rto(stcb,
2811 asoc,
2812 tp1->whoTo,
2813 &tp1->sent_rcv_time);
2814 tp1->whoTo->rto_pending = 0;
2815 tp1->do_rtt = 0;
2816 }
2817 }
2818 }
2819 if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
2820 tp1->sent != SCTP_DATAGRAM_UNSENT &&
2821 compare_with_wrap(tp1->rec.data.TSN_seq,
2822 asoc->this_sack_highest_gap,
2823 MAX_TSN)) {
2824 asoc->this_sack_highest_gap =
2825 tp1->rec.data.TSN_seq;
2826 if (primary_flag_set) {
2827 tp1->whoTo->cacc_saw_newack = 1;
2828 }
2829 }
2830 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2831 #ifdef SCTP_DEBUG
2832 if (sctp_debug_on &
2833 SCTP_DEBUG_INDATA3) {
2834 printf("Hmm. one that is in RESEND that is now ACKED\n");
2835 }
2836 #endif
2837 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
2838 #ifdef SCTP_AUDITING_ENABLED
2839 sctp_audit_log(0xB2,
2840 (asoc->sent_queue_retran_cnt & 0x000000ff));
2841 #endif
2842
2843 }
2844 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2845 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2846 tp1->sent = SCTP_DATAGRAM_MARKED;
2847 }
2848 break;
2849 } /* if (tp1->TSN_seq == j) */
2850 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
2851 MAX_TSN))
2852 break;
2853 tp1 = TAILQ_NEXT(tp1, sctp_next);
2854 }/* end while (tp1) */
2855 } /* end for (j = fragStart */
2856 frag++; /* next one */
2857 }
2858 #ifdef SCTP_FR_LOGGING
2859 if (num_frs)
2860 sctp_log_fr(*biggest_tsn_acked, *biggest_newly_acked_tsn,
2861 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2862 #endif
2863 }
2864
2865 static void
2866 sctp_check_for_revoked(struct sctp_association *asoc, u_long cum_ack,
2867 u_long biggest_tsn_acked)
2868 {
2869 struct sctp_tmit_chunk *tp1;
2870 int tot_revoked=0;
2871
2872 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2873 while (tp1) {
2874 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
2875 MAX_TSN)) {
2876 /*
2877 * ok this guy is either ACK or MARKED. If it is ACKED
2878 * it has been previously acked but not this time i.e.
2879 * revoked. If it is MARKED it was ACK'ed again.
2880 */
2881 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2882 /* it has been revoked */
2883 /*
2884 * We do NOT add back to flight size here since
2885 				 * it is really NOT in flight. Resend (when/if
2886 				 * it occurs) will add to flight size.
2887 */
2888 tp1->sent = SCTP_DATAGRAM_SENT;
2889 tot_revoked++;
2890 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2891 /* it has been re-acked in this SACK */
2892 tp1->sent = SCTP_DATAGRAM_ACKED;
2893 }
2894 }
2895 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2896 MAX_TSN)) {
2897 /* above the sack */
2898 break;
2899 }
2900 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2901 break;
2902 tp1 = TAILQ_NEXT(tp1, sctp_next);
2903 }
2904 if (tot_revoked > 0) {
2905 /* Setup the ecn nonce re-sync point. We
2906 * do this since once data is revoked
2907 * we begin to retransmit things, which
2908 * do NOT have the ECN bits set. This means
2909 * we are now out of sync and must wait until
2910 * we get back in sync with the peer to
2911 * check ECN bits.
2912 */
2913 tp1 = TAILQ_FIRST(&asoc->send_queue);
2914 if (tp1 == NULL) {
2915 asoc->nonce_resync_tsn = asoc->sending_seq;
2916 } else {
2917 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
2918 }
2919 asoc->nonce_wait_for_ecne = 0;
2920 asoc->nonce_sum_check = 0;
2921 }
2922
2923 }
2924
2925 extern int sctp_peer_chunk_oh;
2926
2927 static void
2928 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2929 u_long biggest_tsn_acked, int strike_enabled,
2930 u_long biggest_tsn_newly_acked, int accum_moved)
2931 {
2932 struct sctp_tmit_chunk *tp1;
2933 int strike_flag=0;
2934 struct timeval now;
2935 int tot_retrans=0;
2936 u_int32_t sending_seq;
2937 int primary_switch_active = 0;
2938 int double_switch_active = 0;
2939
2940 /* select the sending_seq, this is
2941 * either the next thing ready to
2942 * be sent but not transmitted, OR,
2943 * the next seq we assign.
2944 */
2945 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2946 if (tp1 == NULL) {
2947 sending_seq = asoc->sending_seq;
2948 } else {
2949 sending_seq = tp1->rec.data.TSN_seq;
2950 }
2951
2952 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2953 primary_switch_active = 1;
2954 }
2955 if (asoc->primary_destination->dest_state & SCTP_ADDR_DOUBLE_SWITCH) {
2956 double_switch_active = 1;
2957 }
2958 if (stcb->asoc.peer_supports_prsctp ) {
2959 SCTP_GETTIME_TIMEVAL(&now);
2960 }
2961 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2962 while (tp1) {
2963 strike_flag=0;
2964 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2965 MAX_TSN) ||
2966 tp1->sent == SCTP_DATAGRAM_UNSENT) {
2967 /* done */
2968 break;
2969 }
2970 if ((tp1->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) ==
2971 SCTP_PR_SCTP_ENABLED &&
2972 tp1->sent < SCTP_DATAGRAM_ACKED) {
2973 /* Is it expired? */
2974 #ifndef __FreeBSD__
2975 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
2976 #else
2977 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
2978 #endif
2979 {
2980 /* Yes so drop it */
2981 if (tp1->data != NULL) {
2982 sctp_release_pr_sctp_chunk(stcb, tp1,
2983 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
2984 &asoc->sent_queue);
2985 }
2986 tp1 = TAILQ_NEXT(tp1, sctp_next);
2987 continue;
2988 }
2989 }
2990
2991 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2992 asoc->this_sack_highest_gap, MAX_TSN)) {
2993 /* we are beyond the tsn in the sack */
2994 break;
2995 }
2996 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2997 /* either a RESEND, ACKED, or MARKED */
2998 /* skip */
2999 tp1 = TAILQ_NEXT(tp1, sctp_next);
3000 continue;
3001 }
3002 if (primary_switch_active && (strike_enabled == 0)) {
3003 if (tp1->whoTo != asoc->primary_destination) {
3004 /*
3005 * We can only strike things on the primary if
3006 * the strike_enabled flag is clear
3007 */
3008 tp1 = TAILQ_NEXT(tp1, sctp_next);
3009 continue;
3010 }
3011 } else if (primary_switch_active) {
3012 if (tp1->whoTo->cacc_saw_newack == 0) {
3013 /*
3014 * Only one was received but it was NOT
3015 * this one.
3016 */
3017 tp1 = TAILQ_NEXT(tp1, sctp_next);
3018 continue;
3019 }
3020 }
3021 if (double_switch_active &&
3022 (compare_with_wrap(asoc->primary_destination->next_tsn_at_change,
3023 tp1->rec.data.TSN_seq, MAX_TSN))) {
3024 /*
3025 * With a double switch we do NOT mark unless we
3026 * are beyond the switch point.
3027 */
3028 tp1 = TAILQ_NEXT(tp1, sctp_next);
3029 continue;
3030 }
3031 /*
3032 		 * Here we check to see if we have already done a FR
3033 * and if so we see if the biggest TSN we saw in the sack is
3034 * smaller than the recovery point. If so we don't strike the
3035 * tsn... otherwise we CAN strike the TSN.
3036 */
3037 if (accum_moved && asoc->fast_retran_loss_recovery) {
3038 /*
3039 * Strike the TSN if in fast-recovery and
3040 * cum-ack moved.
3041 */
3042 tp1->sent++;
3043 } else if (tp1->rec.data.doing_fast_retransmit) {
3044 /*
3045 * For those that have done a FR we must
3046 * take special consideration if we strike. I.e
3047 * the biggest_newly_acked must be higher
3048 * than the sending_seq at the time we did
3049 * the FR.
3050 */
3051 #ifdef SCTP_FR_TO_ALTERNATE
3052 /*
3053 * If FR's go to new networks, then we
3054 * must only do this for singly homed asoc's. However
3055 * if the FR's go to the same network (Armando's work)
3056 		 * then it's ok to FR multiple times.
3057 */
3058 if (asoc->numnets < 2)
3059 #else
3060 if (1)
3061 #endif
3062 {
3063 if ((compare_with_wrap(biggest_tsn_newly_acked,
3064 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3065 (biggest_tsn_newly_acked ==
3066 tp1->rec.data.fast_retran_tsn)) {
3067 /*
3068 * Strike the TSN, since this ack is
3069 * beyond where things were when we did
3070 * a FR.
3071 */
3072 #ifdef SCTP_FR_LOGGING
3073 sctp_log_fr(biggest_tsn_newly_acked,
3074 tp1->rec.data.TSN_seq,
3075 tp1->rec.data.fast_retran_tsn,
3076 SCTP_FR_LOG_STRIKE_CHUNK);
3077 #endif
3078 tp1->sent++;
3079 strike_flag=1;
3080 }
3081 }
3082 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3083 biggest_tsn_newly_acked, MAX_TSN)) {
3084 /*
3085 * We don't strike these:
3086 			 * This is the HTNA algorithm, i.e. we don't strike
3087 			 * if our TSN is larger than the Highest TSN Newly
3088 * Acked.
3089 */
3090 ;
3091 } else {
3092 /* Strike the TSN */
3093 tp1->sent++;
3094 }
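		/*
		 * Note: tp1->sent doubles as the strike counter; each
		 * strike increments it from SCTP_DATAGRAM_SENT toward
		 * SCTP_DATAGRAM_RESEND, and only when it reaches
		 * SCTP_DATAGRAM_RESEND (checked next) is the chunk
		 * actually queued for fast retransmit.
		 */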
3095 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3096 /* Increment the count to resend */
3097 struct sctp_nets *alt;
3098
3099 #ifdef SCTP_FR_LOGGING
3100 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3101 0, SCTP_FR_MARKED);
3102 #endif
3103 if (strike_flag) {
3104 /* This is a subsequent FR */
3105 sctp_pegs[SCTP_DUP_FR]++;
3106 }
3107 asoc->sent_queue_retran_cnt++;
3108 #ifdef SCTP_FR_TO_ALTERNATE
3109 /* Can we find an alternate? */
3110 alt = sctp_find_alternate_net(stcb, tp1->whoTo);
3111 #else
3112 /*
3113 * default behavior is to NOT retransmit FR's
3114 * to an alternate. Armando Caro's paper details
3115 * why.
3116 */
3117 alt = tp1->whoTo;
3118 #endif
3119 tp1->rec.data.doing_fast_retransmit = 1;
3120 tot_retrans++;
3121 /* mark the sending seq for possible subsequent FR's */
3122 if (TAILQ_EMPTY(&asoc->send_queue)) {
3123 /*
3124 				 * If the send queue is empty then it's the
3125 				 * next sequence number that will be assigned, so
3126 * we subtract one from this to get the one we
3127 * last sent.
3128 */
3129 tp1->rec.data.fast_retran_tsn = sending_seq - 1;
3130 } else {
3131 /*
3132 * If there are chunks on the send queue
3133 * (unsent data that has made it from the
3134 				 * stream queues but not out the door), we take
3135 * the first one (which will have the lowest
3136 * TSN) and subtract one to get the one we last
3137 * sent.
3138 */
3139 struct sctp_tmit_chunk *ttt;
3140 ttt = TAILQ_FIRST(&asoc->send_queue);
3141 tp1->rec.data.fast_retran_tsn =
3142 ttt->rec.data.TSN_seq - 1;
3143 }
3144 if (tp1->do_rtt) {
3145 /*
3146 * this guy had a RTO calculation pending on it,
3147 * cancel it
3148 */
3149 tp1->whoTo->rto_pending = 0;
3150 tp1->do_rtt = 0;
3151 }
3152 /* fix counts and things */
3153
3154 tp1->whoTo->net_ack++;
3155 sctp_flight_size_decrease(tp1);
3156 #ifdef SCTP_LOG_RWND
3157 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3158 asoc->peers_rwnd , tp1->send_size, sctp_peer_chunk_oh);
3159 #endif
3160 /* add back to the rwnd */
3161 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3162
3163 /* remove from the total flight */
3164 sctp_total_flight_decrease(stcb, tp1);
3165 if (alt != tp1->whoTo) {
3166 /* yes, there is an alternate. */
3167 sctp_free_remote_addr(tp1->whoTo);
3168 tp1->whoTo = alt;
3169 alt->ref_count++;
3170 }
3171 }
3172 tp1 = TAILQ_NEXT(tp1, sctp_next);
3173 } /* while (tp1) */
3174
3175 if (tot_retrans > 0) {
3176 /* Setup the ecn nonce re-sync point. We
3177 		 * do this since once we go to FR on something
3178 		 * we introduce a Karn's rule scenario and
3179 * won't know the totals for the ECN bits.
3180 */
3181 asoc->nonce_resync_tsn = sending_seq;
3182 asoc->nonce_wait_for_ecne = 0;
3183 asoc->nonce_sum_check = 0;
3184 }
3185
3186 }
3187
3188 struct sctp_tmit_chunk *
3189 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3190 struct sctp_association *asoc)
3191 {
3192 struct sctp_tmit_chunk *tp1, *tp2, *a_adv=NULL;
3193 struct timeval now;
3194 int now_filled=0;
3195
3196 if (asoc->peer_supports_prsctp == 0) {
3197 return (NULL);
3198 }
3199 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3200 while (tp1) {
3201 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3202 tp1->sent != SCTP_DATAGRAM_RESEND) {
3203 /* no chance to advance, out of here */
3204 break;
3205 }
3206 if ((tp1->flags & SCTP_PR_SCTP_ENABLED) == 0) {
3207 /*
3208 			 * We can't fwd-tsn past any that are reliable,
3209 			 * i.e. retransmitted until the asoc fails.
3210 */
3211 break;
3212 }
3213 if (!now_filled) {
3214 SCTP_GETTIME_TIMEVAL(&now);
3215 now_filled = 1;
3216 }
3217 tp2 = TAILQ_NEXT(tp1, sctp_next);
3218 /*
3219 		 * now we have a chunk which is marked for another
3220 		 * retransmission to a PR-stream but has already run
3221 		 * out of its chances, OR has been
3222 		 * marked to skip now. Can we skip it if it's a
3223 		 * resend?
3224 */
3225 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3226 (tp1->flags & SCTP_PR_SCTP_BUFFER) == 0) {
3227 /*
3228 * Now is this one marked for resend and its time
3229 * is now up?
3230 */
3231 #ifndef __FreeBSD__
3232 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3233 #else
3234 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3235 #endif
3236 {
3237 /* Yes so drop it */
3238 if (tp1->data) {
3239 sctp_release_pr_sctp_chunk(stcb, tp1,
3240 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3241 &asoc->sent_queue);
3242 }
3243 } else {
3244 /*
3245 				 * No, we are done when we hit one for resend
3246 				 * whose time has not expired.
3247 */
3248 break;
3249 }
3250 }
3251 /*
3252 * Ok now if this chunk is marked to drop it
3253 * we can clean up the chunk, advance our peer ack point
3254 * and we can check the next chunk.
3255 */
3256 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3257 /* advance PeerAckPoint goes forward */
3258 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3259 a_adv = tp1;
3260 /*
3261 * we don't want to de-queue it here. Just wait for the
3262 * next peer SACK to come with a new cumTSN and then
3263 * the chunk will be dropped in the normal fashion.
3264 */
3265 if (tp1->data) {
3266 sctp_free_bufspace(stcb, asoc, tp1);
3267 #ifdef SCTP_DEBUG
3268 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3269 printf("--total out:%lu total_mbuf_out:%lu\n",
3270 (u_long)asoc->total_output_queue_size,
3271 (u_long)asoc->total_output_mbuf_queue_size);
3272 }
3273 #endif
3274 /*
3275 * Maybe there should be another notification
3276 * type
3277 */
3278 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3279 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3280 tp1);
3281 sctp_m_freem(tp1->data);
3282 tp1->data = NULL;
3283 sctp_sowwakeup(stcb->sctp_ep,
3284 stcb->sctp_socket);
3285 }
3286 } else {
3287 /* If it is still in RESEND we can advance no further */
3288 break;
3289 }
3290 /*
3291 * If we hit here we just dumped tp1, move to next
3292 * tsn on sent queue.
3293 */
3294 tp1 = tp2;
3295 }
3296 return (a_adv);
3297 }
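/*
 * For illustration (TSNs hypothetical): with a sent queue of
 * 10 (FORWARD_TSN_SKIP), 11 (FORWARD_TSN_SKIP), 12 (RESEND, reliable),
 * the loop above stops at 12, advances advanced_peer_ack_point to 11
 * and returns the chunk holding TSN 11, from which the caller can
 * build a FWD-TSN telling the peer to treat 10 and 11 as delivered.
 */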
3298
3299 #ifdef SCTP_HIGH_SPEED
3300 struct sctp_hs_raise_drop {
3301 int32_t cwnd;
3302 int32_t increase;
3303 int32_t drop_percent;
3304 };
3305
3306 #define SCTP_HS_TABLE_SIZE 73
3307
3308 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3309 {38,1,50}, /* 0 */
3310 {118,2,44}, /* 1 */
3311 {221,3,41}, /* 2 */
3312 {347,4,38}, /* 3 */
3313 {495,5,37}, /* 4 */
3314 {663,6,35}, /* 5 */
3315 {851,7,34}, /* 6 */
3316 {1058,8,33}, /* 7 */
3317 {1284,9,32}, /* 8 */
3318 {1529,10,31}, /* 9 */
3319 {1793,11,30}, /* 10 */
3320 {2076,12,29}, /* 11 */
3321 {2378,13,28}, /* 12 */
3322 {2699,14,28}, /* 13 */
3323 {3039,15,27}, /* 14 */
3324 {3399,16,27}, /* 15 */
3325 {3778,17,26}, /* 16 */
3326 {4177,18,26}, /* 17 */
3327 {4596,19,25}, /* 18 */
3328 {5036,20,25}, /* 19 */
3329 {5497,21,24}, /* 20 */
3330 {5979,22,24}, /* 21 */
3331 {6483,23,23}, /* 22 */
3332 {7009,24,23}, /* 23 */
3333 {7558,25,22}, /* 24 */
3334 {8130,26,22}, /* 25 */
3335 {8726,27,22}, /* 26 */
3336 {9346,28,21}, /* 27 */
3337 {9991,29,21}, /* 28 */
3338 {10661,30,21}, /* 29 */
3339 {11358,31,20}, /* 30 */
3340 {12082,32,20}, /* 31 */
3341 {12834,33,20}, /* 32 */
3342 {13614,34,19}, /* 33 */
3343 {14424,35,19}, /* 34 */
3344 {15265,36,19}, /* 35 */
3345 {16137,37,19}, /* 36 */
3346 {17042,38,18}, /* 37 */
3347 {17981,39,18}, /* 38 */
3348 {18955,40,18}, /* 39 */
3349 {19965,41,17}, /* 40 */
3350 {21013,42,17}, /* 41 */
3351 {22101,43,17}, /* 42 */
3352 {23230,44,17}, /* 43 */
3353 {24402,45,16}, /* 44 */
3354 {25618,46,16}, /* 45 */
3355 {26881,47,16}, /* 46 */
3356 {28193,48,16}, /* 47 */
3357 {29557,49,15}, /* 48 */
3358 {30975,50,15}, /* 49 */
3359 {32450,51,15}, /* 50 */
3360 {33986,52,15}, /* 51 */
3361 {35586,53,14}, /* 52 */
3362 {37253,54,14}, /* 53 */
3363 {38992,55,14}, /* 54 */
3364 {40808,56,14}, /* 55 */
3365 {42707,57,13}, /* 56 */
3366 {44694,58,13}, /* 57 */
3367 {46776,59,13}, /* 58 */
3368 {48961,60,13}, /* 59 */
3369 {51258,61,13}, /* 60 */
3370 {53677,62,12}, /* 61 */
3371 {56230,63,12}, /* 62 */
3372 {58932,64,12}, /* 63 */
3373 {61799,65,12}, /* 64 */
3374 {64851,66,11}, /* 65 */
3375 {68113,67,11}, /* 66 */
3376 {71617,68,11}, /* 67 */
3377 {75401,69,10}, /* 68 */
3378 {79517,70,10}, /* 69 */
3379 {84035,71,10}, /* 70 */
3380 {89053,72,10}, /* 71 */
3381 {94717,73,9} /* 72 */
3382 };
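/*
 * Reading the table, for illustration: cur_val below is the cwnd in
 * units of 1024 bytes (cwnd >> 10). A cwnd of ~1 MB gives cur_val =
 * 1024, which selects entry 7 {1058,8,33}: each increase event adds
 * 8 << 10 bytes and a loss event drops cwnd by 33%. Below entry 0
 * (38 KB) the normal increase/decrease rules in the functions below
 * apply.
 */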
3383
3384 static void
3385 sctp_hs_cwnd_increase(struct sctp_nets *net)
3386 {
3387 int cur_val, i, indx, incr;
3388
3389 cur_val = net->cwnd >> 10;
3390 indx = SCTP_HS_TABLE_SIZE - 1;
3391
3392 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3393 /* normal mode */
3394 if (net->net_ack > net->mtu) {
3395 net->cwnd += net->mtu;
3396 #ifdef SCTP_CWND_LOGGING
3397 sctp_log_cwnd(net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3398 #endif
3399 } else {
3400 net->cwnd += net->net_ack;
3401 #ifdef SCTP_CWND_LOGGING
3402 sctp_log_cwnd(net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
3403 #endif
3404 }
3405 } else {
3406 for (i=net->last_hs_used; i<SCTP_HS_TABLE_SIZE; i++) {
3407 if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3408 indx = i;
3409 break;
3410 }
3411 }
3412 net->last_hs_used = indx;
3413 incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3414 net->cwnd += incr;
3415 #ifdef SCTP_CWND_LOGGING
3416 sctp_log_cwnd(net, incr, SCTP_CWND_LOG_FROM_SS);
3417 #endif
3418 }
3419 }
3420
3421 static void
3422 sctp_hs_cwnd_decrease(struct sctp_nets *net)
3423 {
3424 int cur_val, i, indx;
3425 #ifdef SCTP_CWND_LOGGING
3426 int old_cwnd = net->cwnd;
3427 #endif
3428
3429 cur_val = net->cwnd >> 10;
3430 indx = net->last_hs_used;
3431 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3432 /* normal mode */
3433 net->ssthresh = net->cwnd / 2;
3434 if (net->ssthresh < (net->mtu*2)) {
3435 net->ssthresh = 2 * net->mtu;
3436 }
3437 net->cwnd = net->ssthresh;
3438 #ifdef SCTP_CWND_LOGGING
3439 sctp_log_cwnd(net, (net->cwnd-old_cwnd), SCTP_CWND_LOG_FROM_FR);
3440 #endif
3441 } else {
3442 /* drop by the proper amount */
3443 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3444 sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3445 net->cwnd = net->ssthresh;
3446 /* now where are we */
3447 indx = net->last_hs_used;
3448 cur_val = net->cwnd >> 10;
3449 /* reset where we are in the table */
3450 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3451 /* feel out of hs */
3452 net->last_hs_used = 0;
3453 } else {
3454 for (i = indx; i >= 1; i--) {
3455 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3456 break;
3457 }
3458 }
3459 net->last_hs_used = indx;
3460 }
3461 }
3462 }
3463 #endif
3464
3465 void
3466 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
3467 struct sctp_nets *net_from, int *abort_now)
3468 {
3469 struct sctp_association *asoc;
3470 struct sctp_sack *sack;
3471 struct sctp_tmit_chunk *tp1, *tp2;
3472 u_long cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked;
3473 uint16_t num_seg;
3474 unsigned int sack_length;
3475 uint32_t send_s;
3476 int some_on_streamwheel;
3477 int strike_enabled = 0, cnt_of_cacc = 0;
3478 int accum_moved = 0;
3479 int marking_allowed = 1;
3480 int will_exit_fast_recovery=0;
3481 u_int32_t a_rwnd;
3482 struct sctp_nets *net = NULL;
3483 int nonce_sum_flag, ecn_seg_sums=0;
3484 asoc = &stcb->asoc;
3485
3486 /*
3487 * Handle the incoming sack on data I have been sending.
3488 */
3489
3490 /*
3491 * we take any chance we can to service our queues since we
3492 * cannot get awoken when the socket is read from :<
3493 */
3494 asoc->overall_error_count = 0;
3495
3496 if (asoc->sent_queue_retran_cnt) {
3497 #ifdef SCTP_DEBUG
3498 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3499 printf("Handling SACK for asoc:%p retran:%d\n",
3500 asoc, asoc->sent_queue_retran_cnt);
3501 }
3502 #endif
3503 }
3504
3505 sctp_service_queues(stcb, asoc, 0);
3506
3507 /*
3508 * Now perform the actual SACK handling:
3509 * 1) Verify that it is not an old sack, if so discard.
3510 * 2) If there is nothing left in the send queue (cum-ack is equal
3511 * to last acked) then you have a duplicate too, update any rwnd
3512 * change and verify no timers are running. then return.
3513 	 * 3) Process any new consecutive data i.e. cum-ack moved
3514 * process these first and note that it moved.
3515 * 4) Process any sack blocks.
3516 * 5) Drop any acked from the queue.
3517 * 6) Check for any revoked blocks and mark.
3518 * 7) Update the cwnd.
3519 * 8) Nothing left, sync up flightsizes and things, stop all timers
3520 * and also check for shutdown_pending state. If so then go ahead
3521 * and send off the shutdown. If in shutdown recv, send off the
3522 * shutdown-ack and start that timer, Ret.
3523 * 9) Strike any non-acked things and do FR procedure if needed being
3524 * sure to set the FR flag.
3525 * 10) Do pr-sctp procedures.
3526 * 11) Apply any FR penalties.
3527 * 12) Assure we will SACK if in shutdown_recv state.
3528 */
3529
3530 sack_length = ntohs(ch->ch.chunk_length);
3531 if (sack_length < sizeof(struct sctp_sack_chunk)) {
3532 #ifdef SCTP_DEBUG
3533 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3534 			printf("Bad size on sack chunk .. too small\n");
3535 }
3536 #endif
3537 return;
3538 }
3539 /* ECN Nonce */
3540 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
3541 sack = &ch->sack;
3542 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
3543 num_seg = ntohs(sack->num_gap_ack_blks);
3544
3545 /* reality check */
3546 if (TAILQ_EMPTY(&asoc->send_queue)) {
3547 send_s = asoc->sending_seq;
3548 } else {
3549 tp1 = TAILQ_FIRST(&asoc->send_queue);
3550 send_s = tp1->rec.data.TSN_seq;
3551 }
3552
3553 if (sctp_strict_sacks) {
3554 if (cum_ack == send_s ||
3555 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
3556 struct mbuf *oper;
3557 /*
3558 * no way, we have not even sent this TSN out yet.
3559 * Peer is hopelessly messed up with us.
3560 */
3561 hopeless_peer:
3562 *abort_now = 1;
3563 /* XXX */
3564 MGET(oper, M_DONTWAIT, MT_DATA);
3565 if (oper) {
3566 struct sctp_paramhdr *ph;
3567 u_int32_t *ippp;
3568
3569 oper->m_len = sizeof(struct sctp_paramhdr) +
3570 sizeof(*ippp);
3571 ph = mtod(oper, struct sctp_paramhdr *);
3572 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3573 ph->param_length = htons(oper->m_len);
3574 ippp = (u_int32_t *)(ph + 1);
3575 *ippp = htonl(0x30000002);
3576 }
3577 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
3578 return;
3579 }
3580 }
3581 /* update the Rwnd of the peer */
3582 a_rwnd = (u_int32_t)ntohl(sack->a_rwnd);
3583 if (asoc->sent_queue_retran_cnt) {
3584 #ifdef SCTP_DEBUG
3585 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3586 printf("cum_ack:%lx num_seg:%u last_acked_seq:%x\n",
3587 cum_ack, (u_int)num_seg, asoc->last_acked_seq);
3588 }
3589 #endif
3590 }
3591 if (compare_with_wrap(asoc->t3timeout_highest_marked, cum_ack, MAX_TSN)) {
3592 /* we are not allowed to mark for FR */
3593 marking_allowed = 0;
3594 }
3595 /**********************/
3596 /* 1) check the range */
3597 /**********************/
3598 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
3599 /* acking something behind */
3600 if (asoc->sent_queue_retran_cnt) {
3601 #ifdef SCTP_DEBUG
3602 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3603 printf("The cum-ack is behind us\n");
3604 }
3605 #endif
3606 }
3607 return;
3608 }
3609
3610 if (TAILQ_EMPTY(&asoc->sent_queue)) {
3611 /* nothing left on sendqueue.. consider done */
3612 #ifdef SCTP_LOG_RWND
3613 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
3614 asoc->peers_rwnd, 0, 0, a_rwnd);
3615 #endif
3616 asoc->peers_rwnd = a_rwnd;
3617 if (asoc->sent_queue_retran_cnt) {
3618 #ifdef SCTP_DEBUG
3619 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3620 printf("Huh? retran set but none on queue\n");
3621 }
3622 #endif
3623 asoc->sent_queue_retran_cnt = 0;
3624 }
3625 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3626 /* SWS sender side engages */
3627 asoc->peers_rwnd = 0;
3628 }
3629 /* stop any timers */
3630 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3631 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3632 stcb, net);
3633 net->partial_bytes_acked = 0;
3634 net->flight_size = 0;
3635 }
3636 asoc->total_flight = 0;
3637 asoc->total_flight_count = 0;
3638 return;
3639 }
3640 /*
3641 	 * We init net_ack and net_ack2 to 0. These are used to track 2
3642 	 * things. The total byte count acked is tracked in net_ack AND
3643 	 * net_ack2 is used to track the total bytes acked that are un-
3644 	 * ambiguous and were never retransmitted. We track these on a
3645 * per destination address basis.
3646 */
3647 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3648 net->prev_cwnd = net->cwnd;
3649 net->net_ack = 0;
3650 net->net_ack2 = 0;
3651 }
3652 /* process the new consecutive TSN first */
3653 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3654 while (tp1) {
3655 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
3656 MAX_TSN) ||
3657 last_tsn == tp1->rec.data.TSN_seq) {
3658 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3659 /* ECN Nonce: Add the nonce to the sender's nonce sum */
3660 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3661 accum_moved = 1;
3662 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3663 /*
3664 * If it is less than ACKED, it is now
3665 * no-longer in flight. Higher values
3666 * may occur during marking
3667 */
3668 if ((tp1->whoTo->dest_state &
3669 SCTP_ADDR_UNCONFIRMED) &&
3670 (tp1->snd_count < 2) ) {
3671 /*
3672 * If there was no retran and
3673 * the address is un-confirmed
3674 * and we sent there and are
3675 * now sacked.. its confirmed,
3676 * mark it so.
3677 */
3678 tp1->whoTo->dest_state &=
3679 ~SCTP_ADDR_UNCONFIRMED;
3680 }
3681 sctp_flight_size_decrease(tp1);
3682 sctp_total_flight_decrease(stcb, tp1);
3683 tp1->whoTo->net_ack += tp1->send_size;
3684 if (tp1->snd_count < 2) {
3685 /* True non-retransmitted chunk */
3686 tp1->whoTo->net_ack2 +=
3687 tp1->send_size;
3688 /* update RTO too? */
3689 if (tp1->do_rtt) {
3690 tp1->whoTo->RTO =
3691 sctp_calculate_rto(stcb,
3692 asoc, tp1->whoTo,
3693 &tp1->sent_rcv_time);
3694 tp1->whoTo->rto_pending = 0;
3695 tp1->do_rtt = 0;
3696 }
3697 }
3698 }
3699 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3700 #ifdef SCTP_DEBUG
3701 if (sctp_debug_on & SCTP_DEBUG_INDATA3) {
3702 printf("Hmm. one that is in RESEND that is now ACKED\n");
3703 }
3704 #endif
3705 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3706 #ifdef SCTP_AUDITING_ENABLED
3707 sctp_audit_log(0xB3,
3708 (asoc->sent_queue_retran_cnt & 0x000000ff));
3709 #endif
3710
3711 }
3712 tp1->sent = SCTP_DATAGRAM_ACKED;
3713 }
3714 } else {
3715 break;
3716 }
3717 tp1 = TAILQ_NEXT(tp1, sctp_next);
3718 }
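	/*
	 * Note: only chunks with snd_count < 2 above feed the RTO
	 * calculation; this is Karn's algorithm, since an RTT sample
	 * taken across a retransmission would be ambiguous.
	 */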
3719 /*******************************************/
3720 /* cancel ALL T3-send timer if accum moved */
3721 /*******************************************/
3722 if (accum_moved) {
3723 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3724 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3725 stcb, net);
3726 }
3727 }
3728 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
3729 /* always set this up to cum-ack */
3730 asoc->this_sack_highest_gap = last_tsn;
3731
3732 if (num_seg * sizeof(struct sctp_gap_ack_block) + sizeof(struct sctp_sack_chunk) > sack_length) {
3733 /* skip corrupt segments */
3734 strike_enabled = 0;
3735 goto skip_segments;
3736 }
3737
3738 if (num_seg > 0) {
3739 if (asoc->primary_destination->dest_state &
3740 SCTP_ADDR_SWITCH_PRIMARY) {
3741 /* clear the nets CACC flags */
3742 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3743 net->cacc_saw_newack = 0;
3744 }
3745 }
3746 /*
3747 * thisSackHighestGap will increase while handling NEW segments
3748 */
3749
3750 sctp_handle_segments(stcb, asoc, ch, last_tsn,
3751 &biggest_tsn_acked, &biggest_tsn_newly_acked,
3752 num_seg, &ecn_seg_sums);
3753
3754 if (sctp_strict_sacks) {
3755 /* validate the biggest_tsn_acked in the gap acks
3756 * if strict adherence is wanted.
3757 */
3758 if ((biggest_tsn_acked == send_s) ||
3759 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
3760 /*
3761 * peer is either confused or we are under
3762 * attack. We must abort.
3763 */
3764 goto hopeless_peer;
3765 }
3766 }
3767
3768 if (asoc->primary_destination->dest_state &
3769 SCTP_ADDR_SWITCH_PRIMARY) {
3770 /* clear the nets CACC flags */
3771 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3772 if (net->cacc_saw_newack) {
3773 cnt_of_cacc++;
3774 }
3775 }
3776 }
3777
3778 }
3779
3780 if (cnt_of_cacc < 2) {
3781 strike_enabled = 1;
3782 } else {
3783 strike_enabled = 0;
3784 }
3785 skip_segments:
3786 /********************************************/
3787 /* drop the acked chunks from the sendqueue */
3788 /********************************************/
3789 asoc->last_acked_seq = cum_ack;
3790 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3791 if ((cum_ack == asoc->primary_destination->next_tsn_at_change) ||
3792 (compare_with_wrap(cum_ack,
3793 asoc->primary_destination->next_tsn_at_change, MAX_TSN))) {
3794 struct sctp_nets *lnet;
3795 /* Turn off the switch flag for ALL addresses */
3796 			TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
3797 				lnet->dest_state &=
3798 				    ~(SCTP_ADDR_SWITCH_PRIMARY|SCTP_ADDR_DOUBLE_SWITCH);
3799 			}
3800 }
3801 }
3802 /* Drag along the t3 timeout point so we don't have a problem at wrap */
3803 if (marking_allowed) {
3804 asoc->t3timeout_highest_marked = cum_ack;
3805 }
3806 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3807 	while (tp1 != NULL) {
3808 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
3809 MAX_TSN)) {
3810 break;
3811 }
3812 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3813 /* no more sent on list */
3814 break;
3815 }
3816 tp2 = TAILQ_NEXT(tp1, sctp_next);
3817 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3818 if (tp1->data) {
3819 sctp_free_bufspace(stcb, asoc, tp1);
3820 #ifdef SCTP_DEBUG
3821 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3822 printf("--total out:%lu total_mbuf_out:%lu\n",
3823 (u_long)asoc->total_output_queue_size,
3824 (u_long)asoc->total_output_mbuf_queue_size);
3825 }
3826 #endif
3827
3828 sctp_m_freem(tp1->data);
3829 if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3830 asoc->sent_queue_cnt_removeable--;
3831 }
3832
3833 }
3834 tp1->data = NULL;
3835 asoc->sent_queue_cnt--;
3836 sctp_free_remote_addr(tp1->whoTo);
3837 sctppcbinfo.ipi_count_chunk--;
3838 asoc->chunks_on_out_queue--;
3839
3840 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3841 panic("Chunk count is going negative");
3842 }
3843 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, tp1);
3844 sctppcbinfo.ipi_gencnt_chunk++;
3845 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3846 tp1 = tp2;
3847 	}
3848
3849
3850 if (asoc->fast_retran_loss_recovery && accum_moved) {
3851 if (compare_with_wrap(asoc->last_acked_seq,
3852 asoc->fast_recovery_tsn, MAX_TSN) ||
3853 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
3854 /* Setup so we will exit RFC2582 fast recovery */
3855 will_exit_fast_recovery = 1;
3856 }
3857 }
3858
3859 	/* Check for revoked fragments if we had
3860 	 * fragments in a previous segment. If we
3861 	 * had no previous fragments we cannot have
3862 	 * a revoke issue.
3863 	 */
3864 if (asoc->saw_sack_with_frags)
3865 sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
3866
3867 if (num_seg)
3868 asoc->saw_sack_with_frags = 1;
3869 else
3870 asoc->saw_sack_with_frags = 0;
3871
3872 /******************************/
3873 /* update cwnd */
3874 /******************************/
3875 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3876 /* if nothing was acked on this destination skip it */
3877 if (net->net_ack == 0)
3878 continue;
3879
3880 if (net->net_ack2 > 0) {
3881 /*
3882 * Karn's rule applies to clearing error count,
3883 * this is optional.
3884 */
3885 net->error_count = 0;
3886 if ((net->dest_state&SCTP_ADDR_NOT_REACHABLE) ==
3887 SCTP_ADDR_NOT_REACHABLE) {
3888 /* addr came good */
3889 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3890 net->dest_state |= SCTP_ADDR_REACHABLE;
3891 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3892 SCTP_RECEIVED_SACK, (void *)net);
3893 /* now was it the primary? if so restore */
3894 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3895 sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
3896 }
3897 }
3898 }
3899
3900 if (asoc->fast_retran_loss_recovery &&
3901 will_exit_fast_recovery == 0) {
3902 /* If we are in loss recovery we skip any cwnd update */
3903 sctp_pegs[SCTP_CWND_SKIP]++;
3904 goto skip_cwnd_update;
3905 }
3906 if (accum_moved) {
3907 /* If the cumulative ack moved we can proceed */
3908 if (net->cwnd <= net->ssthresh) {
3909 /* We are in slow start */
3910 if (net->flight_size + net->net_ack >=
3911 net->cwnd ) {
3912 #ifdef SCTP_HIGH_SPEED
3913 sctp_hs_cwnd_increase(net);
3914 #else
3915 if (net->net_ack > net->mtu) {
3916 net->cwnd += net->mtu;
3917 #ifdef SCTP_CWND_LOGGING
3918 sctp_log_cwnd(net, net->mtu,
3919 SCTP_CWND_LOG_FROM_SS);
3920 #endif
3921
3922 } else {
3923 net->cwnd += net->net_ack;
3924 #ifdef SCTP_CWND_LOGGING
3925 sctp_log_cwnd(net, net->net_ack,
3926 SCTP_CWND_LOG_FROM_SS);
3927 #endif
3928
3929 }
3930 #endif
3931 sctp_pegs[SCTP_CWND_SS]++;
3932 } else {
3933 unsigned int dif;
3934 sctp_pegs[SCTP_CWND_NOUSE_SS]++;
3935 dif = net->cwnd - (net->flight_size +
3936 net->net_ack);
3937 #ifdef SCTP_CWND_LOGGING
3938 /* sctp_log_cwnd(net, net->net_ack,
3939 SCTP_CWND_LOG_NOADV_SS);*/
3940 #endif
3941 if (dif > sctp_pegs[SCTP_CWND_DIFF_SA]) {
3942 sctp_pegs[SCTP_CWND_DIFF_SA] =
3943 dif;
3944 sctp_pegs[SCTP_OQS_AT_SS] =
3945 asoc->total_output_queue_size;
3946 sctp_pegs[SCTP_SQQ_AT_SS] =
3947 asoc->sent_queue_cnt;
3948 sctp_pegs[SCTP_SQC_AT_SS] =
3949 asoc->send_queue_cnt;
3950 }
3951 }
3952 } else {
3953 /* We are in congestion avoidance */
3954 if (net->flight_size + net->net_ack >=
3955 net->cwnd) {
3956 /*
3957 * add to pba only if we had a cwnd's
3958 * worth (or so) in flight OR the
3959 * burst limit was applied.
3960 */
3961 net->partial_bytes_acked +=
3962 net->net_ack;
3963
3964 /*
3965 * Do we need to increase
3966 * (if pba is > cwnd)?
3967 */
3968 if (net->partial_bytes_acked >=
3969 net->cwnd) {
3970 if (net->cwnd <
3971 net->partial_bytes_acked) {
3972 net->partial_bytes_acked -=
3973 net->cwnd;
3974 } else {
3975 net->partial_bytes_acked =
3976 0;
3977 }
3978 net->cwnd += net->mtu;
3979 #ifdef SCTP_CWND_LOGGING
3980 sctp_log_cwnd(net, net->mtu,
3981 SCTP_CWND_LOG_FROM_CA);
3982 #endif
3983 sctp_pegs[SCTP_CWND_CA]++;
3984 }
3985 } else {
3986 unsigned int dif;
3987 sctp_pegs[SCTP_CWND_NOUSE_CA]++;
3988 #ifdef SCTP_CWND_LOGGING
3989 /* sctp_log_cwnd(net, net->net_ack,
3990 SCTP_CWND_LOG_NOADV_CA);
3991 */
3992 #endif
3993 dif = net->cwnd - (net->flight_size +
3994 net->net_ack);
3995 if (dif > sctp_pegs[SCTP_CWND_DIFF_CA]) {
3996 sctp_pegs[SCTP_CWND_DIFF_CA] =
3997 dif;
3998 sctp_pegs[SCTP_OQS_AT_CA] =
3999 asoc->total_output_queue_size;
4000 sctp_pegs[SCTP_SQQ_AT_CA] =
4001 asoc->sent_queue_cnt;
4002 sctp_pegs[SCTP_SQC_AT_CA] =
4003 asoc->send_queue_cnt;
4004
4005 }
4006
4007 }
4008 }
4009 } else {
4010 sctp_pegs[SCTP_CWND_NOCUM]++;
4011 }
4012 skip_cwnd_update:
4013 		/*
4014 		 * NOW, according to Karn's rule, do we need to restore the
4015 		 * RTO timer? Check our net_ack2. If it is not set then we
4016 		 * have an ambiguity, i.e. all data ack'd was sent to more
4017 		 * than one place.
4018 		 */
4019
4020 if (net->net_ack2) {
4021 /* restore any doubled timers */
4022 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
4023 if (net->RTO < stcb->asoc.minrto) {
4024 net->RTO = stcb->asoc.minrto;
4025 }
4026 if (net->RTO > stcb->asoc.maxrto) {
4027 net->RTO = stcb->asoc.maxrto;
4028 }
4029 }
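		/*
		 * Sketch of the restore arithmetic above, using the
		 * hypothetical scaled values lastsa = 800 and lastsv = 200:
		 *
		 *   RTO = ((800 >> 2) + 200) >> 1 = (200 + 200) >> 1 = 200
		 *
		 * which is then clamped to [minrto, maxrto]. Any exponential
		 * backoff from earlier T3 timeouts is dropped in favor of
		 * this smoothed estimate.
		 */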
4030 if (net->cwnd > sctp_pegs[SCTP_MAX_CWND]) {
4031 sctp_pegs[SCTP_MAX_CWND] = net->cwnd;
4032 }
4033 }
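	/*
	 * Worked example of the cwnd updates above (hypothetical numbers,
	 * mtu = 1500). Slow start with cwnd = 4500, ssthresh = 6000 and
	 * net_ack = 4000 (window was full): the increase is capped at one
	 * MTU, so cwnd becomes 6000. Congestion avoidance with
	 * cwnd = 9000: net_ack accumulates in partial_bytes_acked, and
	 * once that reaches 9000 cwnd grows by a single MTU to 10500 and
	 * partial_bytes_acked is reduced by one cwnd, giving the usual
	 * roughly one-MTU-per-RTT growth.
	 */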
4034 /**********************************/
4035 /* Now what about shutdown issues */
4036 /**********************************/
4037 some_on_streamwheel = 0;
4038 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
4039 /* Check to see if some data queued */
4040 struct sctp_stream_out *outs;
4041 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
4042 if (!TAILQ_EMPTY(&outs->outqueue)) {
4043 some_on_streamwheel = 1;
4044 break;
4045 }
4046 }
4047 }
4048 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) &&
4049 some_on_streamwheel == 0) {
4050 /* nothing left on sendqueue.. consider done */
4051 /* stop all timers */
4052 #ifdef SCTP_LOG_RWND
4053 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4054 asoc->peers_rwnd, 0, 0, a_rwnd);
4055 #endif
4056 asoc->peers_rwnd = a_rwnd;
4057 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4058 /* SWS sender side engages */
4059 asoc->peers_rwnd = 0;
4060 }
4061 /* stop any timers */
4062 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4063 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4064 stcb, net);
4065 net->flight_size = 0;
4066 net->partial_bytes_acked = 0;
4067 }
4068 asoc->total_flight = 0;
4069 asoc->total_flight_count = 0;
4070 /* clean up */
4071 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
4072 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4073 #ifdef SCTP_DEBUG
4074 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4075 printf("%s:%d sends a shutdown\n",
4076 __FILE__,
4077 __LINE__
4078 );
4079 }
4080 #endif
4081 sctp_send_shutdown(stcb,
4082 stcb->asoc.primary_destination);
4083 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4084 stcb->sctp_ep, stcb, asoc->primary_destination);
4085 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4086 stcb->sctp_ep, stcb, asoc->primary_destination);
4087 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) {
4088 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
4089
4090 sctp_send_shutdown_ack(stcb,
4091 stcb->asoc.primary_destination);
4092
4093 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4094 stcb->sctp_ep, stcb, asoc->primary_destination);
4095 }
4096 return;
4097 }
4098 /*
4099 * Now here we are going to recycle net_ack for a different
4100 * use... HEADS UP.
4101 */
4102 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4103 net->net_ack = 0;
4104 }
4105 if ((num_seg > 0) && marking_allowed) {
4106 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4107 strike_enabled, biggest_tsn_newly_acked, accum_moved);
4108 }
4109
4110 /*********************************************/
4111 /* Here we perform PR-SCTP procedures */
4112 /* (section 4.2) */
4113 /*********************************************/
4114 /* C1. update advancedPeerAckPoint */
4115 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4116 asoc->advanced_peer_ack_point = cum_ack;
4117 }
4118 /* C2. try to further move advancedPeerAckPoint ahead */
4119 if (asoc->peer_supports_prsctp) {
4120 struct sctp_tmit_chunk *lchk;
4121 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4122 /* C3. See if we need to send a Fwd-TSN */
4123 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
4124 MAX_TSN)) {
4125 /*
4126 * ISSUE with ECN, see FWD-TSN processing for notes
4127 * on issues that will occur when the ECN NONCE stuff
4128 * is put into SCTP for cross checking.
4129 */
4130 send_forward_tsn(stcb, asoc);
4131
4132 /* ECN Nonce: Disable Nonce Sum check when FWD TSN is sent and store resync tsn*/
4133 asoc->nonce_sum_check = 0;
4134 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4135 if (lchk) {
4136 /* Assure a timer is up */
4137 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4138 stcb->sctp_ep, stcb, lchk->whoTo);
4139 }
4140 }
4141 }
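	/*
	 * Illustrative PR-SCTP walk-through with hypothetical TSNs:
	 * with cum_ack = 100 and abandoned chunks 101-103 still on the
	 * sent queue, sctp_try_advance_peer_ack_point() can move
	 * advanced_peer_ack_point up to 103. Since 103 is beyond the
	 * cum-ack of 100, a FWD-TSN carrying 103 is sent so the peer
	 * skips the abandoned data.
	 */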
4142 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4143 if (asoc->fast_retran_loss_recovery == 0) {
4144 		/* out of an RFC2582 Fast recovery window? */
4145 if (net->net_ack > 0) {
4146 /*
4147 * per section 7.2.3, are there
4148 * any destinations that had a fast
4149 * retransmit to them. If so what we
4150 * need to do is adjust ssthresh and
4151 * cwnd.
4152 */
4153 struct sctp_tmit_chunk *lchk;
4154 #ifdef SCTP_HIGH_SPEED
4155 sctp_hs_cwnd_decrease(net);
4156 #else
4157 #ifdef SCTP_CWND_LOGGING
4158 int old_cwnd = net->cwnd;
4159 #endif
4160 net->ssthresh = net->cwnd / 2;
4161 if (net->ssthresh < (net->mtu*2)) {
4162 net->ssthresh = 2 * net->mtu;
4163 }
4164 net->cwnd = net->ssthresh;
4165 #ifdef SCTP_CWND_LOGGING
4166 sctp_log_cwnd(net, (net->cwnd-old_cwnd),
4167 SCTP_CWND_LOG_FROM_FR);
4168 #endif
4169 #endif
4170
4171 lchk = TAILQ_FIRST(&asoc->send_queue);
4172
4173 net->partial_bytes_acked = 0;
4174 /* Turn on fast recovery window */
4175 asoc->fast_retran_loss_recovery = 1;
4176 if (lchk == NULL) {
4177 /* Mark end of the window */
4178 asoc->fast_recovery_tsn = asoc->sending_seq - 1;
4179 } else {
4180 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
4181 }
4182
4183
4184 /* Disable Nonce Sum Checking and store the resync tsn*/
4185 asoc->nonce_sum_check = 0;
4186 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
4187
4188 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
4189 stcb->sctp_ep, stcb, net);
4190 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4191 stcb->sctp_ep, stcb, net);
4192 }
4193 } else if (net->net_ack > 0) {
4194 /*
4195 * Mark a peg that we WOULD have done a cwnd reduction
4196 * but RFC2582 prevented this action.
4197 */
4198 sctp_pegs[SCTP_FR_INAWINDOW]++;
4199 }
4200 }
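	/*
	 * Example of the fast-retransmit reduction above (hypothetical
	 * numbers, mtu = 1500): cwnd = 12000 halves to ssthresh = 6000
	 * with cwnd set to match; with cwnd = 2000 the halved value of
	 * 1000 is below the 2 * mtu = 3000 floor, so ssthresh and cwnd
	 * both become 3000.
	 */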
4201
4202
4203 	/******************************************************************
4204 	 * Here we do the ECN Nonce checking.
4205 	 * We basically check whether the nonce sum flag was incorrect
4206 	 * or whether resynchronization needs to be done. Also, if we
4207 	 * catch a misbehaving receiver, we disable ECN for the association.
4208 	 ******************************************************************/
4209
4210 if (asoc->ecn_nonce_allowed) {
4211 if (asoc->nonce_sum_check) {
4212 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
4213 if (asoc->nonce_wait_for_ecne == 0) {
4214 struct sctp_tmit_chunk *lchk;
4215 lchk = TAILQ_FIRST(&asoc->send_queue);
4216 asoc->nonce_wait_for_ecne = 1;
4217 if (lchk) {
4218 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4219 } else {
4220 asoc->nonce_wait_tsn = asoc->sending_seq;
4221 }
4222 } else {
4223 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4224 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4225 /* Misbehaving peer. We need to react to this guy */
4226 printf("Mis-behaving peer detected\n");
4227 asoc->ecn_allowed = 0;
4228 asoc->ecn_nonce_allowed = 0;
4229 }
4230 }
4231 }
4232 } else {
4233 /* See if Resynchronization Possible */
4234 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4235 asoc->nonce_sum_check = 1;
4236 				/* now we must calculate what the base
4237 				 * is. We do this based on two things: we know
4238 				 * the totals for all the segments gap-acked
4239 				 * in the SACK (stored in ecn_seg_sums), and we
4240 				 * know the SACK's nonce sum (in nonce_sum_flag).
4241 				 * So we can build a truth table to back-calculate
4242 				 * the new value of asoc->nonce_sum_expect_base:
4243 				 *
4244 				 * SACK-flag-Value	Seg-Sums	Base
4245 				 *	0		   0		 0
4246 				 *	1		   0		 1
4247 				 *	0		   1		 1
4248 				 *	1		   1		 0
4249 */
4250 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4251 }
4252 }
4253 }
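	/*
	 * The truth table above is simply XOR; the resync at the end
	 * recomputes the base as
	 *
	 *   base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
	 *
	 * e.g. a SACK flag of 1 with seg-sums of 0 yields a base of 1,
	 * matching the second row of the table.
	 */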
4254 /* Now are we exiting loss recovery ? */
4255 if (will_exit_fast_recovery) {
4256 /* Ok, we must exit fast recovery */
4257 asoc->fast_retran_loss_recovery = 0;
4258 }
4259 if ((asoc->sat_t3_loss_recovery) &&
4260 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
4261 MAX_TSN) ||
4262 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
4263 /* end satellite t3 loss recovery */
4264 asoc->sat_t3_loss_recovery = 0;
4265 }
4266 /* Adjust and set the new rwnd value */
4267 #ifdef SCTP_LOG_RWND
4268 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4269 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
4270 #endif
4271
4272 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4273 (u_int32_t)(asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4274 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4275 /* SWS sender side engages */
4276 asoc->peers_rwnd = 0;
4277 }
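	/*
	 * Sketch of the rwnd computation above with hypothetical numbers:
	 * a_rwnd = 65535, total_flight = 12000, sent_queue_cnt = 10 and
	 * sctp_peer_chunk_oh = 256 give
	 *
	 *   peers_rwnd = 65535 - (12000 + 10 * 256) = 50975
	 *
	 * and a result below the sctp_sws_sender threshold is forced to 0
	 * so we do not dribble out silly-window-sized packets.
	 */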
4278 /*
4279 * Now we must setup so we have a timer up for anyone with
4280 * outstanding data.
4281 */
4282 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4283 struct sctp_tmit_chunk *chk;
4284 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4285 if (chk->whoTo == net &&
4286 (chk->sent < SCTP_DATAGRAM_ACKED ||
4287 chk->sent == SCTP_FORWARD_TSN_SKIP)) {
4288 /*
4289 * Not ack'ed and still outstanding to this
4290 * destination or marked and must be
4291 * sacked after fwd-tsn sent.
4292 */
4293 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4294 stcb->sctp_ep, stcb, net);
4295 break;
4296 }
4297 }
4298 }
4299 }
4300
4301 void
4302 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
4303 struct sctp_nets *netp, int *abort_flag)
4304 {
4305 /* Mutate a shutdown into a SACK */
4306 struct sctp_sack_chunk sack;
4307
4308 /* Copy cum-ack */
4309 sack.sack.cum_tsn_ack = cp->cumulative_tsn_ack;
4310 /* Arrange so a_rwnd does NOT change */
4311 sack.ch.chunk_type = SCTP_SELECTIVE_ACK;
4312 sack.ch.chunk_flags = 0;
4313 	sack.ch.chunk_length = htons(sizeof(struct sctp_sack_chunk));
4314 sack.sack.a_rwnd =
4315 htonl(stcb->asoc.peers_rwnd + stcb->asoc.total_flight);
4316 	/*
4317 	 * no gaps in this one. This may give a temporary appearance of
4318 	 * reneging, but hopefully the second chunk in the packet is a
4319 	 * true SACK and will correct this view. One will come soon
4320 	 * after no matter what, to fix this.
4321 	 */
4322 sack.sack.num_gap_ack_blks = 0;
4323 sack.sack.num_dup_tsns = 0;
4324 /* Now call the SACK processor */
4325 sctp_handle_sack(&sack, stcb, netp, abort_flag);
4326 }
4327
4328 static void
4329 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4330 struct sctp_stream_in *strmin)
4331 {
4332 struct sctp_tmit_chunk *chk, *nchk;
4333 struct sctp_association *asoc;
4334 int tt;
4335
4336 asoc = &stcb->asoc;
4337 tt = strmin->last_sequence_delivered;
4338 	/*
4339 	 * First deliver anything prior to and including the stream
4340 	 * sequence number that came in
4341 	 */
4342 chk = TAILQ_FIRST(&strmin->inqueue);
4343 while (chk) {
4344 nchk = TAILQ_NEXT(chk, sctp_next);
4345 if (compare_with_wrap(tt, chk->rec.data.stream_seq, MAX_SEQ) ||
4346 (tt == chk->rec.data.stream_seq)) {
4347 /* this is deliverable now */
4348 TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4349 /* subtract pending on streams */
4350 asoc->size_on_all_streams -= chk->send_size;
4351 asoc->cnt_on_all_streams--;
4352 /* deliver it to at least the delivery-q */
4353 sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4354 } else {
4355 /* no more delivery now. */
4356 break;
4357 }
4358 chk = nchk;
4359 }
4360 /*
4361 * now we must deliver things in queue the normal way if any
4362 * are now ready.
4363 */
4364 tt = strmin->last_sequence_delivered + 1;
4365 chk = TAILQ_FIRST(&strmin->inqueue);
4366 while (chk) {
4367 nchk = TAILQ_NEXT(chk, sctp_next);
4368 if (tt == chk->rec.data.stream_seq) {
4369 /* this is deliverable now */
4370 TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4371 /* subtract pending on streams */
4372 asoc->size_on_all_streams -= chk->send_size;
4373 asoc->cnt_on_all_streams--;
4374 /* deliver it to at least the delivery-q */
4375 strmin->last_sequence_delivered =
4376 chk->rec.data.stream_seq;
4377 sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4378 tt = strmin->last_sequence_delivered + 1;
4379 } else {
4380 break;
4381 }
4382 chk = nchk;
4383 }
4384
4385 }
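/*
 * Illustrative run of the two passes above, with hypothetical sequence
 * numbers: suppose a FWD-TSN advanced last_sequence_delivered to 7 and
 * the inqueue holds SSNs 5, 6, 8, 9 and 11. The first pass delivers 5
 * and 6 (both <= 7); the second pass then delivers the consecutive run
 * 8 and 9, and stops at the hole before 11.
 */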
4386
4387 void
4388 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
4389 struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
4390 {
4391 /*
4392 * ISSUES that MUST be fixed for ECN! When we are the
4393 * sender of the forward TSN, when the SACK comes back
4394 * that acknowledges the FWD-TSN we must reset the
4395 * NONCE sum to match correctly. This will get quite
4396 	 * tricky since we may have sent more data intervening and
4397 * must carefully account for what the SACK says on the
4398 * nonce and any gaps that are reported. This work
4399 * will NOT be done here, but I note it here since
4400 * it is really related to PR-SCTP and FWD-TSN's
4401 */
4402
4403 /* The pr-sctp fwd tsn */
4404 /*
4405 * here we will perform all the data receiver side steps for
4406 	 * processing FwdTSN, as required by the PR-SCTP draft:
4407 *
4408 * Assume we get FwdTSN(x):
4409 *
4410 * 1) update local cumTSN to x
4411 * 2) try to further advance cumTSN to x + others we have
4412 * 3) examine and update re-ordering queue on pr-in-streams
4413 * 4) clean up re-assembly queue
4414 * 5) Send a sack to report where we are.
4415 */
4416 struct sctp_strseq *stseq;
4417 struct sctp_association *asoc;
4418 u_int32_t new_cum_tsn, gap, back_out_htsn;
4419 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
4420 struct sctp_stream_in *strm;
4421 struct sctp_tmit_chunk *chk, *at;
4422
4423 cumack_set_flag = 0;
4424 asoc = &stcb->asoc;
4425 cnt_gone = 0;
4426 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
4427 #ifdef SCTP_DEBUG
4428 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4429 printf("Bad size too small/big fwd-tsn\n");
4430 }
4431 #endif
4432 return;
4433 }
4434 m_size = (stcb->asoc.mapping_array_size << 3);
4435 /*************************************************************/
4436 /* 1. Here we update local cumTSN and shift the bitmap array */
4437 /*************************************************************/
4438 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
4439
4440 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
4441 asoc->cumulative_tsn == new_cum_tsn) {
4442 /* Already got there ... */
4443 return;
4444 }
4445
4446 back_out_htsn = asoc->highest_tsn_inside_map;
4447 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
4448 MAX_TSN)) {
4449 asoc->highest_tsn_inside_map = new_cum_tsn;
4450 #ifdef SCTP_MAP_LOGGING
4451 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4452 #endif
4453 }
4454 /*
4455 * now we know the new TSN is more advanced, let's find the
4456 * actual gap
4457 */
4458 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
4459 MAX_TSN)) ||
4460 (new_cum_tsn == asoc->mapping_array_base_tsn)) {
4461 gap = new_cum_tsn - asoc->mapping_array_base_tsn;
4462 } else {
4463 /* try to prevent underflow here */
4464 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4465 }
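	/*
	 * Worked example of the gap arithmetic above with hypothetical
	 * TSNs: mapping_array_base_tsn = 0xfffffff0 and
	 * new_cum_tsn = 0x00000005 take the wrap branch, giving
	 *
	 *   gap = 5 + (0xffffffff - 0xfffffff0) + 1 = 21
	 *
	 * i.e. offset 21 in the mapping array corresponds to the new
	 * cumulative TSN.
	 */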
4466
4467 if (gap >= m_size) {
4468 asoc->highest_tsn_inside_map = back_out_htsn;
4469 if ((long)gap > sctp_sbspace(&stcb->sctp_socket->so_rcv)) {
4470 			/*
4471 			 * out of range (of the single-byte chunks in the rwnd
4472 			 * I give out); too questionable, better to drop it
4473 			 * silently
4474 			 */
4475 return;
4476 }
4477 if (asoc->highest_tsn_inside_map >
4478 asoc->mapping_array_base_tsn) {
4479 gap = asoc->highest_tsn_inside_map -
4480 asoc->mapping_array_base_tsn;
4481 } else {
4482 gap = asoc->highest_tsn_inside_map +
4483 (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4484 }
4485 cumack_set_flag = 1;
4486 }
4487 for (i = 0; i <= gap; i++) {
4488 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
4489 }
4490 /*
4491 * Now after marking all, slide thing forward but no
4492 * sack please.
4493 */
4494 sctp_sack_check(stcb, 0, 0, abort_flag);
4495 if (*abort_flag)
4496 return;
4497
4498 if (cumack_set_flag) {
4499 /*
4500 * fwd-tsn went outside my gap array - not a
4501 * common occurrence. Do the same thing we
4502 * do when a cookie-echo arrives.
4503 */
4504 asoc->highest_tsn_inside_map = new_cum_tsn - 1;
4505 asoc->mapping_array_base_tsn = new_cum_tsn;
4506 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
4507 #ifdef SCTP_MAP_LOGGING
4508 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4509 #endif
4510 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
4511 }
4512 /*************************************************************/
4513 /* 2. Clear up re-assembly queue */
4514 /*************************************************************/
4515
4516 /*
4517 * First service it if pd-api is up, just in case we can
4518 * progress it forward
4519 */
4520 if (asoc->fragmented_delivery_inprogress) {
4521 sctp_service_reassembly(stcb, asoc, 0);
4522 }
4523 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
4524 /* For each one on here see if we need to toss it */
4525 /*
4526 		 * For now, large messages held on the reasmqueue that are
4527 		 * complete will be tossed too. We could in theory do more
4528 		 * work to spin through and stop after dumping one msg (i.e.
4529 		 * upon seeing the start of a new msg at the head) and call
4530 		 * the delivery function to see if it can be delivered.
4531 		 * But for now we just dump everything on the queue.
4532 */
4533 chk = TAILQ_FIRST(&asoc->reasmqueue);
4534 while (chk) {
4535 at = TAILQ_NEXT(chk, sctp_next);
4536 if (compare_with_wrap(asoc->cumulative_tsn,
4537 chk->rec.data.TSN_seq, MAX_TSN) ||
4538 asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
4539 /* It needs to be tossed */
4540 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4541 if (compare_with_wrap(chk->rec.data.TSN_seq,
4542 asoc->tsn_last_delivered, MAX_TSN)) {
4543 asoc->tsn_last_delivered =
4544 chk->rec.data.TSN_seq;
4545 asoc->str_of_pdapi =
4546 chk->rec.data.stream_number;
4547 asoc->ssn_of_pdapi =
4548 chk->rec.data.stream_seq;
4549 asoc->fragment_flags =
4550 chk->rec.data.rcv_flags;
4551 }
4552 asoc->size_on_reasm_queue -= chk->send_size;
4553 asoc->cnt_on_reasm_queue--;
4554 cnt_gone++;
4555
4556 /* Clear up any stream problem */
4557 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
4558 SCTP_DATA_UNORDERED &&
4559 (compare_with_wrap(chk->rec.data.stream_seq,
4560 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
4561 MAX_SEQ))) {
4562 /*
4563 					 * We must dump forward this stream's
4564 					 * sequence number if the chunk being
4565 					 * skipped is not unordered.
4566 					 * There is a chance that if the peer
4567 					 * does not include the last fragment
4568 					 * in its FWD-TSN we WILL have a problem
4569 					 * here, since we would have a partial
4570 					 * chunk in queue that may not be
4571 					 * deliverable.
4572 					 * Also if a partial delivery API has
4573 					 * started, the user may get a partial
4574 					 * chunk; the next read then returns a
4575 					 * new chunk... really ugly but I see no
4576 					 * way around it! Maybe a notify??
4577 */
4578 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
4579 chk->rec.data.stream_seq;
4580 }
4581 sctp_m_freem(chk->data);
4582 chk->data = NULL;
4583 sctp_free_remote_addr(chk->whoTo);
4584 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4585 sctppcbinfo.ipi_count_chunk--;
4586 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4587 panic("Chunk count is negative");
4588 }
4589 sctppcbinfo.ipi_gencnt_chunk++;
4590 } else {
4591 /*
4592 * Ok we have gone beyond the end of the
4593 * fwd-tsn's mark. Some checks...
4594 */
4595 if ((asoc->fragmented_delivery_inprogress) &&
4596 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4597 				/* Special case: PD-API is up and what we
4598 				 * fwd-tsn'd over includes one that had the
4599 				 * LAST_FRAG, so we no longer need the PD-API.
4600 */
4601 asoc->fragmented_delivery_inprogress = 0;
4602 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4603 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
4604
4605 }
4606 break;
4607 }
4608 chk = at;
4609 }
4610 }
4611 if (asoc->fragmented_delivery_inprogress) {
4612 /*
4613 * Ok we removed cnt_gone chunks in the PD-API queue that
4614 * were being delivered. So now we must turn off the
4615 * flag.
4616 */
4617 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4618 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL);
4619 asoc->fragmented_delivery_inprogress = 0;
4620 }
4621 /*************************************************************/
4622 /* 3. Update the PR-stream re-ordering queues */
4623 /*************************************************************/
4624 stseq = (struct sctp_strseq *)((vaddr_t)fwd + sizeof(*fwd));
4625 fwd_sz -= sizeof(*fwd);
4626 {
4627 /* New method. */
4628 int num_str;
4629 num_str = fwd_sz/sizeof(struct sctp_strseq);
4630 #ifdef SCTP_DEBUG
4631 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4632 printf("Using NEW method, %d strseq's reported in FWD-TSN\n",
4633 num_str);
4634 }
4635 #endif
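		/*
		 * Sizing sketch, assuming the usual wire layout of an
		 * 8-byte FWD-TSN header (chunk header plus
		 * new_cumulative_tsn) followed by 4-byte stream/sequence
		 * pairs: a chunk_length of 16 leaves fwd_sz = 8 after the
		 * header, so num_str = 8 / 4 = 2 entries are processed in
		 * the loop below.
		 */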
4636 for (i = 0; i < num_str; i++) {
4637 u_int16_t st;
4643 st = ntohs(stseq[i].stream);
4644 stseq[i].stream = st;
4645 st = ntohs(stseq[i].sequence);
4646 stseq[i].sequence = st;
4647 /* now process */
4648 			if (stseq[i].stream >= asoc->streamincnt) {
4649 #ifdef SCTP_DEBUG
4650 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4651 printf("Bogus stream number %d "
4652 "streamincnt is %d\n",
4653 stseq[i].stream, asoc->streamincnt);
4654 }
4655 #endif
4656 /*
4657 * It is arguable if we should continue. Since
4658 * the peer sent bogus stream info we may be in
4659 * deep trouble..
4660 * a return may be a better choice?
4661 */
4662 continue;
4663 }
4664 strm = &asoc->strmin[stseq[i].stream];
4665 if (compare_with_wrap(stseq[i].sequence,
4666 strm->last_sequence_delivered, MAX_SEQ)) {
4667 /* Update the sequence number */
4668 strm->last_sequence_delivered =
4669 stseq[i].sequence;
4670 }
4671 /* now kick the stream the new way */
4672 sctp_kick_prsctp_reorder_queue(stcb, strm);
4673 }
4674 }
4675 }
4676