/*	$NetBSD: sctp_indata.c,v 1.18 2025/04/17 18:53:23 andvar Exp $ */
/*	$KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	*/

/*
 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sctp_indata.c,v 1.18 2025/04/17 18:53:23 andvar Exp $");

#ifdef _KERNEL_OPT
#include "opt_ipsec.h"
#include "opt_inet.h"
#include "opt_sctp.h"
#endif /* _KERNEL_OPT */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
#include <sys/limits.h>
#else
#include <machine/limits.h>
#endif
#include <machine/cpu.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#endif /* INET6 */
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_hashdriver.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/key.h>
#endif /* IPSEC */

#ifdef SCTP_DEBUG
extern u_int32_t sctp_debug_on;
#endif

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from.  This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
extern int sctp_strict_sacks;

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	u_int32_t calc, calc_w_oh;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
		printf("cc:%lu hiwat:%lu lowat:%lu mbcnt:%lu mbmax:%lu\n",
		    (u_long)stcb->sctp_socket->so_rcv.sb_cc,
		    (u_long)stcb->sctp_socket->so_rcv.sb_hiwat,
		    (u_long)stcb->sctp_socket->so_rcv.sb_lowat,
		    (u_long)stcb->sctp_socket->so_rcv.sb_mbcnt,
		    (u_long)stcb->sctp_socket->so_rcv.sb_mbmax);
		printf("Setting rwnd to: sb:%ld - (del:%d + reasm:%d str:%d)\n",
		    sctp_sbspace(&stcb->sctp_socket->so_rcv),
		    asoc->size_on_delivery_queue,
		    asoc->size_on_reasm_queue,
		    asoc->size_on_all_streams);
	}
#endif
	if (stcb->sctp_socket->so_rcv.sb_cc == 0 &&
	    asoc->size_on_delivery_queue == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		asoc->my_rwnd = uimax(stcb->sctp_socket->so_rcv.sb_hiwat,
		    SCTP_MINIMAL_RWND);
		return;
	}
	/* get actual space */
	calc = (u_int32_t)sctp_sbspace(&stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and
	 * we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_delivery_queue);
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_all_streams);

	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);

	asoc->my_rwnd = calc;
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised
		 * rwnd, we clamp the rwnd to 1.  This lets us
		 * still accept inbound segments, but hopefully will
		 * shut the sender down when he finally gets the message.
		 */
		asoc->my_rwnd = 1;
	} else {
		/* SWS threshold */
		if (asoc->my_rwnd &&
		    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
			/* SWS engaged, tell peer none left */
			asoc->my_rwnd = 1;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
				printf(" - SWS zeros\n");
			}
		} else {
			if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
				printf("\n");
			}
#endif
		}
	}
}
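
/*
 * A worked example of the calculation above (figures illustrative only):
 * with sb_hiwat = 64 KB, 16 KB already sitting in so_rcv and 4 KB held
 * across the delivery/reasm/stream queues, the advertised rwnd comes out
 * to roughly 65536 - 16384 - 4096 = 45056 bytes.  If the result is
 * non-zero but below sctp_sws_receiver, we advertise 1 byte instead,
 * which is the usual receive-side silly-window-syndrome avoidance.
 */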

/*
 * Take a chk structure and build it into an mbuf.  Hmm, should we change
 * things so that instead we store the data side in a chunk?
 */
static struct mbuf *
sctp_build_ctl_nchunk(struct sctp_tcb *stcb, uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no, uint16_t stream_seq, uint8_t flags)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}

	MGETHDR(ret, M_DONTWAIT, MT_CONTROL);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outinfo->sinfo_stream = stream_no;
	outinfo->sinfo_ssn = stream_seq;
	if (flags & SCTP_DATA_UNORDERED) {
		outinfo->sinfo_flags = SCTP_UNORDERED;
	} else {
		outinfo->sinfo_flags = 0;
	}
	outinfo->sinfo_ppid = ppid;
	outinfo->sinfo_context = context;
	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
	outinfo->sinfo_tsn = tsn;
	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
	ret->m_len = cmh->cmsg_len;
	ret->m_pkthdr.len = ret->m_len;
	/*
	 * We track how many control lengths have gone up on the sb
	 * and do not count these in the rwnd calculation.
	 */
	stcb->asoc.my_rwnd_control_len +=
	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

	return (ret);
}
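
/*
 * For reference, a userland consumer would find the structure built above
 * by the standard cmsg walk after recvmsg(2); a minimal sketch (not kernel
 * code, the msghdr variable "msg" is hypothetical):
 *
 *	struct cmsghdr *cmsg;
 *	struct sctp_sndrcvinfo *info = NULL;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *		    cmsg->cmsg_type == SCTP_SNDRCV)
 *			info = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	}
 */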

/*
 * Take a chk structure and build it into an mbuf.  Should we change things
 * so that instead we store the data side in a chunk?
 */
static
struct mbuf *
sctp_build_ctl(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	MGET(ret, M_DONTWAIT, MT_CONTROL);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outinfo->sinfo_stream = chk->rec.data.stream_number;
	outinfo->sinfo_ssn = chk->rec.data.stream_seq;
	if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
		outinfo->sinfo_flags = SCTP_UNORDERED;
	} else {
		outinfo->sinfo_flags = 0;
	}
	outinfo->sinfo_ppid = chk->rec.data.payloadtype;
	outinfo->sinfo_context = chk->rec.data.context;
	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
	outinfo->sinfo_tsn = chk->rec.data.TSN_seq;
	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
	ret->m_len = cmh->cmsg_len;
	stcb->asoc.my_rwnd_control_len +=
	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

	return (ret);
}
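
/*
 * Note on the two builders: they fill in the same sctp_sndrcvinfo, one
 * from explicit arguments and one from a chunk record.  The observable
 * difference is that sctp_build_ctl_nchunk() allocates a packet-header
 * mbuf (MGETHDR) and sets m_pkthdr.len, while sctp_build_ctl() uses a
 * plain MGET and leaves the pkthdr to the data chain it accompanies.
 */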

int
sctp_deliver_data(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_locks)
{
	struct mbuf *control, *m;
	int free_it;
	struct sockaddr_in6 sin6;
	const struct sockaddr *to;

#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("I am now in Deliver data! (%p)\n", chk);
	}
#endif
	/* get a write lock on the inp if not already */
	if (hold_locks == 0) {
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
	}
	free_it = 0;
	/* We always add it to the queue */
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket above is long gone */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("gone is gone!\n");
		}
#endif
		if (chk != NULL) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		TAILQ_FOREACH(chk, &asoc->delivery_queue, sctp_next) {
			asoc->size_on_delivery_queue -= chk->send_size;
			asoc->cnt_on_delivery_queue--;
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			sctp_m_freem(chk->data);
			chk->data = NULL;
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}
	if (chk != NULL) {
		TAILQ_INSERT_TAIL(&asoc->delivery_queue, chk, sctp_next);
		asoc->size_on_delivery_queue += chk->send_size;
		asoc->cnt_on_delivery_queue++;
	}
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * oh oh, fragmented delivery in progress,
		 * return out of here.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Fragmented delivery in progress?\n");
		}
#endif
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}
	/* Now grab the first one */
	chk = TAILQ_FIRST(&asoc->delivery_queue);
	if (chk == NULL) {
		/* Nothing in queue */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Nothing in queue?\n");
		}
#endif
		asoc->size_on_delivery_queue = 0;
		asoc->cnt_on_delivery_queue = 0;
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}

	if (stcb->sctp_socket->so_rcv.sb_cc >= stcb->sctp_socket->so_rcv.sb_hiwat) {
		/* Boy, there really is NO room */
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return (0);
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("Now to the delivery with chk(%p)!\n", chk);
	}
#endif
	/* XXX need to append PKTHDR to the socket buffer first */
	if ((chk->data->m_flags & M_PKTHDR) == 0) {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			/* no room! */
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return (0);
		}
		m->m_pkthdr.len = chk->send_size;
		m->m_len = 0;
		m->m_next = chk->data;
		chk->data = m;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		if (chk->data->m_next == NULL) {
			/* hopefully we hit here most of the time */
			chk->data->m_flags |= M_EOR;
		} else {
			/* Add the flag to the LAST mbuf in the chain */
			m = chk->data;
			while (m->m_next != NULL) {
				m = m->m_next;
			}
			m->m_flags |= M_EOR;
		}
	}

	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		struct sockaddr_in6 lsa6;

		control = sctp_build_ctl(stcb, chk);
		to = rtcache_getdst(&chk->whoTo->ro);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
		    to->sa_family == AF_INET) {
			const struct sockaddr_in *sin;

			sin = (const struct sockaddr_in *)to;
			in6_sin_2_v4mapsin6(sin, &sin6);
			to = (struct sockaddr *)&sin6;
		}
		/* check and strip embedded scope junk */
		to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
		    &lsa6);
		if (((const struct sockaddr_in *)to)->sin_port == 0) {
			printf("Huh a, port is %d not net:%p %d?\n",
			    ((const struct sockaddr_in *)to)->sin_port,
			    chk->whoTo,
			    (int)(ntohs(stcb->rport)));
			/* ((struct sockaddr_in *)to)->sin_port = stcb->rport; */
			/* XXX */
		}
		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < (long)chk->send_size) {
			/* Gak not enough room */
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
			goto skip;
		}
		if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
		    to, chk->data, control, stcb->asoc.my_vtag,
		    stcb->sctp_ep)) {
			/* Gak not enough room */
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
		} else {
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
					stcb->asoc.my_rwnd_control_len +=
					    sizeof(struct mbuf);
				}
			} else {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
			free_it = 1;
		}
	} else {
		/* append to an already started message. */
		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
		    (long)chk->send_size) {
			sbappend(&stcb->sctp_socket->so_rcv, chk->data);
			free_it = 1;
		}
	}
skip:
	if (hold_locks == 0) {
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
	}
	/* free up the one we inserted */
	if (free_it) {
		/* Pull it off the queue */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Free_it true, doing tickle wakeup\n");
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
		asoc->size_on_delivery_queue -= chk->send_size;
		asoc->cnt_on_delivery_queue--;
		/* Lose the data pointer, since it's in the socket buffer */
		chk->data = NULL;
		/* Now free the address and data */
		sctp_free_remote_addr(chk->whoTo);
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
	}
	return (free_it);
}
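
/*
 * To summarize the contract of sctp_deliver_data(): the chunk (if any) is
 * always appended to the delivery queue first; the function then tries to
 * move the queue head into so_rcv.  It returns 1 when the head was handed
 * to the socket buffer (and its chunk structure freed), and 0 when nothing
 * could be delivered, e.g. because a fragmented delivery is in progress or
 * the socket buffer is out of room.
 */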

/*
 * We are delivering currently from the reassembly queue.  We must continue
 * to deliver until we either:
 * 1) run out of space.
 * 2) run out of sequential TSN's
 * 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
{
	const struct sockaddr *to;
	struct sockaddr_in6 sin6;
	struct sctp_tmit_chunk *chk, *at;
	struct mbuf *control, *m;
	u_int16_t nxt_todel;
	u_int16_t stream_no;
	int cntDel;
	cntDel = stream_no = 0;
	if (hold_locks == 0) {
		/*
		 * you always have the TCB lock, we need
		 * to have the inp write lock as well.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
	}
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket above is long gone */
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
			/* this loop walks the reasm queue, so adjust the
			 * reasm accounting */
			asoc->size_on_reasm_queue -= chk->send_size;
			asoc->cnt_on_reasm_queue--;
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			sctp_m_freem(chk->data);
			chk->data = NULL;
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		if (hold_locks == 0) {
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		}
		return;
	}
	do {
		if (stcb->sctp_socket->so_rcv.sb_cc >=
		    stcb->sctp_socket->so_rcv.sb_hiwat) {
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * and it's not unordered delivery
			 */
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}

		if ((chk->data->m_flags & M_PKTHDR) == 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/* no room! */
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
			m->m_pkthdr.len = chk->send_size;
			m->m_len = 0;
			m->m_next = chk->data;
			chk->data = m;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			if (chk->data->m_next == NULL) {
				/* hopefully we hit here most of the time */
				chk->data->m_flags |= M_EOR;
			} else {
				/* Add the flag to the LAST mbuf in the chain */
				m = chk->data;
				while (m->m_next != NULL) {
					m = m->m_next;
				}
				m->m_flags |= M_EOR;
			}
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			struct sockaddr_in6 lsa6;

			control = sctp_build_ctl(stcb, chk);
			to = rtcache_getdst(&chk->whoTo->ro);
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
			    to->sa_family == AF_INET) {
				const struct sockaddr_in *sin;

				sin = satocsin(to);
				in6_sin_2_v4mapsin6(sin, &sin6);
				to = (struct sockaddr *)&sin6;
			}
			/* check and strip embedded scope junk */
			to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
			    &lsa6);
			if (((const struct sockaddr_in *)to)->sin_port == 0) {
				printf("Huh b, port is %d not net:%p %d?\n",
				    ((const struct sockaddr_in *)to)->sin_port,
				    chk->whoTo,
				    (int)(ntohs(stcb->rport)));
				/* ((struct sockaddr_in *)to)->sin_port = stcb->rport; */
				/* XXX */
			}
			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) <
			    (long)chk->send_size) {
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
				}
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
			if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
			    to, chk->data, control, stcb->asoc.my_vtag,
			    stcb->sctp_ep)) {
				/* Gak not enough room */
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
				}
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
					stcb->asoc.my_rwnd_control_len +=
					    sizeof(struct mbuf);
				}
			} else {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
			cntDel++;
		} else {
			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
			    (long)chk->send_size) {
				sbappend(&stcb->sctp_socket->so_rcv, chk->data);
				cntDel++;
			} else {
				/* out of space in the sb */
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0) {
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				}
				return;
			}
		}
		/* pull it, we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
		}
		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		asoc->cnt_on_reasm_queue--;
		/* free up the chk */
		sctp_free_remote_addr(chk->whoTo);
		chk->data = NULL;
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			/* u_int16_t nxt_todel; */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			chk = TAILQ_FIRST(&strm->inqueue);
			if (chk && (nxt_todel == chk->rec.data.stream_seq)) {
				while (chk != NULL) {
					/* all delivered */
					if (nxt_todel ==
					    chk->rec.data.stream_seq) {
						at = TAILQ_NEXT(chk, sctp_next);
						TAILQ_REMOVE(&strm->inqueue,
						    chk, sctp_next);
						asoc->size_on_all_streams -=
						    chk->send_size;
						asoc->cnt_on_all_streams--;
						strm->last_sequence_delivered++;
						/*
						 * We ignore the return of
						 * deliver_data here since we
						 * always can hold the chunk on
						 * the d-queue.  And we have a
						 * finite number that can be
						 * delivered from the strq.
						 */
						sctp_deliver_data(stcb, asoc, chk, 1);
						chk = at;
					} else {
						break;
					}
					nxt_todel =
					    strm->last_sequence_delivered + 1;
				}
			}
			if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
				/* Here if deliver_data fails, we must break */
				if (sctp_deliver_data(stcb, asoc, NULL, 1) == 0)
					break;
			}
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
			if (hold_locks == 0) {
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			}
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
	if (cntDel) {
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
	if (hold_locks == 0) {
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
	}
}
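
/*
 * Delivery from the reassembly queue thus stops at the first gap.  For
 * example, with TSNs 10 (FIRST), 11 (MIDDLE) and 13 (LAST) queued and
 * tsn_last_delivered == 9, chunks 10 and 11 go up to the socket and the
 * loop returns at 13, since 13 != tsn_last_delivered + 1; the missing
 * TSN 12 must arrive before the message can complete.
 */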

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order.
 * One big question still remains: what to do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *at;
	int queue_needed;
	u_int16_t nxt_todel;
	struct mbuf *oper;

	/*** FIX FIX FIX ???
	 * Need to add code to deal with 16 bit seq wrap
	 * without a TSN wrap for ordered delivery (maybe).
	 * FIX FIX FIX ???
	 */
	queue_needed = 1;
	asoc->size_on_all_streams += chk->send_size;
	asoc->cnt_on_all_streams++;
	strm = &asoc->strmin[chk->rec.data.stream_number];
	nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
	sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		printf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
		    (u_int)chk->rec.data.stream_seq,
		    (u_int)strm->last_sequence_delivered, (u_int)nxt_todel);
	}
#endif
	if (compare_with_wrap(strm->last_sequence_delivered,
	    chk->rec.data.stream_seq, MAX_SEQ) ||
	    (strm->last_sequence_delivered == chk->rec.data.stream_seq)) {
		/* The incoming sseq is behind where we last delivered? */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
			    chk->rec.data.stream_seq,
			    strm->last_sequence_delivered);
		}
#endif
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
		MGET(oper, M_DONTWAIT, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			u_int32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    sizeof(*ippp);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (u_int32_t *)(ph + 1);
			*ippp = htonl(0x00000001);
		}
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);

		*abort_flag = 1;
		return;

	}
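	/*
	 * The mbuf built above (and in the similar abort paths below)
	 * carries an operational error cause: a struct sctp_paramhdr of
	 * type SCTP_CAUSE_PROTOCOL_VIOLATION followed by a 32-bit marker
	 * (0x00000001 here, 0x1000000N in sctp_queue_data_for_reasm) that
	 * appears to be unique per call site, so the sanity check that
	 * fired can be identified from the ABORT seen on the wire.
	 */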
	if (nxt_todel == chk->rec.data.stream_seq) {
		/* can be delivered right away */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("It's NEXT!\n");
		}
#endif
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
		queue_needed = 0;
		asoc->size_on_all_streams -= chk->send_size;
		asoc->cnt_on_all_streams--;
		strm->last_sequence_delivered++;
		sctp_deliver_data(stcb, asoc, chk, 0);
		chk = TAILQ_FIRST(&strm->inqueue);
		while (chk != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == chk->rec.data.stream_seq) {
				at = TAILQ_NEXT(chk, sctp_next);
				TAILQ_REMOVE(&strm->inqueue, chk, sctp_next);
				asoc->size_on_all_streams -= chk->send_size;
				asoc->cnt_on_all_streams--;
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue.  And we have a finite number that
				 * can be delivered from the strq.
				 */
#ifdef SCTP_STR_LOGGING
				sctp_log_strm_del(chk, NULL,
				    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
				sctp_deliver_data(stcb, asoc, chk, 0);
				chk = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find
		 * the correct place to put it on the queue.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Queue Needed!\n");
		}
#endif
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
#ifdef SCTP_STR_LOGGING
			sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
			TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, sctp_next) {
				if (compare_with_wrap(at->rec.data.stream_seq,
				    chk->rec.data.stream_seq, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the new
					 * one, insert before this one
					 */
#ifdef SCTP_STR_LOGGING
					sctp_log_strm_del(chk, at,
					    SCTP_STR_LOG_FROM_INSERT_MD);
#endif
					TAILQ_INSERT_BEFORE(at, chk, sctp_next);
					break;
				} else if (at->rec.data.stream_seq ==
				    chk->rec.data.stream_seq) {
					/*
					 * Gak, he sent me a duplicate str seq
					 * number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort too?
					 * FIX ME MAYBE?  Or it COULD be that
					 * the SSN's have wrapped.  Maybe I
					 * should compare to TSN somehow...
					 * sigh, for now just blow away the
					 * chunk!
					 */
					sctp_m_freem(chk->data);
					chk->data = NULL;
					asoc->size_on_all_streams -= chk->send_size;
					asoc->cnt_on_all_streams--;
					sctp_pegs[SCTP_DUP_SSN_RCVD]++;
					sctp_free_remote_addr(chk->whoTo);
					SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
					sctppcbinfo.ipi_count_chunk--;
					if ((int)sctppcbinfo.ipi_count_chunk <
					    0) {
						panic("Chunk count is negative");
					}
					sctppcbinfo.ipi_gencnt_chunk++;
					return;
				} else {
					if (TAILQ_NEXT(at, sctp_next) == NULL) {
						/*
						 * We are at the end, insert it
						 * after this one
						 */
#ifdef SCTP_STR_LOGGING
						sctp_log_strm_del(chk, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
#endif
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, chk, sctp_next);
						break;
					}
				}
			}
		}
	} else {
		/* We delivered some chunks, wake them up */

#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			printf("Doing WAKEUP!\n");
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
}

/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue.  And you get 1 back
 * if all of the message is ready, or 0 back if the message is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, int *t_size)
{
	struct sctp_tmit_chunk *chk;
	u_int32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
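
/*
 * Example of the walk above: a queue holding TSN 10 (FIRST), 11 (MIDDLE)
 * and 12 (LAST) yields *t_size equal to the sum of the three send_sizes
 * and a return of 1.  If TSN 12 were missing (10, 11, 13 queued), the
 * contiguity test fails at 13 and the function returns 0 with a partial
 * t_size.
 */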

/*
 * Dump onto the re-assembly queue, in its proper place.  After dumping on
 * the queue, see if anything can be delivered.  If so pull it off (or as
 * much as we can).  If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	u_int16_t nxt_todel;
	u_int32_t cum_ackp1, prev_tsn, post_tsn;
	int tsize;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;

	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when
		 * only one fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		asoc->cnt_on_reasm_queue++;
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress, we
				 * hit the next one and it does NOT have a
				 * FIRST fragment mark.
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				}
#endif
				MGET(oper, M_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x10000001);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					printf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				}
#endif
				MGET(oper, M_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x10000002);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/* Here we are ok with a MIDDLE or LAST piece */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
						    chk->rec.data.stream_number,
						    asoc->str_of_pdapi);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;
						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000003);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
						    chk->rec.data.stream_seq,
						    asoc->ssn_of_pdapi);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;
						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000004);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	at = TAILQ_FIRST(&asoc->reasmqueue);

	/* Grab the top flags */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			asoc->cnt_on_reasm_queue++;
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too?  FIX ME MAYBE?  Or it COULD be
			 * that the SSN's have wrapped.  Maybe I should compare
			 * to TSN somehow... sigh, for now just blow away the
			 * chunk!
			 */
			sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				asoc->cnt_on_reasm_queue++;
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end
			 * is the NEXT.  A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - It can be a middle or last but not a first\n");
						printf("Gak, Evil plot, it's a FIRST!\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);

						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000005);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    prev->rec.data.stream_number);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000006);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    prev->rec.data.stream_seq);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000007);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000008);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			}
		}
	}

	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of
			 * is my NEXT one.  A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is FIRST, we must be LAST\n");
						printf("Gak, Evil plot, its not a last!\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000009);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk CAN be MIDDLE or FIRST NOT LAST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Next is a MIDDLE/LAST\n");
						printf("Gak, Evil plot, new prev chunk is a LAST\n");
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000a);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    next->rec.data.stream_number);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000b);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						printf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    next->rec.data.stream_seq);
					}
#endif
					MGET(oper, M_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000c);
					}

					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);

					*abort_flag = 1;
					return;

				}
			}
		}
	}
	/*
	 * now that we have all in their place, we must check a number of
	 * things to see if we can send data to the ULP.
	 */
	/* we need to do some delivery, if we can */
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's
			 * ok to deliver, but should we?
			 */
1576 1.1 rjs */
1577 1.1 rjs if (TAILQ_EMPTY(&asoc->delivery_queue) &&
1578 1.1 rjs (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
1579 1.1 rjs (asoc->size_on_reasm_queue >=
1580 1.1 rjs (stcb->sctp_socket->so_rcv.sb_hiwat >> 2) &&
1581 1.1 rjs tsize))) {
1582 1.1 rjs /*
1583 1.1 rjs * Yes, we set up to
1584 1.1 rjs * start reception, by backing down the TSN
1585 1.1 rjs * just in case we can't deliver.
1586 1.1 rjs */
1587 1.1 rjs asoc->fragmented_delivery_inprogress = 1;
1588 1.1 rjs asoc->tsn_last_delivered =
1589 1.1 rjs chk->rec.data.TSN_seq - 1;
1590 1.1 rjs asoc->str_of_pdapi =
1591 1.1 rjs chk->rec.data.stream_number;
1592 1.1 rjs asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
1593 1.1 rjs asoc->fragment_flags = chk->rec.data.rcv_flags;
1594 1.1 rjs sctp_service_reassembly(stcb, asoc, 0);
1595 1.1 rjs }
1596 1.1 rjs }
1597 1.1 rjs } else {
1598 1.1 rjs sctp_service_reassembly(stcb, asoc, 0);
1599 1.1 rjs }
1600 1.1 rjs }
1601 1.1 rjs
1602 1.1 rjs /*
1603 1.1 rjs * This is an unfortunate routine. It checks to make sure an evil guy is not
1604 1.1 rjs * stuffing us full of bad packet fragments. A broken peer could also do this
1605 1.1 rjs * but this is doubtful. It is too bad I must worry about evil crackers, sigh
1606 1.1 rjs * :< more cycles.
1607 1.1 rjs */
1608 1.1 rjs static int
1609 1.1 rjs sctp_does_chk_belong_to_reasm(struct sctp_association *asoc,
1610 1.1 rjs struct sctp_tmit_chunk *chk)
1611 1.1 rjs {
1612 1.1 rjs struct sctp_tmit_chunk *at;
1613 1.1 rjs u_int32_t tsn_est;
1614 1.1 rjs
1615 1.1 rjs TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1616 1.1 rjs if (compare_with_wrap(chk->rec.data.TSN_seq,
1617 1.1 rjs at->rec.data.TSN_seq, MAX_TSN)) {
1618 1.1 rjs /* is it one bigger? */
1619 1.1 rjs tsn_est = at->rec.data.TSN_seq + 1;
1620 1.1 rjs if (tsn_est == chk->rec.data.TSN_seq) {
1621 1.1 rjs /* yep. It had better be a last then */
1622 1.1 rjs if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1623 1.1 rjs SCTP_DATA_LAST_FRAG) {
1624 1.1 rjs /*
1625 1.1 rjs * Ok this guy belongs next to a guy
1626 1.1 rjs * that is NOT last, it should be a
1627 1.1 rjs * middle/last, not a complete chunk.
1628 1.1 rjs */
1629 1.1 rjs return (1);
1630 1.1 rjs } else {
1631 1.1 rjs /*
1632 1.1 rjs * This guy is ok since it's a LAST and
1633 1.1 rjs * the new chunk is a fully self-
1634 1.1 rjs * contained one.
1635 1.1 rjs */
1636 1.1 rjs return (0);
1637 1.1 rjs }
1638 1.1 rjs }
1639 1.1 rjs } else if (chk->rec.data.TSN_seq == at->rec.data.TSN_seq) {
1640 1.1 rjs /* Software error since I have a dup? */
1641 1.1 rjs return (1);
1642 1.1 rjs } else {
1643 1.1 rjs /*
1644 1.1 rjs * Ok, 'at' is larger than new chunk but does it
1645 1.1 rjs * need to be right before it?
1646 1.1 rjs */ 1647 1.1 rjs tsn_est = chk->rec.data.TSN_seq + 1; 1648 1.1 rjs if (tsn_est == at->rec.data.TSN_seq) { 1649 1.1 rjs /* Yep, It better be a first */ 1650 1.1 rjs if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != 1651 1.1 rjs SCTP_DATA_FIRST_FRAG) { 1652 1.1 rjs return (1); 1653 1.1 rjs } else { 1654 1.1 rjs return (0); 1655 1.1 rjs } 1656 1.1 rjs } 1657 1.1 rjs } 1658 1.1 rjs } 1659 1.1 rjs return (0); 1660 1.1 rjs } 1661 1.1 rjs 1662 1.1 rjs extern unsigned int sctp_max_chunks_on_queue; 1663 1.1 rjs static int 1664 1.1 rjs sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, 1665 1.1 rjs struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length, 1666 1.1 rjs struct sctp_nets *net, u_int32_t *high_tsn, int *abort_flag, 1667 1.1 rjs int *break_flag, int last_chunk) 1668 1.1 rjs { 1669 1.1 rjs /* Process a data chunk */ 1670 1.1 rjs /* struct sctp_tmit_chunk *chk;*/ 1671 1.1 rjs struct sctp_tmit_chunk *chk; 1672 1.1 rjs u_int32_t tsn, gap; 1673 1.1 rjs struct mbuf *dmbuf; 1674 1.1 rjs int the_len; 1675 1.1 rjs u_int16_t strmno, strmseq; 1676 1.1 rjs struct mbuf *oper; 1677 1.1 rjs 1678 1.1 rjs chk = NULL; 1679 1.1 rjs tsn = ntohl(ch->dp.tsn); 1680 1.1 rjs #ifdef SCTP_MAP_LOGGING 1681 1.1 rjs sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE); 1682 1.1 rjs #endif 1683 1.1 rjs if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) || 1684 1.1 rjs asoc->cumulative_tsn == tsn) { 1685 1.1 rjs /* It is a duplicate */ 1686 1.1 rjs sctp_pegs[SCTP_DUPTSN_RECVD]++; 1687 1.1 rjs if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1688 1.1 rjs /* Record a dup for the next outbound sack */ 1689 1.1 rjs asoc->dup_tsns[asoc->numduptsns] = tsn; 1690 1.1 rjs asoc->numduptsns++; 1691 1.1 rjs } 1692 1.1 rjs return (0); 1693 1.1 rjs } 1694 1.1 rjs /* Calculate the number of TSN's between the base and this TSN */ 1695 1.1 rjs if (tsn >= asoc->mapping_array_base_tsn) { 1696 1.1 rjs gap = tsn - asoc->mapping_array_base_tsn; 1697 1.1 rjs } else { 1698 1.1 rjs gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1; 1699 1.1 rjs } 1700 1.1 rjs if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1701 1.1 rjs /* Can't hold the bit in the mapping at max array, toss it */ 1702 1.1 rjs return (0); 1703 1.1 rjs } 1704 1.1 rjs if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) { 1705 1.1 rjs if (sctp_expand_mapping_array(asoc)) { 1706 1.1 rjs /* Can't expand, drop it */ 1707 1.1 rjs return (0); 1708 1.1 rjs } 1709 1.1 rjs } 1710 1.1 rjs if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) { 1711 1.1 rjs *high_tsn = tsn; 1712 1.1 rjs } 1713 1.1 rjs /* See if we have received this one already */ 1714 1.1 rjs if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { 1715 1.1 rjs sctp_pegs[SCTP_DUPTSN_RECVD]++; 1716 1.1 rjs if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1717 1.1 rjs /* Record a dup for the next outbound sack */ 1718 1.1 rjs asoc->dup_tsns[asoc->numduptsns] = tsn; 1719 1.1 rjs asoc->numduptsns++; 1720 1.1 rjs } 1721 1.1 rjs if (!callout_pending(&asoc->dack_timer.timer)) { 1722 1.1 rjs /* 1723 1.1 rjs * By starting the timer we assure that we 1724 1.1 rjs * WILL sack at the end of the packet 1725 1.1 rjs * when sctp_sack_check gets called. 
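 * (The duplicate itself is not queued; its TSN is only recorded
 * in the dup list for the next outbound SACK.)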
1726 1.1 rjs */ 1727 1.1 rjs sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, 1728 1.1 rjs stcb, NULL); 1729 1.1 rjs } 1730 1.1 rjs return (0); 1731 1.1 rjs } 1732 1.1 rjs /* 1733 1.1 rjs * Check to see about the GONE flag, duplicates would cause 1734 1.1 rjs * a sack to be sent up above 1735 1.1 rjs */ 1736 1.1 rjs if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 1737 1.1 rjs /* 1738 1.1 rjs * wait a minute, this guy is gone, there is no 1739 1.1 rjs * longer a receiver. Send peer an ABORT! 1740 1.1 rjs */ 1741 1.1 rjs struct mbuf *op_err; 1742 1.1 rjs op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1743 1.1 rjs sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err); 1744 1.1 rjs *abort_flag = 1; 1745 1.1 rjs return (0); 1746 1.1 rjs } 1747 1.1 rjs /* 1748 1.1 rjs * Now before going further we see if there is room. If NOT then 1749 1.1 rjs * we MAY let one through only IF this TSN is the one we are 1750 1.1 rjs * waiting for on a partial delivery API. 1751 1.1 rjs */ 1752 1.1 rjs 1753 1.1 rjs /* now do the tests */ 1754 1.1 rjs if (((asoc->cnt_on_all_streams + 1755 1.1 rjs asoc->cnt_on_delivery_queue + 1756 1.1 rjs asoc->cnt_on_reasm_queue + 1757 1.1 rjs asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) || 1758 1.1 rjs (((int)asoc->my_rwnd) <= 0)) { 1759 1.1 rjs /* 1760 1.1 rjs * When we have NO room in the rwnd we check 1761 1.1 rjs * to make sure the reader is doing its job... 1762 1.1 rjs */ 1763 1.1 rjs if (stcb->sctp_socket->so_rcv.sb_cc) { 1764 1.1 rjs /* some to read, wake-up */ 1765 1.1 rjs sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1766 1.1 rjs } 1767 1.1 rjs /* now is it in the mapping array of what we have accepted? */ 1768 1.1 rjs if (compare_with_wrap(tsn, 1769 1.1 rjs asoc->highest_tsn_inside_map, MAX_TSN)) { 1770 1.1 rjs 1771 1.1 rjs /* Nope not in the valid range dump it */ 1772 1.1 rjs #ifdef SCTP_DEBUG 1773 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1774 1.1 rjs printf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld delq:%d!\n", 1775 1.1 rjs (u_long)tsn, (u_long)asoc->my_rwnd, 1776 1.1 rjs sctp_sbspace(&stcb->sctp_socket->so_rcv), 1777 1.1 rjs stcb->asoc.cnt_on_delivery_queue); 1778 1.1 rjs } 1779 1.1 rjs #endif 1780 1.1 rjs sctp_set_rwnd(stcb, asoc); 1781 1.1 rjs if ((asoc->cnt_on_all_streams + 1782 1.1 rjs asoc->cnt_on_delivery_queue + 1783 1.1 rjs asoc->cnt_on_reasm_queue + 1784 1.1 rjs asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) { 1785 1.1 rjs sctp_pegs[SCTP_MSGC_DROP]++; 1786 1.1 rjs } else { 1787 1.1 rjs sctp_pegs[SCTP_RWND_DROPS]++; 1788 1.1 rjs } 1789 1.1 rjs *break_flag = 1; 1790 1.1 rjs return (0); 1791 1.1 rjs } 1792 1.1 rjs } 1793 1.1 rjs strmno = ntohs(ch->dp.stream_id); 1794 1.1 rjs if (strmno >= asoc->streamincnt) { 1795 1.1 rjs struct sctp_paramhdr *phdr; 1796 1.1 rjs struct mbuf *mb; 1797 1.1 rjs 1798 1.1 rjs MGETHDR(mb, M_DONTWAIT, MT_DATA); 1799 1.1 rjs if (mb != NULL) { 1800 1.1 rjs /* add some space up front so prepend will work well */ 1801 1.1 rjs mb->m_data += sizeof(struct sctp_chunkhdr); 1802 1.1 rjs phdr = mtod(mb, struct sctp_paramhdr *); 1803 1.1 rjs /* 1804 1.1 rjs * Error causes are just param's and this one has 1805 1.1 rjs * two back to back phdr, one with the error type 1806 1.1 rjs * and size, the other with the streamid and a rsvd 1807 1.1 rjs */ 1808 1.1 rjs mb->m_pkthdr.len = mb->m_len = 1809 1.1 rjs (sizeof(struct sctp_paramhdr) * 2); 1810 1.1 rjs phdr->param_type = htons(SCTP_CAUSE_INV_STRM); 1811 1.1 rjs phdr->param_length = 1812 1.1 rjs htons(sizeof(struct sctp_paramhdr) 
* 2); 1813 1.1 rjs phdr++; 1814 1.1 rjs /* We insert the stream in the type field */ 1815 1.1 rjs phdr->param_type = ch->dp.stream_id; 1816 1.1 rjs /* And set the length to 0 for the rsvd field */ 1817 1.1 rjs phdr->param_length = 0; 1818 1.1 rjs sctp_queue_op_err(stcb, mb); 1819 1.1 rjs } 1820 1.1 rjs sctp_pegs[SCTP_BAD_STRMNO]++; 1821 1.1 rjs return (0); 1822 1.1 rjs } 1823 1.1 rjs /* 1824 1.1 rjs * Before we continue lets validate that we are not 1825 1.1 rjs * being fooled by an evil attacker. We can only 1826 1.1 rjs * have 4k chunks based on our TSN spread allowed 1827 1.1 rjs * by the mapping array 512 * 8 bits, so there is 1828 1.1 rjs * no way our stream sequence numbers could have wrapped. 1829 1.1 rjs * We of course only validate the FIRST fragment so the 1830 1.1 rjs * bit must be set. 1831 1.1 rjs */ 1832 1.1 rjs strmseq = ntohs(ch->dp.stream_sequence); 1833 1.1 rjs if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) && 1834 1.1 rjs (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 && 1835 1.1 rjs (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered, 1836 1.1 rjs strmseq, MAX_SEQ) || 1837 1.1 rjs asoc->strmin[strmno].last_sequence_delivered == strmseq)) { 1838 1.1 rjs /* The incoming sseq is behind where we last delivered? */ 1839 1.1 rjs #ifdef SCTP_DEBUG 1840 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 1841 1.1 rjs printf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n", 1842 1.1 rjs strmseq, 1843 1.1 rjs asoc->strmin[strmno].last_sequence_delivered); 1844 1.1 rjs } 1845 1.1 rjs #endif 1846 1.1 rjs /* 1847 1.1 rjs * throw it in the stream so it gets cleaned up in 1848 1.1 rjs * association destruction 1849 1.1 rjs */ 1850 1.1 rjs MGET(oper, M_DONTWAIT, MT_DATA); 1851 1.1 rjs if (oper) { 1852 1.1 rjs struct sctp_paramhdr *ph; 1853 1.1 rjs u_int32_t *ippp; 1854 1.1 rjs 1855 1.1 rjs oper->m_len = sizeof(struct sctp_paramhdr) + 1856 1.1 rjs sizeof(*ippp); 1857 1.1 rjs ph = mtod(oper, struct sctp_paramhdr *); 1858 1.1 rjs ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1859 1.1 rjs ph->param_length = htons(oper->m_len); 1860 1.1 rjs ippp = (u_int32_t *)(ph + 1); 1861 1.1 rjs *ippp = htonl(0x20000001); 1862 1.1 rjs } 1863 1.1 rjs sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, 1864 1.1 rjs oper); 1865 1.1 rjs sctp_pegs[SCTP_BAD_SSN_WRAP]++; 1866 1.1 rjs *abort_flag = 1; 1867 1.1 rjs return (0); 1868 1.1 rjs } 1869 1.1 rjs 1870 1.1 rjs the_len = (chk_length-sizeof(struct sctp_data_chunk)); 1871 1.1 rjs if (last_chunk == 0) { 1872 1.1 rjs dmbuf = sctp_m_copym(*m, 1873 1.1 rjs (offset + sizeof(struct sctp_data_chunk)), 1874 1.1 rjs the_len, M_DONTWAIT); 1875 1.1 rjs } else { 1876 1.1 rjs /* We can steal the last chunk */ 1877 1.1 rjs dmbuf = *m; 1878 1.1 rjs /* lop off the top part */ 1879 1.1 rjs m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1880 1.1 rjs if (dmbuf->m_pkthdr.len > the_len) { 1881 1.1 rjs /* Trim the end round bytes off too */ 1882 1.1 rjs m_adj(dmbuf, -(dmbuf->m_pkthdr.len-the_len)); 1883 1.1 rjs } 1884 1.1 rjs sctp_pegs[SCTP_NO_COPY_IN]++; 1885 1.1 rjs } 1886 1.1 rjs if (dmbuf == NULL) { 1887 1.1 rjs sctp_pegs[SCTP_DROP_NOMEMORY]++; 1888 1.1 rjs return (0); 1889 1.1 rjs } 1890 1.1 rjs if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1891 1.1 rjs asoc->fragmented_delivery_inprogress == 0 && 1892 1.1 rjs TAILQ_EMPTY(&asoc->delivery_queue) && 1893 1.1 rjs ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) || 1894 1.1 rjs ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && 1895 1.1 rjs 
TAILQ_EMPTY(&asoc->strmin[strmno].inqueue))) && 1896 1.1 rjs ((long)(stcb->sctp_socket->so_rcv.sb_hiwat - 1897 1.1 rjs stcb->sctp_socket->so_rcv.sb_cc) >= (long)the_len)) { 1898 1.1 rjs /* Candidate for express delivery */ 1899 1.1 rjs /* 1900 1.1 rjs * Its not fragmented, 1901 1.1 rjs * No PD-API is up, 1902 1.1 rjs * Nothing in the delivery queue, 1903 1.1 rjs * Its un-ordered OR ordered and the next to deliver AND 1904 1.1 rjs * nothing else is stuck on the stream queue, 1905 1.1 rjs * And there is room for it in the socket buffer. 1906 1.1 rjs * Lets just stuff it up the buffer.... 1907 1.1 rjs */ 1908 1.1 rjs 1909 1.1 rjs struct mbuf *control, *mmm; 1910 1.1 rjs struct sockaddr_in6 sin6; 1911 1.1 rjs struct sockaddr_in6 lsa6; 1912 1.1 rjs const struct sockaddr *to; 1913 1.1 rjs 1914 1.1 rjs /* It would be nice to avoid this copy if we could :< */ 1915 1.1 rjs control = sctp_build_ctl_nchunk(stcb, tsn, 1916 1.1 rjs ch->dp.protocol_id, 0, strmno, strmseq, 1917 1.1 rjs ch->ch.chunk_flags); 1918 1.1 rjs /* XXX need to append PKTHDR to the socket buffer first */ 1919 1.1 rjs 1920 1.1 rjs if ((dmbuf->m_flags & M_PKTHDR) == 0) { 1921 1.1 rjs struct mbuf *tmp; 1922 1.1 rjs MGETHDR(tmp, M_DONTWAIT, MT_DATA); 1923 1.1 rjs if (tmp == NULL) { 1924 1.1 rjs 1925 1.1 rjs /* no room! */ 1926 1.1 rjs if (control) { 1927 1.1 rjs sctp_m_freem(control); 1928 1.1 rjs stcb->asoc.my_rwnd_control_len -= 1929 1.1 rjs CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 1930 1.1 rjs } 1931 1.1 rjs 1932 1.1 rjs goto failed_express_del; 1933 1.1 rjs } 1934 1.1 rjs tmp->m_pkthdr.len = the_len; 1935 1.1 rjs tmp->m_len = 0; 1936 1.1 rjs tmp->m_next = dmbuf; 1937 1.1 rjs dmbuf = tmp; 1938 1.1 rjs } 1939 1.1 rjs to = rtcache_getdst(&net->ro); 1940 1.1 rjs if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) && 1941 1.1 rjs to->sa_family == AF_INET) { 1942 1.1 rjs const struct sockaddr_in *sin; 1943 1.1 rjs 1944 1.1 rjs sin = satocsin(to); 1945 1.3 rtr in6_sin_2_v4mapsin6(sin, &sin6); 1946 1.1 rjs to = (struct sockaddr *)&sin6; 1947 1.1 rjs } 1948 1.1 rjs 1949 1.1 rjs /* check and strip embedded scope junk */ 1950 1.1 rjs to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to, 1951 1.1 rjs &lsa6); 1952 1.1 rjs if (((const struct sockaddr_in *)to)->sin_port == 0) { 1953 1.1 rjs printf("Huh c, port is %d not net:%p %d?\n", 1954 1.1 rjs ((const struct sockaddr_in *)to)->sin_port, 1955 1.1 rjs net, 1956 1.1 rjs (int)(ntohs(stcb->rport))); 1957 1.1 rjs /*((struct sockaddr_in *)to)->sin_port = stcb->rport;*/ 1958 1.1 rjs /* XXX */ 1959 1.1 rjs } 1960 1.1 rjs 1961 1.1 rjs mmm = dmbuf; 1962 1.1 rjs /* Mark the EOR */ 1963 1.1 rjs while (mmm->m_next != NULL) { 1964 1.1 rjs mmm = mmm->m_next; 1965 1.1 rjs } 1966 1.1 rjs mmm->m_flags |= M_EOR; 1967 1.1 rjs if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 1968 1.1 rjs /* we have a new high score */ 1969 1.1 rjs asoc->highest_tsn_inside_map = tsn; 1970 1.1 rjs #ifdef SCTP_MAP_LOGGING 1971 1.1 rjs sctp_log_map(0, 1, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 1972 1.1 rjs #endif 1973 1.1 rjs } 1974 1.1 rjs SCTP_TCB_UNLOCK(stcb); 1975 1.1 rjs SCTP_INP_WLOCK(stcb->sctp_ep); 1976 1.1 rjs SCTP_TCB_LOCK(stcb); 1977 1.1 rjs if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to, dmbuf, 1978 1.1 rjs control, stcb->asoc.my_vtag, stcb->sctp_ep)) { 1979 1.1 rjs if (control) { 1980 1.1 rjs sctp_m_freem(control); 1981 1.1 rjs stcb->asoc.my_rwnd_control_len -= 1982 1.1 rjs CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 1983 1.1 rjs } 1984 
1.1 rjs sctp_m_freem(dmbuf); 1985 1.1 rjs goto failed_express_del; 1986 1.1 rjs } 1987 1.1 rjs if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) { 1988 1.1 rjs if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) { 1989 1.1 rjs stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf); 1990 1.1 rjs } 1991 1.1 rjs } else { 1992 1.1 rjs stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf); 1993 1.1 rjs } 1994 1.1 rjs SCTP_INP_WUNLOCK(stcb->sctp_ep); 1995 1.1 rjs sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1996 1.1 rjs if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1997 1.1 rjs 1998 1.1 rjs /* for ordered, bump what we delivered */ 1999 1.1 rjs asoc->strmin[strmno].last_sequence_delivered++; 2000 1.1 rjs } 2001 1.1 rjs sctp_pegs[SCTP_EXPRESS_ROUTE]++; 2002 1.1 rjs #ifdef SCTP_STR_LOGGING 2003 1.1 rjs sctp_log_strm_del_alt(tsn, strmseq, 2004 1.1 rjs SCTP_STR_LOG_FROM_EXPRS_DEL); 2005 1.1 rjs #endif 2006 1.1 rjs #ifdef SCTP_DEBUG 2007 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 2008 1.1 rjs printf("Express Delivery succeeds\n"); 2009 1.1 rjs } 2010 1.1 rjs #endif 2011 1.1 rjs goto finish_express_del; 2012 1.1 rjs } 2013 1.1 rjs 2014 1.1 rjs failed_express_del: 2015 1.1 rjs /* If we reach here this is a new chunk */ 2016 1.1 rjs chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk); 2017 1.1 rjs if (chk == NULL) { 2018 1.1 rjs /* No memory so we drop the chunk */ 2019 1.1 rjs sctp_pegs[SCTP_DROP_NOMEMORY]++; 2020 1.1 rjs if (last_chunk == 0) { 2021 1.1 rjs /* we copied it, free the copy */ 2022 1.1 rjs sctp_m_freem(dmbuf); 2023 1.1 rjs } 2024 1.1 rjs return (0); 2025 1.1 rjs } 2026 1.1 rjs sctppcbinfo.ipi_count_chunk++; 2027 1.1 rjs sctppcbinfo.ipi_gencnt_chunk++; 2028 1.1 rjs chk->rec.data.TSN_seq = tsn; 2029 1.1 rjs chk->rec.data.stream_seq = strmseq; 2030 1.1 rjs chk->rec.data.stream_number = strmno; 2031 1.1 rjs chk->rec.data.payloadtype = ch->dp.protocol_id; 2032 1.1 rjs chk->rec.data.context = 0; 2033 1.1 rjs chk->rec.data.doing_fast_retransmit = 0; 2034 1.1 rjs chk->rec.data.rcv_flags = ch->ch.chunk_flags; 2035 1.1 rjs chk->asoc = asoc; 2036 1.1 rjs chk->send_size = the_len; 2037 1.1 rjs chk->whoTo = net; 2038 1.1 rjs net->ref_count++; 2039 1.1 rjs chk->data = dmbuf; 2040 1.1 rjs 2041 1.1 rjs 2042 1.1 rjs /* Mark it as received */ 2043 1.1 rjs /* Now queue it where it belongs */ 2044 1.1 rjs if ((chk->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 2045 1.1 rjs SCTP_DATA_NOT_FRAG) { 2046 1.1 rjs /* First a sanity check */ 2047 1.1 rjs if (asoc->fragmented_delivery_inprogress) { 2048 1.1 rjs /* 2049 1.1 rjs * Ok, we have a fragmented delivery in progress 2050 1.1 rjs * if this chunk is next to deliver OR belongs in 2051 1.1 rjs * our view to the reassembly, the peer is evil 2052 1.1 rjs * or broken. 
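 * (Either way we abort the association below with a
 * protocol-violation error cause.)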
2053 1.1 rjs */
2054 1.1 rjs u_int32_t estimate_tsn;
2055 1.1 rjs estimate_tsn = asoc->tsn_last_delivered + 1;
2056 1.1 rjs if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2057 1.1 rjs (estimate_tsn == chk->rec.data.TSN_seq)) {
2058 1.1 rjs /* Evil/Broken peer */
2059 1.1 rjs MGET(oper, M_DONTWAIT, MT_DATA);
2060 1.1 rjs if (oper) {
2061 1.1 rjs struct sctp_paramhdr *ph;
2062 1.1 rjs u_int32_t *ippp;
2063 1.1 rjs
2064 1.1 rjs oper->m_len =
2065 1.1 rjs sizeof(struct sctp_paramhdr) +
2066 1.1 rjs sizeof(*ippp);
2067 1.1 rjs ph = mtod(oper, struct sctp_paramhdr *);
2068 1.1 rjs ph->param_type =
2069 1.1 rjs htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2070 1.1 rjs ph->param_length = htons(oper->m_len);
2071 1.1 rjs ippp = (u_int32_t *)(ph + 1);
2072 1.1 rjs *ippp = htonl(0x20000002);
2073 1.1 rjs }
2074 1.1 rjs sctp_abort_an_association(stcb->sctp_ep, stcb,
2075 1.1 rjs SCTP_PEER_FAULTY, oper);
2076 1.1 rjs
2077 1.1 rjs *abort_flag = 1;
2078 1.1 rjs sctp_pegs[SCTP_DROP_FRAG]++;
2079 1.1 rjs return (0);
2080 1.1 rjs } else {
2081 1.1 rjs if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2082 1.1 rjs MGET(oper, M_DONTWAIT, MT_DATA);
2083 1.1 rjs if (oper) {
2084 1.1 rjs struct sctp_paramhdr *ph;
2085 1.1 rjs u_int32_t *ippp;
2086 1.1 rjs
2087 1.1 rjs oper->m_len =
2088 1.1 rjs sizeof(struct sctp_paramhdr) +
2089 1.1 rjs sizeof(*ippp);
2090 1.1 rjs ph = mtod(oper,
2091 1.1 rjs struct sctp_paramhdr *);
2092 1.1 rjs ph->param_type =
2093 1.1 rjs htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2094 1.1 rjs ph->param_length =
2095 1.1 rjs htons(oper->m_len);
2096 1.1 rjs ippp = (u_int32_t *)(ph + 1);
2097 1.1 rjs *ippp = htonl(0x20000003);
2098 1.1 rjs }
2099 1.1 rjs sctp_abort_an_association(stcb->sctp_ep,
2100 1.1 rjs stcb, SCTP_PEER_FAULTY, oper);
2101 1.1 rjs
2102 1.1 rjs *abort_flag = 1;
2103 1.1 rjs sctp_pegs[SCTP_DROP_FRAG]++;
2104 1.1 rjs return (0);
2105 1.1 rjs }
2106 1.1 rjs }
2107 1.1 rjs } else {
2108 1.1 rjs if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2109 1.1 rjs /*
2110 1.1 rjs * Reassembly queue is NOT empty;
2111 1.1 rjs * validate that this chk does not need to
2112 1.1 rjs * be in the reassembly queue. If it does, then
2113 1.1 rjs * our peer is broken or evil.
2114 1.1 rjs */
2115 1.1 rjs if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2116 1.1 rjs MGET(oper, M_DONTWAIT, MT_DATA);
2117 1.1 rjs if (oper) {
2118 1.1 rjs struct sctp_paramhdr *ph;
2119 1.1 rjs u_int32_t *ippp;
2120 1.1 rjs
2121 1.1 rjs oper->m_len =
2122 1.1 rjs sizeof(struct sctp_paramhdr) +
2123 1.1 rjs sizeof(*ippp);
2124 1.1 rjs ph = mtod(oper,
2125 1.1 rjs struct sctp_paramhdr *);
2126 1.1 rjs ph->param_type =
2127 1.1 rjs htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2128 1.1 rjs ph->param_length =
2129 1.1 rjs htons(oper->m_len);
2130 1.1 rjs ippp = (u_int32_t *)(ph + 1);
2131 1.1 rjs *ippp = htonl(0x20000004);
2132 1.1 rjs }
2133 1.1 rjs sctp_abort_an_association(stcb->sctp_ep,
2134 1.1 rjs stcb, SCTP_PEER_FAULTY, oper);
2135 1.1 rjs
2136 1.1 rjs *abort_flag = 1;
2137 1.1 rjs sctp_pegs[SCTP_DROP_FRAG]++;
2138 1.1 rjs return (0);
2139 1.1 rjs }
2140 1.1 rjs }
2141 1.1 rjs }
2142 1.1 rjs if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
2143 1.1 rjs /* queue directly into socket buffer */
2144 1.1 rjs sctp_deliver_data(stcb, asoc, chk, 0);
2145 1.1 rjs sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2146 1.1 rjs } else {
2147 1.1 rjs /* Special check for when streams are resetting.
2148 1.1 rjs * We could be smarter about this and check the
2149 1.1 rjs * actual stream to see if it is not being reset..
that 2150 1.1 rjs * way we would not create a HOLB when amongst streams 2151 1.1 rjs * being reset and those not being reset. 2152 1.1 rjs * 2153 1.1 rjs * We take complete messages that have a stream reset 2154 1.1 rjs * intervening (aka the TSN is after where our cum-ack needs 2155 1.1 rjs * to be) off and put them on a pending_reply_queue. The 2156 1.1 rjs * reassembly ones we do not have to worry about since 2157 1.13 andvar * they are all sorted and processed by TSN order. It 2158 1.1 rjs * is only the singletons I must worry about. 2159 1.1 rjs */ 2160 1.1 rjs if ((asoc->pending_reply) && 2161 1.1 rjs ((compare_with_wrap(tsn, ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) || 2162 1.1 rjs (tsn == ntohl(asoc->pending_reply->reset_at_tsn))) 2163 1.1 rjs ) { 2164 1.1 rjs /* yep its past where we need to reset... go ahead and 2165 1.1 rjs * queue it. 2166 1.1 rjs */ 2167 1.1 rjs TAILQ_INSERT_TAIL(&asoc->pending_reply_queue , chk, sctp_next); 2168 1.1 rjs } else { 2169 1.1 rjs sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag); 2170 1.1 rjs } 2171 1.1 rjs } 2172 1.1 rjs } else { 2173 1.1 rjs /* Into the re-assembly queue */ 2174 1.1 rjs sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 2175 1.1 rjs if (*abort_flag) { 2176 1.1 rjs sctp_pegs[SCTP_DROP_FRAG]++; 2177 1.1 rjs return (0); 2178 1.1 rjs } 2179 1.1 rjs } 2180 1.1 rjs if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) { 2181 1.1 rjs /* we have a new high score */ 2182 1.1 rjs asoc->highest_tsn_inside_map = tsn; 2183 1.1 rjs #ifdef SCTP_MAP_LOGGING 2184 1.1 rjs sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2185 1.1 rjs #endif 2186 1.1 rjs } 2187 1.1 rjs finish_express_del: 2188 1.1 rjs if (last_chunk) { 2189 1.1 rjs *m = NULL; 2190 1.1 rjs } 2191 1.1 rjs sctp_pegs[SCTP_PEG_TSNS_RCVD]++; 2192 1.1 rjs /* Set it present please */ 2193 1.1 rjs #ifdef SCTP_STR_LOGGING 2194 1.1 rjs sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN); 2195 1.1 rjs #endif 2196 1.1 rjs #ifdef SCTP_MAP_LOGGING 2197 1.1 rjs sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2198 1.1 rjs asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2199 1.1 rjs #endif 2200 1.1 rjs SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2201 1.1 rjs return (1); 2202 1.1 rjs } 2203 1.1 rjs 2204 1.1 rjs void 2205 1.1 rjs sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag) 2206 1.1 rjs { 2207 1.1 rjs /* 2208 1.1 rjs * Now we also need to check the mapping array in a couple of ways. 2209 1.1 rjs * 1) Did we move the cum-ack point? 
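 * 2) Can we slide the mapping array forward?
 * 3) Do we owe the peer a SACK now, or can the delayed-ack
 * timer cover it?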
2210 1.1 rjs */ 2211 1.1 rjs struct sctp_association *asoc; 2212 1.1 rjs int i, at; 2213 1.1 rjs int m_size, all_ones; 2214 1.1 rjs int slide_from, slide_end, lgap, distance; 2215 1.1 rjs #ifdef SCTP_MAP_LOGGING 2216 1.1 rjs uint32_t old_cumack, old_base, old_highest; 2217 1.1 rjs unsigned char aux_array[64]; 2218 1.1 rjs #endif 2219 1.1 rjs 2220 1.1 rjs asoc = &stcb->asoc; 2221 1.1 rjs at = 0; 2222 1.1 rjs 2223 1.1 rjs #ifdef SCTP_MAP_LOGGING 2224 1.1 rjs old_cumack = asoc->cumulative_tsn; 2225 1.1 rjs old_base = asoc->mapping_array_base_tsn; 2226 1.1 rjs old_highest = asoc->highest_tsn_inside_map; 2227 1.1 rjs if (asoc->mapping_array_size < 64) 2228 1.1 rjs memcpy(aux_array, asoc->mapping_array, 2229 1.1 rjs asoc->mapping_array_size); 2230 1.1 rjs else 2231 1.1 rjs memcpy(aux_array, asoc->mapping_array, 64); 2232 1.1 rjs #endif 2233 1.1 rjs 2234 1.1 rjs /* 2235 1.1 rjs * We could probably improve this a small bit by calculating the 2236 1.1 rjs * offset of the current cum-ack as the starting point. 2237 1.1 rjs */ 2238 1.1 rjs all_ones = 1; 2239 1.1 rjs m_size = stcb->asoc.mapping_array_size << 3; 2240 1.1 rjs for (i = 0; i < m_size; i++) { 2241 1.1 rjs if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) { 2242 1.1 rjs /* 2243 1.1 rjs * Ok we found the first place that we are 2244 1.1 rjs * missing a TSN. 2245 1.1 rjs */ 2246 1.1 rjs at = i; 2247 1.1 rjs all_ones = 0; 2248 1.1 rjs asoc->cumulative_tsn = asoc->mapping_array_base_tsn + 2249 1.1 rjs (i - 1); 2250 1.1 rjs break; 2251 1.1 rjs } 2252 1.1 rjs } 2253 1.1 rjs if (compare_with_wrap(asoc->cumulative_tsn, 2254 1.1 rjs asoc->highest_tsn_inside_map, 2255 1.1 rjs MAX_TSN)) { 2256 1.1 rjs panic("huh, cumack greater than high-tsn in map"); 2257 1.1 rjs } 2258 1.1 rjs if (all_ones || 2259 1.1 rjs (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) { 2260 1.1 rjs /* The complete array was completed by a single FR */ 2261 1.12 andvar /* highest becomes the cum-ack */ 2262 1.1 rjs int clr; 2263 1.1 rjs asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 2264 1.1 rjs /* clear the array */ 2265 1.1 rjs if (all_ones) 2266 1.1 rjs clr = asoc->mapping_array_size; 2267 1.1 rjs else { 2268 1.1 rjs clr = (at >> 3) + 1; 2269 1.1 rjs /* 2270 1.1 rjs * this should be the allones case 2271 1.1 rjs * but just in case :> 2272 1.1 rjs */ 2273 1.1 rjs if (clr > asoc->mapping_array_size) 2274 1.1 rjs clr = asoc->mapping_array_size; 2275 1.1 rjs } 2276 1.1 rjs memset(asoc->mapping_array, 0, clr); 2277 1.1 rjs /* base becomes one ahead of the cum-ack */ 2278 1.1 rjs asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2279 1.1 rjs #ifdef SCTP_MAP_LOGGING 2280 1.1 rjs sctp_log_map(old_base, old_cumack, old_highest, 2281 1.1 rjs SCTP_MAP_PREPARE_SLIDE); 2282 1.1 rjs sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2283 1.1 rjs asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED); 2284 1.1 rjs #endif 2285 1.1 rjs } else if (at >= 8) { 2286 1.1 rjs /* we can slide the mapping array down */ 2287 1.10 andvar /* Calculate the new byte position we can move down */ 2288 1.1 rjs slide_from = at >> 3; 2289 1.1 rjs /* now calculate the ceiling of the move using our highest TSN value */ 2290 1.1 rjs if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) { 2291 1.1 rjs lgap = asoc->highest_tsn_inside_map - 2292 1.1 rjs asoc->mapping_array_base_tsn; 2293 1.1 rjs } else { 2294 1.1 rjs lgap = (MAX_TSN - asoc->mapping_array_base_tsn) + 2295 1.1 rjs asoc->highest_tsn_inside_map + 1; 2296 1.1 rjs } 2297 1.1 rjs slide_end = lgap >> 3; 
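 /* slide_from..slide_end now bound, in bytes, the part of the map still in use */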
2298 1.1 rjs if (slide_end < slide_from) { 2299 1.1 rjs panic("impossible slide"); 2300 1.1 rjs } 2301 1.1 rjs distance = (slide_end-slide_from) + 1; 2302 1.1 rjs #ifdef SCTP_MAP_LOGGING 2303 1.1 rjs sctp_log_map(old_base, old_cumack, old_highest, 2304 1.1 rjs SCTP_MAP_PREPARE_SLIDE); 2305 1.1 rjs sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end, 2306 1.1 rjs (uint32_t)lgap, SCTP_MAP_SLIDE_FROM); 2307 1.1 rjs #endif 2308 1.1 rjs if (distance + slide_from > asoc->mapping_array_size || 2309 1.1 rjs distance < 0) { 2310 1.1 rjs #ifdef SCTP_DEBUG 2311 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 2312 1.1 rjs printf("Ugh bad addition.. you can't hrumpp!\n"); 2313 1.1 rjs } 2314 1.1 rjs #endif 2315 1.1 rjs /* 2316 1.1 rjs * Here we do NOT slide forward the array so that 2317 1.1 rjs * hopefully when more data comes in to fill it up 2318 1.1 rjs * we will be able to slide it forward. Really 2319 1.1 rjs * I don't think this should happen :-0 2320 1.1 rjs */ 2321 1.1 rjs 2322 1.1 rjs #ifdef SCTP_MAP_LOGGING 2323 1.1 rjs sctp_log_map((uint32_t)distance, (uint32_t)slide_from, 2324 1.1 rjs (uint32_t)asoc->mapping_array_size, 2325 1.1 rjs SCTP_MAP_SLIDE_NONE); 2326 1.1 rjs #endif 2327 1.1 rjs } else { 2328 1.1 rjs int ii; 2329 1.1 rjs for (ii = 0; ii < distance; ii++) { 2330 1.1 rjs asoc->mapping_array[ii] = 2331 1.1 rjs asoc->mapping_array[slide_from + ii]; 2332 1.1 rjs } 2333 1.1 rjs for (ii = distance;ii <= slide_end; ii++) { 2334 1.1 rjs asoc->mapping_array[ii] = 0; 2335 1.1 rjs } 2336 1.1 rjs asoc->mapping_array_base_tsn += (slide_from << 3); 2337 1.1 rjs #ifdef SCTP_MAP_LOGGING 2338 1.1 rjs sctp_log_map(asoc->mapping_array_base_tsn, 2339 1.1 rjs asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2340 1.1 rjs SCTP_MAP_SLIDE_RESULT); 2341 1.1 rjs #endif 2342 1.1 rjs } 2343 1.1 rjs } 2344 1.1 rjs 2345 1.1 rjs /* check the special flag for stream resets */ 2346 1.1 rjs if ((asoc->pending_reply) && 2347 1.1 rjs ((compare_with_wrap((asoc->cumulative_tsn+1), ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) || 2348 1.1 rjs ((asoc->cumulative_tsn+1) == ntohl(asoc->pending_reply->reset_at_tsn))) 2349 1.1 rjs ) { 2350 1.1 rjs /* we have finished working through the backlogged TSN's now 2351 1.1 rjs * time to reset streams. 2352 1.1 rjs * 1: call reset function. 2353 1.1 rjs * 2: free pending_reply space 2354 1.1 rjs * 3: distribute any chunks in pending_reply_queue. 2355 1.1 rjs */ 2356 1.1 rjs struct sctp_tmit_chunk *chk; 2357 1.1 rjs sctp_handle_stream_reset_response(stcb, asoc->pending_reply); 2358 1.1 rjs free(asoc->pending_reply, M_PCB); 2359 1.1 rjs asoc->pending_reply = NULL; 2360 1.1 rjs chk = TAILQ_FIRST(&asoc->pending_reply_queue); 2361 1.1 rjs while (chk) { 2362 1.1 rjs TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next); 2363 1.1 rjs sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag); 2364 1.1 rjs if (*abort_flag) { 2365 1.1 rjs return; 2366 1.1 rjs } 2367 1.1 rjs chk = TAILQ_FIRST(&asoc->pending_reply_queue); 2368 1.1 rjs } 2369 1.1 rjs } 2370 1.1 rjs /* 2371 1.1 rjs * Now we need to see if we need to queue a sack or just start 2372 1.1 rjs * the timer (if allowed). 2373 1.1 rjs */ 2374 1.1 rjs if (ok_to_sack) { 2375 1.1 rjs if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2376 1.1 rjs /* 2377 1.1 rjs * Ok special case, in SHUTDOWN-SENT case. 
2378 1.1 rjs * here we make sure the SACK timer is off and
2379 1.1 rjs * instead send a SHUTDOWN and a SACK
2380 1.1 rjs */
2381 1.1 rjs if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2382 1.1 rjs sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2383 1.1 rjs stcb->sctp_ep, stcb, NULL);
2384 1.1 rjs }
2385 1.1 rjs #ifdef SCTP_DEBUG
2386 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2387 1.1 rjs printf("%s:%d sends a shutdown\n",
2388 1.1 rjs __FILE__,
2389 1.1 rjs __LINE__
2390 1.1 rjs );
2391 1.1 rjs }
2392 1.1 rjs #endif
2393 1.1 rjs sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2394 1.1 rjs sctp_send_sack(stcb);
2395 1.1 rjs } else {
2396 1.1 rjs int is_a_gap;
2397 1.1 rjs /* is there a gap now ? */
2398 1.1 rjs is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2399 1.1 rjs stcb->asoc.cumulative_tsn, MAX_TSN);
2400 1.1 rjs if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a sack */
2401 1.1 rjs ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2402 1.1 rjs (stcb->asoc.numduptsns) || /* we have dup's */
2403 1.1 rjs (is_a_gap) || /* is still a gap */
2404 1.1 rjs (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up, second packet */
2405 1.1 rjs ) {
2406 1.1 rjs /*
2407 1.1 rjs * Ok we must build a SACK since the timer
2408 1.1 rjs * is pending, we got our first packet OR
2409 1.1 rjs * there are gaps or duplicates.
2410 1.1 rjs */
2411 1.1 rjs stcb->asoc.first_ack_sent = 1;
2412 1.1 rjs sctp_send_sack(stcb);
2413 1.1 rjs /* The sending will stop the timer */
2414 1.1 rjs } else {
2415 1.1 rjs sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2416 1.1 rjs stcb->sctp_ep, stcb, NULL);
2417 1.1 rjs }
2418 1.1 rjs }
2419 1.1 rjs }
2420 1.1 rjs }
2421 1.1 rjs
2422 1.1 rjs void
2423 1.1 rjs sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
2424 1.1 rjs {
2425 1.1 rjs struct sctp_tmit_chunk *chk;
2426 1.1 rjs int tsize, cntDel;
2427 1.1 rjs u_int16_t nxt_todel;
2428 1.1 rjs
2429 1.1 rjs cntDel = 0;
2430 1.1 rjs if (asoc->fragmented_delivery_inprogress) {
2431 1.1 rjs sctp_service_reassembly(stcb, asoc, hold_locks);
2432 1.1 rjs }
2433 1.1 rjs /* Can we proceed further, i.e. the PD-API is complete */
2434 1.1 rjs if (asoc->fragmented_delivery_inprogress) {
2435 1.1 rjs /* no */
2436 1.1 rjs return;
2437 1.1 rjs }
2438 1.1 rjs
2439 1.1 rjs /*
2440 1.1 rjs * Yes, reassembly delivery no longer in progress; see if we
2441 1.1 rjs * have some on the sb hold queue.
2442 1.1 rjs */
2443 1.1 rjs do {
2444 1.1 rjs if (stcb->sctp_socket->so_rcv.sb_cc >= stcb->sctp_socket->so_rcv.sb_hiwat) {
2445 1.1 rjs if (cntDel == 0)
2446 1.1 rjs sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2447 1.1 rjs break;
2448 1.1 rjs }
2449 1.1 rjs /* If deliver_data says no we must stop */
2450 1.1 rjs if (sctp_deliver_data(stcb, asoc, (struct sctp_tmit_chunk *)NULL, hold_locks) == 0)
2451 1.1 rjs break;
2452 1.1 rjs cntDel++;
2453 1.1 rjs chk = TAILQ_FIRST(&asoc->delivery_queue);
2454 1.1 rjs } while (chk);
2455 1.1 rjs if (cntDel) {
2456 1.1 rjs sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2457 1.1 rjs }
2458 1.1 rjs /*
2459 1.1 rjs * Now is there some other chunk I can deliver
2460 1.1 rjs * from the reassembly queue?
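 * (Only if a FIRST fragment heads the queue and it is either
 * unordered or next in sequence for its stream, and the usual
 * PD-API start conditions below hold.)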
2461 1.1 rjs */
2462 1.1 rjs chk = TAILQ_FIRST(&asoc->reasmqueue);
2463 1.1 rjs if (chk == NULL) {
2464 1.1 rjs asoc->size_on_reasm_queue = 0;
2465 1.1 rjs asoc->cnt_on_reasm_queue = 0;
2466 1.1 rjs return;
2467 1.1 rjs }
2468 1.1 rjs nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2469 1.1 rjs if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2470 1.1 rjs ((nxt_todel == chk->rec.data.stream_seq) ||
2471 1.1 rjs (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2472 1.1 rjs /*
2473 1.1 rjs * Yep the first one is here. We set up to
2474 1.1 rjs * start reception, by backing down the TSN
2475 1.1 rjs * just in case we can't deliver.
2476 1.1 rjs */
2477 1.1 rjs
2478 1.1 rjs /*
2479 1.1 rjs * Before we start, though, the delivery queue
2480 1.1 rjs * must be empty, and either all of the message
2481 1.1 rjs * should be here or at least 1/4 of the socket
2482 1.1 rjs * buffer max should be queued.
2483 1.1 rjs */
2484 1.1 rjs if (TAILQ_EMPTY(&asoc->delivery_queue) &&
2485 1.1 rjs (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2486 1.1 rjs (asoc->size_on_reasm_queue >=
2487 1.1 rjs (stcb->sctp_socket->so_rcv.sb_hiwat >> 2) && tsize))) {
2488 1.1 rjs asoc->fragmented_delivery_inprogress = 1;
2489 1.1 rjs asoc->tsn_last_delivered = chk->rec.data.TSN_seq-1;
2490 1.1 rjs asoc->str_of_pdapi = chk->rec.data.stream_number;
2491 1.1 rjs asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2492 1.1 rjs asoc->fragment_flags = chk->rec.data.rcv_flags;
2493 1.1 rjs sctp_service_reassembly(stcb, asoc, hold_locks);
2494 1.1 rjs }
2495 1.1 rjs }
2496 1.1 rjs }
2497 1.1 rjs
2498 1.1 rjs int
2499 1.1 rjs sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2500 1.1 rjs struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2501 1.1 rjs struct sctp_nets *net, u_int32_t *high_tsn)
2502 1.1 rjs {
2503 1.1 rjs struct sctp_data_chunk *ch, chunk_buf;
2504 1.1 rjs struct sctp_association *asoc;
2505 1.1 rjs int num_chunks = 0; /* number of data chunks processed */
2506 1.1 rjs int chk_length, break_flag, last_chunk;
2507 1.1 rjs int abort_flag = 0, was_a_gap = 0;
2508 1.1 rjs struct mbuf *m;
2509 1.1 rjs
2510 1.1 rjs /* set the rwnd */
2511 1.1 rjs sctp_set_rwnd(stcb, &stcb->asoc);
2512 1.1 rjs
2513 1.1 rjs m = *mm;
2514 1.1 rjs asoc = &stcb->asoc;
2515 1.1 rjs if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2516 1.1 rjs stcb->asoc.cumulative_tsn, MAX_TSN)) {
2517 1.1 rjs /* there was a gap before this data was processed */
2518 1.1 rjs was_a_gap = 1;
2519 1.1 rjs }
2520 1.1 rjs /*
2521 1.1 rjs * set up where we got the last DATA packet from for
2522 1.1 rjs * any SACK that may need to go out. Don't bump
2523 1.1 rjs * the net. This is done ONLY when a chunk
2524 1.1 rjs * is assigned.
2525 1.1 rjs */
2526 1.1 rjs asoc->last_data_chunk_from = net;
2527 1.1 rjs
2528 1.1 rjs /*
2529 1.1 rjs * Now before we proceed we must figure out if this
2530 1.1 rjs * is a wasted cluster... i.e. it is a small packet
2531 1.1 rjs * sent in and yet the driver underneath allocated a
2532 1.1 rjs * full cluster for it. If so we must copy it to a
2533 1.1 rjs * smaller mbuf and free up the cluster mbuf. This
2534 1.1 rjs * will help with cluster starvation.
2535 1.1 rjs */
2536 1.1 rjs if (m->m_len < (long)MHLEN && m->m_next == NULL) {
2537 1.1 rjs /* we only handle mbufs that are singletons.. not chains */
2538 1.1 rjs MGET(m, M_DONTWAIT, MT_DATA);
2539 1.1 rjs if (m) {
2540 1.1 rjs /* ok let's see if we can copy the data up */
2541 1.1 rjs vaddr_t *from, *to;
2542 1.1 rjs
2543 1.1 rjs if ((*mm)->m_flags & M_PKTHDR) {
2544 1.1 rjs /* got to copy the header first */
2545 1.1 rjs #ifdef __APPLE__
2546 1.1 rjs M_COPY_PKTHDR(m, (*mm));
2547 1.1 rjs #else
2548 1.8 maxv m_move_pkthdr(m, (*mm));
2549 1.1 rjs #endif
2550 1.1 rjs }
2551 1.1 rjs /* get the pointers and copy */
2552 1.1 rjs to = mtod(m, vaddr_t *);
2553 1.1 rjs from = mtod((*mm), vaddr_t *);
2554 1.1 rjs memcpy(to, from, (*mm)->m_len);
2555 1.1 rjs /* copy the length and free up the old */
2556 1.1 rjs m->m_len = (*mm)->m_len;
2557 1.1 rjs sctp_m_freem(*mm);
2558 1.1 rjs /* success, back copy */
2559 1.1 rjs *mm = m;
2560 1.1 rjs } else {
2561 1.1 rjs /* We are in trouble in the mbuf world .. yikes */
2562 1.1 rjs m = *mm;
2563 1.1 rjs }
2564 1.1 rjs }
2565 1.1 rjs /* get pointer to the first chunk header */
2566 1.1 rjs ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2567 1.1 rjs sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2568 1.1 rjs if (ch == NULL) {
2569 1.1 rjs printf(" ... its short\n");
2570 1.1 rjs return (1);
2571 1.1 rjs }
2572 1.1 rjs /*
2573 1.1 rjs * process all DATA chunks...
2574 1.1 rjs */
2575 1.1 rjs
2576 1.1 rjs #ifdef SCTP_DEBUG
2577 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2578 1.1 rjs printf("In process data off:%d length:%d iphlen:%d ch->type:%d\n",
2579 1.1 rjs *offset, length, iphlen, (int)ch->ch.chunk_type);
2580 1.1 rjs }
2581 1.1 rjs #endif
2582 1.1 rjs
2583 1.1 rjs *high_tsn = asoc->cumulative_tsn;
2584 1.1 rjs break_flag = 0;
2585 1.1 rjs while (ch->ch.chunk_type == SCTP_DATA) {
2586 1.1 rjs /* validate chunk length */
2587 1.1 rjs chk_length = ntohs(ch->ch.chunk_length);
2588 1.1 rjs if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1 ||
2589 1.1 rjs length - *offset < chk_length) {
2590 1.1 rjs /*
2591 1.1 rjs * Need to send an abort since we had an invalid
2592 1.1 rjs * data chunk.
2593 1.1 rjs */
2594 1.1 rjs struct mbuf *op_err;
2595 1.1 rjs MGET(op_err, M_DONTWAIT, MT_DATA);
2596 1.1 rjs if (op_err) {
2597 1.1 rjs struct sctp_paramhdr *ph;
2598 1.1 rjs u_int32_t *ippp;
2599 1.1 rjs
2600 1.1 rjs op_err->m_len = sizeof(struct sctp_paramhdr) +
2601 1.1 rjs sizeof(*ippp);
2602 1.1 rjs ph = mtod(op_err, struct sctp_paramhdr *);
2603 1.1 rjs ph->param_type =
2604 1.1 rjs htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2605 1.1 rjs ph->param_length = htons(op_err->m_len);
2606 1.1 rjs ippp = (u_int32_t *)(ph + 1);
2607 1.1 rjs *ippp = htonl(0x30000001);
2608 1.1 rjs }
2609 1.1 rjs sctp_abort_association(inp, stcb, m, iphlen, sh,
2610 1.1 rjs op_err);
2611 1.1 rjs return (2);
2612 1.1 rjs }
2613 1.1 rjs #ifdef SCTP_DEBUG
2614 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2615 1.1 rjs printf("A chunk of len:%d to process (tot:%d)\n",
2616 1.1 rjs chk_length, length - *offset);
2617 1.1 rjs }
2618 1.1 rjs #endif
2619 1.1 rjs
2620 1.1 rjs #ifdef SCTP_AUDITING_ENABLED
2621 1.1 rjs sctp_audit_log(0xB1, 0);
2622 1.1 rjs #endif
2623 1.1 rjs if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2624 1.1 rjs last_chunk = 1;
2625 1.1 rjs } else {
2626 1.1 rjs last_chunk = 0;
2627 1.1 rjs }
2628 1.1 rjs if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2629 1.1 rjs chk_length, net, high_tsn, &abort_flag, &break_flag,
2630 1.1 rjs last_chunk)) {
2631 1.1 rjs num_chunks++;
2632 1.1 rjs #ifdef SCTP_DEBUG
2633 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2634 1.1 rjs printf("Now incr num_chunks to %d\n",
2635 1.1 rjs num_chunks);
2636 1.1 rjs }
2637 1.1 rjs #endif
2638 1.1 rjs }
2639 1.1 rjs if (abort_flag)
2640 1.1 rjs return (2);
2641 1.1 rjs
2642 1.1 rjs if (break_flag) {
2643 1.1 rjs /*
2644 1.1 rjs * Set because of out of rwnd space and no drop rep
2645 1.1 rjs * space left.
2646 1.1 rjs */
2647 1.1 rjs break;
2648 1.1 rjs }
2649 1.1 rjs
2650 1.1 rjs *offset += SCTP_SIZE32(chk_length);
2651 1.1 rjs if (*offset >= length) {
2652 1.1 rjs /* no more data left in the mbuf chain */
2653 1.1 rjs break;
2654 1.1 rjs }
2655 1.1 rjs ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2656 1.1 rjs sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2657 1.1 rjs if (ch == NULL) {
2658 1.1 rjs *offset = length;
2659 1.1 rjs break;
2660 1.1 rjs }
2661 1.1 rjs } /* while */
2662 1.1 rjs if (break_flag) {
2663 1.1 rjs /*
2664 1.1 rjs * we need to report rwnd overrun drops.
2665 1.1 rjs */
2666 1.1 rjs sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2667 1.1 rjs }
2668 1.1 rjs if (num_chunks) {
2669 1.1 rjs /*
2670 1.1 rjs * Did we get data? If so, update the time for
2671 1.1 rjs * auto-close and give the peer credit for being
2672 1.1 rjs * alive.
2673 1.1 rjs */
2674 1.1 rjs sctp_pegs[SCTP_DATA_DG_RECV]++;
2675 1.1 rjs stcb->asoc.overall_error_count = 0;
2676 1.1 rjs SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2677 1.1 rjs }
2678 1.1 rjs /* now service all of the reassm queue and delivery queue */
2679 1.1 rjs sctp_service_queues(stcb, asoc, 0);
2680 1.1 rjs if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2681 1.1 rjs /*
2682 1.1 rjs * Assure that we ack right away by making
2683 1.1 rjs * sure that a d-ack timer is running. So the
2684 1.1 rjs * sack_check will send a sack.
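 * (In the SHUTDOWN-SENT case sctp_sack_check() stops the timer
 * and emits a SHUTDOWN plus a SACK immediately.)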
2685 1.1 rjs */
2686 1.1 rjs sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2687 1.1 rjs net);
2688 1.1 rjs }
2689 1.1 rjs /* Start a sack timer or QUEUE a SACK for sending */
2690 1.1 rjs sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2691 1.1 rjs if (abort_flag)
2692 1.1 rjs return (2);
2693 1.1 rjs
2694 1.1 rjs return (0);
2695 1.1 rjs }
2696 1.1 rjs
2697 1.1 rjs static void
2698 1.1 rjs sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2699 1.1 rjs struct sctp_sack_chunk *ch, u_long last_tsn, u_long *biggest_tsn_acked,
2700 1.1 rjs u_long *biggest_newly_acked_tsn, int num_seg, int *ecn_seg_sums)
2701 1.1 rjs {
2702 1.1 rjs /************************************************/
2703 1.1 rjs /* process fragments and update sendqueue */
2704 1.1 rjs /************************************************/
2705 1.1 rjs struct sctp_sack *sack;
2706 1.1 rjs struct sctp_gap_ack_block *frag;
2707 1.1 rjs struct sctp_tmit_chunk *tp1;
2708 1.1 rjs int i;
2709 1.1 rjs unsigned int j;
2710 1.1 rjs #ifdef SCTP_FR_LOGGING
2711 1.1 rjs int num_frs=0;
2712 1.1 rjs #endif
2713 1.1 rjs uint16_t frag_strt, frag_end, primary_flag_set;
2714 1.1 rjs u_long last_frag_high;
2715 1.1 rjs
2716 1.1 rjs if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2717 1.1 rjs primary_flag_set = 1;
2718 1.1 rjs } else {
2719 1.1 rjs primary_flag_set = 0;
2720 1.1 rjs }
2721 1.1 rjs
2722 1.1 rjs sack = &ch->sack;
2723 1.1 rjs frag = (struct sctp_gap_ack_block *)((vaddr_t)sack +
2724 1.1 rjs sizeof(struct sctp_sack));
2725 1.1 rjs tp1 = NULL;
2726 1.1 rjs last_frag_high = 0;
2727 1.1 rjs for (i = 0; i < num_seg; i++) {
2728 1.1 rjs frag_strt = ntohs(frag->start);
2729 1.1 rjs frag_end = ntohs(frag->end);
2730 1.14 andvar /* some sanity checks on the fragment offsets */
2731 1.1 rjs if (frag_strt > frag_end) {
2732 1.1 rjs /* this one is malformed, skip */
2733 1.1 rjs frag++;
2734 1.1 rjs continue;
2735 1.1 rjs }
2736 1.1 rjs if (compare_with_wrap((frag_end+last_tsn), *biggest_tsn_acked,
2737 1.1 rjs MAX_TSN))
2738 1.1 rjs *biggest_tsn_acked = frag_end+last_tsn;
2739 1.1 rjs
2740 1.1 rjs /* mark acked dgs and find out the highestTSN being acked */
2741 1.1 rjs if (tp1 == NULL) {
2742 1.1 rjs tp1 = TAILQ_FIRST(&asoc->sent_queue);
2743 1.1 rjs
2744 1.1 rjs /* save the locations of the last frags */
2745 1.1 rjs last_frag_high = frag_end + last_tsn;
2746 1.1 rjs } else {
2747 1.1 rjs /*
2748 1.1 rjs * now let's see if we need to reset the queue
2749 1.1 rjs * due to an out-of-order SACK fragment
2750 1.1 rjs */
2751 1.1 rjs if (compare_with_wrap(frag_strt+last_tsn,
2752 1.1 rjs last_frag_high, MAX_TSN)) {
2753 1.1 rjs /*
2754 1.1 rjs * if the new frag starts after the last TSN
2755 1.1 rjs * frag covered, we are ok
2756 1.1 rjs * and this one is beyond the last one
2757 1.1 rjs */
2758 1.1 rjs ;
2759 1.1 rjs } else {
2760 1.1 rjs /*
2761 1.1 rjs * ok, they have reset us, so we need to reset
2762 1.1 rjs * the queue; this will cause extra hunting but
2763 1.1 rjs * hey, they chose the performance
2764 1.1 rjs * hit when they failed to order their gaps..
2765 1.1 rjs */ 2766 1.1 rjs tp1 = TAILQ_FIRST(&asoc->sent_queue); 2767 1.1 rjs } 2768 1.1 rjs last_frag_high = frag_end + last_tsn; 2769 1.1 rjs } 2770 1.1 rjs for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) { 2771 1.1 rjs while (tp1) { 2772 1.1 rjs #ifdef SCTP_FR_LOGGING 2773 1.1 rjs if (tp1->rec.data.doing_fast_retransmit) 2774 1.1 rjs num_frs++; 2775 1.1 rjs #endif 2776 1.1 rjs 2777 1.1 rjs if (tp1->rec.data.TSN_seq == j) { 2778 1.1 rjs if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2779 1.1 rjs /* must be held until cum-ack passes */ 2780 1.1 rjs /* ECN Nonce: Add the nonce value to the sender's nonce sum */ 2781 1.1 rjs if (tp1->sent < SCTP_DATAGRAM_ACKED) { 2782 1.1 rjs /* 2783 1.1 rjs * If it is less than 2784 1.1 rjs * ACKED, it is now 2785 1.1 rjs * no-longer in flight. 2786 1.1 rjs * Higher values may 2787 1.1 rjs * already be set via 2788 1.1 rjs * previous Gap Ack 2789 1.1 rjs * Blocks... 2790 1.1 rjs * i.e. ACKED or MARKED. 2791 1.1 rjs */ 2792 1.1 rjs if (compare_with_wrap(tp1->rec.data.TSN_seq, 2793 1.1 rjs *biggest_newly_acked_tsn, 2794 1.1 rjs MAX_TSN)) { 2795 1.1 rjs *biggest_newly_acked_tsn = 2796 1.1 rjs tp1->rec.data.TSN_seq; 2797 1.1 rjs } 2798 1.1 rjs sctp_flight_size_decrease(tp1); 2799 1.1 rjs 2800 1.1 rjs sctp_total_flight_decrease(stcb, tp1); 2801 1.1 rjs 2802 1.1 rjs if (tp1->snd_count < 2) { 2803 1.17 andvar /* True non-retransmitted chunk */ 2804 1.1 rjs tp1->whoTo->net_ack2 += 2805 1.1 rjs tp1->send_size; 2806 1.1 rjs 2807 1.1 rjs /* update RTO too? */ 2808 1.1 rjs if (tp1->do_rtt) { 2809 1.1 rjs tp1->whoTo->RTO = 2810 1.1 rjs sctp_calculate_rto(stcb, 2811 1.1 rjs asoc, 2812 1.1 rjs tp1->whoTo, 2813 1.1 rjs &tp1->sent_rcv_time); 2814 1.1 rjs tp1->whoTo->rto_pending = 0; 2815 1.1 rjs tp1->do_rtt = 0; 2816 1.1 rjs } 2817 1.1 rjs } 2818 1.1 rjs } 2819 1.1 rjs if (tp1->sent <= SCTP_DATAGRAM_RESEND && 2820 1.1 rjs tp1->sent != SCTP_DATAGRAM_UNSENT && 2821 1.1 rjs compare_with_wrap(tp1->rec.data.TSN_seq, 2822 1.1 rjs asoc->this_sack_highest_gap, 2823 1.1 rjs MAX_TSN)) { 2824 1.1 rjs asoc->this_sack_highest_gap = 2825 1.1 rjs tp1->rec.data.TSN_seq; 2826 1.1 rjs if (primary_flag_set) { 2827 1.1 rjs tp1->whoTo->cacc_saw_newack = 1; 2828 1.1 rjs } 2829 1.1 rjs } 2830 1.1 rjs if (tp1->sent == SCTP_DATAGRAM_RESEND) { 2831 1.1 rjs #ifdef SCTP_DEBUG 2832 1.1 rjs if (sctp_debug_on & 2833 1.1 rjs SCTP_DEBUG_INDATA3) { 2834 1.1 rjs printf("Hmm. 
one that is in RESEND that is now ACKED\n");
2835 1.1 rjs }
2836 1.1 rjs #endif
2837 1.1 rjs sctp_ucount_decr(asoc->sent_queue_retran_cnt);
2838 1.1 rjs #ifdef SCTP_AUDITING_ENABLED
2839 1.1 rjs sctp_audit_log(0xB2,
2840 1.1 rjs (asoc->sent_queue_retran_cnt & 0x000000ff));
2841 1.1 rjs #endif
2842 1.1 rjs
2843 1.1 rjs }
2844 1.1 rjs (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2845 1.1 rjs (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2846 1.1 rjs tp1->sent = SCTP_DATAGRAM_MARKED;
2847 1.1 rjs }
2848 1.1 rjs break;
2849 1.1 rjs } /* if (tp1->TSN_seq == j) */
2850 1.1 rjs if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
2851 1.1 rjs MAX_TSN))
2852 1.1 rjs break;
2853 1.1 rjs tp1 = TAILQ_NEXT(tp1, sctp_next);
2854 1.1 rjs }/* end while (tp1) */
2855 1.1 rjs } /* end for (j = fragStart */
2856 1.1 rjs frag++; /* next one */
2857 1.1 rjs }
2858 1.1 rjs #ifdef SCTP_FR_LOGGING
2859 1.1 rjs if (num_frs)
2860 1.1 rjs sctp_log_fr(*biggest_tsn_acked, *biggest_newly_acked_tsn,
2861 1.1 rjs last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2862 1.1 rjs #endif
2863 1.1 rjs }
2864 1.1 rjs
2865 1.1 rjs static void
2866 1.1 rjs sctp_check_for_revoked(struct sctp_association *asoc, u_long cum_ack,
2867 1.1 rjs u_long biggest_tsn_acked)
2868 1.1 rjs {
2869 1.1 rjs struct sctp_tmit_chunk *tp1;
2870 1.1 rjs int tot_revoked=0;
2871 1.1 rjs
2872 1.1 rjs tp1 = TAILQ_FIRST(&asoc->sent_queue);
2873 1.1 rjs while (tp1) {
2874 1.1 rjs if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
2875 1.1 rjs MAX_TSN)) {
2876 1.1 rjs /*
2877 1.1 rjs * ok this guy is either ACK or MARKED. If it is ACKED
2878 1.1 rjs * it has been previously acked but not this time, i.e.
2879 1.1 rjs * revoked. If it is MARKED it was ACK'ed again.
2880 1.1 rjs */
2881 1.1 rjs if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2882 1.1 rjs /* it has been revoked */
2883 1.1 rjs /*
2884 1.1 rjs * We do NOT add back to flight size here since
2885 1.1 rjs * it is really NOT in flight. Resend (when/if
2886 1.1 rjs * it occurs) will add to flight size.
2887 1.1 rjs */
2888 1.1 rjs tp1->sent = SCTP_DATAGRAM_SENT;
2889 1.1 rjs tot_revoked++;
2890 1.1 rjs } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2891 1.1 rjs /* it has been re-acked in this SACK */
2892 1.1 rjs tp1->sent = SCTP_DATAGRAM_ACKED;
2893 1.1 rjs }
2894 1.1 rjs }
2895 1.1 rjs if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2896 1.1 rjs MAX_TSN)) {
2897 1.1 rjs /* above the sack */
2898 1.1 rjs break;
2899 1.1 rjs }
2900 1.1 rjs if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2901 1.1 rjs break;
2902 1.1 rjs tp1 = TAILQ_NEXT(tp1, sctp_next);
2903 1.1 rjs }
2904 1.1 rjs if (tot_revoked > 0) {
2905 1.1 rjs /* Setup the ecn nonce re-sync point. We
2906 1.1 rjs * do this since once data is revoked
2907 1.1 rjs * we begin to retransmit things, which
2908 1.1 rjs * do NOT have the ECN bits set. This means
2909 1.1 rjs * we are now out of sync and must wait until
2910 1.1 rjs * we get back in sync with the peer to
2911 1.1 rjs * check ECN bits.
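 * (The re-sync point is the TSN of the next chunk to go out;
 * nonce checking stays disabled until the peer's cum-ack
 * reaches it.)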
2912 1.1 rjs */ 2913 1.1 rjs tp1 = TAILQ_FIRST(&asoc->send_queue); 2914 1.1 rjs if (tp1 == NULL) { 2915 1.1 rjs asoc->nonce_resync_tsn = asoc->sending_seq; 2916 1.1 rjs } else { 2917 1.1 rjs asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq; 2918 1.1 rjs } 2919 1.1 rjs asoc->nonce_wait_for_ecne = 0; 2920 1.1 rjs asoc->nonce_sum_check = 0; 2921 1.1 rjs } 2922 1.1 rjs 2923 1.1 rjs } 2924 1.1 rjs 2925 1.1 rjs extern int sctp_peer_chunk_oh; 2926 1.1 rjs 2927 1.1 rjs static void 2928 1.1 rjs sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 2929 1.1 rjs u_long biggest_tsn_acked, int strike_enabled, 2930 1.1 rjs u_long biggest_tsn_newly_acked, int accum_moved) 2931 1.1 rjs { 2932 1.1 rjs struct sctp_tmit_chunk *tp1; 2933 1.1 rjs int strike_flag=0; 2934 1.1 rjs struct timeval now; 2935 1.1 rjs int tot_retrans=0; 2936 1.1 rjs u_int32_t sending_seq; 2937 1.1 rjs int primary_switch_active = 0; 2938 1.1 rjs int double_switch_active = 0; 2939 1.1 rjs 2940 1.1 rjs /* select the sending_seq, this is 2941 1.1 rjs * either the next thing ready to 2942 1.1 rjs * be sent but not transmitted, OR, 2943 1.1 rjs * the next seq we assign. 2944 1.1 rjs */ 2945 1.1 rjs tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 2946 1.1 rjs if (tp1 == NULL) { 2947 1.1 rjs sending_seq = asoc->sending_seq; 2948 1.1 rjs } else { 2949 1.1 rjs sending_seq = tp1->rec.data.TSN_seq; 2950 1.1 rjs } 2951 1.1 rjs 2952 1.1 rjs if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 2953 1.1 rjs primary_switch_active = 1; 2954 1.1 rjs } 2955 1.1 rjs if (asoc->primary_destination->dest_state & SCTP_ADDR_DOUBLE_SWITCH) { 2956 1.1 rjs double_switch_active = 1; 2957 1.1 rjs } 2958 1.1 rjs if (stcb->asoc.peer_supports_prsctp ) { 2959 1.1 rjs SCTP_GETTIME_TIMEVAL(&now); 2960 1.1 rjs } 2961 1.1 rjs tp1 = TAILQ_FIRST(&asoc->sent_queue); 2962 1.1 rjs while (tp1) { 2963 1.1 rjs strike_flag=0; 2964 1.1 rjs if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked, 2965 1.1 rjs MAX_TSN) || 2966 1.1 rjs tp1->sent == SCTP_DATAGRAM_UNSENT) { 2967 1.1 rjs /* done */ 2968 1.1 rjs break; 2969 1.1 rjs } 2970 1.1 rjs if ((tp1->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) == 2971 1.1 rjs SCTP_PR_SCTP_ENABLED && 2972 1.1 rjs tp1->sent < SCTP_DATAGRAM_ACKED) { 2973 1.1 rjs /* Is it expired? 
*/
2974 1.1 rjs #ifndef __FreeBSD__
2975 1.1 rjs if (timercmp(&now, &tp1->rec.data.timetodrop, >))
2976 1.1 rjs #else
2977 1.1 rjs if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
2978 1.1 rjs #endif
2979 1.1 rjs {
2980 1.1 rjs /* Yes so drop it */
2981 1.1 rjs if (tp1->data != NULL) {
2982 1.1 rjs sctp_release_pr_sctp_chunk(stcb, tp1,
2983 1.1 rjs (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
2984 1.1 rjs &asoc->sent_queue);
2985 1.1 rjs }
2986 1.1 rjs tp1 = TAILQ_NEXT(tp1, sctp_next);
2987 1.1 rjs continue;
2988 1.1 rjs }
2989 1.1 rjs }
2990 1.1 rjs
2991 1.1 rjs if (compare_with_wrap(tp1->rec.data.TSN_seq,
2992 1.1 rjs asoc->this_sack_highest_gap, MAX_TSN)) {
2993 1.1 rjs /* we are beyond the tsn in the sack */
2994 1.1 rjs break;
2995 1.1 rjs }
2996 1.1 rjs if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2997 1.1 rjs /* either a RESEND, ACKED, or MARKED */
2998 1.1 rjs /* skip */
2999 1.1 rjs tp1 = TAILQ_NEXT(tp1, sctp_next);
3000 1.1 rjs continue;
3001 1.1 rjs }
3002 1.1 rjs if (primary_switch_active && (strike_enabled == 0)) {
3003 1.1 rjs if (tp1->whoTo != asoc->primary_destination) {
3004 1.1 rjs /*
3005 1.1 rjs * We can only strike things on the primary if
3006 1.1 rjs * the strike_enabled flag is clear
3007 1.1 rjs */
3008 1.1 rjs tp1 = TAILQ_NEXT(tp1, sctp_next);
3009 1.1 rjs continue;
3010 1.1 rjs }
3011 1.1 rjs } else if (primary_switch_active) {
3012 1.1 rjs if (tp1->whoTo->cacc_saw_newack == 0) {
3013 1.1 rjs /*
3014 1.1 rjs * Only one was received but it was NOT
3015 1.1 rjs * this one.
3016 1.1 rjs */
3017 1.1 rjs tp1 = TAILQ_NEXT(tp1, sctp_next);
3018 1.1 rjs continue;
3019 1.1 rjs }
3020 1.1 rjs }
3021 1.1 rjs if (double_switch_active &&
3022 1.1 rjs (compare_with_wrap(asoc->primary_destination->next_tsn_at_change,
3023 1.1 rjs tp1->rec.data.TSN_seq, MAX_TSN))) {
3024 1.1 rjs /*
3025 1.1 rjs * With a double switch we do NOT mark unless we
3026 1.1 rjs * are beyond the switch point.
3027 1.1 rjs */
3028 1.1 rjs tp1 = TAILQ_NEXT(tp1, sctp_next);
3029 1.1 rjs continue;
3030 1.1 rjs }
3031 1.1 rjs /*
3032 1.1 rjs * Here we check to see if we have already done a FR
3033 1.1 rjs * and if so we see if the biggest TSN we saw in the sack is
3034 1.1 rjs * smaller than the recovery point. If so we don't strike the
3035 1.1 rjs * tsn... otherwise we CAN strike the TSN.
3036 1.1 rjs */
3037 1.1 rjs if (accum_moved && asoc->fast_retran_loss_recovery) {
3038 1.1 rjs /*
3039 1.1 rjs * Strike the TSN if in fast-recovery and
3040 1.1 rjs * cum-ack moved.
3041 1.1 rjs */
3042 1.1 rjs tp1->sent++;
3043 1.1 rjs } else if (tp1->rec.data.doing_fast_retransmit) {
3044 1.1 rjs /*
3045 1.1 rjs * For those that have done a FR we must
3046 1.1 rjs * take special consideration if we strike. I.e.
3047 1.1 rjs * the biggest_newly_acked must be higher
3048 1.1 rjs * than the sending_seq at the time we did
3049 1.1 rjs * the FR.
3050 1.1 rjs */
3051 1.1 rjs #ifdef SCTP_FR_TO_ALTERNATE
3052 1.1 rjs /*
3053 1.1 rjs * If FR's go to new networks, then we
3054 1.1 rjs * must only do this for singly homed asoc's. However
3055 1.1 rjs * if the FR's go to the same network (Armando's work)
3056 1.1 rjs * then it's ok to FR multiple times.
3057 1.1 rjs 		 */
3058 1.1 rjs 		if (asoc->numnets < 2)
3059 1.1 rjs #else
3060 1.1 rjs 		if (1)
3061 1.1 rjs #endif
3062 1.1 rjs 		{
3063 1.1 rjs 			if ((compare_with_wrap(biggest_tsn_newly_acked,
3064 1.1 rjs 			    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3065 1.1 rjs 			    (biggest_tsn_newly_acked ==
3066 1.1 rjs 			    tp1->rec.data.fast_retran_tsn)) {
3067 1.1 rjs 				/*
3068 1.1 rjs 				 * Strike the TSN, since this ack is
3069 1.1 rjs 				 * beyond where things were when we did
3070 1.1 rjs 				 * a FR.
3071 1.1 rjs 				 */
3072 1.1 rjs #ifdef SCTP_FR_LOGGING
3073 1.1 rjs 				sctp_log_fr(biggest_tsn_newly_acked,
3074 1.1 rjs 				    tp1->rec.data.TSN_seq,
3075 1.1 rjs 				    tp1->rec.data.fast_retran_tsn,
3076 1.1 rjs 				    SCTP_FR_LOG_STRIKE_CHUNK);
3077 1.1 rjs #endif
3078 1.1 rjs 				tp1->sent++;
3079 1.1 rjs 				strike_flag=1;
3080 1.1 rjs 			}
3081 1.1 rjs 		}
3082 1.1 rjs 	} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3083 1.1 rjs 	    biggest_tsn_newly_acked, MAX_TSN)) {
3084 1.1 rjs 		/*
3085 1.1 rjs 		 * We don't strike these:
3086 1.1 rjs 		 * This is the HTNA algorithm, i.e. we don't strike
3087 1.1 rjs 		 * if our TSN is larger than the Highest TSN Newly
3088 1.1 rjs 		 * Acked.
3089 1.1 rjs 		 */
3090 1.1 rjs 		;
3091 1.1 rjs 	} else {
3092 1.1 rjs 		/* Strike the TSN */
3093 1.1 rjs 		tp1->sent++;
3094 1.1 rjs 	}
3095 1.1 rjs 	if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3096 1.1 rjs 		/* Increment the count to resend */
3097 1.1 rjs 		struct sctp_nets *alt;
3098 1.1 rjs 
3099 1.1 rjs #ifdef SCTP_FR_LOGGING
3100 1.1 rjs 		sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3101 1.1 rjs 		    0, SCTP_FR_MARKED);
3102 1.1 rjs #endif
3103 1.1 rjs 		if (strike_flag) {
3104 1.1 rjs 			/* This is a subsequent FR */
3105 1.1 rjs 			sctp_pegs[SCTP_DUP_FR]++;
3106 1.1 rjs 		}
3107 1.1 rjs 		asoc->sent_queue_retran_cnt++;
3108 1.1 rjs #ifdef SCTP_FR_TO_ALTERNATE
3109 1.1 rjs 		/* Can we find an alternate? */
3110 1.1 rjs 		alt = sctp_find_alternate_net(stcb, tp1->whoTo);
3111 1.1 rjs #else
3112 1.1 rjs 		/*
3113 1.1 rjs 		 * default behavior is to NOT retransmit FR's
3114 1.1 rjs 		 * to an alternate. Armando Caro's paper details
3115 1.1 rjs 		 * why.
3116 1.1 rjs 		 */
3117 1.1 rjs 		alt = tp1->whoTo;
3118 1.1 rjs #endif
3119 1.1 rjs 		tp1->rec.data.doing_fast_retransmit = 1;
3120 1.1 rjs 		tot_retrans++;
3121 1.1 rjs 		/* mark the sending seq for possible subsequent FR's */
3122 1.1 rjs 		if (TAILQ_EMPTY(&asoc->send_queue)) {
3123 1.1 rjs 			/*
3124 1.1 rjs 			 * If the send queue is empty then it's the
3125 1.1 rjs 			 * next sequence number that will be assigned, so
3126 1.1 rjs 			 * we subtract one from this to get the one we
3127 1.1 rjs 			 * last sent.
3128 1.1 rjs 			 */
3129 1.1 rjs 			tp1->rec.data.fast_retran_tsn = sending_seq - 1;
3130 1.1 rjs 		} else {
3131 1.1 rjs 			/*
3132 1.1 rjs 			 * If there are chunks on the send queue
3133 1.1 rjs 			 * (unsent data that has made it from the
3134 1.1 rjs 			 * stream queues but not out the door), we take
3135 1.1 rjs 			 * the first one (which will have the lowest
3136 1.1 rjs 			 * TSN) and subtract one to get the one we last
3137 1.1 rjs 			 * sent.
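 * Example: if the first unsent chunk on the send queue carries TSN
 * 1000, the last TSN actually put on the wire was 999, and that is
 * what fast_retran_tsn records for comparison against later SACKs.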
3138 1.1 rjs 			 */
3139 1.1 rjs 			struct sctp_tmit_chunk *ttt;
3140 1.1 rjs 			ttt = TAILQ_FIRST(&asoc->send_queue);
3141 1.1 rjs 			tp1->rec.data.fast_retran_tsn =
3142 1.1 rjs 			    ttt->rec.data.TSN_seq - 1;
3143 1.1 rjs 		}
3144 1.1 rjs 		if (tp1->do_rtt) {
3145 1.1 rjs 			/*
3146 1.1 rjs 			 * this guy had a RTO calculation pending on it,
3147 1.1 rjs 			 * cancel it
3148 1.1 rjs 			 */
3149 1.1 rjs 			tp1->whoTo->rto_pending = 0;
3150 1.1 rjs 			tp1->do_rtt = 0;
3151 1.1 rjs 		}
3152 1.1 rjs 		/* fix counts and things */
3153 1.1 rjs 
3154 1.1 rjs 		tp1->whoTo->net_ack++;
3155 1.1 rjs 		sctp_flight_size_decrease(tp1);
3156 1.1 rjs #ifdef SCTP_LOG_RWND
3157 1.1 rjs 		sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3158 1.1 rjs 		    asoc->peers_rwnd, tp1->send_size, sctp_peer_chunk_oh);
3159 1.1 rjs #endif
3160 1.1 rjs 		/* add back to the rwnd */
3161 1.1 rjs 		asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3162 1.1 rjs 
3163 1.1 rjs 		/* remove from the total flight */
3164 1.1 rjs 		sctp_total_flight_decrease(stcb, tp1);
3165 1.1 rjs 		if (alt != tp1->whoTo) {
3166 1.1 rjs 			/* yes, there is an alternate. */
3167 1.1 rjs 			sctp_free_remote_addr(tp1->whoTo);
3168 1.1 rjs 			tp1->whoTo = alt;
3169 1.1 rjs 			alt->ref_count++;
3170 1.1 rjs 		}
3171 1.1 rjs 	}
3172 1.1 rjs 	tp1 = TAILQ_NEXT(tp1, sctp_next);
3173 1.1 rjs 	} /* while (tp1) */
3174 1.1 rjs 
3175 1.1 rjs 	if (tot_retrans > 0) {
3176 1.1 rjs 		/* Setup the ecn nonce re-sync point. We
3177 1.1 rjs 		 * do this since once we FR something
3178 1.1 rjs 		 * we introduce a Karn's rule scenario and
3179 1.1 rjs 		 * won't know the totals for the ECN bits.
3180 1.1 rjs 		 */
3181 1.1 rjs 		asoc->nonce_resync_tsn = sending_seq;
3182 1.1 rjs 		asoc->nonce_wait_for_ecne = 0;
3183 1.1 rjs 		asoc->nonce_sum_check = 0;
3184 1.1 rjs 	}
3185 1.1 rjs 
3186 1.1 rjs }
3187 1.1 rjs 
3188 1.1 rjs struct sctp_tmit_chunk *
3189 1.1 rjs sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3190 1.1 rjs     struct sctp_association *asoc)
3191 1.1 rjs {
3192 1.1 rjs 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv=NULL;
3193 1.1 rjs 	struct timeval now;
3194 1.1 rjs 	int now_filled=0;
3195 1.1 rjs 
3196 1.1 rjs 	if (asoc->peer_supports_prsctp == 0) {
3197 1.1 rjs 		return (NULL);
3198 1.1 rjs 	}
3199 1.1 rjs 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3200 1.1 rjs 	while (tp1) {
3201 1.1 rjs 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3202 1.1 rjs 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3203 1.1 rjs 			/* no chance to advance, out of here */
3204 1.1 rjs 			break;
3205 1.1 rjs 		}
3206 1.1 rjs 		if ((tp1->flags & SCTP_PR_SCTP_ENABLED) == 0) {
3207 1.1 rjs 			/*
3208 1.1 rjs 			 * We can't fwd-tsn past any that are reliable
3209 1.1 rjs 			 * aka retransmitted until the asoc fails.
3210 1.1 rjs 			 */
3211 1.1 rjs 			break;
3212 1.1 rjs 		}
3213 1.1 rjs 		if (!now_filled) {
3214 1.1 rjs 			SCTP_GETTIME_TIMEVAL(&now);
3215 1.1 rjs 			now_filled = 1;
3216 1.1 rjs 		}
3217 1.1 rjs 		tp2 = TAILQ_NEXT(tp1, sctp_next);
3218 1.1 rjs 		/*
3219 1.1 rjs 		 * now we have a chunk which is marked for another
3220 1.1 rjs 		 * retransmission to a PR-stream but may have run
3221 1.1 rjs 		 * out of chances already OR has been
3222 1.1 rjs 		 * marked to skip now. Can we skip it if it's a
3223 1.1 rjs 		 * resend?
3224 1.1 rjs 		 */
3225 1.1 rjs 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3226 1.1 rjs 		    (tp1->flags & SCTP_PR_SCTP_BUFFER) == 0) {
3227 1.1 rjs 			/*
3228 1.1 rjs 			 * Now is this one marked for resend and its time
3229 1.1 rjs 			 * is now up?
3230 1.1 rjs 			 */
3231 1.1 rjs #ifndef __FreeBSD__
3232 1.1 rjs 			if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3233 1.1 rjs #else
3234 1.1 rjs 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3235 1.1 rjs #endif
3236 1.1 rjs 			{
3237 1.1 rjs 				/* Yes so drop it */
3238 1.1 rjs 				if (tp1->data) {
3239 1.1 rjs 					sctp_release_pr_sctp_chunk(stcb, tp1,
3240 1.1 rjs 					    (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3241 1.1 rjs 					    &asoc->sent_queue);
3242 1.1 rjs 				}
3243 1.1 rjs 			} else {
3244 1.1 rjs 				/*
3245 1.1 rjs 				 * No, we are done when we hit one marked
3246 1.1 rjs 				 * for resend whose time has not expired.
3247 1.1 rjs 				 */
3248 1.1 rjs 				break;
3249 1.1 rjs 			}
3250 1.1 rjs 		}
3251 1.1 rjs 		/*
3252 1.1 rjs 		 * Ok now if this chunk is marked to drop it
3253 1.1 rjs 		 * we can clean up the chunk, advance our peer ack point
3254 1.1 rjs 		 * and we can check the next chunk.
3255 1.1 rjs 		 */
3256 1.1 rjs 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3257 1.1 rjs 			/* advance PeerAckPoint goes forward */
3258 1.1 rjs 			asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3259 1.1 rjs 			a_adv = tp1;
3260 1.1 rjs 			/*
3261 1.1 rjs 			 * we don't want to de-queue it here. Just wait for the
3262 1.1 rjs 			 * next peer SACK to come with a new cumTSN and then
3263 1.11 andvar 			 * the chunk will be dropped in the normal fashion.
3264 1.1 rjs 			 */
3265 1.1 rjs 			if (tp1->data) {
3266 1.1 rjs 				sctp_free_bufspace(stcb, asoc, tp1);
3267 1.1 rjs #ifdef SCTP_DEBUG
3268 1.1 rjs 				if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3269 1.1 rjs 					printf("--total out:%lu total_mbuf_out:%lu\n",
3270 1.1 rjs 					    (u_long)asoc->total_output_queue_size,
3271 1.1 rjs 					    (u_long)asoc->total_output_mbuf_queue_size);
3272 1.1 rjs 				}
3273 1.1 rjs #endif
3274 1.1 rjs 				/*
3275 1.1 rjs 				 * Maybe there should be another notification
3276 1.1 rjs 				 * type
3277 1.1 rjs 				 */
3278 1.1 rjs 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3279 1.1 rjs 				    (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3280 1.1 rjs 				    tp1);
3281 1.1 rjs 				sctp_m_freem(tp1->data);
3282 1.1 rjs 				tp1->data = NULL;
3283 1.1 rjs 				sctp_sowwakeup(stcb->sctp_ep,
3284 1.1 rjs 				    stcb->sctp_socket);
3285 1.1 rjs 			}
3286 1.1 rjs 		} else {
3287 1.1 rjs 			/* If it is still in RESEND we can advance no further */
3288 1.1 rjs 			break;
3289 1.1 rjs 		}
3290 1.1 rjs 		/*
3291 1.1 rjs 		 * If we hit here we just dumped tp1, move to next
3292 1.1 rjs 		 * tsn on sent queue.
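 * (tp2 was sampled via TAILQ_NEXT() before tp1 could be released,
 * so stepping tp1 = tp2 keeps the list walk safe.)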
3293 1.1 rjs */ 3294 1.1 rjs tp1 = tp2; 3295 1.1 rjs } 3296 1.1 rjs return (a_adv); 3297 1.1 rjs } 3298 1.1 rjs 3299 1.1 rjs #ifdef SCTP_HIGH_SPEED 3300 1.1 rjs struct sctp_hs_raise_drop { 3301 1.1 rjs int32_t cwnd; 3302 1.1 rjs int32_t increase; 3303 1.1 rjs int32_t drop_percent; 3304 1.1 rjs }; 3305 1.1 rjs 3306 1.1 rjs #define SCTP_HS_TABLE_SIZE 73 3307 1.1 rjs 3308 1.1 rjs struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = { 3309 1.1 rjs {38,1,50}, /* 0 */ 3310 1.1 rjs {118,2,44}, /* 1 */ 3311 1.1 rjs {221,3,41}, /* 2 */ 3312 1.1 rjs {347,4,38}, /* 3 */ 3313 1.1 rjs {495,5,37}, /* 4 */ 3314 1.1 rjs {663,6,35}, /* 5 */ 3315 1.1 rjs {851,7,34}, /* 6 */ 3316 1.1 rjs {1058,8,33}, /* 7 */ 3317 1.1 rjs {1284,9,32}, /* 8 */ 3318 1.1 rjs {1529,10,31}, /* 9 */ 3319 1.1 rjs {1793,11,30}, /* 10 */ 3320 1.1 rjs {2076,12,29}, /* 11 */ 3321 1.1 rjs {2378,13,28}, /* 12 */ 3322 1.1 rjs {2699,14,28}, /* 13 */ 3323 1.1 rjs {3039,15,27}, /* 14 */ 3324 1.1 rjs {3399,16,27}, /* 15 */ 3325 1.1 rjs {3778,17,26}, /* 16 */ 3326 1.1 rjs {4177,18,26}, /* 17 */ 3327 1.1 rjs {4596,19,25}, /* 18 */ 3328 1.1 rjs {5036,20,25}, /* 19 */ 3329 1.1 rjs {5497,21,24}, /* 20 */ 3330 1.1 rjs {5979,22,24}, /* 21 */ 3331 1.1 rjs {6483,23,23}, /* 22 */ 3332 1.1 rjs {7009,24,23}, /* 23 */ 3333 1.1 rjs {7558,25,22}, /* 24 */ 3334 1.1 rjs {8130,26,22}, /* 25 */ 3335 1.1 rjs {8726,27,22}, /* 26 */ 3336 1.1 rjs {9346,28,21}, /* 27 */ 3337 1.1 rjs {9991,29,21}, /* 28 */ 3338 1.1 rjs {10661,30,21}, /* 29 */ 3339 1.1 rjs {11358,31,20}, /* 30 */ 3340 1.1 rjs {12082,32,20}, /* 31 */ 3341 1.1 rjs {12834,33,20}, /* 32 */ 3342 1.1 rjs {13614,34,19}, /* 33 */ 3343 1.1 rjs {14424,35,19}, /* 34 */ 3344 1.1 rjs {15265,36,19}, /* 35 */ 3345 1.1 rjs {16137,37,19}, /* 36 */ 3346 1.1 rjs {17042,38,18}, /* 37 */ 3347 1.1 rjs {17981,39,18}, /* 38 */ 3348 1.1 rjs {18955,40,18}, /* 39 */ 3349 1.1 rjs {19965,41,17}, /* 40 */ 3350 1.1 rjs {21013,42,17}, /* 41 */ 3351 1.1 rjs {22101,43,17}, /* 42 */ 3352 1.1 rjs {23230,44,17}, /* 43 */ 3353 1.1 rjs {24402,45,16}, /* 44 */ 3354 1.1 rjs {25618,46,16}, /* 45 */ 3355 1.1 rjs {26881,47,16}, /* 46 */ 3356 1.1 rjs {28193,48,16}, /* 47 */ 3357 1.1 rjs {29557,49,15}, /* 48 */ 3358 1.1 rjs {30975,50,15}, /* 49 */ 3359 1.1 rjs {32450,51,15}, /* 50 */ 3360 1.1 rjs {33986,52,15}, /* 51 */ 3361 1.1 rjs {35586,53,14}, /* 52 */ 3362 1.1 rjs {37253,54,14}, /* 53 */ 3363 1.1 rjs {38992,55,14}, /* 54 */ 3364 1.1 rjs {40808,56,14}, /* 55 */ 3365 1.1 rjs {42707,57,13}, /* 56 */ 3366 1.1 rjs {44694,58,13}, /* 57 */ 3367 1.1 rjs {46776,59,13}, /* 58 */ 3368 1.1 rjs {48961,60,13}, /* 59 */ 3369 1.1 rjs {51258,61,13}, /* 60 */ 3370 1.1 rjs {53677,62,12}, /* 61 */ 3371 1.1 rjs {56230,63,12}, /* 62 */ 3372 1.1 rjs {58932,64,12}, /* 63 */ 3373 1.1 rjs {61799,65,12}, /* 64 */ 3374 1.1 rjs {64851,66,11}, /* 65 */ 3375 1.1 rjs {68113,67,11}, /* 66 */ 3376 1.1 rjs {71617,68,11}, /* 67 */ 3377 1.1 rjs {75401,69,10}, /* 68 */ 3378 1.1 rjs {79517,70,10}, /* 69 */ 3379 1.1 rjs {84035,71,10}, /* 70 */ 3380 1.1 rjs {89053,72,10}, /* 71 */ 3381 1.1 rjs {94717,73,9} /* 72 */ 3382 1.1 rjs }; 3383 1.1 rjs 3384 1.1 rjs static void 3385 1.1 rjs sctp_hs_cwnd_increase(struct sctp_nets *net) 3386 1.1 rjs { 3387 1.1 rjs int cur_val, i, indx, incr; 3388 1.1 rjs 3389 1.1 rjs cur_val = net->cwnd >> 10; 3390 1.1 rjs indx = SCTP_HS_TABLE_SIZE - 1; 3391 1.1 rjs 3392 1.1 rjs if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3393 1.1 rjs /* normal mode */ 3394 1.1 rjs if (net->net_ack > net->mtu) { 3395 1.1 rjs net->cwnd += net->mtu; 3396 1.1 rjs #ifdef 
SCTP_CWND_LOGGING 3397 1.1 rjs sctp_log_cwnd(net, net->mtu, SCTP_CWND_LOG_FROM_SS); 3398 1.1 rjs #endif 3399 1.1 rjs } else { 3400 1.1 rjs net->cwnd += net->net_ack; 3401 1.1 rjs #ifdef SCTP_CWND_LOGGING 3402 1.1 rjs sctp_log_cwnd(net, net->net_ack, SCTP_CWND_LOG_FROM_SS); 3403 1.1 rjs #endif 3404 1.1 rjs } 3405 1.1 rjs } else { 3406 1.1 rjs for (i=net->last_hs_used; i<SCTP_HS_TABLE_SIZE; i++) { 3407 1.1 rjs if (cur_val < sctp_cwnd_adjust[i].cwnd) { 3408 1.1 rjs indx = i; 3409 1.1 rjs break; 3410 1.1 rjs } 3411 1.1 rjs } 3412 1.1 rjs net->last_hs_used = indx; 3413 1.1 rjs incr = ((sctp_cwnd_adjust[indx].increase) << 10); 3414 1.1 rjs net->cwnd += incr; 3415 1.1 rjs #ifdef SCTP_CWND_LOGGING 3416 1.1 rjs sctp_log_cwnd(net, incr, SCTP_CWND_LOG_FROM_SS); 3417 1.1 rjs #endif 3418 1.1 rjs } 3419 1.1 rjs } 3420 1.1 rjs 3421 1.1 rjs static void 3422 1.1 rjs sctp_hs_cwnd_decrease(struct sctp_nets *net) 3423 1.1 rjs { 3424 1.1 rjs int cur_val, i, indx; 3425 1.1 rjs #ifdef SCTP_CWND_LOGGING 3426 1.1 rjs int old_cwnd = net->cwnd; 3427 1.1 rjs #endif 3428 1.1 rjs 3429 1.1 rjs cur_val = net->cwnd >> 10; 3430 1.1 rjs indx = net->last_hs_used; 3431 1.1 rjs if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3432 1.1 rjs /* normal mode */ 3433 1.1 rjs net->ssthresh = net->cwnd / 2; 3434 1.1 rjs if (net->ssthresh < (net->mtu*2)) { 3435 1.1 rjs net->ssthresh = 2 * net->mtu; 3436 1.1 rjs } 3437 1.1 rjs net->cwnd = net->ssthresh; 3438 1.1 rjs #ifdef SCTP_CWND_LOGGING 3439 1.1 rjs sctp_log_cwnd(net, (net->cwnd-old_cwnd), SCTP_CWND_LOG_FROM_FR); 3440 1.1 rjs #endif 3441 1.1 rjs } else { 3442 1.1 rjs /* drop by the proper amount */ 3443 1.1 rjs net->ssthresh = net->cwnd - (int)((net->cwnd / 100) * 3444 1.1 rjs sctp_cwnd_adjust[net->last_hs_used].drop_percent); 3445 1.1 rjs net->cwnd = net->ssthresh; 3446 1.1 rjs /* now where are we */ 3447 1.1 rjs indx = net->last_hs_used; 3448 1.1 rjs cur_val = net->cwnd >> 10; 3449 1.1 rjs /* reset where we are in the table */ 3450 1.1 rjs if (cur_val < sctp_cwnd_adjust[0].cwnd) { 3451 1.1 rjs /* feel out of hs */ 3452 1.1 rjs net->last_hs_used = 0; 3453 1.1 rjs } else { 3454 1.1 rjs for (i = indx; i >= 1; i--) { 3455 1.1 rjs if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) { 3456 1.1 rjs break; 3457 1.1 rjs } 3458 1.1 rjs } 3459 1.1 rjs net->last_hs_used = indx; 3460 1.1 rjs } 3461 1.1 rjs } 3462 1.1 rjs } 3463 1.1 rjs #endif 3464 1.1 rjs 3465 1.1 rjs void 3466 1.1 rjs sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb, 3467 1.1 rjs struct sctp_nets *net_from, int *abort_now) 3468 1.1 rjs { 3469 1.1 rjs struct sctp_association *asoc; 3470 1.1 rjs struct sctp_sack *sack; 3471 1.1 rjs struct sctp_tmit_chunk *tp1, *tp2; 3472 1.1 rjs u_long cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked; 3473 1.1 rjs uint16_t num_seg; 3474 1.1 rjs unsigned int sack_length; 3475 1.1 rjs uint32_t send_s; 3476 1.1 rjs int some_on_streamwheel; 3477 1.1 rjs int strike_enabled = 0, cnt_of_cacc = 0; 3478 1.1 rjs int accum_moved = 0; 3479 1.1 rjs int marking_allowed = 1; 3480 1.1 rjs int will_exit_fast_recovery=0; 3481 1.1 rjs u_int32_t a_rwnd; 3482 1.1 rjs struct sctp_nets *net = NULL; 3483 1.1 rjs int nonce_sum_flag, ecn_seg_sums=0; 3484 1.1 rjs asoc = &stcb->asoc; 3485 1.1 rjs 3486 1.1 rjs /* 3487 1.1 rjs * Handle the incoming sack on data I have been sending. 
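 * The twelve numbered steps in the comment below are the road map:
 * everything from cum-ack advancement through cwnd adjustment,
 * fast-retransmit strikes and the PR-SCTP procedures happens in
 * this one function.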
*/
3489 1.1 rjs 
3490 1.1 rjs 	/*
3491 1.1 rjs 	 * we take any chance we can to service our queues since we
3492 1.1 rjs 	 * cannot get awoken when the socket is read from :<
3493 1.1 rjs 	 */
3494 1.1 rjs 	asoc->overall_error_count = 0;
3495 1.1 rjs 
3496 1.1 rjs 	if (asoc->sent_queue_retran_cnt) {
3497 1.1 rjs #ifdef SCTP_DEBUG
3498 1.1 rjs 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3499 1.1 rjs 			printf("Handling SACK for asoc:%p retran:%d\n",
3500 1.1 rjs 			    asoc, asoc->sent_queue_retran_cnt);
3501 1.1 rjs 		}
3502 1.1 rjs #endif
3503 1.1 rjs 	}
3504 1.1 rjs 
3505 1.1 rjs 	sctp_service_queues(stcb, asoc, 0);
3506 1.1 rjs 
3507 1.1 rjs 	/*
3508 1.1 rjs 	 * Now perform the actual SACK handling:
3509 1.1 rjs 	 * 1) Verify that it is not an old sack, if so discard.
3510 1.1 rjs 	 * 2) If there is nothing left in the send queue (cum-ack is equal
3511 1.1 rjs 	 *    to last acked) then you have a duplicate too; update any rwnd
3512 1.1 rjs 	 *    change and verify no timers are running. Then return.
3513 1.1 rjs 	 * 3) Process any new consecutive data, i.e. cum-ack moved;
3514 1.1 rjs 	 *    process these first and note that it moved.
3515 1.1 rjs 	 * 4) Process any sack blocks.
3516 1.1 rjs 	 * 5) Drop any acked from the queue.
3517 1.1 rjs 	 * 6) Check for any revoked blocks and mark.
3518 1.1 rjs 	 * 7) Update the cwnd.
3519 1.1 rjs 	 * 8) Nothing left, sync up flightsizes and things, stop all timers
3520 1.1 rjs 	 *    and also check for shutdown_pending state. If so then go ahead
3521 1.1 rjs 	 *    and send off the shutdown. If in shutdown recv, send off the
3522 1.1 rjs 	 *    shutdown-ack and start that timer. Return.
3523 1.1 rjs 	 * 9) Strike any non-acked things and do FR procedure if needed,
3524 1.1 rjs 	 *    being sure to set the FR flag.
3525 1.1 rjs 	 * 10) Do pr-sctp procedures.
3526 1.1 rjs 	 * 11) Apply any FR penalties.
3527 1.1 rjs 	 * 12) Assure we will SACK if in shutdown_recv state.
3528 1.1 rjs 	 */
3529 1.1 rjs 
3530 1.1 rjs 	sack_length = ntohs(ch->ch.chunk_length);
3531 1.1 rjs 	if (sack_length < sizeof(struct sctp_sack_chunk)) {
3532 1.1 rjs #ifdef SCTP_DEBUG
3533 1.1 rjs 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3534 1.1 rjs 			printf("Bad size on sack chunk .. too small\n");
3535 1.1 rjs 		}
3536 1.1 rjs #endif
3537 1.1 rjs 		return;
3538 1.1 rjs 	}
3539 1.1 rjs 	/* ECN Nonce */
3540 1.1 rjs 	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
3541 1.1 rjs 	sack = &ch->sack;
3542 1.1 rjs 	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
3543 1.1 rjs 	num_seg = ntohs(sack->num_gap_ack_blks);
3544 1.1 rjs 
3545 1.1 rjs 	/* reality check */
3546 1.1 rjs 	if (TAILQ_EMPTY(&asoc->send_queue)) {
3547 1.1 rjs 		send_s = asoc->sending_seq;
3548 1.1 rjs 	} else {
3549 1.1 rjs 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3550 1.1 rjs 		send_s = tp1->rec.data.TSN_seq;
3551 1.1 rjs 	}
3552 1.1 rjs 
3553 1.1 rjs 	if (sctp_strict_sacks) {
3554 1.1 rjs 		if (cum_ack == send_s ||
3555 1.1 rjs 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
3556 1.1 rjs 			struct mbuf *oper;
3557 1.1 rjs 			/*
3558 1.1 rjs 			 * no way, we have not even sent this TSN out yet.
3559 1.1 rjs 			 * Peer is hopelessly messed up with us.
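 * A SACK can never legitimately ack a TSN that has not yet been
 * assigned to any chunk; treat it as a protocol violation.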
*/
3561 1.1 rjs 	hopeless_peer:
3562 1.1 rjs 			*abort_now = 1;
3563 1.1 rjs 			/* XXX */
3564 1.1 rjs 			MGET(oper, M_DONTWAIT, MT_DATA);
3565 1.1 rjs 			if (oper) {
3566 1.1 rjs 				struct sctp_paramhdr *ph;
3567 1.1 rjs 				u_int32_t *ippp;
3568 1.1 rjs 
3569 1.1 rjs 				oper->m_len = sizeof(struct sctp_paramhdr) +
3570 1.1 rjs 				    sizeof(*ippp);
3571 1.1 rjs 				ph = mtod(oper, struct sctp_paramhdr *);
3572 1.1 rjs 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3573 1.1 rjs 				ph->param_length = htons(oper->m_len);
3574 1.1 rjs 				ippp = (u_int32_t *)(ph + 1);
3575 1.1 rjs 				*ippp = htonl(0x30000002);
3576 1.1 rjs 			}
3577 1.1 rjs 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
3578 1.1 rjs 			return;
3579 1.1 rjs 		}
3580 1.1 rjs 	}
3581 1.1 rjs 	/* update the Rwnd of the peer */
3582 1.1 rjs 	a_rwnd = (u_int32_t)ntohl(sack->a_rwnd);
3583 1.1 rjs 	if (asoc->sent_queue_retran_cnt) {
3584 1.1 rjs #ifdef SCTP_DEBUG
3585 1.1 rjs 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3586 1.1 rjs 			printf("cum_ack:%lx num_seg:%u last_acked_seq:%x\n",
3587 1.1 rjs 			    cum_ack, (u_int)num_seg, asoc->last_acked_seq);
3588 1.1 rjs 		}
3589 1.1 rjs #endif
3590 1.1 rjs 	}
3591 1.1 rjs 	if (compare_with_wrap(asoc->t3timeout_highest_marked, cum_ack, MAX_TSN)) {
3592 1.1 rjs 		/* we are not allowed to mark for FR */
3593 1.1 rjs 		marking_allowed = 0;
3594 1.1 rjs 	}
3595 1.1 rjs 	/**********************/
3596 1.1 rjs 	/* 1) check the range */
3597 1.1 rjs 	/**********************/
3598 1.1 rjs 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
3599 1.1 rjs 		/* acking something behind */
3600 1.1 rjs 		if (asoc->sent_queue_retran_cnt) {
3601 1.1 rjs #ifdef SCTP_DEBUG
3602 1.1 rjs 			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3603 1.1 rjs 				printf("The cum-ack is behind us\n");
3604 1.1 rjs 			}
3605 1.1 rjs #endif
3606 1.1 rjs 		}
3607 1.1 rjs 		return;
3608 1.1 rjs 	}
3609 1.1 rjs 
3610 1.1 rjs 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3611 1.1 rjs 		/* nothing left on sendqueue.. consider done */
3612 1.1 rjs #ifdef SCTP_LOG_RWND
3613 1.1 rjs 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
3614 1.1 rjs 		    asoc->peers_rwnd, 0, 0, a_rwnd);
3615 1.1 rjs #endif
3616 1.1 rjs 		asoc->peers_rwnd = a_rwnd;
3617 1.1 rjs 		if (asoc->sent_queue_retran_cnt) {
3618 1.1 rjs #ifdef SCTP_DEBUG
3619 1.1 rjs 			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3620 1.1 rjs 				printf("Huh? retran set but none on queue\n");
3621 1.1 rjs 			}
3622 1.1 rjs #endif
3623 1.1 rjs 			asoc->sent_queue_retran_cnt = 0;
3624 1.1 rjs 		}
3625 1.1 rjs 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3626 1.1 rjs 			/* SWS sender side engages */
3627 1.1 rjs 			asoc->peers_rwnd = 0;
3628 1.1 rjs 		}
3629 1.1 rjs 		/* stop any timers */
3630 1.1 rjs 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3631 1.1 rjs 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3632 1.1 rjs 			    stcb, net);
3633 1.1 rjs 			net->partial_bytes_acked = 0;
3634 1.1 rjs 			net->flight_size = 0;
3635 1.1 rjs 		}
3636 1.1 rjs 		asoc->total_flight = 0;
3637 1.1 rjs 		asoc->total_flight_count = 0;
3638 1.1 rjs 		return;
3639 1.1 rjs 	}
3640 1.1 rjs 	/*
3641 1.1 rjs 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
3642 1.1 rjs 	 * things. The total byte count acked is tracked in netAckSz AND
3643 1.1 rjs 	 * netAck2 is used to track the total bytes acked that are
3644 1.1 rjs 	 * unambiguous and were never retransmitted. We track these on a
3645 1.1 rjs 	 * per destination address basis.
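 * Roughly: net_ack feeds the cwnd growth logic further down, while
 * net_ack2 (bytes never retransmitted, hence unambiguous under
 * Karn's rule) additionally gates clearing the error count and
 * un-backing-off the RTO.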
3646 1.1 rjs */ 3647 1.1 rjs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3648 1.1 rjs net->prev_cwnd = net->cwnd; 3649 1.1 rjs net->net_ack = 0; 3650 1.1 rjs net->net_ack2 = 0; 3651 1.1 rjs } 3652 1.1 rjs /* process the new consecutive TSN first */ 3653 1.1 rjs tp1 = TAILQ_FIRST(&asoc->sent_queue); 3654 1.1 rjs while (tp1) { 3655 1.1 rjs if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq, 3656 1.1 rjs MAX_TSN) || 3657 1.1 rjs last_tsn == tp1->rec.data.TSN_seq) { 3658 1.1 rjs if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 3659 1.1 rjs /* ECN Nonce: Add the nonce to the sender's nonce sum */ 3660 1.1 rjs asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce; 3661 1.1 rjs accum_moved = 1; 3662 1.1 rjs if (tp1->sent < SCTP_DATAGRAM_ACKED) { 3663 1.1 rjs /* 3664 1.1 rjs * If it is less than ACKED, it is now 3665 1.1 rjs * no-longer in flight. Higher values 3666 1.1 rjs * may occur during marking 3667 1.1 rjs */ 3668 1.1 rjs if ((tp1->whoTo->dest_state & 3669 1.1 rjs SCTP_ADDR_UNCONFIRMED) && 3670 1.1 rjs (tp1->snd_count < 2) ) { 3671 1.1 rjs /* 3672 1.1 rjs * If there was no retran and 3673 1.1 rjs * the address is un-confirmed 3674 1.1 rjs * and we sent there and are 3675 1.1 rjs * now sacked.. its confirmed, 3676 1.1 rjs * mark it so. 3677 1.1 rjs */ 3678 1.1 rjs tp1->whoTo->dest_state &= 3679 1.1 rjs ~SCTP_ADDR_UNCONFIRMED; 3680 1.1 rjs } 3681 1.1 rjs sctp_flight_size_decrease(tp1); 3682 1.1 rjs sctp_total_flight_decrease(stcb, tp1); 3683 1.1 rjs tp1->whoTo->net_ack += tp1->send_size; 3684 1.1 rjs if (tp1->snd_count < 2) { 3685 1.17 andvar /* True non-retransmitted chunk */ 3686 1.1 rjs tp1->whoTo->net_ack2 += 3687 1.1 rjs tp1->send_size; 3688 1.1 rjs /* update RTO too? */ 3689 1.1 rjs if (tp1->do_rtt) { 3690 1.1 rjs tp1->whoTo->RTO = 3691 1.1 rjs sctp_calculate_rto(stcb, 3692 1.1 rjs asoc, tp1->whoTo, 3693 1.1 rjs &tp1->sent_rcv_time); 3694 1.1 rjs tp1->whoTo->rto_pending = 0; 3695 1.1 rjs tp1->do_rtt = 0; 3696 1.1 rjs } 3697 1.1 rjs } 3698 1.1 rjs } 3699 1.1 rjs if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3700 1.1 rjs #ifdef SCTP_DEBUG 3701 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_INDATA3) { 3702 1.1 rjs printf("Hmm. 
one that is in RESEND that is now ACKED\n"); 3703 1.1 rjs } 3704 1.1 rjs #endif 3705 1.1 rjs sctp_ucount_decr(asoc->sent_queue_retran_cnt); 3706 1.1 rjs #ifdef SCTP_AUDITING_ENABLED 3707 1.1 rjs sctp_audit_log(0xB3, 3708 1.1 rjs (asoc->sent_queue_retran_cnt & 0x000000ff)); 3709 1.1 rjs #endif 3710 1.1 rjs 3711 1.1 rjs } 3712 1.1 rjs tp1->sent = SCTP_DATAGRAM_ACKED; 3713 1.1 rjs } 3714 1.1 rjs } else { 3715 1.1 rjs break; 3716 1.1 rjs } 3717 1.1 rjs tp1 = TAILQ_NEXT(tp1, sctp_next); 3718 1.1 rjs } 3719 1.1 rjs /*******************************************/ 3720 1.1 rjs /* cancel ALL T3-send timer if accum moved */ 3721 1.1 rjs /*******************************************/ 3722 1.1 rjs if (accum_moved) { 3723 1.1 rjs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3724 1.1 rjs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 3725 1.1 rjs stcb, net); 3726 1.1 rjs } 3727 1.1 rjs } 3728 1.1 rjs biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 3729 1.1 rjs /* always set this up to cum-ack */ 3730 1.1 rjs asoc->this_sack_highest_gap = last_tsn; 3731 1.1 rjs 3732 1.2 christos if (num_seg * sizeof(struct sctp_gap_ack_block) + sizeof(struct sctp_sack_chunk) > sack_length) { 3733 1.1 rjs /* skip corrupt segments */ 3734 1.1 rjs strike_enabled = 0; 3735 1.1 rjs goto skip_segments; 3736 1.1 rjs } 3737 1.1 rjs 3738 1.1 rjs if (num_seg > 0) { 3739 1.1 rjs if (asoc->primary_destination->dest_state & 3740 1.1 rjs SCTP_ADDR_SWITCH_PRIMARY) { 3741 1.1 rjs /* clear the nets CACC flags */ 3742 1.1 rjs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3743 1.1 rjs net->cacc_saw_newack = 0; 3744 1.1 rjs } 3745 1.1 rjs } 3746 1.1 rjs /* 3747 1.1 rjs * thisSackHighestGap will increase while handling NEW segments 3748 1.1 rjs */ 3749 1.1 rjs 3750 1.1 rjs sctp_handle_segments(stcb, asoc, ch, last_tsn, 3751 1.1 rjs &biggest_tsn_acked, &biggest_tsn_newly_acked, 3752 1.1 rjs num_seg, &ecn_seg_sums); 3753 1.1 rjs 3754 1.1 rjs if (sctp_strict_sacks) { 3755 1.1 rjs /* validate the biggest_tsn_acked in the gap acks 3756 1.1 rjs * if strict adherence is wanted. 3757 1.1 rjs */ 3758 1.1 rjs if ((biggest_tsn_acked == send_s) || 3759 1.1 rjs (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) { 3760 1.1 rjs /* 3761 1.1 rjs * peer is either confused or we are under 3762 1.1 rjs * attack. We must abort. 
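 * The hopeless_peer path above is reused here; it sends an ABORT
 * carrying a Protocol Violation error cause and sets *abort_now.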
3763 1.1 rjs */ 3764 1.1 rjs goto hopeless_peer; 3765 1.1 rjs } 3766 1.1 rjs } 3767 1.1 rjs 3768 1.1 rjs if (asoc->primary_destination->dest_state & 3769 1.1 rjs SCTP_ADDR_SWITCH_PRIMARY) { 3770 1.1 rjs /* clear the nets CACC flags */ 3771 1.1 rjs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3772 1.1 rjs if (net->cacc_saw_newack) { 3773 1.1 rjs cnt_of_cacc++; 3774 1.1 rjs } 3775 1.1 rjs } 3776 1.1 rjs } 3777 1.1 rjs 3778 1.1 rjs } 3779 1.1 rjs 3780 1.1 rjs if (cnt_of_cacc < 2) { 3781 1.1 rjs strike_enabled = 1; 3782 1.1 rjs } else { 3783 1.1 rjs strike_enabled = 0; 3784 1.1 rjs } 3785 1.1 rjs skip_segments: 3786 1.1 rjs /********************************************/ 3787 1.1 rjs /* drop the acked chunks from the sendqueue */ 3788 1.1 rjs /********************************************/ 3789 1.1 rjs asoc->last_acked_seq = cum_ack; 3790 1.1 rjs if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) { 3791 1.1 rjs if ((cum_ack == asoc->primary_destination->next_tsn_at_change) || 3792 1.1 rjs (compare_with_wrap(cum_ack, 3793 1.1 rjs asoc->primary_destination->next_tsn_at_change, MAX_TSN))) { 3794 1.1 rjs struct sctp_nets *lnet; 3795 1.1 rjs /* Turn off the switch flag for ALL addresses */ 3796 1.1 rjs TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 3797 1.1 rjs asoc->primary_destination->dest_state &= 3798 1.1 rjs ~(SCTP_ADDR_SWITCH_PRIMARY|SCTP_ADDR_DOUBLE_SWITCH); 3799 1.1 rjs } 3800 1.1 rjs } 3801 1.1 rjs } 3802 1.1 rjs /* Drag along the t3 timeout point so we don't have a problem at wrap */ 3803 1.1 rjs if (marking_allowed) { 3804 1.1 rjs asoc->t3timeout_highest_marked = cum_ack; 3805 1.1 rjs } 3806 1.1 rjs tp1 = TAILQ_FIRST(&asoc->sent_queue); 3807 1.1 rjs do { 3808 1.1 rjs if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack, 3809 1.1 rjs MAX_TSN)) { 3810 1.1 rjs break; 3811 1.1 rjs } 3812 1.1 rjs if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 3813 1.1 rjs /* no more sent on list */ 3814 1.1 rjs break; 3815 1.1 rjs } 3816 1.1 rjs tp2 = TAILQ_NEXT(tp1, sctp_next); 3817 1.1 rjs TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 3818 1.1 rjs if (tp1->data) { 3819 1.1 rjs sctp_free_bufspace(stcb, asoc, tp1); 3820 1.1 rjs #ifdef SCTP_DEBUG 3821 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) { 3822 1.1 rjs printf("--total out:%lu total_mbuf_out:%lu\n", 3823 1.1 rjs (u_long)asoc->total_output_queue_size, 3824 1.1 rjs (u_long)asoc->total_output_mbuf_queue_size); 3825 1.1 rjs } 3826 1.1 rjs #endif 3827 1.1 rjs 3828 1.1 rjs sctp_m_freem(tp1->data); 3829 1.1 rjs if (tp1->flags & SCTP_PR_SCTP_BUFFER) { 3830 1.1 rjs asoc->sent_queue_cnt_removeable--; 3831 1.1 rjs } 3832 1.1 rjs 3833 1.1 rjs } 3834 1.1 rjs tp1->data = NULL; 3835 1.1 rjs asoc->sent_queue_cnt--; 3836 1.1 rjs sctp_free_remote_addr(tp1->whoTo); 3837 1.1 rjs sctppcbinfo.ipi_count_chunk--; 3838 1.1 rjs asoc->chunks_on_out_queue--; 3839 1.1 rjs 3840 1.1 rjs if ((int)sctppcbinfo.ipi_count_chunk < 0) { 3841 1.1 rjs panic("Chunk count is going negative"); 3842 1.1 rjs } 3843 1.1 rjs SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, tp1); 3844 1.1 rjs sctppcbinfo.ipi_gencnt_chunk++; 3845 1.1 rjs sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 3846 1.1 rjs tp1 = tp2; 3847 1.1 rjs } while (tp1 != NULL); 3848 1.1 rjs 3849 1.1 rjs 3850 1.1 rjs if (asoc->fast_retran_loss_recovery && accum_moved) { 3851 1.1 rjs if (compare_with_wrap(asoc->last_acked_seq, 3852 1.1 rjs asoc->fast_recovery_tsn, MAX_TSN) || 3853 1.1 rjs asoc->last_acked_seq == asoc->fast_recovery_tsn) { 3854 1.1 rjs /* Setup so we will exit RFC2582 fast recovery */ 3855 1.1 rjs 
will_exit_fast_recovery = 1; 3856 1.1 rjs } 3857 1.1 rjs } 3858 1.1 rjs 3859 1.1 rjs /* Check for revoked fragments if we hand 3860 1.1 rjs * fragments in a previous segment. If we 3861 1.1 rjs * had no previous fragments we cannot have 3862 1.1 rjs * a revoke issue. 3863 1.1 rjs */ 3864 1.1 rjs if (asoc->saw_sack_with_frags) 3865 1.1 rjs sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked); 3866 1.1 rjs 3867 1.1 rjs if (num_seg) 3868 1.1 rjs asoc->saw_sack_with_frags = 1; 3869 1.1 rjs else 3870 1.1 rjs asoc->saw_sack_with_frags = 0; 3871 1.1 rjs 3872 1.1 rjs /******************************/ 3873 1.1 rjs /* update cwnd */ 3874 1.1 rjs /******************************/ 3875 1.1 rjs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3876 1.1 rjs /* if nothing was acked on this destination skip it */ 3877 1.1 rjs if (net->net_ack == 0) 3878 1.1 rjs continue; 3879 1.1 rjs 3880 1.1 rjs if (net->net_ack2 > 0) { 3881 1.1 rjs /* 3882 1.1 rjs * Karn's rule applies to clearing error count, 3883 1.1 rjs * this is optional. 3884 1.1 rjs */ 3885 1.1 rjs net->error_count = 0; 3886 1.1 rjs if ((net->dest_state&SCTP_ADDR_NOT_REACHABLE) == 3887 1.1 rjs SCTP_ADDR_NOT_REACHABLE) { 3888 1.1 rjs /* addr came good */ 3889 1.1 rjs net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 3890 1.1 rjs net->dest_state |= SCTP_ADDR_REACHABLE; 3891 1.1 rjs sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 3892 1.1 rjs SCTP_RECEIVED_SACK, (void *)net); 3893 1.1 rjs /* now was it the primary? if so restore */ 3894 1.1 rjs if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 3895 1.1 rjs sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net); 3896 1.1 rjs } 3897 1.1 rjs } 3898 1.1 rjs } 3899 1.1 rjs 3900 1.1 rjs if (asoc->fast_retran_loss_recovery && 3901 1.1 rjs will_exit_fast_recovery == 0) { 3902 1.1 rjs /* If we are in loss recovery we skip any cwnd update */ 3903 1.1 rjs sctp_pegs[SCTP_CWND_SKIP]++; 3904 1.1 rjs goto skip_cwnd_update; 3905 1.1 rjs } 3906 1.1 rjs if (accum_moved) { 3907 1.1 rjs /* If the cumulative ack moved we can proceed */ 3908 1.1 rjs if (net->cwnd <= net->ssthresh) { 3909 1.1 rjs /* We are in slow start */ 3910 1.1 rjs if (net->flight_size + net->net_ack >= 3911 1.1 rjs net->cwnd ) { 3912 1.1 rjs #ifdef SCTP_HIGH_SPEED 3913 1.1 rjs sctp_hs_cwnd_increase(net); 3914 1.1 rjs #else 3915 1.1 rjs if (net->net_ack > net->mtu) { 3916 1.1 rjs net->cwnd += net->mtu; 3917 1.1 rjs #ifdef SCTP_CWND_LOGGING 3918 1.1 rjs sctp_log_cwnd(net, net->mtu, 3919 1.1 rjs SCTP_CWND_LOG_FROM_SS); 3920 1.1 rjs #endif 3921 1.1 rjs 3922 1.1 rjs } else { 3923 1.1 rjs net->cwnd += net->net_ack; 3924 1.1 rjs #ifdef SCTP_CWND_LOGGING 3925 1.1 rjs sctp_log_cwnd(net, net->net_ack, 3926 1.1 rjs SCTP_CWND_LOG_FROM_SS); 3927 1.1 rjs #endif 3928 1.1 rjs 3929 1.1 rjs } 3930 1.1 rjs #endif 3931 1.1 rjs sctp_pegs[SCTP_CWND_SS]++; 3932 1.1 rjs } else { 3933 1.1 rjs unsigned int dif; 3934 1.1 rjs sctp_pegs[SCTP_CWND_NOUSE_SS]++; 3935 1.1 rjs dif = net->cwnd - (net->flight_size + 3936 1.1 rjs net->net_ack); 3937 1.1 rjs #ifdef SCTP_CWND_LOGGING 3938 1.1 rjs /* sctp_log_cwnd(net, net->net_ack, 3939 1.1 rjs SCTP_CWND_LOG_NOADV_SS);*/ 3940 1.1 rjs #endif 3941 1.1 rjs if (dif > sctp_pegs[SCTP_CWND_DIFF_SA]) { 3942 1.1 rjs sctp_pegs[SCTP_CWND_DIFF_SA] = 3943 1.1 rjs dif; 3944 1.1 rjs sctp_pegs[SCTP_OQS_AT_SS] = 3945 1.1 rjs asoc->total_output_queue_size; 3946 1.1 rjs sctp_pegs[SCTP_SQQ_AT_SS] = 3947 1.1 rjs asoc->sent_queue_cnt; 3948 1.1 rjs sctp_pegs[SCTP_SQC_AT_SS] = 3949 1.1 rjs asoc->send_queue_cnt; 3950 1.1 rjs } 3951 1.1 rjs } 3952 1.1 rjs } else { 3953 1.1 
rjs /* We are in congestion avoidance */ 3954 1.1 rjs if (net->flight_size + net->net_ack >= 3955 1.1 rjs net->cwnd) { 3956 1.1 rjs /* 3957 1.1 rjs * add to pba only if we had a cwnd's 3958 1.1 rjs * worth (or so) in flight OR the 3959 1.1 rjs * burst limit was applied. 3960 1.1 rjs */ 3961 1.1 rjs net->partial_bytes_acked += 3962 1.1 rjs net->net_ack; 3963 1.1 rjs 3964 1.1 rjs /* 3965 1.1 rjs * Do we need to increase 3966 1.1 rjs * (if pba is > cwnd)? 3967 1.1 rjs */ 3968 1.1 rjs if (net->partial_bytes_acked >= 3969 1.1 rjs net->cwnd) { 3970 1.1 rjs if (net->cwnd < 3971 1.1 rjs net->partial_bytes_acked) { 3972 1.1 rjs net->partial_bytes_acked -= 3973 1.1 rjs net->cwnd; 3974 1.1 rjs } else { 3975 1.1 rjs net->partial_bytes_acked = 3976 1.1 rjs 0; 3977 1.1 rjs } 3978 1.1 rjs net->cwnd += net->mtu; 3979 1.1 rjs #ifdef SCTP_CWND_LOGGING 3980 1.1 rjs sctp_log_cwnd(net, net->mtu, 3981 1.1 rjs SCTP_CWND_LOG_FROM_CA); 3982 1.1 rjs #endif 3983 1.1 rjs sctp_pegs[SCTP_CWND_CA]++; 3984 1.1 rjs } 3985 1.1 rjs } else { 3986 1.1 rjs unsigned int dif; 3987 1.1 rjs sctp_pegs[SCTP_CWND_NOUSE_CA]++; 3988 1.1 rjs #ifdef SCTP_CWND_LOGGING 3989 1.1 rjs /* sctp_log_cwnd(net, net->net_ack, 3990 1.1 rjs SCTP_CWND_LOG_NOADV_CA); 3991 1.1 rjs */ 3992 1.1 rjs #endif 3993 1.1 rjs dif = net->cwnd - (net->flight_size + 3994 1.1 rjs net->net_ack); 3995 1.1 rjs if (dif > sctp_pegs[SCTP_CWND_DIFF_CA]) { 3996 1.1 rjs sctp_pegs[SCTP_CWND_DIFF_CA] = 3997 1.1 rjs dif; 3998 1.1 rjs sctp_pegs[SCTP_OQS_AT_CA] = 3999 1.1 rjs asoc->total_output_queue_size; 4000 1.1 rjs sctp_pegs[SCTP_SQQ_AT_CA] = 4001 1.1 rjs asoc->sent_queue_cnt; 4002 1.1 rjs sctp_pegs[SCTP_SQC_AT_CA] = 4003 1.1 rjs asoc->send_queue_cnt; 4004 1.1 rjs 4005 1.1 rjs } 4006 1.1 rjs 4007 1.1 rjs } 4008 1.1 rjs } 4009 1.1 rjs } else { 4010 1.1 rjs sctp_pegs[SCTP_CWND_NOCUM]++; 4011 1.1 rjs } 4012 1.1 rjs skip_cwnd_update: 4013 1.1 rjs /* 4014 1.1 rjs * NOW, according to Karn's rule do we need to restore the 4015 1.1 rjs * RTO timer back? Check our net_ack2. If not set then we 4016 1.1 rjs * have a ambiguity.. i.e. all data ack'd was sent to more 4017 1.1 rjs * than one place. 4018 1.1 rjs */ 4019 1.1 rjs 4020 1.1 rjs if (net->net_ack2) { 4021 1.1 rjs /* restore any doubled timers */ 4022 1.1 rjs net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1; 4023 1.1 rjs if (net->RTO < stcb->asoc.minrto) { 4024 1.1 rjs net->RTO = stcb->asoc.minrto; 4025 1.1 rjs } 4026 1.1 rjs if (net->RTO > stcb->asoc.maxrto) { 4027 1.1 rjs net->RTO = stcb->asoc.maxrto; 4028 1.1 rjs } 4029 1.1 rjs } 4030 1.1 rjs if (net->cwnd > sctp_pegs[SCTP_MAX_CWND]) { 4031 1.1 rjs sctp_pegs[SCTP_MAX_CWND] = net->cwnd; 4032 1.1 rjs } 4033 1.1 rjs } 4034 1.1 rjs /**********************************/ 4035 1.1 rjs /* Now what about shutdown issues */ 4036 1.1 rjs /**********************************/ 4037 1.1 rjs some_on_streamwheel = 0; 4038 1.1 rjs if (!TAILQ_EMPTY(&asoc->out_wheel)) { 4039 1.1 rjs /* Check to see if some data queued */ 4040 1.1 rjs struct sctp_stream_out *outs; 4041 1.1 rjs TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) { 4042 1.1 rjs if (!TAILQ_EMPTY(&outs->outqueue)) { 4043 1.1 rjs some_on_streamwheel = 1; 4044 1.1 rjs break; 4045 1.1 rjs } 4046 1.1 rjs } 4047 1.1 rjs } 4048 1.1 rjs if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) && 4049 1.1 rjs some_on_streamwheel == 0) { 4050 1.1 rjs /* nothing left on sendqueue.. 
consider done */ 4051 1.1 rjs /* stop all timers */ 4052 1.1 rjs #ifdef SCTP_LOG_RWND 4053 1.1 rjs sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4054 1.1 rjs asoc->peers_rwnd, 0, 0, a_rwnd); 4055 1.1 rjs #endif 4056 1.1 rjs asoc->peers_rwnd = a_rwnd; 4057 1.1 rjs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4058 1.1 rjs /* SWS sender side engages */ 4059 1.1 rjs asoc->peers_rwnd = 0; 4060 1.1 rjs } 4061 1.1 rjs /* stop any timers */ 4062 1.1 rjs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4063 1.1 rjs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4064 1.1 rjs stcb, net); 4065 1.1 rjs net->flight_size = 0; 4066 1.1 rjs net->partial_bytes_acked = 0; 4067 1.1 rjs } 4068 1.1 rjs asoc->total_flight = 0; 4069 1.1 rjs asoc->total_flight_count = 0; 4070 1.1 rjs /* clean up */ 4071 1.1 rjs if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 4072 1.1 rjs asoc->state = SCTP_STATE_SHUTDOWN_SENT; 4073 1.1 rjs #ifdef SCTP_DEBUG 4074 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) { 4075 1.1 rjs printf("%s:%d sends a shutdown\n", 4076 1.1 rjs __FILE__, 4077 1.1 rjs __LINE__ 4078 1.1 rjs ); 4079 1.1 rjs } 4080 1.1 rjs #endif 4081 1.1 rjs sctp_send_shutdown(stcb, 4082 1.1 rjs stcb->asoc.primary_destination); 4083 1.1 rjs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4084 1.1 rjs stcb->sctp_ep, stcb, asoc->primary_destination); 4085 1.1 rjs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4086 1.1 rjs stcb->sctp_ep, stcb, asoc->primary_destination); 4087 1.1 rjs } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) { 4088 1.1 rjs asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 4089 1.1 rjs 4090 1.1 rjs sctp_send_shutdown_ack(stcb, 4091 1.1 rjs stcb->asoc.primary_destination); 4092 1.1 rjs 4093 1.1 rjs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4094 1.1 rjs stcb->sctp_ep, stcb, asoc->primary_destination); 4095 1.1 rjs } 4096 1.1 rjs return; 4097 1.1 rjs } 4098 1.1 rjs /* 4099 1.1 rjs * Now here we are going to recycle net_ack for a different 4100 1.1 rjs * use... HEADS UP. 4101 1.1 rjs */ 4102 1.1 rjs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4103 1.1 rjs net->net_ack = 0; 4104 1.1 rjs } 4105 1.1 rjs if ((num_seg > 0) && marking_allowed) { 4106 1.1 rjs sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 4107 1.1 rjs strike_enabled, biggest_tsn_newly_acked, accum_moved); 4108 1.1 rjs } 4109 1.1 rjs 4110 1.1 rjs /*********************************************/ 4111 1.1 rjs /* Here we perform PR-SCTP procedures */ 4112 1.1 rjs /* (section 4.2) */ 4113 1.1 rjs /*********************************************/ 4114 1.1 rjs /* C1. update advancedPeerAckPoint */ 4115 1.1 rjs if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) { 4116 1.1 rjs asoc->advanced_peer_ack_point = cum_ack; 4117 1.1 rjs } 4118 1.1 rjs /* C2. try to further move advancedPeerAckPoint ahead */ 4119 1.1 rjs if (asoc->peer_supports_prsctp) { 4120 1.1 rjs struct sctp_tmit_chunk *lchk; 4121 1.1 rjs lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4122 1.1 rjs /* C3. See if we need to send a Fwd-TSN */ 4123 1.1 rjs if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack, 4124 1.1 rjs MAX_TSN)) { 4125 1.1 rjs /* 4126 1.1 rjs * ISSUE with ECN, see FWD-TSN processing for notes 4127 1.1 rjs * on issues that will occur when the ECN NONCE stuff 4128 1.1 rjs * is put into SCTP for cross checking. 
4129 1.1 rjs */ 4130 1.1 rjs send_forward_tsn(stcb, asoc); 4131 1.1 rjs 4132 1.1 rjs /* ECN Nonce: Disable Nonce Sum check when FWD TSN is sent and store resync tsn*/ 4133 1.1 rjs asoc->nonce_sum_check = 0; 4134 1.1 rjs asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point; 4135 1.1 rjs if (lchk) { 4136 1.1 rjs /* Assure a timer is up */ 4137 1.1 rjs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4138 1.1 rjs stcb->sctp_ep, stcb, lchk->whoTo); 4139 1.1 rjs } 4140 1.1 rjs } 4141 1.1 rjs } 4142 1.1 rjs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4143 1.1 rjs if (asoc->fast_retran_loss_recovery == 0) { 4144 1.1 rjs /* out of a RFC2582 Fast recovery window? */ 4145 1.1 rjs if (net->net_ack > 0) { 4146 1.1 rjs /* 4147 1.1 rjs * per section 7.2.3, are there 4148 1.1 rjs * any destinations that had a fast 4149 1.1 rjs * retransmit to them. If so what we 4150 1.1 rjs * need to do is adjust ssthresh and 4151 1.1 rjs * cwnd. 4152 1.1 rjs */ 4153 1.1 rjs struct sctp_tmit_chunk *lchk; 4154 1.1 rjs #ifdef SCTP_HIGH_SPEED 4155 1.1 rjs sctp_hs_cwnd_decrease(net); 4156 1.1 rjs #else 4157 1.1 rjs #ifdef SCTP_CWND_LOGGING 4158 1.1 rjs int old_cwnd = net->cwnd; 4159 1.1 rjs #endif 4160 1.1 rjs net->ssthresh = net->cwnd / 2; 4161 1.1 rjs if (net->ssthresh < (net->mtu*2)) { 4162 1.1 rjs net->ssthresh = 2 * net->mtu; 4163 1.1 rjs } 4164 1.1 rjs net->cwnd = net->ssthresh; 4165 1.1 rjs #ifdef SCTP_CWND_LOGGING 4166 1.1 rjs sctp_log_cwnd(net, (net->cwnd-old_cwnd), 4167 1.1 rjs SCTP_CWND_LOG_FROM_FR); 4168 1.1 rjs #endif 4169 1.1 rjs #endif 4170 1.1 rjs 4171 1.1 rjs lchk = TAILQ_FIRST(&asoc->send_queue); 4172 1.1 rjs 4173 1.1 rjs net->partial_bytes_acked = 0; 4174 1.1 rjs /* Turn on fast recovery window */ 4175 1.1 rjs asoc->fast_retran_loss_recovery = 1; 4176 1.1 rjs if (lchk == NULL) { 4177 1.1 rjs /* Mark end of the window */ 4178 1.1 rjs asoc->fast_recovery_tsn = asoc->sending_seq - 1; 4179 1.1 rjs } else { 4180 1.1 rjs asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 4181 1.1 rjs } 4182 1.1 rjs 4183 1.1 rjs 4184 1.1 rjs /* Disable Nonce Sum Checking and store the resync tsn*/ 4185 1.1 rjs asoc->nonce_sum_check = 0; 4186 1.1 rjs asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1; 4187 1.1 rjs 4188 1.1 rjs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 4189 1.1 rjs stcb->sctp_ep, stcb, net); 4190 1.1 rjs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4191 1.1 rjs stcb->sctp_ep, stcb, net); 4192 1.1 rjs } 4193 1.1 rjs } else if (net->net_ack > 0) { 4194 1.1 rjs /* 4195 1.1 rjs * Mark a peg that we WOULD have done a cwnd reduction 4196 1.1 rjs * but RFC2582 prevented this action. 4197 1.1 rjs */ 4198 1.1 rjs sctp_pegs[SCTP_FR_INAWINDOW]++; 4199 1.1 rjs } 4200 1.1 rjs } 4201 1.1 rjs 4202 1.1 rjs 4203 1.1 rjs /****************************************************************** 4204 1.1 rjs * Here we do the stuff with ECN Nonce checking. 4205 1.1 rjs * We basically check to see if the nonce sum flag was incorrect 4206 1.1 rjs * or if resynchronization needs to be done. Also if we catch a 4207 1.1 rjs * misbehaving receiver we give him the kick. 
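 * In short: the sender keeps a running 1-bit sum of the nonces it
 * sent (nonce_sum_expect_base plus the gap-acked segment sums) and
 * expects the SACK's nonce-sum flag to match; a mismatch that
 * persists past nonce_wait_tsn marks the peer as misbehaving and
 * ECN is disabled for the association.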
4208 1.1 rjs ******************************************************************/ 4209 1.1 rjs 4210 1.1 rjs if (asoc->ecn_nonce_allowed) { 4211 1.1 rjs if (asoc->nonce_sum_check) { 4212 1.1 rjs if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) { 4213 1.1 rjs if (asoc->nonce_wait_for_ecne == 0) { 4214 1.1 rjs struct sctp_tmit_chunk *lchk; 4215 1.1 rjs lchk = TAILQ_FIRST(&asoc->send_queue); 4216 1.1 rjs asoc->nonce_wait_for_ecne = 1; 4217 1.1 rjs if (lchk) { 4218 1.1 rjs asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq; 4219 1.1 rjs } else { 4220 1.1 rjs asoc->nonce_wait_tsn = asoc->sending_seq; 4221 1.1 rjs } 4222 1.1 rjs } else { 4223 1.1 rjs if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) || 4224 1.1 rjs (asoc->last_acked_seq == asoc->nonce_wait_tsn)) { 4225 1.1 rjs /* Misbehaving peer. We need to react to this guy */ 4226 1.1 rjs printf("Mis-behaving peer detected\n"); 4227 1.1 rjs asoc->ecn_allowed = 0; 4228 1.1 rjs asoc->ecn_nonce_allowed = 0; 4229 1.1 rjs } 4230 1.1 rjs } 4231 1.1 rjs } 4232 1.1 rjs } else { 4233 1.1 rjs /* See if Resynchronization Possible */ 4234 1.1 rjs if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) { 4235 1.1 rjs asoc->nonce_sum_check = 1; 4236 1.1 rjs /* now we must calculate what the base 4237 1.1 rjs * is. We do this based on two things, we know 4238 1.1 rjs * the total's for all the segments gap-acked 4239 1.1 rjs * in the SACK, its stored in ecn_seg_sums. 4240 1.1 rjs * We also know the SACK's nonce sum, its 4241 1.1 rjs * in nonce_sum_flag. So we can build a truth 4242 1.1 rjs * table to back-calculate the new value of asoc->nonce_sum_expect_base: 4243 1.1 rjs * 4244 1.1 rjs * SACK-flag-Value Seg-Sums Base 4245 1.1 rjs * 0 0 0 4246 1.1 rjs * 1 0 1 4247 1.1 rjs * 0 1 1 4248 1.1 rjs * 1 1 0 4249 1.1 rjs */ 4250 1.1 rjs asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM; 4251 1.1 rjs } 4252 1.1 rjs } 4253 1.1 rjs } 4254 1.1 rjs /* Now are we exiting loss recovery ? */ 4255 1.1 rjs if (will_exit_fast_recovery) { 4256 1.1 rjs /* Ok, we must exit fast recovery */ 4257 1.1 rjs asoc->fast_retran_loss_recovery = 0; 4258 1.1 rjs } 4259 1.1 rjs if ((asoc->sat_t3_loss_recovery) && 4260 1.1 rjs ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn, 4261 1.1 rjs MAX_TSN) || 4262 1.1 rjs (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) { 4263 1.1 rjs /* end satellite t3 loss recovery */ 4264 1.1 rjs asoc->sat_t3_loss_recovery = 0; 4265 1.1 rjs } 4266 1.1 rjs /* Adjust and set the new rwnd value */ 4267 1.1 rjs #ifdef SCTP_LOG_RWND 4268 1.1 rjs sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4269 1.1 rjs asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd); 4270 1.1 rjs #endif 4271 1.1 rjs 4272 1.1 rjs asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 4273 1.1 rjs (u_int32_t)(asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh))); 4274 1.1 rjs if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4275 1.1 rjs /* SWS sender side engages */ 4276 1.1 rjs asoc->peers_rwnd = 0; 4277 1.1 rjs } 4278 1.1 rjs /* 4279 1.1 rjs * Now we must setup so we have a timer up for anyone with 4280 1.1 rjs * outstanding data. 
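 * (i.e. restart the T3-rxt timer per destination: any net that still
 * owns an un-acked chunk, or one marked SCTP_FORWARD_TSN_SKIP that
 * awaits a covering FWD-TSN, gets its send timer started again.)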
4281 1.1 rjs */ 4282 1.1 rjs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4283 1.1 rjs struct sctp_tmit_chunk *chk; 4284 1.1 rjs TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 4285 1.1 rjs if (chk->whoTo == net && 4286 1.1 rjs (chk->sent < SCTP_DATAGRAM_ACKED || 4287 1.1 rjs chk->sent == SCTP_FORWARD_TSN_SKIP)) { 4288 1.1 rjs /* 4289 1.1 rjs * Not ack'ed and still outstanding to this 4290 1.1 rjs * destination or marked and must be 4291 1.1 rjs * sacked after fwd-tsn sent. 4292 1.1 rjs */ 4293 1.1 rjs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4294 1.1 rjs stcb->sctp_ep, stcb, net); 4295 1.1 rjs break; 4296 1.1 rjs } 4297 1.1 rjs } 4298 1.1 rjs } 4299 1.1 rjs } 4300 1.1 rjs 4301 1.1 rjs void 4302 1.1 rjs sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, 4303 1.1 rjs struct sctp_nets *netp, int *abort_flag) 4304 1.1 rjs { 4305 1.1 rjs /* Mutate a shutdown into a SACK */ 4306 1.1 rjs struct sctp_sack_chunk sack; 4307 1.1 rjs 4308 1.1 rjs /* Copy cum-ack */ 4309 1.1 rjs sack.sack.cum_tsn_ack = cp->cumulative_tsn_ack; 4310 1.1 rjs /* Arrange so a_rwnd does NOT change */ 4311 1.1 rjs sack.ch.chunk_type = SCTP_SELECTIVE_ACK; 4312 1.1 rjs sack.ch.chunk_flags = 0; 4313 1.1 rjs sack.ch.chunk_length = ntohs(sizeof(struct sctp_sack_chunk)); 4314 1.1 rjs sack.sack.a_rwnd = 4315 1.1 rjs htonl(stcb->asoc.peers_rwnd + stcb->asoc.total_flight); 4316 1.1 rjs /* 4317 1.1 rjs * no gaps in this one. This may cause a temporal view to reneging, 4318 1.1 rjs * but hopefully the second chunk is a true SACK in the packet and 4319 1.1 rjs * will correct this view. One will come soon after no matter what 4320 1.1 rjs * to fix this. 4321 1.1 rjs */ 4322 1.1 rjs sack.sack.num_gap_ack_blks = 0; 4323 1.1 rjs sack.sack.num_dup_tsns = 0; 4324 1.1 rjs /* Now call the SACK processor */ 4325 1.1 rjs sctp_handle_sack(&sack, stcb, netp, abort_flag); 4326 1.1 rjs } 4327 1.1 rjs 4328 1.1 rjs static void 4329 1.1 rjs sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 4330 1.1 rjs struct sctp_stream_in *strmin) 4331 1.1 rjs { 4332 1.1 rjs struct sctp_tmit_chunk *chk, *nchk; 4333 1.1 rjs struct sctp_association *asoc; 4334 1.1 rjs int tt; 4335 1.1 rjs 4336 1.1 rjs asoc = &stcb->asoc; 4337 1.1 rjs tt = strmin->last_sequence_delivered; 4338 1.1 rjs /* 4339 1.1 rjs * First deliver anything prior to and including the stream no that 4340 1.1 rjs * came in 4341 1.1 rjs */ 4342 1.1 rjs chk = TAILQ_FIRST(&strmin->inqueue); 4343 1.1 rjs while (chk) { 4344 1.1 rjs nchk = TAILQ_NEXT(chk, sctp_next); 4345 1.1 rjs if (compare_with_wrap(tt, chk->rec.data.stream_seq, MAX_SEQ) || 4346 1.1 rjs (tt == chk->rec.data.stream_seq)) { 4347 1.1 rjs /* this is deliverable now */ 4348 1.1 rjs TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next); 4349 1.1 rjs /* subtract pending on streams */ 4350 1.1 rjs asoc->size_on_all_streams -= chk->send_size; 4351 1.1 rjs asoc->cnt_on_all_streams--; 4352 1.1 rjs /* deliver it to at least the delivery-q */ 4353 1.1 rjs sctp_deliver_data(stcb, &stcb->asoc, chk, 0); 4354 1.1 rjs } else { 4355 1.1 rjs /* no more delivery now. */ 4356 1.1 rjs break; 4357 1.1 rjs } 4358 1.1 rjs chk = nchk; 4359 1.1 rjs } 4360 1.1 rjs /* 4361 1.1 rjs * now we must deliver things in queue the normal way if any 4362 1.1 rjs * are now ready. 
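 * (Second pass: after the skip-ahead above, resume strictly
 * in-sequence delivery, taking only chunks whose stream sequence is
 * exactly last_sequence_delivered + 1 and advancing as each one is
 * handed up.)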
4363 1.1 rjs 	 */
4364 1.1 rjs 	tt = strmin->last_sequence_delivered + 1;
4365 1.1 rjs 	chk = TAILQ_FIRST(&strmin->inqueue);
4366 1.1 rjs 	while (chk) {
4367 1.1 rjs 		nchk = TAILQ_NEXT(chk, sctp_next);
4368 1.1 rjs 		if (tt == chk->rec.data.stream_seq) {
4369 1.1 rjs 			/* this is deliverable now */
4370 1.1 rjs 			TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4371 1.1 rjs 			/* subtract pending on streams */
4372 1.1 rjs 			asoc->size_on_all_streams -= chk->send_size;
4373 1.1 rjs 			asoc->cnt_on_all_streams--;
4374 1.1 rjs 			/* deliver it to at least the delivery-q */
4375 1.1 rjs 			strmin->last_sequence_delivered =
4376 1.1 rjs 			    chk->rec.data.stream_seq;
4377 1.1 rjs 			sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4378 1.1 rjs 			tt = strmin->last_sequence_delivered + 1;
4379 1.1 rjs 		} else {
4380 1.1 rjs 			break;
4381 1.1 rjs 		}
4382 1.1 rjs 		chk = nchk;
4383 1.1 rjs 	}
4384 1.1 rjs 
4385 1.1 rjs }
4386 1.1 rjs 
4387 1.1 rjs void
4388 1.1 rjs sctp_handle_forward_tsn(struct sctp_tcb *stcb,
4389 1.1 rjs     struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
4390 1.1 rjs {
4391 1.1 rjs 	/*
4392 1.1 rjs 	 * ISSUES that MUST be fixed for ECN! When we are the
4393 1.1 rjs 	 * sender of the forward TSN, when the SACK comes back
4394 1.1 rjs 	 * that acknowledges the FWD-TSN we must reset the
4395 1.1 rjs 	 * NONCE sum to match correctly. This will get quite
4396 1.1 rjs 	 * tricky since we may have sent more intervening data and
4397 1.1 rjs 	 * must carefully account for what the SACK says on the
4398 1.1 rjs 	 * nonce and any gaps that are reported. This work
4399 1.1 rjs 	 * will NOT be done here, but I note it here since
4400 1.1 rjs 	 * it is really related to PR-SCTP and FWD-TSN's
4401 1.1 rjs 	 */
4402 1.1 rjs 
4403 1.1 rjs 	/* The pr-sctp fwd tsn */
4404 1.1 rjs 	/*
4405 1.1 rjs 	 * here we will perform all the data receiver side steps for
4406 1.1 rjs 	 * processing FwdTSN, as required by the pr-sctp draft:
4407 1.1 rjs 	 *
4408 1.1 rjs 	 * Assume we get FwdTSN(x):
4409 1.1 rjs 	 *
4410 1.1 rjs 	 * 1) update local cumTSN to x
4411 1.1 rjs 	 * 2) try to further advance cumTSN to x + others we have
4412 1.1 rjs 	 * 3) examine and update re-ordering queue on pr-in-streams
4413 1.1 rjs 	 * 4) clean up re-assembly queue
4414 1.1 rjs 	 * 5) Send a sack to report where we are.
4415 1.1 rjs 	 */
4416 1.1 rjs 	struct sctp_strseq *stseq;
4417 1.1 rjs 	struct sctp_association *asoc;
4418 1.1 rjs 	u_int32_t new_cum_tsn, gap, back_out_htsn;
4419 1.1 rjs 	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
4420 1.1 rjs 	struct sctp_stream_in *strm;
4421 1.1 rjs 	struct sctp_tmit_chunk *chk, *at;
4422 1.1 rjs 
4423 1.1 rjs 	cumack_set_flag = 0;
4424 1.1 rjs 	asoc = &stcb->asoc;
4425 1.1 rjs 	cnt_gone = 0;
4426 1.1 rjs 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
4427 1.1 rjs #ifdef SCTP_DEBUG
4428 1.1 rjs 		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4429 1.1 rjs 			printf("Bad size too small/big fwd-tsn\n");
4430 1.1 rjs 		}
4431 1.1 rjs #endif
4432 1.1 rjs 		return;
4433 1.1 rjs 	}
4434 1.1 rjs 	m_size = (stcb->asoc.mapping_array_size << 3);
4435 1.1 rjs 	/*************************************************************/
4436 1.1 rjs 	/* 1. Here we update local cumTSN and shift the bitmap array */
4437 1.1 rjs 	/*************************************************************/
4438 1.1 rjs 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
4439 1.1 rjs 
4440 1.1 rjs 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
4441 1.1 rjs 	    asoc->cumulative_tsn == new_cum_tsn) {
4442 1.1 rjs 		/* Already got there ...
*/ 4443 1.1 rjs return; 4444 1.1 rjs } 4445 1.1 rjs 4446 1.1 rjs back_out_htsn = asoc->highest_tsn_inside_map; 4447 1.1 rjs if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, 4448 1.1 rjs MAX_TSN)) { 4449 1.1 rjs asoc->highest_tsn_inside_map = new_cum_tsn; 4450 1.1 rjs #ifdef SCTP_MAP_LOGGING 4451 1.1 rjs sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 4452 1.1 rjs #endif 4453 1.1 rjs } 4454 1.1 rjs /* 4455 1.1 rjs * now we know the new TSN is more advanced, let's find the 4456 1.1 rjs * actual gap 4457 1.1 rjs */ 4458 1.1 rjs if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn, 4459 1.1 rjs MAX_TSN)) || 4460 1.1 rjs (new_cum_tsn == asoc->mapping_array_base_tsn)) { 4461 1.1 rjs gap = new_cum_tsn - asoc->mapping_array_base_tsn; 4462 1.1 rjs } else { 4463 1.1 rjs /* try to prevent underflow here */ 4464 1.1 rjs gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 4465 1.1 rjs } 4466 1.1 rjs 4467 1.1 rjs if (gap >= m_size) { 4468 1.1 rjs asoc->highest_tsn_inside_map = back_out_htsn; 4469 1.1 rjs if ((long)gap > sctp_sbspace(&stcb->sctp_socket->so_rcv)) { 4470 1.1 rjs /* 4471 1.1 rjs * out of range (of single byte chunks in the rwnd I 4472 1.1 rjs * give out) 4473 1.1 rjs * too questionable. better to drop it silently 4474 1.1 rjs */ 4475 1.1 rjs return; 4476 1.1 rjs } 4477 1.1 rjs if (asoc->highest_tsn_inside_map > 4478 1.1 rjs asoc->mapping_array_base_tsn) { 4479 1.1 rjs gap = asoc->highest_tsn_inside_map - 4480 1.1 rjs asoc->mapping_array_base_tsn; 4481 1.1 rjs } else { 4482 1.1 rjs gap = asoc->highest_tsn_inside_map + 4483 1.1 rjs (MAX_TSN - asoc->mapping_array_base_tsn) + 1; 4484 1.1 rjs } 4485 1.1 rjs cumack_set_flag = 1; 4486 1.1 rjs } 4487 1.1 rjs for (i = 0; i <= gap; i++) { 4488 1.1 rjs SCTP_SET_TSN_PRESENT(asoc->mapping_array, i); 4489 1.1 rjs } 4490 1.1 rjs /* 4491 1.1 rjs * Now after marking all, slide thing forward but no 4492 1.1 rjs * sack please. 4493 1.1 rjs */ 4494 1.1 rjs sctp_sack_check(stcb, 0, 0, abort_flag); 4495 1.1 rjs if (*abort_flag) 4496 1.1 rjs return; 4497 1.1 rjs 4498 1.1 rjs if (cumack_set_flag) { 4499 1.1 rjs /* 4500 1.1 rjs * fwd-tsn went outside my gap array - not a 4501 1.9 andvar * common occurrence. Do the same thing we 4502 1.1 rjs * do when a cookie-echo arrives. 4503 1.1 rjs */ 4504 1.1 rjs asoc->highest_tsn_inside_map = new_cum_tsn - 1; 4505 1.1 rjs asoc->mapping_array_base_tsn = new_cum_tsn; 4506 1.1 rjs asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 4507 1.1 rjs #ifdef SCTP_MAP_LOGGING 4508 1.1 rjs sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 4509 1.1 rjs #endif 4510 1.1 rjs asoc->last_echo_tsn = asoc->highest_tsn_inside_map; 4511 1.1 rjs } 4512 1.1 rjs /*************************************************************/ 4513 1.1 rjs /* 2. Clear up re-assembly queue */ 4514 1.1 rjs /*************************************************************/ 4515 1.1 rjs 4516 1.1 rjs /* 4517 1.1 rjs * First service it if pd-api is up, just in case we can 4518 1.1 rjs * progress it forward 4519 1.1 rjs */ 4520 1.1 rjs if (asoc->fragmented_delivery_inprogress) { 4521 1.1 rjs sctp_service_reassembly(stcb, asoc, 0); 4522 1.1 rjs } 4523 1.1 rjs if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 4524 1.1 rjs /* For each one on here see if we need to toss it */ 4525 1.1 rjs /* 4526 1.1 rjs * For now large messages held on the reasmqueue that are 4527 1.1 rjs * complete will be tossed too. 
We could in theory do more
4528 1.1 rjs 	 * work to spin through and stop after dumping one msg,
4529 1.1 rjs 	 * aka seeing the start of a new msg at the head, and call
4530 1.1 rjs 	 * the delivery function... to see if it can be delivered...
4531 1.1 rjs 	 * But for now we just dump everything on the queue.
4532 1.1 rjs 	 */
4533 1.1 rjs 	chk = TAILQ_FIRST(&asoc->reasmqueue);
4534 1.1 rjs 	while (chk) {
4535 1.1 rjs 		at = TAILQ_NEXT(chk, sctp_next);
4536 1.1 rjs 		if (compare_with_wrap(asoc->cumulative_tsn,
4537 1.1 rjs 		    chk->rec.data.TSN_seq, MAX_TSN) ||
4538 1.1 rjs 		    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
4539 1.1 rjs 			/* It needs to be tossed */
4540 1.1 rjs 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4541 1.1 rjs 			if (compare_with_wrap(chk->rec.data.TSN_seq,
4542 1.1 rjs 			    asoc->tsn_last_delivered, MAX_TSN)) {
4543 1.1 rjs 				asoc->tsn_last_delivered =
4544 1.1 rjs 				    chk->rec.data.TSN_seq;
4545 1.1 rjs 				asoc->str_of_pdapi =
4546 1.1 rjs 				    chk->rec.data.stream_number;
4547 1.1 rjs 				asoc->ssn_of_pdapi =
4548 1.1 rjs 				    chk->rec.data.stream_seq;
4549 1.1 rjs 				asoc->fragment_flags =
4550 1.1 rjs 				    chk->rec.data.rcv_flags;
4551 1.1 rjs 			}
4552 1.1 rjs 			asoc->size_on_reasm_queue -= chk->send_size;
4553 1.1 rjs 			asoc->cnt_on_reasm_queue--;
4554 1.1 rjs 			cnt_gone++;
4555 1.1 rjs 
4556 1.1 rjs 			/* Clear up any stream problem */
4557 1.1 rjs 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
4558 1.1 rjs 			    SCTP_DATA_UNORDERED &&
4559 1.1 rjs 			    (compare_with_wrap(chk->rec.data.stream_seq,
4560 1.1 rjs 			    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
4561 1.1 rjs 			    MAX_SEQ))) {
4562 1.1 rjs 				/*
4563 1.1 rjs 				 * We must advance this stream's
4564 1.1 rjs 				 * sequence number if the chunk being
4565 1.1 rjs 				 * skipped is not unordered.
4566 1.1 rjs 				 * There is a chance that if the peer
4567 1.1 rjs 				 * does not include the last fragment
4568 1.1 rjs 				 * in its FWD-TSN we WILL have a problem
4569 1.1 rjs 				 * here since you would have a partial
4570 1.1 rjs 				 * chunk in queue that may not be
4571 1.1 rjs 				 * deliverable.
4572 1.1 rjs 				 * Also if a partial delivery API has
4573 1.1 rjs 				 * started, the user may get a partial
4574 1.1 rjs 				 * chunk, with the next read returning a new
4575 1.1 rjs 				 * chunk... really ugly, but I see no way
4576 1.1 rjs 				 * around it! Maybe a notify??
4577 1.1 rjs 				 */
4578 1.1 rjs 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
4579 1.1 rjs 				    chk->rec.data.stream_seq;
4580 1.1 rjs 			}
4581 1.16 rin 			sctp_m_freem(chk->data);
4582 1.16 rin 			chk->data = NULL;
4583 1.1 rjs 			sctp_free_remote_addr(chk->whoTo);
4584 1.1 rjs 			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4585 1.1 rjs 			sctppcbinfo.ipi_count_chunk--;
4586 1.1 rjs 			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4587 1.1 rjs 				panic("Chunk count is negative");
4588 1.1 rjs 			}
4589 1.1 rjs 			sctppcbinfo.ipi_gencnt_chunk++;
4590 1.1 rjs 		} else {
4591 1.1 rjs 			/*
4592 1.1 rjs 			 * Ok we have gone beyond the end of the
4593 1.1 rjs 			 * fwd-tsn's mark. Some checks...
4594 1.1 rjs 			 */
4595 1.1 rjs 			if ((asoc->fragmented_delivery_inprogress) &&
4596 1.1 rjs 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4597 1.1 rjs 				/* Special case: PD-API is up and what we fwd-tsn'd
4598 1.1 rjs 				 * over includes one that had the LAST_FRAG. We
4599 1.1 rjs 				 * no longer need to do the PD-API.
4600 1.1 rjs */ 4601 1.1 rjs asoc->fragmented_delivery_inprogress = 0; 4602 1.1 rjs sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 4603 1.1 rjs stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL); 4604 1.1 rjs 4605 1.1 rjs } 4606 1.1 rjs break; 4607 1.1 rjs } 4608 1.1 rjs chk = at; 4609 1.1 rjs } 4610 1.1 rjs } 4611 1.1 rjs if (asoc->fragmented_delivery_inprogress) { 4612 1.1 rjs /* 4613 1.1 rjs * Ok we removed cnt_gone chunks in the PD-API queue that 4614 1.1 rjs * were being delivered. So now we must turn off the 4615 1.1 rjs * flag. 4616 1.1 rjs */ 4617 1.1 rjs sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 4618 1.1 rjs stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)NULL); 4619 1.1 rjs asoc->fragmented_delivery_inprogress = 0; 4620 1.1 rjs } 4621 1.1 rjs /*************************************************************/ 4622 1.1 rjs /* 3. Update the PR-stream re-ordering queues */ 4623 1.1 rjs /*************************************************************/ 4624 1.1 rjs stseq = (struct sctp_strseq *)((vaddr_t)fwd + sizeof(*fwd)); 4625 1.1 rjs fwd_sz -= sizeof(*fwd); 4626 1.1 rjs { 4627 1.1 rjs /* New method. */ 4628 1.1 rjs int num_str; 4629 1.1 rjs num_str = fwd_sz/sizeof(struct sctp_strseq); 4630 1.1 rjs #ifdef SCTP_DEBUG 4631 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 4632 1.1 rjs printf("Using NEW method, %d strseq's reported in FWD-TSN\n", 4633 1.1 rjs num_str); 4634 1.1 rjs } 4635 1.1 rjs #endif 4636 1.1 rjs for (i = 0; i < num_str; i++) { 4637 1.1 rjs u_int16_t st; 4638 1.1 rjs #if 0 4639 1.1 rjs unsigned char *xx; 4640 1.1 rjs /* Convert */ 4641 1.1 rjs xx = (unsigned char *)&stseq[i]; 4642 1.1 rjs #endif 4643 1.1 rjs st = ntohs(stseq[i].stream); 4644 1.1 rjs stseq[i].stream = st; 4645 1.1 rjs st = ntohs(stseq[i].sequence); 4646 1.1 rjs stseq[i].sequence = st; 4647 1.1 rjs /* now process */ 4648 1.1 rjs if (stseq[i].stream > asoc->streamincnt) { 4649 1.1 rjs #ifdef SCTP_DEBUG 4650 1.1 rjs if (sctp_debug_on & SCTP_DEBUG_INDATA1) { 4651 1.1 rjs printf("Bogus stream number %d " 4652 1.1 rjs "streamincnt is %d\n", 4653 1.1 rjs stseq[i].stream, asoc->streamincnt); 4654 1.1 rjs } 4655 1.1 rjs #endif 4656 1.1 rjs /* 4657 1.1 rjs * It is arguable if we should continue. Since 4658 1.1 rjs * the peer sent bogus stream info we may be in 4659 1.1 rjs * deep trouble.. 4660 1.1 rjs * a return may be a better choice? 4661 1.1 rjs */ 4662 1.1 rjs continue; 4663 1.1 rjs } 4664 1.1 rjs strm = &asoc->strmin[stseq[i].stream]; 4665 1.1 rjs if (compare_with_wrap(stseq[i].sequence, 4666 1.1 rjs strm->last_sequence_delivered, MAX_SEQ)) { 4667 1.1 rjs /* Update the sequence number */ 4668 1.1 rjs strm->last_sequence_delivered = 4669 1.1 rjs stseq[i].sequence; 4670 1.1 rjs } 4671 1.1 rjs /* now kick the stream the new way */ 4672 1.1 rjs sctp_kick_prsctp_reorder_queue(stcb, strm); 4673 1.1 rjs } 4674 1.1 rjs } 4675 1.1 rjs } 4676
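/*
 * Illustrative walk-through of the strseq handling above (numbers
 * invented for the example): a FWD-TSN carrying {stream 2, seq 7}
 * bumps strmin[2].last_sequence_delivered up to 7 if it was behind,
 * and sctp_kick_prsctp_reorder_queue() then first flushes anything
 * queued at or below seq 7 and afterwards resumes normal in-order
 * delivery from seq 8.
 */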