sctp_timer.c revision 1.3.14.2 1 /* $KAME: sctp_timer.c,v 1.30 2005/06/16 18:29:25 jinmei Exp $ */
2 /* $NetBSD: sctp_timer.c,v 1.3.14.2 2017/12/03 11:39:04 jdolecek Exp $ */
3
4 /*
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: sctp_timer.c,v 1.3.14.2 2017/12/03 11:39:04 jdolecek Exp $");
34
35 #ifdef _KERNEL_OPT
36 #include "opt_inet.h"
37 #include "opt_sctp.h"
38 #endif /* _KERNEL_OPT */
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/malloc.h>
43 #include <sys/mbuf.h>
44 #include <sys/domain.h>
45 #include <sys/protosw.h>
46 #include <sys/socket.h>
47 #include <sys/socketvar.h>
48 #include <sys/proc.h>
49 #include <sys/kernel.h>
50 #include <sys/sysctl.h>
51 #ifdef INET6
52 #include <sys/domain.h>
53 #endif
54
55 #include <machine/limits.h>
56
57 #include <net/if.h>
58 #include <net/if_types.h>
59 #include <net/route.h>
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #define _IP_VHL
63 #include <netinet/ip.h>
64 #include <netinet/in_pcb.h>
65 #include <netinet/in_var.h>
66 #include <netinet/ip_var.h>
67
68 #ifdef INET6
69 #include <netinet/ip6.h>
70 #include <netinet6/ip6_var.h>
71 #endif /* INET6 */
72
73 #include <netinet/sctp_pcb.h>
74
75 #ifdef IPSEC
76 #include <netipsec/ipsec.h>
77 #include <netipsec/key.h>
78 #endif /* IPSEC */
79 #ifdef INET6
80 #include <netinet6/sctp6_var.h>
81 #endif
82 #include <netinet/sctp_var.h>
83 #include <netinet/sctp_timer.h>
84 #include <netinet/sctputil.h>
85 #include <netinet/sctp_output.h>
86 #include <netinet/sctp_hashdriver.h>
87 #include <netinet/sctp_header.h>
88 #include <netinet/sctp_indata.h>
89 #include <netinet/sctp_asconf.h>
90
91 #include <netinet/sctp.h>
92 #include <netinet/sctp_uio.h>
93
94 #include <net/net_osdep.h>
95
96 #ifdef SCTP_DEBUG
97 extern u_int32_t sctp_debug_on;
98 #endif /* SCTP_DEBUG */
99
100 void
101 sctp_audit_retranmission_queue(struct sctp_association *asoc)
102 {
103 struct sctp_tmit_chunk *chk;
104
105 #ifdef SCTP_DEBUG
106 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
107 printf("Audit invoked on send queue cnt:%d onqueue:%d\n",
108 asoc->sent_queue_retran_cnt,
109 asoc->sent_queue_cnt);
110 }
111 #endif /* SCTP_DEBUG */
112 asoc->sent_queue_retran_cnt = 0;
113 asoc->sent_queue_cnt = 0;
114 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
115 if (chk->sent == SCTP_DATAGRAM_RESEND) {
116 asoc->sent_queue_retran_cnt++;
117 }
118 asoc->sent_queue_cnt++;
119 }
120 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
121 if (chk->sent == SCTP_DATAGRAM_RESEND) {
122 asoc->sent_queue_retran_cnt++;
123 }
124 }
125 #ifdef SCTP_DEBUG
126 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
127 printf("Audit completes retran:%d onqueue:%d\n",
128 asoc->sent_queue_retran_cnt,
129 asoc->sent_queue_cnt);
130 }
131 #endif /* SCTP_DEBUG */
132 }
133
/*
 * Count an error against the destination "net" (if given) and against
 * the association as a whole.  Crossing a net's failure_threshold marks
 * that destination unreachable and notifies the ULP; crossing the
 * association-wide "threshold" aborts the association.
 *
 * Returns 1 if the association was aborted (stcb is gone), else 0.
 */
int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
	if (net) {
		net->error_count++;
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
			printf("Error count for %p now %d thresh:%d\n",
			    net, net->error_count,
			    net->failure_threshold);
		}
#endif /* SCTP_DEBUG */
		if (net->error_count >= net->failure_threshold) {
			/* We had a threshold failure */
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				/* Flip to not-reachable and tell the ULP. */
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				if (net == stcb->asoc.primary_destination) {
					/* remember it was primary for later restore */
					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				}
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb,
				    SCTP_FAILED_THRESHOLD,
				    (void *)net);
			}
		}
		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
	}
	if (stcb == NULL)
		return (0);

	if (net) {
		/* Errors on unconfirmed addresses do not count against the assoc. */
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
			stcb->asoc.overall_error_count++;
		}
	} else {
		stcb->asoc.overall_error_count++;
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
		printf("Overall error count for %p now %d thresh:%u state:%x\n",
		    &stcb->asoc,
		    stcb->asoc.overall_error_count,
		    (u_int)threshold,
		    ((net == NULL) ? (u_int)0 : (u_int)net->dest_state));
	}
#endif /* SCTP_DEBUG */
	/* We specifically do not do >= to give the assoc one more
	 * chance before we fail it.
	 */
	if (stcb->asoc.overall_error_count > threshold) {
		/* Abort notification sends a ULP notify */
		struct mbuf *oper;
		MGET(oper, M_DONTWAIT, MT_DATA);
		if (oper) {
			/* Build a protocol-violation error cause for the ABORT. */
			struct sctp_paramhdr *ph;
			u_int32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    sizeof(*ippp);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (u_int32_t *)(ph + 1);
			/* internal marker identifying this abort site */
			*ippp = htonl(0x40000001);
		}
		/* On MGET failure oper is NULL; abort is sent without a cause. */
		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
		return (1);
	}
	return (0);
}
211
/*
 * Find and return an alternate destination to "net" for retransmission.
 *
 * Pass 1: starting after "net", look for a confirmed, reachable
 * destination whose cached route validates.  Pass 2 (dormant case, no
 * such destination): simply rotate to the next confirmed destination
 * that is not "net".  If both passes fail, "net" itself is returned,
 * so the result is always a usable sctp_nets pointer.
 */
struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/* Find and return an alternate network if possible */
	struct sctp_nets *alt, *mnet;
	struct rtentry *rt;
	int once;

	if (stcb->asoc.numnets == 1) {
		/* No others but net */
		return (TAILQ_FIRST(&stcb->asoc.nets));
	}
	mnet = net;
	once = 0;

	if (mnet == NULL) {
		mnet = TAILQ_FIRST(&stcb->asoc.nets);
	}
	do {
		alt = TAILQ_NEXT(mnet, sctp_next);
		if (alt == NULL) {
			/* wrap to the list head once; give up on a second wrap */
			once++;
			if (once > 1) {
				break;
			}
			alt = TAILQ_FIRST(&stcb->asoc.nets);
		}
		rt = rtcache_validate(&alt->ro);
		if (rt == NULL) {
			/* no valid route: force fresh source address selection */
			alt->src_addr_selected = 0;
		}
		if (
			((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
			(rt != NULL) &&
			(!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
			) {
			/* Found a reachable address */
			rtcache_unref(rt, &alt->ro);
			break;
		}
		/* every rtcache_validate() is paired with an unref */
		rtcache_unref(rt, &alt->ro);
		mnet = alt;
	} while (alt != NULL);

	if (alt == NULL) {
		/* Case where NO insv network exists (dormant state) */
		/* we rotate destinations */
		once = 0;
		mnet = net;
		do {
			alt = TAILQ_NEXT(mnet, sctp_next);
			if (alt == NULL) {
				once++;
				if (once > 1) {
					break;
				}
				alt = TAILQ_FIRST(&stcb->asoc.nets);
			}
			/* any confirmed address other than the failing one */
			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
			    (alt != net)) {
				/* Found an alternate address */
				break;
			}
			mnet = alt;
		} while (alt != NULL);
	}
	if (alt == NULL) {
		/* no alternate at all: fall back to the original */
		return (net);
	}
	return (alt);
}
284
285 static void
286 sctp_backoff_on_timeout(struct sctp_tcb *stcb,
287 struct sctp_nets *net,
288 int win_probe,
289 int num_marked)
290 {
291 #ifdef SCTP_DEBUG
292 int oldRTO;
293
294 oldRTO = net->RTO;
295 #endif /* SCTP_DEBUG */
296 net->RTO <<= 1;
297 #ifdef SCTP_DEBUG
298 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
299 printf("Timer doubles from %d ms -to-> %d ms\n",
300 oldRTO, net->RTO);
301 }
302 #endif /* SCTP_DEBUG */
303
304 if (net->RTO > stcb->asoc.maxrto) {
305 net->RTO = stcb->asoc.maxrto;
306 #ifdef SCTP_DEBUG
307 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
308 printf("Growth capped by maxrto %d\n",
309 net->RTO);
310 }
311 #endif /* SCTP_DEBUG */
312 }
313
314
315 if ((win_probe == 0) && num_marked) {
316 /* We don't apply penalty to window probe scenarios */
317 #ifdef SCTP_CWND_LOGGING
318 int old_cwnd=net->cwnd;
319 #endif
320 net->ssthresh = net->cwnd >> 1;
321 if (net->ssthresh < (net->mtu << 1)) {
322 net->ssthresh = (net->mtu << 1);
323 }
324 net->cwnd = net->mtu;
325 /* floor of 1 mtu */
326 if (net->cwnd < net->mtu)
327 net->cwnd = net->mtu;
328 #ifdef SCTP_CWND_LOGGING
329 sctp_log_cwnd(net, net->cwnd-old_cwnd, SCTP_CWND_LOG_FROM_RTX);
330 #endif
331
332 net->partial_bytes_acked = 0;
333 #ifdef SCTP_DEBUG
334 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
335 printf("collapse cwnd to 1MTU ssthresh to %d\n",
336 net->ssthresh);
337 }
338 #endif
339
340 }
341 }
342
343
344 static int
345 sctp_mark_all_for_resend(struct sctp_tcb *stcb,
346 struct sctp_nets *net,
347 struct sctp_nets *alt,
348 int *num_marked)
349 {
350
351 /*
352 * Mark all chunks (well not all) that were sent to *net for retransmission.
353 * Move them to alt for there destination as well... We only
354 * mark chunks that have been outstanding long enough to have
355 * received feed-back.
356 */
357 struct sctp_tmit_chunk *chk, *tp2;
358 struct sctp_nets *lnets;
359 struct timeval now, min_wait, tv;
360 int cur_rto;
361 int win_probes, non_win_probes, orig_rwnd, audit_tf, num_mk, fir;
362 unsigned int cnt_mk;
363 u_int32_t orig_flight;
364 #ifdef SCTP_FR_LOGGING
365 u_int32_t tsnfirst, tsnlast;
366 #endif
367
368 /* none in flight now */
369 audit_tf = 0;
370 fir=0;
371 /* figure out how long a data chunk must be pending
372 * before we can mark it ..
373 */
374 SCTP_GETTIME_TIMEVAL(&now);
375 /* get cur rto in micro-seconds */
376 cur_rto = (((net->lastsa >> 2) + net->lastsv) >> 1);
377 #ifdef SCTP_FR_LOGGING
378 sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME);
379 #endif
380 cur_rto *= 1000;
381 #ifdef SCTP_FR_LOGGING
382 sctp_log_fr(cur_rto, 0, 0, SCTP_FR_T3_MARK_TIME);
383 #endif
384 tv.tv_sec = cur_rto / 1000000;
385 tv.tv_usec = cur_rto % 1000000;
386 #ifndef __FreeBSD__
387 timersub(&now, &tv, &min_wait);
388 #else
389 min_wait = now;
390 timevalsub(&min_wait, &tv);
391 #endif
392 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
393 /*
394 * if we hit here, we don't
395 * have enough seconds on the clock to account
396 * for the RTO. We just let the lower seconds
397 * be the bounds and don't worry about it. This
398 * may mean we will mark a lot more than we should.
399 */
400 min_wait.tv_sec = min_wait.tv_usec = 0;
401 }
402 #ifdef SCTP_FR_LOGGING
403 sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
404 sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
405 #endif
406 if (stcb->asoc.total_flight >= net->flight_size) {
407 stcb->asoc.total_flight -= net->flight_size;
408 } else {
409 audit_tf = 1;
410 stcb->asoc.total_flight = 0;
411 }
412 /* Our rwnd will be incorrect here since we are not adding
413 * back the cnt * mbuf but we will fix that down below.
414 */
415 orig_rwnd = stcb->asoc.peers_rwnd;
416 orig_flight = net->flight_size;
417 stcb->asoc.peers_rwnd += net->flight_size;
418 net->flight_size = 0;
419 net->rto_pending = 0;
420 net->fast_retran_ip= 0;
421 win_probes = non_win_probes = 0;
422 #ifdef SCTP_DEBUG
423 if (sctp_debug_on & SCTP_DEBUG_TIMER2) {
424 printf("Marking ALL un-acked for retransmission at t3-timeout\n");
425 }
426 #endif /* SCTP_DEBUG */
427 /* Now on to each chunk */
428 num_mk = cnt_mk = 0;
429 #ifdef SCTP_FR_LOGGING
430 tsnlast = tsnfirst = 0;
431 #endif
432 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
433 for (;chk != NULL; chk = tp2) {
434 tp2 = TAILQ_NEXT(chk, sctp_next);
435 if ((compare_with_wrap(stcb->asoc.last_acked_seq,
436 chk->rec.data.TSN_seq,
437 MAX_TSN)) ||
438 (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
439 /* Strange case our list got out of order? */
440 printf("Our list is out of order?\n");
441 TAILQ_REMOVE(&stcb->asoc.sent_queue, chk, sctp_next);
442 if (chk->data) {
443 sctp_release_pr_sctp_chunk(stcb, chk, 0xffff,
444 &stcb->asoc.sent_queue);
445 if (chk->flags & SCTP_PR_SCTP_BUFFER) {
446 stcb->asoc.sent_queue_cnt_removeable--;
447 }
448 }
449 stcb->asoc.sent_queue_cnt--;
450 sctp_free_remote_addr(chk->whoTo);
451 sctppcbinfo.ipi_count_chunk--;
452 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
453 panic("Chunk count is going negative");
454 }
455 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
456 sctppcbinfo.ipi_gencnt_chunk++;
457 continue;
458 }
459 if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
460 /* found one to mark:
461 * If it is less than DATAGRAM_ACKED it MUST
462 * not be a skipped or marked TSN but instead
463 * one that is either already set for retransmission OR
464 * one that needs retransmission.
465 */
466
467 /* validate its been outstanding long enough */
468 #ifdef SCTP_FR_LOGGING
469 sctp_log_fr(chk->rec.data.TSN_seq,
470 chk->sent_rcv_time.tv_sec,
471 chk->sent_rcv_time.tv_usec,
472 SCTP_FR_T3_MARK_TIME);
473 #endif
474 if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
475 /* we have reached a chunk that was sent some
476 * seconds past our min.. forget it we will
477 * find no more to send.
478 */
479 #ifdef SCTP_FR_LOGGING
480 sctp_log_fr(0,
481 chk->sent_rcv_time.tv_sec,
482 chk->sent_rcv_time.tv_usec,
483 SCTP_FR_T3_STOPPED);
484 #endif
485 continue;
486 } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
487 /* we must look at the micro seconds to know.
488 */
489 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
490 /* ok it was sent after our boundary time. */
491 #ifdef SCTP_FR_LOGGING
492 sctp_log_fr(0,
493 chk->sent_rcv_time.tv_sec,
494 chk->sent_rcv_time.tv_usec,
495 SCTP_FR_T3_STOPPED);
496 #endif
497 continue;
498 }
499 }
500 if (stcb->asoc.total_flight_count > 0) {
501 stcb->asoc.total_flight_count--;
502 }
503 if ((chk->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) == SCTP_PR_SCTP_ENABLED) {
504 /* Is it expired? */
505 if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
506 ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
507 (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
508 /* Yes so drop it */
509 if (chk->data) {
510 sctp_release_pr_sctp_chunk(stcb,
511 chk,
512 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
513 &stcb->asoc.sent_queue);
514 }
515 }
516 continue;
517 }
518 if (chk->sent != SCTP_DATAGRAM_RESEND) {
519 stcb->asoc.sent_queue_retran_cnt++;
520 num_mk++;
521 if (fir == 0) {
522 fir = 1;
523 #ifdef SCTP_DEBUG
524 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
525 printf("First TSN marked was %x\n",
526 chk->rec.data.TSN_seq);
527 }
528 #endif
529 #ifdef SCTP_FR_LOGGING
530 tsnfirst = chk->rec.data.TSN_seq;
531 #endif
532 }
533 #ifdef SCTP_FR_LOGGING
534 tsnlast = chk->rec.data.TSN_seq;
535 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
536 0, SCTP_FR_T3_MARKED);
537
538 #endif
539 }
540 chk->sent = SCTP_DATAGRAM_RESEND;
541 /* reset the TSN for striking and other FR stuff */
542 chk->rec.data.doing_fast_retransmit = 0;
543 #ifdef SCTP_DEBUG
544 if (sctp_debug_on & SCTP_DEBUG_TIMER3) {
545 printf("mark TSN:%x for retransmission\n", chk->rec.data.TSN_seq);
546 }
547 #endif /* SCTP_DEBUG */
548 /* Clear any time so NO RTT is being done */
549 chk->do_rtt = 0;
550 /* Bump up the count */
551 if (compare_with_wrap(chk->rec.data.TSN_seq,
552 stcb->asoc.t3timeout_highest_marked,
553 MAX_TSN)) {
554 /* TSN_seq > than t3timeout so update */
555 stcb->asoc.t3timeout_highest_marked = chk->rec.data.TSN_seq;
556 }
557 if (alt != net) {
558 sctp_free_remote_addr(chk->whoTo);
559 chk->whoTo = alt;
560 alt->ref_count++;
561 }
562 if ((chk->rec.data.state_flags & SCTP_WINDOW_PROBE) !=
563 SCTP_WINDOW_PROBE) {
564 non_win_probes++;
565 } else {
566 chk->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
567 win_probes++;
568 }
569 }
570 if (chk->sent == SCTP_DATAGRAM_RESEND) {
571 cnt_mk++;
572 }
573 }
574
575 #ifdef SCTP_FR_LOGGING
576 sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
577 #endif
578 /* compensate for the number we marked */
579 stcb->asoc.peers_rwnd += (num_mk /* * sizeof(struct mbuf)*/);
580
581 #ifdef SCTP_DEBUG
582 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
583 if (num_mk) {
584 #ifdef SCTP_FR_LOGGING
585 printf("LAST TSN marked was %x\n", tsnlast);
586 #endif
587 printf("Num marked for retransmission was %d peer-rwd:%ld\n",
588 num_mk, (u_long)stcb->asoc.peers_rwnd);
589 #ifdef SCTP_FR_LOGGING
590 printf("LAST TSN marked was %x\n", tsnlast);
591 #endif
592 printf("Num marked for retransmission was %d peer-rwd:%d\n",
593 num_mk,
594 (int)stcb->asoc.peers_rwnd
595 );
596 }
597 }
598 #endif
599 *num_marked = num_mk;
600 if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
601 printf("Local Audit says there are %d for retran asoc cnt:%d\n",
602 cnt_mk, stcb->asoc.sent_queue_retran_cnt);
603 #ifndef SCTP_AUDITING_ENABLED
604 stcb->asoc.sent_queue_retran_cnt = cnt_mk;
605 #endif
606 }
607 #ifdef SCTP_DEBUG
608 if (sctp_debug_on & SCTP_DEBUG_TIMER3) {
609 printf("**************************\n");
610 }
611 #endif /* SCTP_DEBUG */
612
613 /* Now check for a ECN Echo that may be stranded */
614 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
615 if ((chk->whoTo == net) &&
616 (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
617 sctp_free_remote_addr(chk->whoTo);
618 chk->whoTo = alt;
619 if (chk->sent != SCTP_DATAGRAM_RESEND) {
620 chk->sent = SCTP_DATAGRAM_RESEND;
621 stcb->asoc.sent_queue_retran_cnt++;
622 }
623 alt->ref_count++;
624 }
625 }
626 if ((orig_rwnd == 0) && (stcb->asoc.total_flight == 0) &&
627 (orig_flight <= net->mtu)) {
628 /*
629 * If the LAST packet sent was not acked and our rwnd is 0
630 * then we are in a win-probe state.
631 */
632 win_probes = 1;
633 non_win_probes = 0;
634 #ifdef SCTP_DEBUG
635 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
636 printf("WIN_PROBE set via o_rwnd=0 tf=0 and all:%d fit in mtu:%d\n",
637 orig_flight, net->mtu);
638 }
639 #endif
640 }
641
642 if (audit_tf) {
643 #ifdef SCTP_DEBUG
644 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
645 printf("Audit total flight due to negative value net:%p\n",
646 net);
647 }
648 #endif /* SCTP_DEBUG */
649 stcb->asoc.total_flight = 0;
650 stcb->asoc.total_flight_count = 0;
651 /* Clear all networks flight size */
652 TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
653 lnets->flight_size = 0;
654 #ifdef SCTP_DEBUG
655 if (sctp_debug_on & SCTP_DEBUG_TIMER4) {
656 printf("Net:%p c-f cwnd:%d ssthresh:%d\n",
657 lnets, lnets->cwnd, lnets->ssthresh);
658 }
659 #endif /* SCTP_DEBUG */
660 }
661 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
662 if (chk->sent < SCTP_DATAGRAM_RESEND) {
663 stcb->asoc.total_flight += chk->book_size;
664 chk->whoTo->flight_size += chk->book_size;
665 stcb->asoc.total_flight_count++;
666 }
667 }
668 }
669 /* Setup the ecn nonce re-sync point. We
670 * do this since retranmissions are NOT
671 * setup for ECN. This means that do to
672 * Karn's rule, we don't know the total
673 * of the peers ecn bits.
674 */
675 chk = TAILQ_FIRST(&stcb->asoc.send_queue);
676 if (chk == NULL) {
677 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
678 } else {
679 stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
680 }
681 stcb->asoc.nonce_wait_for_ecne = 0;
682 stcb->asoc.nonce_sum_check = 0;
683 /* We return 1 if we only have a window probe outstanding */
684 if (win_probes && (non_win_probes == 0)) {
685 return (1);
686 }
687 return (0);
688 }
689
690 static void
691 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
692 struct sctp_nets *net,
693 struct sctp_nets *alt)
694 {
695 struct sctp_association *asoc;
696 struct sctp_stream_out *outs;
697 struct sctp_tmit_chunk *chk;
698
699 if (net == alt)
700 /* nothing to do */
701 return;
702
703 asoc = &stcb->asoc;
704
705 /*
706 * now through all the streams checking for chunks sent to our
707 * bad network.
708 */
709 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
710 /* now clean up any chunks here */
711 TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
712 if (chk->whoTo == net) {
713 sctp_free_remote_addr(chk->whoTo);
714 chk->whoTo = alt;
715 alt->ref_count++;
716 }
717 }
718 }
719 /* Now check the pending queue */
720 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
721 if (chk->whoTo == net) {
722 sctp_free_remote_addr(chk->whoTo);
723 chk->whoTo = alt;
724 alt->ref_count++;
725 }
726 }
727
728 }
729
730 int
731 sctp_t3rxt_timer(struct sctp_inpcb *inp,
732 struct sctp_tcb *stcb,
733 struct sctp_nets *net)
734 {
735 struct sctp_nets *alt;
736 int win_probe, num_mk;
737
738
739 #ifdef SCTP_FR_LOGGING
740 sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
741 #endif
742 /* Find an alternate and mark those for retransmission */
743 alt = sctp_find_alternate_net(stcb, net);
744 win_probe = sctp_mark_all_for_resend(stcb, net, alt, &num_mk);
745
746 /* FR Loss recovery just ended with the T3. */
747 stcb->asoc.fast_retran_loss_recovery = 0;
748
749 /* setup the sat loss recovery that prevents
750 * satellite cwnd advance.
751 */
752 stcb->asoc.sat_t3_loss_recovery = 1;
753 stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
754
755 /* Backoff the timer and cwnd */
756 sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
757 if (win_probe == 0) {
758 /* We don't do normal threshold management on window probes */
759 if (sctp_threshold_management(inp, stcb, net,
760 stcb->asoc.max_send_times)) {
761 /* Association was destroyed */
762 return (1);
763 } else {
764 if (net != stcb->asoc.primary_destination) {
765 /* send a immediate HB if our RTO is stale */
766 struct timeval now;
767 unsigned int ms_goneby;
768 SCTP_GETTIME_TIMEVAL(&now);
769 if (net->last_sent_time.tv_sec) {
770 ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
771 } else {
772 ms_goneby = 0;
773 }
774 if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
775 /* no recent feed back in an RTO or more, request a RTT update */
776 sctp_send_hb(stcb, 1, net);
777 }
778 }
779 }
780 } else {
781 /*
782 * For a window probe we don't penalize the net's but only
783 * the association. This may fail it if SACKs are not coming
784 * back. If sack's are coming with rwnd locked at 0, we will
785 * continue to hold things waiting for rwnd to raise
786 */
787 if (sctp_threshold_management(inp, stcb, NULL,
788 stcb->asoc.max_send_times)) {
789 /* Association was destroyed */
790 return (1);
791 }
792 }
793 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
794 /* Move all pending over too */
795 sctp_move_all_chunks_to_alt(stcb, net, alt);
796 /* Was it our primary? */
797 if ((stcb->asoc.primary_destination == net) && (alt != net)) {
798 /*
799 * Yes, note it as such and find an alternate
800 * note: this means HB code must use this to resent
801 * the primary if it goes active AND if someone does
802 * a change-primary then this flag must be cleared
803 * from any net structures.
804 */
805 if (sctp_set_primary_addr(stcb,
806 (struct sockaddr *)NULL,
807 alt) == 0) {
808 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
809 net->src_addr_selected = 0;
810 }
811 }
812 }
813 /*
814 * Special case for cookie-echo'ed case, we don't do output
815 * but must await the COOKIE-ACK before retransmission
816 */
817 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
818 /*
819 * Here we just reset the timer and start again since we
820 * have not established the asoc
821 */
822 #ifdef SCTP_DEBUG
823 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
824 printf("Special cookie case return\n");
825 }
826 #endif /* SCTP_DEBUG */
827 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
828 return (0);
829 }
830 if (stcb->asoc.peer_supports_prsctp) {
831 struct sctp_tmit_chunk *lchk;
832 lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
833 /* C3. See if we need to send a Fwd-TSN */
834 if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
835 stcb->asoc.last_acked_seq, MAX_TSN)) {
836 /*
837 * ISSUE with ECN, see FWD-TSN processing for notes
838 * on issues that will occur when the ECN NONCE stuff
839 * is put into SCTP for cross checking.
840 */
841 #ifdef SCTP_DEBUG
842 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
843 printf("Forward TSN time\n");
844 }
845 #endif /* SCTP_DEBUG */
846 send_forward_tsn(stcb, &stcb->asoc);
847 if (lchk) {
848 /* Assure a timer is up */
849 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
850 }
851 }
852 }
853 return (0);
854 }
855
856 int
857 sctp_t1init_timer(struct sctp_inpcb *inp,
858 struct sctp_tcb *stcb,
859 struct sctp_nets *net)
860 {
861 /* bump the thresholds */
862 if (stcb->asoc.delayed_connection) {
863 /* special hook for delayed connection. The
864 * library did NOT complete the rest of its
865 * sends.
866 */
867 stcb->asoc.delayed_connection = 0;
868 sctp_send_initiate(inp, stcb);
869 return (0);
870 }
871 if (sctp_threshold_management(inp, stcb, net,
872 stcb->asoc.max_init_times)) {
873 /* Association was destroyed */
874 return (1);
875 }
876 stcb->asoc.dropped_special_cnt = 0;
877 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
878 if (stcb->asoc.initial_init_rto_max < net->RTO) {
879 net->RTO = stcb->asoc.initial_init_rto_max;
880 }
881 if (stcb->asoc.numnets > 1) {
882 /* If we have more than one addr use it */
883 struct sctp_nets *alt;
884 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination);
885 if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
886 sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
887 stcb->asoc.primary_destination = alt;
888 }
889 }
890 /* Send out a new init */
891 sctp_send_initiate(inp, stcb);
892 return (0);
893 }
894
895 /*
896 * For cookie and asconf we actually need to find and mark for resend,
897 * then increment the resend counter (after all the threshold management
898 * stuff of course).
899 */
/*
 * Cookie timer expiry: locate the pending COOKIE-ECHO on the control
 * queue, run threshold management, back off and possibly switch its
 * destination, and mark it for retransmission.  If no cookie is found
 * while in COOKIE-ECHOED state the association is aborted.
 * Returns 1 when the association was destroyed, else 0.
 */
int sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;
	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
			/* FOOBAR! */
			struct mbuf *oper;
			MGET(oper, M_DONTWAIT, MT_DATA);
			if (oper) {
				/* Build a protocol-violation cause for the ABORT. */
				struct sctp_paramhdr *ph;
				u_int32_t *ippp;

				oper->m_len = sizeof(struct sctp_paramhdr) +
				    sizeof(*ippp);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(oper->m_len);
				ippp = (u_int32_t *)(ph + 1);
				/* internal marker identifying this abort site */
				*ippp = htonl(0x40000002);
			}
			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
			    oper);
		}
		return (1);
	}
	/* Ok we found the cookie, threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * cleared threshold management now lets backoff the address &
	 * select an alternate
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo);
	if (alt != cookie->whoTo) {
		/* re-home the cookie; swap the destination refcounts */
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		alt->ref_count++;
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		stcb->asoc.sent_queue_retran_cnt++;
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * Now call the output routine to kick out the cookie again, Note we
	 * don't mark any chunks for retran so that FR will need to kick in
	 * to move these (or a send timer).
	 */
	return (0);
}
964
/*
 * Stream-reset timer expiry: find the pending STREAM-RESET request on
 * the control queue, run threshold management, back off its
 * destination and move it (plus any stranded ECN-ECHO) to an
 * alternate, mark it for resend and restart the timer.
 * Returns 1 when threshold management destroyed the association.
 */
int sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *strrst, *chk;
	struct sctp_stream_reset_req *strreq;
	/* find the existing STRRESET */
	TAILQ_FOREACH(strrst, &stcb->asoc.control_send_queue,
	    sctp_next) {
		if (strrst->rec.chunk_id == SCTP_STREAM_RESET) {
			/* is it what we want */
			strreq = mtod(strrst->data, struct sctp_stream_reset_req *);
			if (strreq->sr_req.ph.param_type == ntohs(SCTP_STR_RESET_REQUEST)) {
				break;
			}
		}
	}
	if (strrst == NULL) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("Strange, strreset timer fires, but I can't find an str-reset?\n");
		}
#endif /* SCTP_DEBUG */
		return (0);
	}
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
	    stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}

	/*
	 * cleared threshold management
	 * now lets backoff the address & select an alternate
	 */
	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, strrst->whoTo);
	/* swap the destination's refcount to the alternate */
	sctp_free_remote_addr(strrst->whoTo);
	strrst->whoTo = alt;
	alt->ref_count++;

	/* See if a ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				stcb->asoc.sent_queue_retran_cnt++;
			}
			chk->whoTo = alt;
			alt->ref_count++;
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/*
		 * If the address went un-reachable, we need to move
		 * to alternates for ALL chk's in queue
		 */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
	}
	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		stcb->asoc.sent_queue_retran_cnt++;
	strrst->sent = SCTP_DATAGRAM_RESEND;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
	return (0);
}
1036
/*
 * ASCONF timer expiry: send a fresh ASCONF on first expiry, otherwise
 * retransmit the queued one — threshold management, back-off, move to
 * an alternate destination (plus any stranded ECN-ECHO), and mark it
 * for resend.  A peer that ignores too many ASCONFs is declared
 * ASCONF-incapable and cleaned up.
 * Returns 1 when threshold management destroyed the association.
 */
int sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk;

	/* is this the first send, or a retransmission? */
	if (stcb->asoc.asconf_sent == 0) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net);
	} else {
		/* Retransmission of the existing ASCONF needed... */

		/* find the existing ASCONF */
		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
		    sctp_next) {
			if (asconf->rec.chunk_id == SCTP_ASCONF) {
				break;
			}
		}
		if (asconf == NULL) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("Strange, asconf timer fires, but I can't find an asconf?\n");
			}
#endif /* SCTP_DEBUG */
			return (0);
		}
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
		    stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}

		/* PETER? FIX? How will the following code ever run? If
		 * the max_send_times is hit, threshold management will
		 * blow away the association?
		 */
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten, peer is not responding to
			 * ASCONFs but maybe is to data etc. e.g. it is not
			 * properly handling the chunk type upper bits
			 * Mark this peer as ASCONF incapable and cleanup
			 */
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			}
#endif /* SCTP_DEBUG */
			sctp_asconf_cleanup(stcb, net);
			return (0);
		}
		/*
		 * cleared threshold management
		 * now lets backoff the address & select an alternate
		 */
		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
		alt = sctp_find_alternate_net(stcb, asconf->whoTo);
		/* swap the destination's refcount to the alternate */
		sctp_free_remote_addr(asconf->whoTo);
		asconf->whoTo = alt;
		alt->ref_count++;

		/* See if a ECN Echo is also stranded */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id == SCTP_ECN_ECHO)) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					stcb->asoc.sent_queue_retran_cnt++;
				}
				alt->ref_count++;

			}
		}
		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * If the address went un-reachable, we need to move
			 * to alternates for ALL chk's in queue
			 */
			sctp_move_all_chunks_to_alt(stcb, net, alt);
		}
		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			stcb->asoc.sent_queue_retran_cnt++;
		asconf->sent = SCTP_DATAGRAM_RESEND;
	}
	return (0);
}
1129
1130 /*
1131 * For the shutdown and shutdown-ack, we do not keep one around on the
1132 * control queue. This means we must generate a new one and call the general
1133 * chunk output routine, AFTER having done threshold
1134 * management.
1135 */
1136 int
1137 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1138 struct sctp_nets *net)
1139 {
1140 struct sctp_nets *alt;
1141 /* first threshold managment */
1142 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1143 /* Assoc is over */
1144 return (1);
1145 }
1146 /* second select an alternative */
1147 alt = sctp_find_alternate_net(stcb, net);
1148
1149 /* third generate a shutdown into the queue for out net */
1150 #ifdef SCTP_DEBUG
1151 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1152 printf("%s:%d sends a shutdown\n",
1153 __FILE__,
1154 __LINE__
1155 );
1156 }
1157 #endif
1158 if (alt) {
1159 sctp_send_shutdown(stcb, alt);
1160 } else {
1161 /* if alt is NULL, there is no dest
1162 * to send to??
1163 */
1164 return (0);
1165 }
1166 /* fourth restart timer */
1167 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1168 return (0);
1169 }
1170
1171 int sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1172 struct sctp_nets *net)
1173 {
1174 struct sctp_nets *alt;
1175 /* first threshold managment */
1176 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1177 /* Assoc is over */
1178 return (1);
1179 }
1180 /* second select an alternative */
1181 alt = sctp_find_alternate_net(stcb, net);
1182
1183 /* third generate a shutdown into the queue for out net */
1184 sctp_send_shutdown_ack(stcb, alt);
1185
1186 /* fourth restart timer */
1187 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1188 return (0);
1189 }
1190
1191 static void
1192 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
1193 struct sctp_tcb *stcb)
1194 {
1195 struct sctp_stream_out *outs;
1196 struct sctp_tmit_chunk *chk;
1197 unsigned int chks_in_queue=0;
1198
1199 if ((stcb == NULL) || (inp == NULL))
1200 return;
1201 if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
1202 printf("Strange, out_wheel empty nothing on sent/send and tot=%lu?\n",
1203 (u_long)stcb->asoc.total_output_queue_size);
1204 stcb->asoc.total_output_queue_size = 0;
1205 return;
1206 }
1207 if (stcb->asoc.sent_queue_retran_cnt) {
1208 printf("Hmm, sent_queue_retran_cnt is non-zero %d\n",
1209 stcb->asoc.sent_queue_retran_cnt);
1210 stcb->asoc.sent_queue_retran_cnt = 0;
1211 }
1212 /* Check to see if some data queued, if so report it */
1213 TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
1214 if (!TAILQ_EMPTY(&outs->outqueue)) {
1215 TAILQ_FOREACH(chk, &outs->outqueue, sctp_next) {
1216 chks_in_queue++;
1217 }
1218 }
1219 }
1220 if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
1221 printf("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
1222 stcb->asoc.stream_queue_cnt, chks_in_queue);
1223 }
1224 if (chks_in_queue) {
1225 /* call the output queue function */
1226 sctp_chunk_output(inp, stcb, 1);
1227 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1228 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1229 /* Probably should go in and make it go back through and add fragments allowed */
1230 printf("Still nothing moved %d chunks are stuck\n", chks_in_queue);
1231 }
1232 } else {
1233 printf("Found no chunks on any queue tot:%lu\n",
1234 (u_long)stcb->asoc.total_output_queue_size);
1235 stcb->asoc.total_output_queue_size = 0;
1236 }
1237 }
1238
1239 int
1240 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1241 struct sctp_nets *net)
1242 {
1243 int cnt_of_unconf=0;
1244
1245 if (net) {
1246 if (net->hb_responded == 0) {
1247 sctp_backoff_on_timeout(stcb, net, 1, 0);
1248 }
1249 /* Zero PBA, if it needs it */
1250 if (net->partial_bytes_acked) {
1251 net->partial_bytes_acked = 0;
1252 }
1253 }
1254 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1255 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1256 (net->dest_state & SCTP_ADDR_REACHABLE)) {
1257 cnt_of_unconf++;
1258 }
1259 }
1260 if ((stcb->asoc.total_output_queue_size > 0) &&
1261 (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1262 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1263 sctp_audit_stream_queues_for_size(inp, stcb);
1264 }
1265 /* Send a new HB, this will do threshold managment, pick a new dest */
1266 if (sctp_send_hb(stcb, 0, NULL) < 0) {
1267 return (1);
1268 }
1269 if (cnt_of_unconf > 1) {
1270 /*
1271 * this will send out extra hb's up to maxburst if
1272 * there are any unconfirmed addresses.
1273 */
1274 int cnt_sent = 1;
1275 while ((cnt_sent < stcb->asoc.max_burst) && (cnt_of_unconf > 1)) {
1276 if (sctp_send_hb(stcb, 0, NULL) == 0)
1277 break;
1278 cnt_of_unconf--;
1279 cnt_sent++;
1280 }
1281 }
1282 return (0);
1283 }
1284
/*
 * Table of well-known link MTU values, in strictly ascending order, used
 * by sctp_getnext_mtu() for path-MTU probing.  SCTP_NUMBER_OF_MTU_SIZES
 * must match the number of entries below.
 */
#define SCTP_NUMBER_OF_MTU_SIZES 18
static u_int32_t mtu_sizes[]={
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
1306
1307
1308 static u_int32_t
1309 sctp_getnext_mtu(struct sctp_inpcb *inp, u_int32_t cur_mtu)
1310 {
1311 /* select another MTU that is just bigger than this one */
1312 int i;
1313
1314 for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
1315 if (cur_mtu < mtu_sizes[i]) {
1316 /* no max_mtu is bigger than this one */
1317 return (mtu_sizes[i]);
1318 }
1319 }
1320 /* here return the highest allowable */
1321 return (cur_mtu);
1322 }
1323
1324
/*
 * Path-MTU raise timer expiry handler.  Tries to bump the net's MTU to
 * the next larger well-known size, provided the cached route's interface
 * can carry it, then restarts the timer.
 *
 * Note: despite restarting "in any case" below, the function returns
 * early -- without restarting the timer -- when no larger candidate MTU
 * exists.
 */
void sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	u_int32_t next_mtu;
	struct rtentry *rt;

	/* pick the next candidate MTU; restart the timer below */
	next_mtu = sctp_getnext_mtu(inp, net->mtu);
	if (next_mtu <= net->mtu) {
		/* nothing to do: already at the largest table entry */
		return;
	}
	rt = rtcache_validate(&net->ro);
	if (rt != NULL) {
		/* only if we have a route and interface do we
		 * set anything. Note we always restart
		 * the timer though just in case it is updated
		 * (i.e. the ifp) or route/ifp is populated.
		 */
		if (rt->rt_ifp != NULL) {
			if (rt->rt_ifp->if_mtu > next_mtu) {
				/* ok it will fit out the door */
				net->mtu = next_mtu;
			}
		}
		rtcache_unref(rt, &net->ro);
	}
	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}
1356
/*
 * Autoclose timer expiry handler.  When the association has been idle
 * (no send or receive) for at least sctp_autoclose_ticks and the output
 * queues are empty, initiates SHUTDOWN; otherwise re-arms the timer for
 * the remaining idle interval.  Does nothing when autoclose is disabled.
 */
void sctp_autoclose_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	int ticks_gone_by;

	SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks &&
	    (inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use: whichever activity was more recent */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Now has long enough transpired to autoclose? */
		ticks_gone_by = ((tn.tv_sec - tim_touse->tv_sec) * hz);
		if ((ticks_gone_by > 0) &&
		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
			/*
			 * autoclose time has hit, call the output routine,
			 * which should do nothing just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send shutdown
			 */
			sctp_chunk_output(inp, stcb, 9);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send,
				 * so I'm done...
				 */
				if (SCTP_GET_STATE(asoc) !=
				    SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
#ifdef SCTP_DEBUG
					if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
						printf("%s:%d sends a shutdown\n",
						    __FILE__,
						    __LINE__
						    );
					}
#endif
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
					/* arm both the shutdown retransmit
					 * and the shutdown guard timers */
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}
		} else {
			/*
			 * No auto close at this time, reset t-o to
			 * check later
			 */
			int tmp;
			/* fool the timer startup to use the time left */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    net);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}
1432
1433 void
1434 sctp_iterator_timer(struct sctp_iterator *it)
1435 {
1436 int cnt= 0;
1437 /* only one iterator can run at a
1438 * time. This is the only way we
1439 * can cleanly pull ep's from underneath
1440 * all the running interators when a
1441 * ep is freed.
1442 */
1443 SCTP_ITERATOR_LOCK();
1444 if (it->inp == NULL) {
1445 /* iterator is complete */
1446 done_with_iterator:
1447 SCTP_ITERATOR_UNLOCK();
1448 SCTP_INP_INFO_WLOCK();
1449 LIST_REMOVE(it, sctp_nxt_itr);
1450 /* stopping the callout is not needed, in theory,
1451 * but I am paranoid.
1452 */
1453 SCTP_INP_INFO_WUNLOCK();
1454 callout_stop(&it->tmr.timer);
1455 if (it->function_atend != NULL) {
1456 (*it->function_atend)(it->pointer, it->val);
1457 }
1458 callout_destroy(&it->tmr.timer);
1459 free(it, M_PCB);
1460 return;
1461 }
1462 select_a_new_ep:
1463 SCTP_INP_WLOCK(it->inp);
1464 while ((it->pcb_flags) && ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) {
1465 /* we do not like this ep */
1466 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1467 SCTP_INP_WUNLOCK(it->inp);
1468 goto done_with_iterator;
1469 }
1470 SCTP_INP_WUNLOCK(it->inp);
1471 it->inp = LIST_NEXT(it->inp, sctp_list);
1472 if (it->inp == NULL) {
1473 goto done_with_iterator;
1474 }
1475 SCTP_INP_WLOCK(it->inp);
1476 }
1477 if ((it->inp->inp_starting_point_for_iterator != NULL) &&
1478 (it->inp->inp_starting_point_for_iterator != it)) {
1479 printf("Iterator collision, we must wait for other iterator at %p\n",
1480 it->inp);
1481 SCTP_INP_WUNLOCK(it->inp);
1482 goto start_timer_return;
1483 }
1484 /* now we do the actual write to this guy */
1485 it->inp->inp_starting_point_for_iterator = it;
1486 SCTP_INP_WUNLOCK(it->inp);
1487 SCTP_INP_RLOCK(it->inp);
1488 /* if we reach here we found a inp acceptable, now through each
1489 * one that has the association in the right state
1490 */
1491 if (it->stcb == NULL) {
1492 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1493 }
1494 if (it->stcb->asoc.stcb_starting_point_for_iterator == it) {
1495 it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
1496 }
1497 while (it->stcb) {
1498 SCTP_TCB_LOCK(it->stcb);
1499 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1500 SCTP_TCB_UNLOCK(it->stcb);
1501 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1502 continue;
1503 }
1504 cnt++;
1505 /* run function on this one */
1506 SCTP_INP_RUNLOCK(it->inp);
1507 (*it->function_toapply)(it->inp, it->stcb, it->pointer, it->val);
1508 sctp_chunk_output(it->inp, it->stcb, 1);
1509 SCTP_TCB_UNLOCK(it->stcb);
1510 /* see if we have limited out */
1511 if (cnt > SCTP_MAX_ITERATOR_AT_ONCE) {
1512 it->stcb->asoc.stcb_starting_point_for_iterator = it;
1513 start_timer_return:
1514 SCTP_ITERATOR_UNLOCK();
1515 sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it, NULL, NULL);
1516 return;
1517 }
1518 SCTP_INP_RLOCK(it->inp);
1519 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1520 }
1521 /* if we reach here, we ran out of stcb's in the inp we are looking at */
1522 SCTP_INP_RUNLOCK(it->inp);
1523 SCTP_INP_WLOCK(it->inp);
1524 it->inp->inp_starting_point_for_iterator = NULL;
1525 SCTP_INP_WUNLOCK(it->inp);
1526 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1527 it->inp = NULL;
1528 } else {
1529 SCTP_INP_INFO_RLOCK();
1530 it->inp = LIST_NEXT(it->inp, sctp_list);
1531 SCTP_INP_INFO_RUNLOCK();
1532 }
1533 if (it->inp == NULL) {
1534 goto done_with_iterator;
1535 }
1536 goto select_a_new_ep;
1537 }
1538