
Lines matching refs:stcb — cross-reference hits for the association control block pointer stcb in the SCTP timer code under netinet; each entry is prefixed with its line number in the source file.

134 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
151 if (net == stcb->asoc.primary_destination) {
155 stcb,
167 if (stcb == NULL)
172 stcb->asoc.overall_error_count++;
175 stcb->asoc.overall_error_count++;
180 &stcb->asoc,
181 stcb->asoc.overall_error_count,
189 if (stcb->asoc.overall_error_count > threshold) {
205 sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
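The first group of hits (source lines 134-205) is sctp_threshold_management(): each timeout bumps the failed destination's error counter and the association-wide overall_error_count, and once the association-wide count exceeds the caller-supplied threshold the association is aborted. A minimal sketch of that pattern, assuming the struct sctp_nets type and its error_count field (only the stcb-side fields appear in the hits), with a NULL operational-error cause for brevity:

    static int
    threshold_sketch(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
        struct sctp_nets *net, uint16_t threshold)
    {
        if (net != NULL)
            net->error_count++;               /* per-destination counter (assumed field name) */
        if (stcb == NULL)
            return (0);                       /* nothing association-wide to check (line 167) */
        stcb->asoc.overall_error_count++;     /* association-wide counter (lines 172, 175) */
        if (stcb->asoc.overall_error_count > threshold) {   /* line 189 */
            /* peer considered unreachable: tear the association down (line 205) */
            sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, NULL);
            return (1);                       /* caller must treat stcb as gone */
        }
        return (0);
    }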
212 sctp_find_alternate_net(struct sctp_tcb *stcb,
220 if (stcb->asoc.numnets == 1) {
222 return (TAILQ_FIRST(&stcb->asoc.nets));
228 mnet = TAILQ_FIRST(&stcb->asoc.nets);
237 alt = TAILQ_FIRST(&stcb->asoc.nets);
268 alt = TAILQ_FIRST(&stcb->asoc.nets);
285 sctp_backoff_on_timeout(struct sctp_tcb *stcb,
303 if (net->RTO > stcb->asoc.maxrto) {
304 net->RTO = stcb->asoc.maxrto;
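sctp_backoff_on_timeout() (hits at source lines 285-304) applies the usual RTO back-off: the retransmission timeout of the failed destination is increased and then clamped to the association's configured maximum. Only the clamp is visible in the hits; the doubling step in this sketch is the standard SCTP back-off and is an assumption here:

    static void
    backoff_sketch(struct sctp_tcb *stcb, struct sctp_nets *net)
    {
        net->RTO <<= 1;                       /* exponential back-off (assumed step) */
        if (net->RTO > stcb->asoc.maxrto)
            net->RTO = stcb->asoc.maxrto;     /* clamp, as at lines 303-304 */
    }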
344 sctp_mark_all_for_resend(struct sctp_tcb *stcb,
405 if (stcb->asoc.total_flight >= net->flight_size) {
406 stcb->asoc.total_flight -= net->flight_size;
409 stcb->asoc.total_flight = 0;
414 orig_rwnd = stcb->asoc.peers_rwnd;
416 stcb->asoc.peers_rwnd += net->flight_size;
431 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
434 if ((compare_with_wrap(stcb->asoc.last_acked_seq,
437 (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
440 TAILQ_REMOVE(&stcb->asoc.sent_queue, chk, sctp_next);
442 sctp_release_pr_sctp_chunk(stcb, chk, 0xffff,
443 &stcb->asoc.sent_queue);
445 stcb->asoc.sent_queue_cnt_removeable--;
448 stcb->asoc.sent_queue_cnt--;
499 if (stcb->asoc.total_flight_count > 0) {
500 stcb->asoc.total_flight_count--;
509 sctp_release_pr_sctp_chunk(stcb,
512 &stcb->asoc.sent_queue);
518 stcb->asoc.sent_queue_retran_cnt++;
551 stcb->asoc.t3timeout_highest_marked,
554 stcb->asoc.t3timeout_highest_marked = chk->rec.data.TSN_seq;
578 stcb->asoc.peers_rwnd += (num_mk /* * sizeof(struct mbuf)*/);
587 num_mk, (u_long)stcb->asoc.peers_rwnd);
593 (int)stcb->asoc.peers_rwnd
599 if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
601 cnt_mk, stcb->asoc.sent_queue_retran_cnt);
603 stcb->asoc.sent_queue_retran_cnt = cnt_mk;
613 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
620 stcb->asoc.sent_queue_retran_cnt++;
625 if ((orig_rwnd == 0) && (stcb->asoc.total_flight == 0) &&
648 stcb->asoc.total_flight = 0;
649 stcb->asoc.total_flight_count = 0;
651 TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
660 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
662 stcb->asoc.total_flight += chk->book_size;
664 stcb->asoc.total_flight_count++;
674 chk = TAILQ_FIRST(&stcb->asoc.send_queue);
676 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
678 stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
680 stcb->asoc.nonce_wait_for_ecne = 0;
681 stcb->asoc.nonce_sum_check = 0;
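sctp_mark_all_for_resend() (hits around source lines 344-681) walks the sent queue when a T3 timer fires, pulling the failed destination's chunks out of the flight-size and receiver-window accounting before they are queued for retransmission. The guard at lines 405-409 prevents the association-wide counter from underflowing. A sketch of just that accounting step, with the field names taken from the hits and the final reset assumed:

    /* hypothetical helper illustrating the accounting at lines 405-416 */
    static void
    deflate_flight_sketch(struct sctp_tcb *stcb, struct sctp_nets *net)
    {
        if (stcb->asoc.total_flight >= net->flight_size)
            stcb->asoc.total_flight -= net->flight_size;
        else
            stcb->asoc.total_flight = 0;      /* never let the counter wrap */
        stcb->asoc.peers_rwnd += net->flight_size;   /* data no longer in flight counts
                                                      * against the peer's window again */
        net->flight_size = 0;                 /* assumed reset; not visible in the hits */
    }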
690 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
702 asoc = &stcb->asoc;
731 struct sctp_tcb *stcb,
742 alt = sctp_find_alternate_net(stcb, net);
743 win_probe = sctp_mark_all_for_resend(stcb, net, alt, &num_mk);
746 stcb->asoc.fast_retran_loss_recovery = 0;
751 stcb->asoc.sat_t3_loss_recovery = 1;
752 stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
755 sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
758 if (sctp_threshold_management(inp, stcb, net,
759 stcb->asoc.max_send_times)) {
763 if (net != stcb->asoc.primary_destination) {
775 sctp_send_hb(stcb, 1, net);
786 if (sctp_threshold_management(inp, stcb, NULL,
787 stcb->asoc.max_send_times)) {
794 sctp_move_all_chunks_to_alt(stcb, net, alt);
796 if ((stcb->asoc.primary_destination == net) && (alt != net)) {
804 if (sctp_set_primary_addr(stcb,
816 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
826 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
829 if (stcb->asoc.peer_supports_prsctp) {
831 lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
833 if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
834 stcb->asoc.last_acked_seq, MAX_TSN)) {
845 send_forward_tsn(stcb, &stcb->asoc);
848 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
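The hits between source lines 731 and 848 are sctp_t3rxt_timer(), the T3 retransmission timeout handler. Condensed into a sketch, with the call names and argument lists exactly as they appear in the hits, error handling and the PR-SCTP forward-TSN branch trimmed:

    static int
    t3rxt_sketch(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net)
    {
        struct sctp_nets *alt;
        int win_probe, num_mk;

        alt = sctp_find_alternate_net(stcb, net);                       /* line 742 */
        win_probe = sctp_mark_all_for_resend(stcb, net, alt, &num_mk);  /* line 743 */
        sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);          /* line 755 */
        if (sctp_threshold_management(inp, stcb, net,
            stcb->asoc.max_send_times))                                 /* lines 758-759 */
            return (1);                       /* association was aborted */
        sctp_move_all_chunks_to_alt(stcb, net, alt);                    /* line 794 */
        /* if the primary path failed, lines 796-804 promote the alternate */
        sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);         /* line 826 */
        return (0);
    }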
857 struct sctp_tcb *stcb,
861 if (stcb->asoc.delayed_connection) {
866 stcb->asoc.delayed_connection = 0;
867 sctp_send_initiate(inp, stcb);
870 if (sctp_threshold_management(inp, stcb, net,
871 stcb->asoc.max_init_times)) {
875 stcb->asoc.dropped_special_cnt = 0;
876 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
877 if (stcb->asoc.initial_init_rto_max < net->RTO) {
878 net->RTO = stcb->asoc.initial_init_rto_max;
880 if (stcb->asoc.numnets > 1) {
883 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination);
884 if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
885 sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
886 stcb->asoc.primary_destination = alt;
890 sctp_send_initiate(inp, stcb);
900 struct sctp_tcb *stcb,
906 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
912 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
928 sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
934 if (sctp_threshold_management(inp, stcb, cookie->whoTo,
935 stcb->asoc.max_init_times)) {
943 stcb->asoc.dropped_special_cnt = 0;
944 sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
945 alt = sctp_find_alternate_net(stcb, cookie->whoTo);
953 stcb->asoc.sent_queue_retran_cnt++;
964 int sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
971 TAILQ_FOREACH(strrst, &stcb->asoc.control_send_queue,
990 if (sctp_threshold_management(inp, stcb, strrst->whoTo,
991 stcb->asoc.max_send_times)) {
1000 sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
1001 alt = sctp_find_alternate_net(stcb, strrst->whoTo);
1007 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1013 stcb->asoc.sent_queue_retran_cnt++;
1024 sctp_move_all_chunks_to_alt(stcb, net, alt);
1028 stcb->asoc.sent_queue_retran_cnt++;
1032 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
1036 int sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1043 if (stcb->asoc.asconf_sent == 0) {
1045 sctp_send_asconf(stcb, net);
1050 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
1065 if (sctp_threshold_management(inp, stcb, asconf->whoTo,
1066 stcb->asoc.max_send_times)) {
1075 if (asconf->snd_count > stcb->asoc.max_send_times) {
1087 sctp_asconf_cleanup(stcb, net);
1094 sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
1095 alt = sctp_find_alternate_net(stcb, asconf->whoTo);
1101 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1108 stcb->asoc.sent_queue_retran_cnt++;
1119 sctp_move_all_chunks_to_alt(stcb, net, alt);
1123 stcb->asoc.sent_queue_retran_cnt++;
1136 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1141 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1146 alt = sctp_find_alternate_net(stcb, net);
1158 sctp_send_shutdown(stcb, alt);
1166 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1170 int sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1175 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1180 alt = sctp_find_alternate_net(stcb, net);
1183 sctp_send_shutdown_ack(stcb, alt);
1186 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1192 struct sctp_tcb *stcb)
1198 if ((stcb == NULL) || (inp == NULL))
1200 if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
1202 (u_long)stcb->asoc.total_output_queue_size);
1203 stcb->asoc.total_output_queue_size = 0;
1206 if (stcb->asoc.sent_queue_retran_cnt) {
1208 stcb->asoc.sent_queue_retran_cnt);
1209 stcb->asoc.sent_queue_retran_cnt = 0;
1212 TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
1219 if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
1221 stcb->asoc.stream_queue_cnt, chks_in_queue);
1225 sctp_chunk_output(inp, stcb, 1);
1226 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1227 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1233 (u_long)stcb->asoc.total_output_queue_size);
1234 stcb->asoc.total_output_queue_size = 0;
1239 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1246 sctp_backoff_on_timeout(stcb, net, 1, 0);
1253 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1259 if ((stcb->asoc.total_output_queue_size > 0) &&
1260 (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1261 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1262 sctp_audit_stream_queues_for_size(inp, stcb);
1265 if (sctp_send_hb(stcb, 0, NULL) < 0) {
1274 while ((cnt_sent < stcb->asoc.max_burst) && (cnt_of_unconf > 1)) {
1275 if (sctp_send_hb(stcb, 0, NULL) == 0)
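sctp_heartbeat_timer() (hits at source lines 1239-1275) backs off the destination whose heartbeat went unanswered, audits the stream queues if the counters claim queued data while both the send and sent queues are empty, and then emits heartbeats: up to max_burst of them while more than one destination remains unconfirmed. A condensed sketch of that loop; the way cnt_of_unconf is computed and the SCTP_ADDR_UNCONFIRMED flag are assumptions, the rest mirrors the hits:

    static int
    heartbeat_sketch(struct sctp_tcb *stcb)
    {
        int cnt_sent = 0, cnt_of_unconf = 0;
        struct sctp_nets *net;

        TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {       /* line 1253 */
            if (net->dest_state & SCTP_ADDR_UNCONFIRMED)        /* assumed flag */
                cnt_of_unconf++;
        }
        if (sctp_send_hb(stcb, 0, NULL) < 0)                    /* line 1265 */
            return (1);
        while ((cnt_sent < stcb->asoc.max_burst) &&
               (cnt_of_unconf > 1)) {                           /* line 1274 */
            if (sctp_send_hb(stcb, 0, NULL) == 0)               /* line 1275 */
                break;
            cnt_of_unconf--;
            cnt_sent++;
        }
        return (0);
    }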
1325 struct sctp_tcb *stcb,
1353 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
1357 struct sctp_tcb *stcb,
1365 if (stcb->asoc.sctp_autoclose_ticks &&
1368 asoc = &stcb->asoc;
1386 sctp_chunk_output(inp, stcb, 9);
1405 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
1408 stcb->sctp_ep, stcb,
1411 stcb->sctp_ep, stcb,
1424 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1490 if (it->stcb == NULL) {
1491 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1493 if (it->stcb->asoc.stcb_starting_point_for_iterator == it) {
1494 it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
1496 while (it->stcb) {
1497 SCTP_TCB_LOCK(it->stcb);
1498 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1499 SCTP_TCB_UNLOCK(it->stcb);
1500 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1506 (*it->function_toapply)(it->inp, it->stcb, it->pointer, it->val);
1507 sctp_chunk_output(it->inp, it->stcb, 1);
1508 SCTP_TCB_UNLOCK(it->stcb);
1511 it->stcb->asoc.stcb_starting_point_for_iterator = it;
1518 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1520 /* if we reach here, we ran out of stcb's in the inp we are looking at */
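The final group of hits (source lines 1490 onward) is the association iterator worker: it walks every stcb on an endpoint's sctp_asoc_list, takes the TCB lock, skips associations whose state does not match the iterator's filter, applies the caller's function, and flushes any output that function queued. A condensed sketch of that loop; the struct sctp_iterator layout is inferred from the member names in the hits:

    static void
    iterator_sketch(struct sctp_iterator *it)   /* struct name assumed */
    {
        struct sctp_tcb *stcb;

        for (stcb = LIST_FIRST(&it->inp->sctp_asoc_list); stcb != NULL;
            stcb = LIST_NEXT(stcb, sctp_tcblist)) {
            SCTP_TCB_LOCK(stcb);                                /* line 1497 */
            if (it->asoc_state &&
                (stcb->asoc.state & it->asoc_state) != it->asoc_state) {
                SCTP_TCB_UNLOCK(stcb);                          /* wrong state: skip it */
                continue;
            }
            (*it->function_toapply)(it->inp, stcb, it->pointer, it->val);  /* line 1506 */
            sctp_chunk_output(it->inp, stcb, 1);                /* push out anything queued */
            SCTP_TCB_UNLOCK(stcb);
        }
        /* when the loop ends we have run out of stcb's in this inp (line 1520) */
    }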