Lines matching refs:tp — references to the TCP control block pointer (struct tcpcb *tp) in the Reno, NewReno, and CUBIC congestion-control code.
201 static void tcp_reno_congestion_exp(struct tcpcb *tp);
209 static void tcp_cubic_slow_retransmit(struct tcpcb *tp);
318 tcp_congctl_select(struct tcpcb *tp, const char *name)
325 old_found = (tp == NULL || tp->t_congctl == NULL);
331 if (!old_found && tccp->congctl_ctl == tp->t_congctl) {
342 if (tp) {
346 tp->t_congctl = new_tccp->congctl_ctl;
363 tcp_congctl_release(struct tcpcb *tp)
367 KASSERT(tp->t_congctl);
370 if (tccp->congctl_ctl == tp->t_congctl) {
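The matches above come from the selection and release paths: a tcpcb is bound to a congestion-control algorithm looked up by name, tp->t_congctl ends up pointing at the chosen algorithm's handler table, and tcp_congctl_release later finds the entry whose congctl_ctl matches tp->t_congctl to drop the reference. A minimal standalone sketch of that pattern follows; the names (struct congctl, congctl_select, algos) are invented for the illustration and are not the kernel's.

    #include <stddef.h>
    #include <string.h>

    /* One registered algorithm: a name, a use count, and (in the real
     * code) a table of handler functions. */
    struct congctl {
        const char *name;
        int refcnt;
    };

    static struct congctl algos[] = {
        { "reno", 0 }, { "newreno", 0 }, { "cubic", 0 },
    };

    /* Pick an algorithm by name, dropping the reference held on the old
     * one. Returns the new descriptor, or NULL if the name is unknown. */
    static struct congctl *
    congctl_select(struct congctl *old, const char *name)
    {
        for (size_t i = 0; i < sizeof(algos) / sizeof(algos[0]); i++) {
            if (strcmp(algos[i].name, name) == 0) {
                if (old != NULL)
                    old->refcnt--;
                algos[i].refcnt++;
                return &algos[i];
            }
        }
        return NULL;
    }
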
428 tcp_common_congestion_exp(struct tcpcb *tp, int betaa, int betab)
435 win = ulmin(tp->snd_wnd, tp->snd_cwnd) * betaa / betab / tp->t_segsz;
439 tp->snd_ssthresh = win * tp->t_segsz;
440 tp->snd_recover = tp->snd_max;
441 tp->snd_cwnd = tp->snd_ssthresh;
447 if (TCP_ECN_ALLOWED(tp))
448 tp->t_flags |= TF_ECN_SND_CWR;
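tcp_common_congestion_exp is the multiplicative-decrease helper shared by Reno and CUBIC: ssthresh becomes beta * min(snd_wnd, snd_cwnd), rounded down to whole segments (the full function also floors it at two segments), cwnd collapses to the new ssthresh, snd_recover marks the recovery point, and CWR is signalled when ECN is in use. A worked example of the arithmetic, assuming the conventional Reno factor of 1/2 (betaa = 1, betab = 2) and a 1460-byte segment size:

    #include <stdio.h>

    int
    main(void)
    {
        /* Assumed example values, not taken from the source. */
        unsigned long snd_wnd = 65535, snd_cwnd = 29200, t_segsz = 1460;
        unsigned long betaa = 1, betab = 2;    /* Reno: beta = 1/2 */

        unsigned long win = (snd_wnd < snd_cwnd ? snd_wnd : snd_cwnd)
            * betaa / betab / t_segsz;
        if (win < 2)        /* keep at least two segments' worth */
            win = 2;

        /* 29200 * 1/2 = 14600 bytes -> 10 segments -> ssthresh = 14600 */
        printf("ssthresh = %lu bytes (%lu segments)\n", win * t_segsz, win);
        return 0;
    }
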
458 tcp_reno_congestion_exp(struct tcpcb *tp)
461 tcp_common_congestion_exp(tp, RENO_BETAA, RENO_BETAB);
465 tcp_reno_do_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
481 tcp_seq onxt = tp->snd_nxt;
483 tp->t_partialacks = 0;
484 TCP_TIMER_DISARM(tp, TCPT_REXMT);
485 tp->t_rtttime = 0;
486 if (TCP_SACK_ENABLED(tp)) {
487 tp->t_dupacks = tcprexmtthresh;
488 tp->sack_newdata = tp->snd_nxt;
489 tp->snd_cwnd = tp->t_segsz;
490 (void) tcp_output(tp);
493 tp->snd_nxt = th->th_ack;
494 tp->snd_cwnd = tp->t_segsz;
495 (void) tcp_output(tp);
496 tp->snd_cwnd = tp->snd_ssthresh + tp->t_segsz * tp->t_dupacks;
497 if (SEQ_GT(onxt, tp->snd_nxt))
498 tp->snd_nxt = onxt;
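tcp_reno_do_fast_retransmit is the common fast-retransmit body: the retransmit timer and the pending RTT sample are cancelled, snd_nxt is rewound to the ACKed sequence, cwnd is forced to a single segment so the next tcp_output() emits exactly the presumed-lost segment, and cwnd is then inflated to ssthresh plus one segment per duplicate ACK before snd_nxt is restored (with SACK enabled, the branch above records sack_newdata and lets SACK drive the retransmissions instead). An illustration of the inflation step, with assumed values and the usual duplicate-ACK threshold of three:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long t_segsz = 1460;
        unsigned long snd_ssthresh = 14600;  /* from the halving step above */
        unsigned long t_dupacks = 3;         /* tcprexmtthresh, usually 3 */

        unsigned long cwnd_rexmit = t_segsz; /* window while retransmitting */
        unsigned long cwnd_inflated = snd_ssthresh + t_segsz * t_dupacks;

        /* Prints: retransmit with cwnd=1460, then inflate to 18980 */
        printf("retransmit with cwnd=%lu, then inflate to %lu\n",
            cwnd_rexmit, cwnd_inflated);
        return 0;
    }
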
504 tcp_reno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
515 tcp_reno_congestion_exp(tp);
516 return tcp_reno_do_fast_retransmit(tp, th);
520 tcp_reno_slow_retransmit(struct tcpcb *tp)
549 win = ulmin(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_segsz;
553 tp->snd_cwnd = tp->t_segsz;
554 tp->snd_ssthresh = win * tp->t_segsz;
555 tp->t_partialacks = -1;
556 tp->t_dupacks = 0;
557 tp->t_bytes_acked = 0;
559 if (TCP_ECN_ALLOWED(tp))
560 tp->t_flags |= TF_ECN_SND_CWR;
564 tcp_reno_fast_retransmit_newack(struct tcpcb *tp,
567 if (tp->t_partialacks < 0) {
572 tp->t_dupacks = 0;
578 if (tp->snd_cwnd > tp->snd_ssthresh)
579 tp->snd_cwnd = tp->snd_ssthresh;
580 tp->t_partialacks = -1;
581 tp->t_dupacks = 0;
582 tp->t_bytes_acked = 0;
583 if (TCP_SACK_ENABLED(tp) && SEQ_GT(th->th_ack, tp->snd_fack))
584 tp->snd_fack = th->th_ack;
589 tcp_reno_newack(struct tcpcb *tp, const struct tcphdr *th)
595 u_int cw = tp->snd_cwnd;
596 u_int incr = tp->t_segsz;
604 int acked = th->th_ack - tp->snd_una;
606 if (cw >= tp->snd_ssthresh) {
607 tp->t_bytes_acked += acked;
608 if (tp->t_bytes_acked >= cw) {
610 tp->t_bytes_acked -= cw;
625 tp->snd_nxt != tp->snd_max) ? incr : incr * 2;
637 if (cw >= tp->snd_ssthresh) {
642 tp->snd_cwnd = uimin(cw + incr, TCP_MAXWIN << tp->snd_scale);
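tcp_reno_newack grows the window on new ACKs using byte counting (RFC 3465 style): above ssthresh the newly acknowledged bytes accumulate in t_bytes_acked and cwnd gains one segment only once a full window's worth has been acknowledged; below ssthresh it grows by incr per ACK (the matched fragment suggests incr can be doubled under an aggressive setting); the result is capped at TCP_MAXWIN << snd_scale. A simplified standalone sketch of that growth rule; struct cc_state and cc_newack are invented names:

    #include <stdint.h>

    struct cc_state {
        uint32_t cwnd;        /* congestion window, bytes */
        uint32_t ssthresh;    /* slow start threshold, bytes */
        uint32_t bytes_acked; /* byte-counting accumulator */
        uint32_t segsz;       /* segment size, bytes */
        uint32_t cwnd_max;    /* e.g. TCP_MAXWIN << snd_scale */
    };

    static void
    cc_newack(struct cc_state *cc, uint32_t acked)
    {
        uint32_t incr = cc->segsz;

        if (cc->cwnd >= cc->ssthresh) {
            /* Congestion avoidance: one segment of growth per full
             * window of acknowledged data. */
            cc->bytes_acked += acked;
            if (cc->bytes_acked >= cc->cwnd)
                cc->bytes_acked -= cc->cwnd;
            else
                incr = 0;
        }
        /* Slow start: one segment per ACK (the real code may use two
         * segments with aggressive byte counting). */
        if (cc->cwnd + incr < cc->cwnd_max)
            cc->cwnd += incr;
        else
            cc->cwnd = cc->cwnd_max;
    }
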
657 tcp_newreno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
660 if (SEQ_LT(th->th_ack, tp->snd_high)) {
665 tp->t_dupacks = 0;
671 return tcp_reno_fast_retransmit(tp, th);
679 tcp_newreno_fast_retransmit_newack(struct tcpcb *tp, const struct tcphdr *th)
681 if (tp->t_partialacks < 0) {
686 tp->t_dupacks = 0;
687 } else if (SEQ_LT(th->th_ack, tp->snd_recover)) {
693 tcp_seq onxt = tp->snd_nxt;
694 u_long ocwnd = tp->snd_cwnd;
703 tp->t_partialacks++;
704 TCP_TIMER_DISARM(tp, TCPT_REXMT);
705 tp->t_rtttime = 0;
707 if (TCP_SACK_ENABLED(tp)) {
723 if (((th->th_ack - tp->snd_una) / tp->t_segsz) > 2)
725 (void)tcp_sack_output(tp, &sack_bytes_rxmt);
726 tp->snd_cwnd = sack_bytes_rxmt +
727 (tp->snd_nxt - tp->sack_newdata) +
728 sack_num_segs * tp->t_segsz;
729 tp->t_flags |= TF_ACKNOW;
730 (void) tcp_output(tp);
732 tp->snd_nxt = th->th_ack;
737 tp->snd_cwnd = tp->t_segsz + (th->th_ack - tp->snd_una);
738 (void) tcp_output(tp);
739 tp->snd_cwnd = ocwnd;
740 if (SEQ_GT(onxt, tp->snd_nxt))
741 tp->snd_nxt = onxt;
744 * tp->snd_una not updated yet.
746 tp->snd_cwnd -= (th->th_ack - tp->snd_una -
747 tp->t_segsz);
759 if (SEQ_SUB(tp->snd_max, th->th_ack) < tp->snd_ssthresh)
760 tp->snd_cwnd = SEQ_SUB(tp->snd_max, th->th_ack)
761 + tp->t_segsz;
763 tp->snd_cwnd = tp->snd_ssthresh;
764 tp->t_partialacks = -1;
765 tp->t_dupacks = 0;
766 tp->t_bytes_acked = 0;
767 if (TCP_SACK_ENABLED(tp) && SEQ_GT(th->th_ack, tp->snd_fack))
768 tp->snd_fack = th->th_ack;
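tcp_newreno_fast_retransmit_newack handles ACKs that arrive during recovery. A partial ACK (one that advances snd_una but stays below snd_recover) triggers a retransmission of the next missing segment with a temporarily clamped window, after which the restored cwnd is deflated by the amount of newly acknowledged data less one segment, in the spirit of RFC 6582; once the ACK covers snd_recover, cwnd drops to ssthresh (or to the outstanding data plus one segment, if that is smaller) and the recovery state is cleared. The deflation arithmetic with assumed numbers:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long t_segsz = 1460;
        unsigned long snd_cwnd = 18980;   /* inflated recovery window */
        unsigned long acked = 4380;       /* th_ack - snd_una, 3 segments */

        /* Retransmit one segment with a window of acked + 1 segment ... */
        unsigned long rexmit_cwnd = t_segsz + acked;         /* 5840 */
        /* ... then deflate the restored window by the data now ACKed,
         * keeping one segment of headroom for the retransmission. */
        snd_cwnd -= acked - t_segsz;       /* 18980 - 2920 = 16060 */

        printf("rexmit_cwnd=%lu, deflated cwnd=%lu\n", rexmit_cwnd, snd_cwnd);
        return 0;
    }
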
773 tcp_newreno_newack(struct tcpcb *tp, const struct tcphdr *th)
780 if (tp->t_partialacks < 0)
781 tcp_reno_newack(tp, th);
798 static void tcp_cubic_update_ctime(struct tcpcb *tp);
808 tcp_cubic_update_ctime(struct tcpcb *tp)
813 tp->snd_cubic_ctime = now_timeval.tv_sec * 1000 +
821 tcp_cubic_diff_ctime(struct tcpcb *tp)
827 tp->snd_cubic_ctime;
865 tcp_cubic_getW(struct tcpcb *tp, uint32_t ms_elapsed, uint32_t rtt)
871 K = tcp_cubic_cbrt(tp->snd_cubic_wmax / CUBIC_BETAB *
877 return CUBIC_CA * tK3 / CUBIC_CB + tp->snd_cubic_wmax;
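tcp_cubic_getW evaluates the CUBIC window curve from the paper, W(t) = C*(t - K)^3 + Wmax, where K is the time the curve needs to climb back to Wmax after the window was cut to beta*Wmax, i.e. the cube root of Wmax*(1 - beta)/C; the kernel does this in fixed point via CUBIC_CA/CUBIC_CB and tcp_cubic_cbrt. A floating-point restatement for illustration only, with C = 0.4 and beta = 0.8 assumed:

    #include <math.h>

    /* Illustrative only: the kernel works in fixed point. wmax is in
     * segments and t is seconds since the last congestion event; C and
     * beta are assumed constants. */
    static double
    cubic_W(double wmax, double t)
    {
        const double C = 0.4, beta = 0.8;
        double K = cbrt(wmax * (1.0 - beta) / C);  /* time to regain wmax */
        double d = t - K;

        return C * d * d * d + wmax;
    }

At t = 0 this yields beta*wmax and at t = K it returns exactly wmax, which is where the concave-then-convex probing around the old maximum comes from.
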
881 tcp_cubic_congestion_exp(struct tcpcb *tp)
887 tcp_cubic_update_ctime(tp);
890 if (tp->snd_cubic_wmax < tp->snd_cubic_wmax_last) {
891 tp->snd_cubic_wmax_last = tp->snd_cubic_wmax;
892 tp->snd_cubic_wmax = tp->snd_cubic_wmax / 2 +
893 tp->snd_cubic_wmax * CUBIC_BETAA / CUBIC_BETAB / 2;
895 tp->snd_cubic_wmax_last = tp->snd_cubic_wmax;
896 tp->snd_cubic_wmax = tp->snd_cwnd;
899 tp->snd_cubic_wmax = uimax(tp->t_segsz, tp->snd_cubic_wmax);
902 tcp_common_congestion_exp(tp, CUBIC_BETAA, CUBIC_BETAB);
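tcp_cubic_congestion_exp records the loss point for the cubic curve and applies fast convergence: if the loss arrived before the previous peak was regained (wmax < wmax_last), the remembered peak is shrunk to (1 + beta)/2 of itself so that released bandwidth goes to competing flows; otherwise the current cwnd simply becomes the new wmax. It then falls through to the common multiplicative decrease. A worked example, assuming beta = CUBIC_BETAA/CUBIC_BETAB = 0.8 and byte-valued windows:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long wmax = 20000, wmax_last = 30000, cwnd = 20000;

        if (wmax < wmax_last) {
            /* Loss hit before the previous peak was reached again:
             * remember this peak, then shrink it to (1 + beta)/2. */
            wmax_last = wmax;
            wmax = wmax / 2 + wmax * 8 / 10 / 2;  /* 10000 + 8000 = 18000 */
        } else {
            wmax_last = wmax;
            wmax = cwnd;
        }
        printf("wmax=%lu wmax_last=%lu\n", wmax, wmax_last); /* 18000, 20000 */
        return 0;
    }
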
906 tcp_cubic_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
909 if (SEQ_LT(th->th_ack, tp->snd_high)) {
911 tp->t_dupacks = 0;
918 tcp_cubic_congestion_exp(tp);
921 return tcp_reno_do_fast_retransmit(tp, th);
925 tcp_cubic_newack(struct tcpcb *tp, const struct tcphdr *th)
931 if (tp->snd_cwnd > tp->snd_ssthresh && tp->t_partialacks < 0 &&
937 (rtt = (tp->t_srtt << 5) / PR_SLOWHZ) > 0) {
938 ms_elapsed = tcp_cubic_diff_ctime(tp);
941 w_tcp = tp->snd_cubic_wmax * CUBIC_BETAA / CUBIC_BETAB +
944 if (tp->snd_cwnd > w_tcp) {
946 tp->snd_cwnd += (tcp_cubic_getW(tp, ms_elapsed, rtt) -
947 tp->snd_cwnd) / tp->snd_cwnd;
950 tp->snd_cwnd = w_tcp;
954 tp->snd_cwnd = uimax(tp->snd_cwnd, tp->t_segsz);
955 tp->snd_cwnd = uimin(tp->snd_cwnd, TCP_MAXWIN << tp->snd_scale);
958 tcp_newreno_newack(tp, th);
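tcp_cubic_newack only runs the cubic update during congestion avoidance (cwnd above ssthresh, not in recovery, and with a usable smoothed RTT). It first estimates w_tcp, the window a plain AIMD flow would have reached since the last congestion event; if cwnd is at or below that, cwnd is set to w_tcp (the TCP-friendly region), otherwise cwnd creeps toward the cubic curve by (W(t) - cwnd)/cwnd per ACK. The result is clamped between one segment and TCP_MAXWIN << snd_scale, and the ACK is then passed on to tcp_newreno_newack for the recovery bookkeeping. A simplified per-ACK update capturing that shape (the names and the guard against W(t) < cwnd are mine):

    /* Simplified per-ACK CUBIC window update; all values in bytes. */
    static unsigned long
    cubic_newack_cwnd(unsigned long cwnd, unsigned long w_tcp,
        unsigned long w_t, unsigned long segsz, unsigned long cwnd_max)
    {
        if (cwnd > w_tcp) {
            /* Cubic region: approach the curve W(t) in small steps. */
            if (w_t > cwnd)
                cwnd += (w_t - cwnd) / cwnd;
        } else {
            /* TCP-friendly region: never do worse than plain AIMD. */
            cwnd = w_tcp;
        }
        if (cwnd < segsz)
            cwnd = segsz;
        if (cwnd > cwnd_max)
            cwnd = cwnd_max;
        return cwnd;
    }
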
963 tcp_cubic_slow_retransmit(struct tcpcb *tp)
967 tcp_cubic_congestion_exp(tp);
970 tp->snd_cwnd = tp->t_segsz;
971 tp->t_partialacks = -1;
972 tp->t_dupacks = 0;
973 tp->t_bytes_acked = 0;
975 if (TCP_ECN_ALLOWED(tp))
976 tp->t_flags |= TF_ECN_SND_CWR;