Home | History | Annotate | Line # | Download | only in netinet
tcp_congctl.c revision 1.20
      1  1.20     pooka /*	$NetBSD: tcp_congctl.c,v 1.20 2015/08/24 22:21:26 pooka Exp $	*/
      2   1.1    rpaulo 
      3   1.1    rpaulo /*-
      4   1.1    rpaulo  * Copyright (c) 1997, 1998, 1999, 2001, 2005, 2006 The NetBSD Foundation, Inc.
      5   1.1    rpaulo  * All rights reserved.
      6   1.1    rpaulo  *
      7   1.1    rpaulo  * This code is derived from software contributed to The NetBSD Foundation
      8   1.1    rpaulo  * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
      9   1.1    rpaulo  * Facility, NASA Ames Research Center.
     10   1.1    rpaulo  * This code is derived from software contributed to The NetBSD Foundation
     11   1.1    rpaulo  * by Charles M. Hannum.
     12   1.1    rpaulo  * This code is derived from software contributed to The NetBSD Foundation
     13   1.1    rpaulo  * by Rui Paulo.
     14   1.1    rpaulo  *
     15   1.1    rpaulo  * Redistribution and use in source and binary forms, with or without
     16   1.1    rpaulo  * modification, are permitted provided that the following conditions
     17   1.1    rpaulo  * are met:
     18   1.1    rpaulo  * 1. Redistributions of source code must retain the above copyright
     19   1.1    rpaulo  *    notice, this list of conditions and the following disclaimer.
     20   1.1    rpaulo  * 2. Redistributions in binary form must reproduce the above copyright
     21   1.1    rpaulo  *    notice, this list of conditions and the following disclaimer in the
     22   1.1    rpaulo  *    documentation and/or other materials provided with the distribution.
     23   1.1    rpaulo  *
     24   1.1    rpaulo  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     25   1.1    rpaulo  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     26   1.1    rpaulo  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     27   1.1    rpaulo  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     28   1.1    rpaulo  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     29   1.1    rpaulo  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     30   1.1    rpaulo  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     31   1.1    rpaulo  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     32   1.1    rpaulo  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     33   1.1    rpaulo  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     34   1.1    rpaulo  * POSSIBILITY OF SUCH DAMAGE.
     35   1.1    rpaulo  */
     36   1.1    rpaulo 
     37   1.1    rpaulo /*
     38   1.1    rpaulo  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
     39   1.1    rpaulo  * All rights reserved.
     40   1.1    rpaulo  *
     41   1.1    rpaulo  * Redistribution and use in source and binary forms, with or without
     42   1.1    rpaulo  * modification, are permitted provided that the following conditions
     43   1.1    rpaulo  * are met:
     44   1.1    rpaulo  * 1. Redistributions of source code must retain the above copyright
     45   1.1    rpaulo  *    notice, this list of conditions and the following disclaimer.
     46   1.1    rpaulo  * 2. Redistributions in binary form must reproduce the above copyright
     47   1.1    rpaulo  *    notice, this list of conditions and the following disclaimer in the
     48   1.1    rpaulo  *    documentation and/or other materials provided with the distribution.
     49   1.1    rpaulo  * 3. Neither the name of the project nor the names of its contributors
     50   1.1    rpaulo  *    may be used to endorse or promote products derived from this software
     51   1.1    rpaulo  *    without specific prior written permission.
     52   1.1    rpaulo  *
     53   1.1    rpaulo  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
     54   1.1    rpaulo  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55   1.1    rpaulo  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56   1.1    rpaulo  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
     57   1.1    rpaulo  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58   1.1    rpaulo  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59   1.1    rpaulo  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60   1.1    rpaulo  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61   1.1    rpaulo  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62   1.1    rpaulo  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63   1.1    rpaulo  * SUCH DAMAGE.
     64   1.1    rpaulo  */
     65   1.1    rpaulo 
     66   1.1    rpaulo /*
     67   1.1    rpaulo  *      @(#)COPYRIGHT   1.1 (NRL) 17 January 1995
     68   1.1    rpaulo  *
     69   1.1    rpaulo  * NRL grants permission for redistribution and use in source and binary
     70   1.1    rpaulo  * forms, with or without modification, of the software and documentation
     71   1.1    rpaulo  * created at NRL provided that the following conditions are met:
     72   1.1    rpaulo  *
     73   1.1    rpaulo  * 1. Redistributions of source code must retain the above copyright
     74   1.1    rpaulo  *    notice, this list of conditions and the following disclaimer.
     75   1.1    rpaulo  * 2. Redistributions in binary form must reproduce the above copyright
     76   1.1    rpaulo  *    notice, this list of conditions and the following disclaimer in the
     77   1.1    rpaulo  *    documentation and/or other materials provided with the distribution.
     78   1.1    rpaulo  * 3. All advertising materials mentioning features or use of this software
     79   1.1    rpaulo  *    must display the following acknowledgements:
     80   1.1    rpaulo  *      This product includes software developed by the University of
     81   1.1    rpaulo  *      California, Berkeley and its contributors.
     82   1.1    rpaulo  *      This product includes software developed at the Information
     83   1.1    rpaulo  *      Technology Division, US Naval Research Laboratory.
     84   1.1    rpaulo  * 4. Neither the name of the NRL nor the names of its contributors
     85   1.1    rpaulo  *    may be used to endorse or promote products derived from this software
     86   1.1    rpaulo  *    without specific prior written permission.
     87   1.1    rpaulo  *
     88   1.1    rpaulo  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
     89   1.1    rpaulo  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     90   1.1    rpaulo  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
     91   1.1    rpaulo  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
     92   1.1    rpaulo  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
     93   1.1    rpaulo  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     94   1.1    rpaulo  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
     95   1.1    rpaulo  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
     96   1.1    rpaulo  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
     97   1.1    rpaulo  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     98   1.1    rpaulo  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     99   1.1    rpaulo  *
    100   1.1    rpaulo  * The views and conclusions contained in the software and documentation
    101   1.1    rpaulo  * are those of the authors and should not be interpreted as representing
    102   1.1    rpaulo  * official policies, either expressed or implied, of the US Naval
    103   1.1    rpaulo  * Research Laboratory (NRL).
    104   1.1    rpaulo  */
    105   1.1    rpaulo 
    106   1.1    rpaulo /*
    107   1.1    rpaulo  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
    108   1.1    rpaulo  *	The Regents of the University of California.  All rights reserved.
    109   1.1    rpaulo  *
    110   1.1    rpaulo  * Redistribution and use in source and binary forms, with or without
    111   1.1    rpaulo  * modification, are permitted provided that the following conditions
    112   1.1    rpaulo  * are met:
    113   1.1    rpaulo  * 1. Redistributions of source code must retain the above copyright
    114   1.1    rpaulo  *    notice, this list of conditions and the following disclaimer.
    115   1.1    rpaulo  * 2. Redistributions in binary form must reproduce the above copyright
    116   1.1    rpaulo  *    notice, this list of conditions and the following disclaimer in the
    117   1.1    rpaulo  *    documentation and/or other materials provided with the distribution.
    118   1.1    rpaulo  * 3. Neither the name of the University nor the names of its contributors
    119   1.1    rpaulo  *    may be used to endorse or promote products derived from this software
    120   1.1    rpaulo  *    without specific prior written permission.
    121   1.1    rpaulo  *
    122   1.1    rpaulo  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
    123   1.1    rpaulo  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    124   1.1    rpaulo  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    125   1.1    rpaulo  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
    126   1.1    rpaulo  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    127   1.1    rpaulo  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
    128   1.1    rpaulo  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    129   1.1    rpaulo  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    130   1.1    rpaulo  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    131   1.1    rpaulo  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    132   1.1    rpaulo  * SUCH DAMAGE.
    133   1.1    rpaulo  *
    134   1.1    rpaulo  *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
    135   1.1    rpaulo  */
    136   1.1    rpaulo 
    137   1.1    rpaulo #include <sys/cdefs.h>
    138  1.20     pooka __KERNEL_RCSID(0, "$NetBSD: tcp_congctl.c,v 1.20 2015/08/24 22:21:26 pooka Exp $");
    139   1.1    rpaulo 
    140  1.20     pooka #ifdef _KERNEL_OPT
    141   1.1    rpaulo #include "opt_inet.h"
    142   1.1    rpaulo #include "opt_tcp_debug.h"
    143   1.1    rpaulo #include "opt_tcp_congctl.h"
    144  1.20     pooka #endif
    145   1.1    rpaulo 
    146   1.1    rpaulo #include <sys/param.h>
    147   1.1    rpaulo #include <sys/systm.h>
    148   1.1    rpaulo #include <sys/malloc.h>
    149   1.1    rpaulo #include <sys/mbuf.h>
    150   1.1    rpaulo #include <sys/protosw.h>
    151   1.1    rpaulo #include <sys/socket.h>
    152   1.1    rpaulo #include <sys/socketvar.h>
    153   1.1    rpaulo #include <sys/errno.h>
    154   1.1    rpaulo #include <sys/syslog.h>
    155   1.1    rpaulo #include <sys/pool.h>
    156   1.1    rpaulo #include <sys/domain.h>
    157   1.1    rpaulo #include <sys/kernel.h>
    158  1.13   xtraeme #include <sys/mutex.h>
    159   1.1    rpaulo 
    160   1.1    rpaulo #include <net/if.h>
    161   1.1    rpaulo #include <net/route.h>
    162   1.1    rpaulo 
    163   1.1    rpaulo #include <netinet/in.h>
    164   1.1    rpaulo #include <netinet/in_systm.h>
    165   1.1    rpaulo #include <netinet/ip.h>
    166   1.1    rpaulo #include <netinet/in_pcb.h>
    167   1.1    rpaulo #include <netinet/in_var.h>
    168   1.1    rpaulo #include <netinet/ip_var.h>
    169   1.1    rpaulo 
    170   1.1    rpaulo #ifdef INET6
    171   1.1    rpaulo #ifndef INET
    172   1.1    rpaulo #include <netinet/in.h>
    173   1.1    rpaulo #endif
    174   1.1    rpaulo #include <netinet/ip6.h>
    175   1.1    rpaulo #include <netinet6/ip6_var.h>
    176   1.1    rpaulo #include <netinet6/in6_pcb.h>
    177   1.1    rpaulo #include <netinet6/ip6_var.h>
    178   1.1    rpaulo #include <netinet6/in6_var.h>
    179   1.1    rpaulo #include <netinet/icmp6.h>
    180   1.1    rpaulo #include <netinet6/nd6.h>
    181   1.1    rpaulo #endif
    182   1.1    rpaulo 
    183   1.1    rpaulo #include <netinet/tcp.h>
    184   1.1    rpaulo #include <netinet/tcp_fsm.h>
    185   1.1    rpaulo #include <netinet/tcp_seq.h>
    186   1.1    rpaulo #include <netinet/tcp_timer.h>
    187   1.1    rpaulo #include <netinet/tcp_var.h>
    188   1.1    rpaulo #include <netinet/tcpip.h>
    189   1.1    rpaulo #include <netinet/tcp_congctl.h>
    190   1.1    rpaulo #ifdef TCP_DEBUG
    191   1.1    rpaulo #include <netinet/tcp_debug.h>
    192   1.1    rpaulo #endif
    193   1.1    rpaulo 
    194   1.1    rpaulo /*
    195   1.1    rpaulo  * TODO:
    196   1.1    rpaulo  *   consider separating the actual implementations in another file.
    197   1.1    rpaulo  */
    198   1.1    rpaulo 
    199  1.18    kefren static void tcp_common_congestion_exp(struct tcpcb *, int, int);
    200  1.18    kefren 
    201  1.18    kefren static int  tcp_reno_do_fast_retransmit(struct tcpcb *, const struct tcphdr *);
    202  1.11      yamt static int  tcp_reno_fast_retransmit(struct tcpcb *, const struct tcphdr *);
    203   1.1    rpaulo static void tcp_reno_slow_retransmit(struct tcpcb *);
    204  1.11      yamt static void tcp_reno_fast_retransmit_newack(struct tcpcb *,
    205  1.11      yamt     const struct tcphdr *);
    206  1.11      yamt static void tcp_reno_newack(struct tcpcb *, const struct tcphdr *);
    207   1.6    rpaulo static void tcp_reno_congestion_exp(struct tcpcb *tp);
    208   1.1    rpaulo 
    209  1.11      yamt static int  tcp_newreno_fast_retransmit(struct tcpcb *, const struct tcphdr *);
    210   1.1    rpaulo static void tcp_newreno_fast_retransmit_newack(struct tcpcb *,
    211  1.11      yamt 	const struct tcphdr *);
    212  1.11      yamt static void tcp_newreno_newack(struct tcpcb *, const struct tcphdr *);
    213   1.1    rpaulo 
    214  1.18    kefren static int tcp_cubic_fast_retransmit(struct tcpcb *, const struct tcphdr *);
    215  1.18    kefren static void tcp_cubic_slow_retransmit(struct tcpcb *tp);
    216  1.18    kefren static void tcp_cubic_newack(struct tcpcb *, const struct tcphdr *);
    217  1.18    kefren static void tcp_cubic_congestion_exp(struct tcpcb *);
    218   1.1    rpaulo 
    219   1.1    rpaulo static void tcp_congctl_fillnames(void);
    220   1.1    rpaulo 
    221   1.1    rpaulo extern int tcprexmtthresh;
    222   1.1    rpaulo 
    223   1.1    rpaulo MALLOC_DEFINE(M_TCPCONGCTL, "tcpcongctl", "TCP congestion control structures");
    224   1.1    rpaulo 
    225  1.14      matt /* currently selected global congestion control */
    226  1.14      matt char tcp_congctl_global_name[TCPCC_MAXLEN];
    227  1.14      matt 
    228  1.14      matt /* available global congestion control algorithms */
    229  1.14      matt char tcp_congctl_avail[10 * TCPCC_MAXLEN];
    230  1.14      matt 
    231   1.1    rpaulo /*
    232   1.1    rpaulo  * Used to list the available congestion control algorithms.
    233   1.1    rpaulo  */
    234  1.14      matt TAILQ_HEAD(, tcp_congctlent) tcp_congctlhd =
    235  1.14      matt     TAILQ_HEAD_INITIALIZER(tcp_congctlhd);
    236  1.14      matt 
    237  1.14      matt static struct tcp_congctlent * tcp_congctl_global;
    238   1.1    rpaulo 
    239  1.13   xtraeme static kmutex_t tcp_congctl_mtx;
    240   1.1    rpaulo 
    241   1.1    rpaulo void
    242   1.1    rpaulo tcp_congctl_init(void)
    243   1.1    rpaulo {
    244  1.17    martin 	int r __diagused;
    245   1.1    rpaulo 
    246  1.13   xtraeme 	mutex_init(&tcp_congctl_mtx, MUTEX_DEFAULT, IPL_NONE);
    247   1.1    rpaulo 
    248   1.1    rpaulo 	/* Base algorithms. */
    249   1.1    rpaulo 	r = tcp_congctl_register("reno", &tcp_reno_ctl);
    250   1.1    rpaulo 	KASSERT(r == 0);
    251   1.1    rpaulo 	r = tcp_congctl_register("newreno", &tcp_newreno_ctl);
    252   1.1    rpaulo 	KASSERT(r == 0);
    253  1.18    kefren 	r = tcp_congctl_register("cubic", &tcp_cubic_ctl);
    254  1.18    kefren 	KASSERT(r == 0);
    255   1.1    rpaulo 
    256   1.1    rpaulo 	/* NewReno is the default. */
    257   1.1    rpaulo #ifndef TCP_CONGCTL_DEFAULT
    258   1.1    rpaulo #define TCP_CONGCTL_DEFAULT "newreno"
    259   1.1    rpaulo #endif
    260   1.1    rpaulo 
    261   1.1    rpaulo 	r = tcp_congctl_select(NULL, TCP_CONGCTL_DEFAULT);
    262   1.1    rpaulo 	KASSERT(r == 0);
    263   1.1    rpaulo }
    264   1.1    rpaulo 
    265   1.1    rpaulo /*
    266   1.1    rpaulo  * Register a congestion algorithm and select it if we have none.
    267   1.1    rpaulo  */
    268   1.1    rpaulo int
    269  1.14      matt tcp_congctl_register(const char *name, const struct tcp_congctl *tcc)
    270   1.1    rpaulo {
    271   1.1    rpaulo 	struct tcp_congctlent *ntcc, *tccp;
    272   1.1    rpaulo 
    273   1.1    rpaulo 	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent)
    274   1.1    rpaulo 		if (!strcmp(name, tccp->congctl_name)) {
    275   1.1    rpaulo 			/* name already registered */
    276   1.1    rpaulo 			return EEXIST;
    277   1.1    rpaulo 		}
    278   1.1    rpaulo 
    279  1.14      matt 	ntcc = malloc(sizeof(*ntcc), M_TCPCONGCTL, M_WAITOK|M_ZERO);
    280   1.1    rpaulo 
    281   1.1    rpaulo 	strlcpy(ntcc->congctl_name, name, sizeof(ntcc->congctl_name) - 1);
    282   1.1    rpaulo 	ntcc->congctl_ctl = tcc;
    283   1.1    rpaulo 
    284   1.1    rpaulo 	TAILQ_INSERT_TAIL(&tcp_congctlhd, ntcc, congctl_ent);
    285   1.1    rpaulo 	tcp_congctl_fillnames();
    286   1.1    rpaulo 
    287   1.1    rpaulo 	if (TAILQ_FIRST(&tcp_congctlhd) == ntcc)
    288   1.1    rpaulo 		tcp_congctl_select(NULL, name);
    289   1.1    rpaulo 
    290   1.1    rpaulo 	return 0;
    291   1.1    rpaulo }
    292   1.1    rpaulo 
    293   1.1    rpaulo int
    294   1.1    rpaulo tcp_congctl_unregister(const char *name)
    295   1.1    rpaulo {
    296   1.1    rpaulo 	struct tcp_congctlent *tccp, *rtccp;
    297   1.1    rpaulo 	unsigned int size;
    298   1.1    rpaulo 
    299   1.1    rpaulo 	rtccp = NULL;
    300   1.1    rpaulo 	size = 0;
    301   1.1    rpaulo 	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
    302   1.1    rpaulo 		if (!strcmp(name, tccp->congctl_name))
    303   1.1    rpaulo 			rtccp = tccp;
    304   1.1    rpaulo 		size++;
    305   1.1    rpaulo 	}
    306   1.1    rpaulo 
    307   1.1    rpaulo 	if (!rtccp)
    308   1.1    rpaulo 		return ENOENT;
    309   1.1    rpaulo 
    310  1.14      matt 	if (size <= 1 || tcp_congctl_global == rtccp || rtccp->congctl_refcnt)
    311   1.1    rpaulo 		return EBUSY;
    312   1.1    rpaulo 
    313   1.1    rpaulo 	TAILQ_REMOVE(&tcp_congctlhd, rtccp, congctl_ent);
    314   1.1    rpaulo 	free(rtccp, M_TCPCONGCTL);
    315   1.1    rpaulo 	tcp_congctl_fillnames();
    316   1.1    rpaulo 
    317   1.1    rpaulo 	return 0;
    318   1.1    rpaulo }
    319   1.1    rpaulo 
    320   1.1    rpaulo /*
    321   1.1    rpaulo  * Select a congestion algorithm by name.
    322   1.1    rpaulo  */
    323   1.1    rpaulo int
    324   1.1    rpaulo tcp_congctl_select(struct tcpcb *tp, const char *name)
    325   1.1    rpaulo {
    326  1.14      matt 	struct tcp_congctlent *tccp, *old_tccp, *new_tccp;
    327  1.14      matt 	bool old_found, new_found;
    328   1.1    rpaulo 
    329   1.1    rpaulo 	KASSERT(name);
    330   1.1    rpaulo 
    331  1.14      matt 	old_found = (tp == NULL || tp->t_congctl == NULL);
    332  1.14      matt 	old_tccp = NULL;
    333  1.14      matt 	new_found = false;
    334  1.14      matt 	new_tccp = NULL;
    335  1.14      matt 
    336  1.14      matt 	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
    337  1.14      matt 		if (!old_found && tccp->congctl_ctl == tp->t_congctl) {
    338  1.14      matt 			old_tccp = tccp;
    339  1.14      matt 			old_found = true;
    340  1.14      matt 		}
    341  1.14      matt 
    342  1.14      matt 		if (!new_found && !strcmp(name, tccp->congctl_name)) {
    343  1.14      matt 			new_tccp = tccp;
    344  1.14      matt 			new_found = true;
    345  1.14      matt 		}
    346  1.14      matt 
    347  1.14      matt 		if (new_found && old_found) {
    348   1.1    rpaulo 			if (tp) {
    349  1.13   xtraeme 				mutex_enter(&tcp_congctl_mtx);
    350  1.14      matt 				if (old_tccp)
    351  1.14      matt 					old_tccp->congctl_refcnt--;
    352  1.14      matt 				tp->t_congctl = new_tccp->congctl_ctl;
    353  1.14      matt 				new_tccp->congctl_refcnt++;
    354  1.13   xtraeme 				mutex_exit(&tcp_congctl_mtx);
    355   1.1    rpaulo 			} else {
    356  1.14      matt 				tcp_congctl_global = new_tccp;
    357   1.1    rpaulo 				strlcpy(tcp_congctl_global_name,
    358  1.14      matt 				    new_tccp->congctl_name,
    359   1.1    rpaulo 				    sizeof(tcp_congctl_global_name) - 1);
    360   1.1    rpaulo 			}
    361   1.1    rpaulo 			return 0;
    362   1.1    rpaulo 		}
    363  1.14      matt 	}
    364  1.14      matt 
    365  1.14      matt 	return EINVAL;
    366  1.14      matt }
    367  1.14      matt 
    368  1.14      matt void
    369  1.14      matt tcp_congctl_release(struct tcpcb *tp)
    370  1.14      matt {
    371  1.14      matt 	struct tcp_congctlent *tccp;
    372  1.14      matt 
    373  1.14      matt 	KASSERT(tp->t_congctl);
    374   1.1    rpaulo 
    375  1.14      matt 	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
    376  1.14      matt 		if (tccp->congctl_ctl == tp->t_congctl) {
    377  1.14      matt 			tccp->congctl_refcnt--;
    378  1.14      matt 			return;
    379  1.14      matt 		}
    380  1.14      matt 	}
    381   1.1    rpaulo }
    382   1.1    rpaulo 
    383   1.1    rpaulo /*
    384   1.1    rpaulo  * Returns the name of a congestion algorithm.
    385   1.1    rpaulo  */
    386   1.1    rpaulo const char *
    387   1.1    rpaulo tcp_congctl_bystruct(const struct tcp_congctl *tcc)
    388   1.1    rpaulo {
    389   1.1    rpaulo 	struct tcp_congctlent *tccp;
    390   1.1    rpaulo 
    391   1.1    rpaulo 	KASSERT(tcc);
    392   1.1    rpaulo 
    393   1.1    rpaulo 	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent)
    394   1.1    rpaulo 		if (tccp->congctl_ctl == tcc)
    395   1.1    rpaulo 			return tccp->congctl_name;
    396   1.1    rpaulo 
    397   1.1    rpaulo 	return NULL;
    398   1.1    rpaulo }
    399   1.1    rpaulo 
    400   1.1    rpaulo static void
    401   1.1    rpaulo tcp_congctl_fillnames(void)
    402   1.1    rpaulo {
    403   1.1    rpaulo 	struct tcp_congctlent *tccp;
    404   1.1    rpaulo 	const char *delim = " ";
    405   1.1    rpaulo 
    406   1.1    rpaulo 	tcp_congctl_avail[0] = '\0';
    407   1.1    rpaulo 	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
    408   1.1    rpaulo 		strlcat(tcp_congctl_avail, tccp->congctl_name,
    409   1.1    rpaulo 		    sizeof(tcp_congctl_avail) - 1);
    410   1.1    rpaulo 		if (TAILQ_NEXT(tccp, congctl_ent))
    411   1.1    rpaulo 			strlcat(tcp_congctl_avail, delim,
    412   1.1    rpaulo 			    sizeof(tcp_congctl_avail) - 1);
    413   1.1    rpaulo 	}
    414   1.1    rpaulo 
    415   1.1    rpaulo }
    416   1.1    rpaulo 
    417   1.1    rpaulo /* ------------------------------------------------------------------------ */
    418   1.1    rpaulo 
    419   1.6    rpaulo /*
    420  1.18    kefren  * Common stuff
    421   1.6    rpaulo  */
    422  1.18    kefren 
    423  1.18    kefren /* Window reduction (1-beta) for [New]Reno: 0.5 */
    424  1.18    kefren #define RENO_BETAA 1
    425  1.18    kefren #define RENO_BETAB 2
    426  1.18    kefren /* Window reduction (1-beta) for Cubic: 0.8 */
    427  1.18    kefren #define CUBIC_BETAA 4
    428  1.18    kefren #define CUBIC_BETAB 5
    429  1.18    kefren /* Draft Rhee Section 4.1 */
    430  1.18    kefren #define CUBIC_CA 4
    431  1.18    kefren #define CUBIC_CB 10
    432  1.18    kefren 
    433   1.6    rpaulo static void
    434  1.18    kefren tcp_common_congestion_exp(struct tcpcb *tp, int betaa, int betab)
    435   1.1    rpaulo {
    436   1.1    rpaulo 	u_int win;
    437   1.1    rpaulo 
    438   1.1    rpaulo 	/*
    439  1.18    kefren 	 * Reduce the congestion window and the slow start threshold.
    440   1.1    rpaulo 	 */
    441  1.18    kefren 	win = min(tp->snd_wnd, tp->snd_cwnd) * betaa / betab / tp->t_segsz;
    442   1.1    rpaulo 	if (win < 2)
    443   1.1    rpaulo 		win = 2;
    444   1.1    rpaulo 
    445   1.1    rpaulo 	tp->snd_ssthresh = win * tp->t_segsz;
    446   1.1    rpaulo 	tp->snd_recover = tp->snd_max;
    447   1.1    rpaulo 	tp->snd_cwnd = tp->snd_ssthresh;
    448   1.1    rpaulo 
    449   1.7    rpaulo 	/*
    450   1.7    rpaulo 	 * When using TCP ECN, notify the peer that
    451   1.7    rpaulo 	 * we reduced the cwnd.
    452   1.7    rpaulo 	 */
    453   1.1    rpaulo 	if (TCP_ECN_ALLOWED(tp))
    454   1.1    rpaulo 		tp->t_flags |= TF_ECN_SND_CWR;
    455   1.1    rpaulo }
    456   1.1    rpaulo 
    457   1.1    rpaulo 
    458  1.18    kefren /* ------------------------------------------------------------------------ */
    459  1.18    kefren 
    460  1.18    kefren /*
    461  1.18    kefren  * TCP/Reno congestion control.
    462  1.18    kefren  */
    463  1.18    kefren static void
    464  1.18    kefren tcp_reno_congestion_exp(struct tcpcb *tp)
    465  1.18    kefren {
    466  1.18    kefren 
    467  1.18    kefren 	tcp_common_congestion_exp(tp, RENO_BETAA, RENO_BETAB);
    468  1.18    kefren }
    469   1.6    rpaulo 
/*
 * Perform the actual fast retransmit after the window has already been
 * reduced by the caller.  Returns 0 in all paths (the return value is
 * the "drop this segment" indication used by the input path).
 */
static int
tcp_reno_do_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{
	/*
	 * Dup acks mean that packets have left the
	 * network (they're now cached at the receiver)
	 * so bump cwnd by the amount in the receiver
	 * to keep a constant cwnd packets in the
	 * network.
	 *
	 * If we are using TCP/SACK, then enter
	 * Fast Recovery if the receiver SACKs
	 * data that is tcprexmtthresh * MSS
	 * bytes past the last ACKed segment,
	 * irrespective of the number of DupAcks.
	 */

	/* Remember where we were sending so we can resume afterwards. */
	tcp_seq onxt = tp->snd_nxt;

	tp->t_partialacks = 0;
	/* Stop the retransmit timer and invalidate the RTT sample. */
	TCP_TIMER_DISARM(tp, TCPT_REXMT);
	tp->t_rtttime = 0;
	if (TCP_SACK_ENABLED(tp)) {
		/*
		 * SACK path: shrink cwnd to one segment so tcp_output()
		 * retransmits exactly one SACK hole, and mark the start
		 * of the recovery data.
		 */
		tp->t_dupacks = tcprexmtthresh;
		tp->sack_newdata = tp->snd_nxt;
		tp->snd_cwnd = tp->t_segsz;
		(void) tcp_output(tp);
		return 0;
	}
	/*
	 * Non-SACK path: point snd_nxt at the lost segment and force
	 * cwnd to one segment so tcp_output() resends just that one.
	 */
	tp->snd_nxt = th->th_ack;
	tp->snd_cwnd = tp->t_segsz;
	(void) tcp_output(tp);
	/*
	 * Inflate cwnd by one segment per dup ack received (packets
	 * that have left the network), then resume sending where we
	 * left off if the retransmit moved snd_nxt backwards.
	 */
	tp->snd_cwnd = tp->snd_ssthresh + tp->t_segsz * tp->t_dupacks;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;

	return 0;
}
    508   1.1    rpaulo 
/*
 * Reno fast retransmit entry point.
 *
 * We know we're losing at the current window size, so first do
 * congestion avoidance (set ssthresh to half the current window and
 * pull cwnd back to the new ssthresh), then retransmit.
 */
static int
tcp_reno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{

	tcp_reno_congestion_exp(tp);
	return tcp_reno_do_fast_retransmit(tp, th);
}
    524  1.18    kefren 
    525   1.1    rpaulo static void
    526   1.1    rpaulo tcp_reno_slow_retransmit(struct tcpcb *tp)
    527   1.1    rpaulo {
    528   1.1    rpaulo 	u_int win;
    529   1.1    rpaulo 
    530   1.1    rpaulo 	/*
    531   1.1    rpaulo 	 * Close the congestion window down to one segment
    532   1.1    rpaulo 	 * (we'll open it by one segment for each ack we get).
    533   1.1    rpaulo 	 * Since we probably have a window's worth of unacked
    534   1.1    rpaulo 	 * data accumulated, this "slow start" keeps us from
    535   1.1    rpaulo 	 * dumping all that data as back-to-back packets (which
    536   1.1    rpaulo 	 * might overwhelm an intermediate gateway).
    537   1.1    rpaulo 	 *
    538   1.1    rpaulo 	 * There are two phases to the opening: Initially we
    539   1.1    rpaulo 	 * open by one mss on each ack.  This makes the window
    540   1.1    rpaulo 	 * size increase exponentially with time.  If the
    541   1.1    rpaulo 	 * window is larger than the path can handle, this
    542   1.1    rpaulo 	 * exponential growth results in dropped packet(s)
    543   1.1    rpaulo 	 * almost immediately.  To get more time between
    544   1.1    rpaulo 	 * drops but still "push" the network to take advantage
    545   1.1    rpaulo 	 * of improving conditions, we switch from exponential
    546   1.1    rpaulo 	 * to linear window opening at some threshhold size.
    547   1.1    rpaulo 	 * For a threshhold, we use half the current window
    548   1.1    rpaulo 	 * size, truncated to a multiple of the mss.
    549   1.1    rpaulo 	 *
    550   1.1    rpaulo 	 * (the minimum cwnd that will give us exponential
    551   1.1    rpaulo 	 * growth is 2 mss.  We don't allow the threshhold
    552   1.1    rpaulo 	 * to go below this.)
    553   1.1    rpaulo 	 */
    554   1.1    rpaulo 
    555   1.1    rpaulo 	win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_segsz;
    556   1.1    rpaulo 	if (win < 2)
    557   1.1    rpaulo 		win = 2;
    558   1.1    rpaulo 	/* Loss Window MUST be one segment. */
    559   1.1    rpaulo 	tp->snd_cwnd = tp->t_segsz;
    560   1.1    rpaulo 	tp->snd_ssthresh = win * tp->t_segsz;
    561   1.1    rpaulo 	tp->t_partialacks = -1;
    562   1.1    rpaulo 	tp->t_dupacks = 0;
    563   1.8      yamt 	tp->t_bytes_acked = 0;
    564  1.18    kefren 
    565  1.18    kefren 	if (TCP_ECN_ALLOWED(tp))
    566  1.18    kefren 		tp->t_flags |= TF_ECN_SND_CWR;
    567   1.1    rpaulo }
    568   1.1    rpaulo 
    569   1.1    rpaulo static void
    570  1.11      yamt tcp_reno_fast_retransmit_newack(struct tcpcb *tp,
    571  1.12  christos     const struct tcphdr *th)
    572   1.1    rpaulo {
    573   1.1    rpaulo 	if (tp->t_partialacks < 0) {
    574   1.1    rpaulo 		/*
    575   1.1    rpaulo 		 * We were not in fast recovery.  Reset the duplicate ack
    576   1.1    rpaulo 		 * counter.
    577   1.1    rpaulo 		 */
    578   1.1    rpaulo 		tp->t_dupacks = 0;
    579   1.1    rpaulo 	} else {
    580   1.1    rpaulo 		/*
    581   1.1    rpaulo 		 * Clamp the congestion window to the crossover point and
    582   1.1    rpaulo 		 * exit fast recovery.
    583   1.1    rpaulo 		 */
    584   1.1    rpaulo 		if (tp->snd_cwnd > tp->snd_ssthresh)
    585   1.1    rpaulo 			tp->snd_cwnd = tp->snd_ssthresh;
    586   1.1    rpaulo 		tp->t_partialacks = -1;
    587   1.1    rpaulo 		tp->t_dupacks = 0;
    588   1.8      yamt 		tp->t_bytes_acked = 0;
    589  1.18    kefren 		if (TCP_SACK_ENABLED(tp) && SEQ_GT(th->th_ack, tp->snd_fack))
    590  1.18    kefren 			tp->snd_fack = th->th_ack;
    591   1.1    rpaulo 	}
    592   1.1    rpaulo }
    593   1.1    rpaulo 
    594   1.1    rpaulo static void
    595  1.11      yamt tcp_reno_newack(struct tcpcb *tp, const struct tcphdr *th)
    596   1.1    rpaulo {
    597   1.1    rpaulo 	/*
    598   1.1    rpaulo 	 * When new data is acked, open the congestion window.
    599   1.1    rpaulo 	 */
    600   1.4    rpaulo 
    601   1.4    rpaulo 	u_int cw = tp->snd_cwnd;
    602   1.4    rpaulo 	u_int incr = tp->t_segsz;
    603   1.4    rpaulo 
    604   1.8      yamt 	if (tcp_do_abc) {
    605   1.8      yamt 
    606   1.8      yamt 		/*
    607   1.8      yamt 		 * RFC 3465 Appropriate Byte Counting (ABC)
    608   1.8      yamt 		 */
    609   1.8      yamt 
    610   1.8      yamt 		int acked = th->th_ack - tp->snd_una;
    611   1.8      yamt 
    612   1.8      yamt 		if (cw >= tp->snd_ssthresh) {
    613   1.8      yamt 			tp->t_bytes_acked += acked;
    614   1.8      yamt 			if (tp->t_bytes_acked >= cw) {
    615   1.8      yamt 				/* Time to increase the window. */
    616   1.8      yamt 				tp->t_bytes_acked -= cw;
    617   1.8      yamt 			} else {
    618   1.8      yamt 				/* No need to increase yet. */
    619   1.8      yamt 				incr = 0;
    620   1.8      yamt 			}
    621   1.8      yamt 		} else {
    622   1.8      yamt 			/*
    623   1.8      yamt 			 * use 2*SMSS or 1*SMSS for the "L" param,
    624   1.8      yamt 			 * depending on sysctl setting.
    625   1.8      yamt 			 *
    626   1.8      yamt 			 * (See RFC 3465 2.3 Choosing the Limit)
    627   1.8      yamt 			 */
    628   1.8      yamt 			u_int abc_lim;
    629   1.8      yamt 
    630   1.9      yamt 			abc_lim = (tcp_abc_aggressive == 0 ||
    631   1.9      yamt 			    tp->snd_nxt != tp->snd_max) ? incr : incr * 2;
    632   1.8      yamt 			incr = min(acked, abc_lim);
    633   1.8      yamt 		}
    634   1.8      yamt 	} else {
    635   1.8      yamt 
    636   1.8      yamt 		/*
    637   1.8      yamt 		 * If the window gives us less than ssthresh packets
    638   1.8      yamt 		 * in flight, open exponentially (segsz per packet).
    639   1.8      yamt 		 * Otherwise open linearly: segsz per window
    640   1.8      yamt 		 * (segsz^2 / cwnd per packet).
    641   1.8      yamt 		 */
    642   1.8      yamt 
    643   1.8      yamt 		if (cw >= tp->snd_ssthresh) {
    644   1.8      yamt 			incr = incr * incr / cw;
    645   1.8      yamt 		}
    646   1.8      yamt 	}
    647   1.4    rpaulo 
    648   1.4    rpaulo 	tp->snd_cwnd = min(cw + incr, TCP_MAXWIN << tp->snd_scale);
    649   1.1    rpaulo }
    650   1.1    rpaulo 
/*
 * Reno congestion control: function table dispatched by the TCP
 * input/output paths.
 */
const struct tcp_congctl tcp_reno_ctl = {
	.fast_retransmit = tcp_reno_fast_retransmit,
	.slow_retransmit = tcp_reno_slow_retransmit,
	.fast_retransmit_newack = tcp_reno_fast_retransmit_newack,
	.newack = tcp_reno_newack,
	.cong_exp = tcp_reno_congestion_exp,
};
    658   1.1    rpaulo 
    659   1.1    rpaulo /*
    660   1.1    rpaulo  * TCP/NewReno Congestion control.
    661   1.1    rpaulo  */
    662   1.1    rpaulo static int
    663  1.11      yamt tcp_newreno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
    664   1.1    rpaulo {
    665  1.16      yamt 
    666   1.1    rpaulo 	if (SEQ_LT(th->th_ack, tp->snd_high)) {
    667   1.1    rpaulo 		/*
    668   1.1    rpaulo 		 * False fast retransmit after timeout.
    669   1.1    rpaulo 		 * Do not enter fast recovery
    670   1.1    rpaulo 		 */
    671   1.1    rpaulo 		tp->t_dupacks = 0;
    672   1.1    rpaulo 		return 1;
    673   1.1    rpaulo 	}
    674  1.16      yamt 	/*
    675  1.16      yamt 	 * Fast retransmit is same as reno.
    676  1.16      yamt 	 */
    677  1.16      yamt 	return tcp_reno_fast_retransmit(tp, th);
    678   1.1    rpaulo }
    679   1.1    rpaulo 
/*
 * Implement the NewReno response to a new ack, checking for partial acks in
 * fast recovery.
 */
static void
tcp_newreno_fast_retransmit_newack(struct tcpcb *tp, const struct tcphdr *th)
{
	if (tp->t_partialacks < 0) {
		/*
		 * We were not in fast recovery.  Reset the duplicate ack
		 * counter.
		 */
		tp->t_dupacks = 0;
	} else if (SEQ_LT(th->th_ack, tp->snd_recover)) {
		/*
		 * This is a partial ack.  Retransmit the first unacknowledged
		 * segment and deflate the congestion window by the amount of
		 * acknowledged data.  Do not exit fast recovery.
		 */
		/* Saved so snd_nxt/snd_cwnd can be restored after the resend. */
		tcp_seq onxt = tp->snd_nxt;
		u_long ocwnd = tp->snd_cwnd;
		int sack_num_segs = 1, sack_bytes_rxmt = 0;

		/*
		 * snd_una has not yet been updated and the socket's send
		 * buffer has not yet drained off the ACK'd data, so we
		 * have to leave snd_una as it was to get the correct data
		 * offset in tcp_output().
		 */
		tp->t_partialacks++;
		TCP_TIMER_DISARM(tp, TCPT_REXMT);
		tp->t_rtttime = 0;
		/* Point snd_nxt at the hole so tcp_output() resends it. */
		tp->snd_nxt = th->th_ack;

		if (TCP_SACK_ENABLED(tp)) {
			/*
			 * Partial ack handling within a sack recovery episode.
			 * Keeping this very simple for now. When a partial ack
			 * is received, force snd_cwnd to a value that will
			 * allow the sender to transmit no more than 2 segments.
			 * If necessary, a fancier scheme can be adopted at a
			 * later point, but for now, the goal is to prevent the
			 * sender from bursting a large amount of data in the
			 * midst of sack recovery.
			 */

			/*
			 * send one or 2 segments based on how much
			 * new data was acked
			 */
			if (((th->th_ack - tp->snd_una) / tp->t_segsz) > 2)
				sack_num_segs = 2;
			/* Only the retransmit byte count is needed here. */
			(void)tcp_sack_output(tp, &sack_bytes_rxmt);
			tp->snd_cwnd = sack_bytes_rxmt +
			    (tp->snd_nxt - tp->sack_newdata) +
			    sack_num_segs * tp->t_segsz;
			tp->t_flags |= TF_ACKNOW;
			(void) tcp_output(tp);
		} else {
			/*
			 * Set snd_cwnd to one segment beyond ACK'd offset
			 * snd_una is not yet updated when we're called
			 */
			tp->snd_cwnd = tp->t_segsz + (th->th_ack - tp->snd_una);
			(void) tcp_output(tp);
			/* Restore the pre-retransmit window and send point. */
			tp->snd_cwnd = ocwnd;
			if (SEQ_GT(onxt, tp->snd_nxt))
				tp->snd_nxt = onxt;
			/*
			 * Partial window deflation.  Relies on fact that
			 * tp->snd_una not updated yet.
			 */
			tp->snd_cwnd -= (th->th_ack - tp->snd_una -
			    tp->t_segsz);
		}
	} else {
		/*
		 * Complete ack.  Inflate the congestion window to ssthresh
		 * and exit fast recovery.
		 *
		 * Window inflation should have left us with approx.
		 * snd_ssthresh outstanding data.  But in case we
		 * would be inclined to send a burst, better to do
		 * it via the slow start mechanism.
		 */
		if (SEQ_SUB(tp->snd_max, th->th_ack) < tp->snd_ssthresh)
			tp->snd_cwnd = SEQ_SUB(tp->snd_max, th->th_ack)
			    + tp->t_segsz;
		else
			tp->snd_cwnd = tp->snd_ssthresh;
		tp->t_partialacks = -1;
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		/* Keep the SACK forward-most point consistent with the ACK. */
		if (TCP_SACK_ENABLED(tp) && SEQ_GT(th->th_ack, tp->snd_fack))
			tp->snd_fack = th->th_ack;
	}
}
    777   1.1    rpaulo 
    778   1.1    rpaulo static void
    779  1.11      yamt tcp_newreno_newack(struct tcpcb *tp, const struct tcphdr *th)
    780   1.1    rpaulo {
    781   1.1    rpaulo 	/*
    782   1.4    rpaulo 	 * If we are still in fast recovery (meaning we are using
    783   1.4    rpaulo 	 * NewReno and we have only received partial acks), do not
    784   1.4    rpaulo 	 * inflate the window yet.
    785   1.1    rpaulo 	 */
    786   1.4    rpaulo 	if (tp->t_partialacks < 0)
    787   1.4    rpaulo 		tcp_reno_newack(tp, th);
    788   1.1    rpaulo }
    789   1.1    rpaulo 
    790   1.1    rpaulo 
/*
 * NewReno congestion control: Reno plus partial-ack handling while in
 * fast recovery.  Shares slow_retransmit and cong_exp with plain Reno.
 */
const struct tcp_congctl tcp_newreno_ctl = {
	.fast_retransmit = tcp_newreno_fast_retransmit,
	.slow_retransmit = tcp_reno_slow_retransmit,
	.fast_retransmit_newack = tcp_newreno_fast_retransmit_newack,
	.newack = tcp_newreno_newack,
	.cong_exp = tcp_reno_congestion_exp,
};
    798   1.1    rpaulo 
    799  1.18    kefren /*
    800  1.18    kefren  * CUBIC - http://tools.ietf.org/html/draft-rhee-tcpm-cubic-02
    801  1.18    kefren  */
    802  1.18    kefren 
    803  1.18    kefren /* Cubic prototypes */
    804  1.18    kefren static void	tcp_cubic_update_ctime(struct tcpcb *tp);
    805  1.18    kefren static uint32_t	tcp_cubic_diff_ctime(struct tcpcb *);
    806  1.18    kefren static uint32_t	tcp_cubic_cbrt(uint32_t);
    807  1.19    kefren static ulong	tcp_cubic_getW(struct tcpcb *, uint32_t, uint32_t);
    808  1.18    kefren 
    809  1.18    kefren /* Cubic TIME functions - XXX I don't like using timevals and microuptime */
    810  1.18    kefren /*
    811  1.18    kefren  * Set congestion timer to now
    812  1.18    kefren  */
    813  1.18    kefren static void
    814  1.18    kefren tcp_cubic_update_ctime(struct tcpcb *tp)
    815  1.18    kefren {
    816  1.18    kefren 	struct timeval now_timeval;
    817  1.18    kefren 
    818  1.18    kefren 	getmicrouptime(&now_timeval);
    819  1.18    kefren 	tp->snd_cubic_ctime = now_timeval.tv_sec * 1000 +
    820  1.18    kefren 	    now_timeval.tv_usec / 1000;
    821  1.18    kefren }
    822  1.18    kefren 
    823  1.18    kefren /*
    824  1.18    kefren  * miliseconds from last congestion
    825  1.18    kefren  */
    826  1.18    kefren static uint32_t
    827  1.18    kefren tcp_cubic_diff_ctime(struct tcpcb *tp)
    828  1.18    kefren {
    829  1.18    kefren 	struct timeval now_timeval;
    830  1.18    kefren 
    831  1.18    kefren 	getmicrouptime(&now_timeval);
    832  1.18    kefren 	return now_timeval.tv_sec * 1000 + now_timeval.tv_usec / 1000 -
    833  1.18    kefren 	    tp->snd_cubic_ctime;
    834  1.18    kefren }
    835   1.1    rpaulo 
/*
 * Approximate integer cube root via Newton-Raphson iteration.
 */
#define CBRT_ROUNDS 30
static uint32_t
tcp_cubic_cbrt(uint32_t v)
{
	uint64_t est = v / 3;		/* initial Newton estimate */
	int step, nsteps = CBRT_ROUNDS;
	int safe_form = 0;		/* use the overflow-safe update? */

	/* The iteration misbehaves on tiny inputs; answer those directly. */
	if (v == 0)
		return 0;
	if (v < 4)
		return 1;

	/*
	 * For seeds where 2*x^3 + 3*x would no longer fit in 64 bits,
	 * switch to a rearranged update that avoids the overflow, and run
	 * extra rounds to offset its lower (integer-division) precision.
	 */
	if (est > 2097151) {
		safe_form = 1;
		nsteps += 10;
	}

	for (step = 0; step < nsteps; step++) {
		if (safe_form)
			est = v / (3 * est * est) + 2 * est / 3;
		else
			est = (v + 2 * est * est * est) / (3 * est * est);
	}

	return (uint32_t)est;
}
    868  1.18    kefren 
    869  1.19    kefren /* Draft Rhee Section 3.1 - get W(t+rtt) - Eq. 1 */
    870  1.19    kefren static ulong
    871  1.19    kefren tcp_cubic_getW(struct tcpcb *tp, uint32_t ms_elapsed, uint32_t rtt)
    872  1.18    kefren {
    873  1.19    kefren 	uint32_t K;
    874  1.19    kefren 	long tK3;
    875  1.18    kefren 
    876  1.19    kefren 	/* Section 3.1 Eq. 2 */
    877  1.19    kefren 	K = tcp_cubic_cbrt(tp->snd_cubic_wmax / CUBIC_BETAB *
    878  1.18    kefren 	    CUBIC_CB / CUBIC_CA);
    879  1.19    kefren 	/*  (t-K)^3 - not clear why is the measure unit mattering */
    880  1.19    kefren 	tK3 = (long)(ms_elapsed + rtt) - (long)K;
    881  1.19    kefren 	tK3 = tK3 * tK3 * tK3;
    882  1.18    kefren 
    883  1.19    kefren 	return CUBIC_CA * tK3 / CUBIC_CB + tp->snd_cubic_wmax;
    884  1.18    kefren }
    885  1.18    kefren 
    886  1.18    kefren static void
    887  1.18    kefren tcp_cubic_congestion_exp(struct tcpcb *tp)
    888  1.18    kefren {
    889  1.18    kefren 
    890  1.19    kefren 	/*
    891  1.19    kefren 	 * Congestion - Set WMax and shrink cwnd
    892  1.19    kefren 	 */
    893  1.18    kefren 	tcp_cubic_update_ctime(tp);
    894  1.18    kefren 
    895  1.18    kefren 	/* Section 3.6 - Fast Convergence */
    896  1.18    kefren 	if (tp->snd_cubic_wmax < tp->snd_cubic_wmax_last) {
    897  1.18    kefren 		tp->snd_cubic_wmax_last = tp->snd_cubic_wmax;
    898  1.18    kefren 		tp->snd_cubic_wmax = tp->snd_cubic_wmax / 2 +
    899  1.18    kefren 		    tp->snd_cubic_wmax * CUBIC_BETAA / CUBIC_BETAB / 2;
    900  1.18    kefren 	} else {
    901  1.18    kefren 		tp->snd_cubic_wmax_last = tp->snd_cubic_wmax;
    902  1.18    kefren 		tp->snd_cubic_wmax = tp->snd_cwnd;
    903  1.18    kefren 	}
    904  1.19    kefren 
    905  1.19    kefren 	tp->snd_cubic_wmax = max(tp->t_segsz, tp->snd_cubic_wmax);
    906  1.19    kefren 
    907  1.19    kefren 	/* Shrink CWND */
    908  1.18    kefren 	tcp_common_congestion_exp(tp, CUBIC_BETAA, CUBIC_BETAB);
    909  1.18    kefren }
    910  1.18    kefren 
    911  1.18    kefren static int
    912  1.18    kefren tcp_cubic_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
    913  1.18    kefren {
    914  1.18    kefren 
    915  1.18    kefren 	if (SEQ_LT(th->th_ack, tp->snd_high)) {
    916  1.18    kefren 		/* See newreno */
    917  1.18    kefren 		tp->t_dupacks = 0;
    918  1.18    kefren 		return 1;
    919  1.18    kefren 	}
    920  1.18    kefren 
    921  1.18    kefren 	/*
    922  1.19    kefren 	 * mark WMax
    923  1.18    kefren 	 */
    924  1.19    kefren 	tcp_cubic_congestion_exp(tp);
    925  1.19    kefren 
    926  1.19    kefren 	/* Do fast retransmit */
    927  1.19    kefren 	return tcp_reno_do_fast_retransmit(tp, th);
    928  1.18    kefren }
    929  1.18    kefren 
    930  1.18    kefren static void
    931  1.18    kefren tcp_cubic_newack(struct tcpcb *tp, const struct tcphdr *th)
    932  1.18    kefren {
    933  1.18    kefren 	uint32_t ms_elapsed, rtt;
    934  1.18    kefren 	u_long w_tcp;
    935  1.18    kefren 
    936  1.19    kefren 	/* Congestion avoidance and not in fast recovery and usable rtt */
    937  1.19    kefren 	if (tp->snd_cwnd > tp->snd_ssthresh && tp->t_partialacks < 0 &&
    938  1.19    kefren 	    /*
    939  1.19    kefren 	     * t_srtt is 1/32 units of slow ticks
    940  1.19    kefren 	     * converting it in ms would be equal to
    941  1.19    kefren 	     * (t_srtt >> 5) * 1000 / PR_SLOWHZ ~= (t_srtt << 5) / PR_SLOWHZ
    942  1.19    kefren 	     */
    943  1.19    kefren 	    (rtt = (tp->t_srtt << 5) / PR_SLOWHZ) > 0) {
    944  1.18    kefren 		ms_elapsed = tcp_cubic_diff_ctime(tp);
    945  1.18    kefren 
    946  1.19    kefren 		/* Compute W_tcp(t) */
    947  1.19    kefren 		w_tcp = tp->snd_cubic_wmax * CUBIC_BETAA / CUBIC_BETAB +
    948  1.18    kefren 		    ms_elapsed / rtt / 3;
    949  1.18    kefren 
    950  1.18    kefren 		if (tp->snd_cwnd > w_tcp) {
    951  1.19    kefren 			/* Not in TCP friendly mode */
    952  1.19    kefren 			tp->snd_cwnd += (tcp_cubic_getW(tp, ms_elapsed, rtt) -
    953  1.19    kefren 			    tp->snd_cwnd) / tp->snd_cwnd;
    954  1.18    kefren 		} else {
    955  1.18    kefren 			/* friendly TCP mode */
    956  1.18    kefren 			tp->snd_cwnd = w_tcp;
    957  1.18    kefren 		}
    958  1.18    kefren 
    959  1.18    kefren 		/* Make sure we are within limits */
    960  1.18    kefren 		tp->snd_cwnd = max(tp->snd_cwnd, tp->t_segsz);
    961  1.18    kefren 		tp->snd_cwnd = min(tp->snd_cwnd, TCP_MAXWIN << tp->snd_scale);
    962  1.18    kefren 	} else {
    963  1.18    kefren 		/* Use New Reno */
    964  1.18    kefren 		tcp_newreno_newack(tp, th);
    965  1.18    kefren 	}
    966  1.18    kefren }
    967  1.18    kefren 
    968  1.18    kefren static void
    969  1.18    kefren tcp_cubic_slow_retransmit(struct tcpcb *tp)
    970  1.18    kefren {
    971  1.18    kefren 
    972  1.19    kefren 	/* Timeout - Mark new congestion */
    973  1.19    kefren 	tcp_cubic_congestion_exp(tp);
    974  1.18    kefren 
    975  1.19    kefren 	/* Loss Window MUST be one segment. */
    976  1.19    kefren 	tp->snd_cwnd = tp->t_segsz;
    977  1.19    kefren 	tp->t_partialacks = -1;
    978  1.19    kefren 	tp->t_dupacks = 0;
    979  1.19    kefren 	tp->t_bytes_acked = 0;
    980  1.19    kefren 
    981  1.19    kefren 	if (TCP_ECN_ALLOWED(tp))
    982  1.19    kefren 		tp->t_flags |= TF_ECN_SND_CWR;
    983  1.18    kefren }
    984  1.18    kefren 
/*
 * CUBIC congestion control.  Reuses the NewReno partial-ack handler for
 * acks received during fast recovery; the rest is CUBIC-specific.
 */
const struct tcp_congctl tcp_cubic_ctl = {
	.fast_retransmit = tcp_cubic_fast_retransmit,
	.slow_retransmit = tcp_cubic_slow_retransmit,
	.fast_retransmit_newack = tcp_newreno_fast_retransmit_newack,
	.newack = tcp_cubic_newack,
	.cong_exp = tcp_cubic_congestion_exp,
};
    992