/* $NetBSD: tcp_congctl.c,v 1.12 2006/11/16 01:33:45 christos Exp $ */

/*-
 * Copyright (c) 1997, 1998, 1999, 2001, 2005, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
 * Facility, NASA Ames Research Center.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Rui Paulo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * @(#)COPYRIGHT   1.1 (NRL) 17 January 1995
 *
 * NRL grants permission for redistribution and use in source and binary
 * forms, with or without modification, of the software and documentation
 * created at NRL provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgements:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 *        This product includes software developed at the Information
 *        Technology Division, US Naval Research Laboratory.
 * 4. Neither the name of the NRL nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the US Naval
 * Research Laboratory (NRL).
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)tcp_input.c 8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tcp_congctl.c,v 1.12 2006/11/16 01:33:45 christos Exp $");

#include "opt_inet.h"
#include "opt_tcp_debug.h"
#include "opt_tcp_congctl.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/pool.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/lock.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#ifdef INET6
#ifndef INET
#include <netinet/in.h>
#endif
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#endif

#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_congctl.h>
#ifdef TCP_DEBUG
#include <netinet/tcp_debug.h>
#endif

/*
 * TODO:
 *   consider separating the actual implementations in another file.
 */

static int  tcp_reno_fast_retransmit(struct tcpcb *, const struct tcphdr *);
static void tcp_reno_slow_retransmit(struct tcpcb *);
static void tcp_reno_fast_retransmit_newack(struct tcpcb *,
    const struct tcphdr *);
static void tcp_reno_newack(struct tcpcb *, const struct tcphdr *);
static void tcp_reno_congestion_exp(struct tcpcb *tp);

static int  tcp_newreno_fast_retransmit(struct tcpcb *, const struct tcphdr *);
static void tcp_newreno_fast_retransmit_newack(struct tcpcb *,
    const struct tcphdr *);
static void tcp_newreno_newack(struct tcpcb *, const struct tcphdr *);

static void tcp_congctl_fillnames(void);

extern int tcprexmtthresh;

MALLOC_DEFINE(M_TCPCONGCTL, "tcpcongctl", "TCP congestion control structures");

/*
 * Used to list the available congestion control algorithms.
 */
struct tcp_congctlent {
        TAILQ_ENTRY(tcp_congctlent) congctl_ent;
        char congctl_name[TCPCC_MAXLEN];
        struct tcp_congctl *congctl_ctl;
};
TAILQ_HEAD(, tcp_congctlent) tcp_congctlhd;

struct simplelock tcp_congctl_slock;

void
tcp_congctl_init(void)
{
        int r;

        TAILQ_INIT(&tcp_congctlhd);
        simple_lock_init(&tcp_congctl_slock);

        /* Base algorithms. */
        r = tcp_congctl_register("reno", &tcp_reno_ctl);
        KASSERT(r == 0);
        r = tcp_congctl_register("newreno", &tcp_newreno_ctl);
        KASSERT(r == 0);

        /* NewReno is the default. */
#ifndef TCP_CONGCTL_DEFAULT
#define TCP_CONGCTL_DEFAULT "newreno"
#endif

        r = tcp_congctl_select(NULL, TCP_CONGCTL_DEFAULT);
        KASSERT(r == 0);
}

/*
 * Register a congestion algorithm and select it if we have none.
 */
int
tcp_congctl_register(const char *name, struct tcp_congctl *tcc)
{
        struct tcp_congctlent *ntcc, *tccp;

        TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent)
                if (!strcmp(name, tccp->congctl_name)) {
                        /* name already registered */
                        return EEXIST;
                }

        ntcc = malloc(sizeof(*ntcc), M_TCPCONGCTL, M_WAITOK);

        strlcpy(ntcc->congctl_name, name, sizeof(ntcc->congctl_name) - 1);
        ntcc->congctl_ctl = tcc;

        TAILQ_INSERT_TAIL(&tcp_congctlhd, ntcc, congctl_ent);
        tcp_congctl_fillnames();

        if (TAILQ_FIRST(&tcp_congctlhd) == ntcc)
                tcp_congctl_select(NULL, name);

        return 0;
}
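
/*
 * Illustrative sketch of the registration interface.  The "cubic" name,
 * tcp_cubic_ctl and the attach/detach hooks below are hypothetical and not
 * part of this file; only tcp_congctl_register(), tcp_congctl_select() and
 * tcp_congctl_unregister() are the real entry points defined here.
 *
 *      extern struct tcp_congctl tcp_cubic_ctl;   // hypothetical algorithm
 *
 *      void
 *      cubic_attach(void)
 *      {
 *              // EEXIST is returned if the name is already registered.
 *              if (tcp_congctl_register("cubic", &tcp_cubic_ctl) == 0)
 *                      (void)tcp_congctl_select(NULL, "cubic");
 *      }
 *
 *      void
 *      cubic_detach(void)
 *      {
 *              // Fails with EBUSY while globally selected or referenced.
 *              (void)tcp_congctl_unregister("cubic");
 *      }
 */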

int
tcp_congctl_unregister(const char *name)
{
        struct tcp_congctlent *tccp, *rtccp;
        unsigned int size;

        rtccp = NULL;
        size = 0;
        TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
                if (!strcmp(name, tccp->congctl_name))
                        rtccp = tccp;
                size++;
        }

        if (!rtccp)
                return ENOENT;

        if (size <= 1 || tcp_congctl_global == rtccp->congctl_ctl ||
            rtccp->congctl_ctl->refcnt)
                return EBUSY;

        TAILQ_REMOVE(&tcp_congctlhd, rtccp, congctl_ent);
        free(rtccp, M_TCPCONGCTL);
        tcp_congctl_fillnames();

        return 0;
}

/*
 * Select a congestion algorithm by name.
 */
int
tcp_congctl_select(struct tcpcb *tp, const char *name)
{
        struct tcp_congctlent *tccp;

        KASSERT(name);

        TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent)
                if (!strcmp(name, tccp->congctl_name)) {
                        if (tp) {
                                simple_lock(&tcp_congctl_slock);
                                tp->t_congctl->refcnt--;
                                tp->t_congctl = tccp->congctl_ctl;
                                tp->t_congctl->refcnt++;
                                simple_unlock(&tcp_congctl_slock);
                        } else {
                                tcp_congctl_global = tccp->congctl_ctl;
                                strlcpy(tcp_congctl_global_name,
                                    tccp->congctl_name,
                                    sizeof(tcp_congctl_global_name) - 1);
                        }
                        return 0;
                }

        return EINVAL;
}
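
/*
 * Usage sketch (not exercised in this file): passing a tcpcb rebinds only
 * that connection, dropping the old algorithm's refcnt and taking one on
 * the new under tcp_congctl_slock, while passing NULL changes the global
 * default used for new connections.  In practice this is normally reached
 * through the net.inet.tcp.congctl.selected sysctl rather than called
 * directly:
 *
 *      error = tcp_congctl_select(tp, "reno");         // one connection
 *      error = tcp_congctl_select(NULL, "newreno");    // system default
 */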

/*
 * Returns the name of a congestion algorithm.
 */
const char *
tcp_congctl_bystruct(const struct tcp_congctl *tcc)
{
        struct tcp_congctlent *tccp;

        KASSERT(tcc);

        TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent)
                if (tccp->congctl_ctl == tcc)
                        return tccp->congctl_name;

        return NULL;
}

static void
tcp_congctl_fillnames(void)
{
        struct tcp_congctlent *tccp;
        const char *delim = " ";

        tcp_congctl_avail[0] = '\0';
        TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
                strlcat(tcp_congctl_avail, tccp->congctl_name,
                    sizeof(tcp_congctl_avail) - 1);
                if (TAILQ_NEXT(tccp, congctl_ent))
                        strlcat(tcp_congctl_avail, delim,
                            sizeof(tcp_congctl_avail) - 1);
        }
}

/* ------------------------------------------------------------------------ */

/*
 * TCP/Reno congestion control.
 */
static void
tcp_reno_congestion_exp(struct tcpcb *tp)
{
        u_int win;

        /*
         * Halve the congestion window and reduce the
         * slow start threshold.
         */
        win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_segsz;
        if (win < 2)
                win = 2;

        tp->snd_ssthresh = win * tp->t_segsz;
        tp->snd_recover = tp->snd_max;
        tp->snd_cwnd = tp->snd_ssthresh;

        /*
         * When using TCP ECN, notify the peer that
         * we reduced the cwnd.
         */
        if (TCP_ECN_ALLOWED(tp))
                tp->t_flags |= TF_ECN_SND_CWR;
}
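
/*
 * Worked example (illustrative numbers only): with t_segsz = 1460 and
 * min(snd_wnd, snd_cwnd) = 64240 bytes (44 segments), the halved window is
 * 64240 / 2 / 1460 = 22 segments, so snd_ssthresh and snd_cwnd both become
 * 22 * 1460 = 32120 bytes.  The "win < 2" clamp guarantees the new window
 * never drops below two segments.
 */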

static int
tcp_reno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{
        /*
         * We know we're losing at the current
         * window size so do congestion avoidance
         * (set ssthresh to half the current window
         * and pull our congestion window back to
         * the new ssthresh).
         *
         * Dup acks mean that packets have left the
         * network (they're now cached at the receiver)
         * so bump cwnd by the amount in the receiver
         * to keep a constant cwnd packets in the
         * network.
         *
         * If we are using TCP/SACK, then enter
         * Fast Recovery if the receiver SACKs
         * data that is tcprexmtthresh * MSS
         * bytes past the last ACKed segment,
         * irrespective of the number of DupAcks.
         */

        tcp_seq onxt;

        onxt = tp->snd_nxt;
        tcp_reno_congestion_exp(tp);
        tp->t_partialacks = 0;
        TCP_TIMER_DISARM(tp, TCPT_REXMT);
        tp->t_rtttime = 0;
        if (TCP_SACK_ENABLED(tp)) {
                tp->t_dupacks = tcprexmtthresh;
                tp->sack_newdata = tp->snd_nxt;
                tp->snd_cwnd = tp->t_segsz;
                (void) tcp_output(tp);
                return 0;
        }
        tp->snd_nxt = th->th_ack;
        tp->snd_cwnd = tp->t_segsz;
        (void) tcp_output(tp);
        tp->snd_cwnd = tp->snd_ssthresh + tp->t_segsz * tp->t_dupacks;
        if (SEQ_GT(onxt, tp->snd_nxt))
                tp->snd_nxt = onxt;

        return 0;
}
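
/*
 * Illustrative numbers (hypothetical, not from this file): if ssthresh was
 * just set to 32120 bytes and t_dupacks == 3 with t_segsz = 1460, the
 * non-SACK path above retransmits one segment with cwnd temporarily forced
 * to 1460, then inflates cwnd to 32120 + 3 * 1460 = 36500 bytes, crediting
 * the three segments the duplicate ACKs indicate have left the network.
 */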

static void
tcp_reno_slow_retransmit(struct tcpcb *tp)
{
        u_int win;

        /*
         * Close the congestion window down to one segment
         * (we'll open it by one segment for each ack we get).
         * Since we probably have a window's worth of unacked
         * data accumulated, this "slow start" keeps us from
         * dumping all that data as back-to-back packets (which
         * might overwhelm an intermediate gateway).
         *
         * There are two phases to the opening: Initially we
         * open by one mss on each ack.  This makes the window
         * size increase exponentially with time.  If the
         * window is larger than the path can handle, this
         * exponential growth results in dropped packet(s)
         * almost immediately.  To get more time between
         * drops but still "push" the network to take advantage
         * of improving conditions, we switch from exponential
         * to linear window opening at some threshold size.
         * For a threshold, we use half the current window
         * size, truncated to a multiple of the mss.
         *
         * (the minimum cwnd that will give us exponential
         * growth is 2 mss.  We don't allow the threshold
         * to go below this.)
         */

        win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_segsz;
        if (win < 2)
                win = 2;
        /* Loss Window MUST be one segment. */
        tp->snd_cwnd = tp->t_segsz;
        tp->snd_ssthresh = win * tp->t_segsz;
        tp->t_partialacks = -1;
        tp->t_dupacks = 0;
        tp->t_bytes_acked = 0;
}
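
/*
 * Illustrative sequence (hypothetical numbers): after a retransmission
 * timeout with t_segsz = 1460 and 64240 bytes in flight, cwnd restarts at
 * 1460 bytes while ssthresh becomes 32120.  Slow start then roughly doubles
 * cwnd per RTT (1460, 2920, 5840, ...) until it crosses ssthresh, after
 * which growth continues linearly at about one segment per RTT.
 */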

static void
tcp_reno_fast_retransmit_newack(struct tcpcb *tp,
    const struct tcphdr *th)
{
        if (tp->t_partialacks < 0) {
                /*
                 * We were not in fast recovery.  Reset the duplicate ack
                 * counter.
                 */
                tp->t_dupacks = 0;
        } else {
                /*
                 * Clamp the congestion window to the crossover point and
                 * exit fast recovery.
                 */
                if (tp->snd_cwnd > tp->snd_ssthresh)
                        tp->snd_cwnd = tp->snd_ssthresh;
                tp->t_partialacks = -1;
                tp->t_dupacks = 0;
                tp->t_bytes_acked = 0;
        }
}

static void
tcp_reno_newack(struct tcpcb *tp, const struct tcphdr *th)
{
        /*
         * When new data is acked, open the congestion window.
         */

        u_int cw = tp->snd_cwnd;
        u_int incr = tp->t_segsz;

        if (tcp_do_abc) {

                /*
                 * RFC 3465 Appropriate Byte Counting (ABC)
                 */

                int acked = th->th_ack - tp->snd_una;

                if (cw >= tp->snd_ssthresh) {
                        tp->t_bytes_acked += acked;
                        if (tp->t_bytes_acked >= cw) {
                                /* Time to increase the window. */
                                tp->t_bytes_acked -= cw;
                        } else {
                                /* No need to increase yet. */
                                incr = 0;
                        }
                } else {
                        /*
                         * use 2*SMSS or 1*SMSS for the "L" param,
                         * depending on sysctl setting.
                         *
                         * (See RFC 3465 2.3 Choosing the Limit)
                         */
                        u_int abc_lim;

                        abc_lim = (tcp_abc_aggressive == 0 ||
                            tp->snd_nxt != tp->snd_max) ? incr : incr * 2;
                        incr = min(acked, abc_lim);
                }
        } else {

                /*
                 * If the window gives us less than ssthresh packets
                 * in flight, open exponentially (segsz per packet).
                 * Otherwise open linearly: segsz per window
                 * (segsz^2 / cwnd per packet).
                 */

                if (cw >= tp->snd_ssthresh) {
                        incr = incr * incr / cw;
                }
        }

        tp->snd_cwnd = min(cw + incr, TCP_MAXWIN << tp->snd_scale);
}
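
/*
 * Worked example with made-up numbers: with t_segsz = 1460 and
 * snd_cwnd = snd_ssthresh = 32120 (congestion avoidance), the non-ABC
 * branch grows cwnd by 1460 * 1460 / 32120 = 66 bytes per ACK, i.e. roughly
 * one segment per window of ACKs.  With ABC enabled, the same state instead
 * accumulates acked bytes in t_bytes_acked and adds one full segment only
 * once a whole cwnd's worth of data has been acknowledged, which RFC 3465
 * notes also blunts ACK-division attacks.
 */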

struct tcp_congctl tcp_reno_ctl = {
        .fast_retransmit = tcp_reno_fast_retransmit,
        .slow_retransmit = tcp_reno_slow_retransmit,
        .fast_retransmit_newack = tcp_reno_fast_retransmit_newack,
        .newack = tcp_reno_newack,
        .cong_exp = tcp_reno_congestion_exp,
};

/*
 * TCP/NewReno Congestion control.
 */
static int
tcp_newreno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{
        if (SEQ_LT(th->th_ack, tp->snd_high)) {
                /*
                 * False fast retransmit after timeout.
                 * Do not enter fast recovery
                 */
                tp->t_dupacks = 0;
                return 1;
        } else {
                /*
                 * Fast retransmit is same as reno.
                 */
                return tcp_reno_fast_retransmit(tp, th);
        }

        return 0;
}

/*
 * Implement the NewReno response to a new ack, checking for partial acks in
 * fast recovery.
 */
static void
tcp_newreno_fast_retransmit_newack(struct tcpcb *tp, const struct tcphdr *th)
{
        if (tp->t_partialacks < 0) {
                /*
                 * We were not in fast recovery.  Reset the duplicate ack
                 * counter.
                 */
                tp->t_dupacks = 0;
        } else if (SEQ_LT(th->th_ack, tp->snd_recover)) {
                /*
                 * This is a partial ack.  Retransmit the first unacknowledged
                 * segment and deflate the congestion window by the amount of
                 * acknowledged data.  Do not exit fast recovery.
                 */
                tcp_seq onxt = tp->snd_nxt;
                u_long ocwnd = tp->snd_cwnd;

                /*
                 * snd_una has not yet been updated and the socket's send
                 * buffer has not yet drained off the ACK'd data, so we
                 * have to leave snd_una as it was to get the correct data
                 * offset in tcp_output().
                 */
                if (++tp->t_partialacks == 1)
                        TCP_TIMER_DISARM(tp, TCPT_REXMT);
                tp->t_rtttime = 0;
                tp->snd_nxt = th->th_ack;
                /*
                 * Set snd_cwnd to one segment beyond ACK'd offset.  snd_una
                 * is not yet updated when we're called.
                 */
                tp->snd_cwnd = tp->t_segsz + (th->th_ack - tp->snd_una);
                (void) tcp_output(tp);
                tp->snd_cwnd = ocwnd;
                if (SEQ_GT(onxt, tp->snd_nxt))
                        tp->snd_nxt = onxt;
                /*
                 * Partial window deflation.  Relies on fact that tp->snd_una
                 * not updated yet.
                 */
                tp->snd_cwnd -= (th->th_ack - tp->snd_una - tp->t_segsz);
        } else {
                /*
                 * Complete ack.  Inflate the congestion window to ssthresh
                 * and exit fast recovery.
                 *
                 * Window inflation should have left us with approx.
                 * snd_ssthresh outstanding data.  But in case we
                 * would be inclined to send a burst, better to do
                 * it via the slow start mechanism.
                 */
                if (SEQ_SUB(tp->snd_max, th->th_ack) < tp->snd_ssthresh)
                        tp->snd_cwnd = SEQ_SUB(tp->snd_max, th->th_ack)
                            + tp->t_segsz;
                else
                        tp->snd_cwnd = tp->snd_ssthresh;
                tp->t_partialacks = -1;
                tp->t_dupacks = 0;
                tp->t_bytes_acked = 0;
        }
}
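
/*
 * Partial-ack deflation with invented numbers: suppose snd_una = 1000,
 * snd_recover = 20000, snd_cwnd = 36500 and t_segsz = 1460, and an ACK for
 * 5000 arrives.  The ACK is partial (5000 < 20000), so one segment is
 * retransmitted with cwnd pinned to 1460 + (5000 - 1000) = 5460, the old
 * cwnd is restored, and it is then deflated by 4000 - 1460 = 2540 to
 * 33960 bytes, keeping fast recovery in progress per RFC 2582/3782.
 */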

static void
tcp_newreno_newack(struct tcpcb *tp, const struct tcphdr *th)
{
        /*
         * If we are still in fast recovery (meaning we are using
         * NewReno and we have only received partial acks), do not
         * inflate the window yet.
         */
        if (tp->t_partialacks < 0)
                tcp_reno_newack(tp, th);
}

struct tcp_congctl tcp_newreno_ctl = {
        .fast_retransmit = tcp_newreno_fast_retransmit,
        .slow_retransmit = tcp_reno_slow_retransmit,
        .fast_retransmit_newack = tcp_newreno_fast_retransmit_newack,
        .newack = tcp_newreno_newack,
        .cong_exp = tcp_reno_congestion_exp,
};