/*	$NetBSD: tcp_congctl.c,v 1.18 2013/11/12 09:02:05 kefren Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2001, 2005, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
 * Facility, NASA Ames Research Center.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Rui Paulo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * @(#)COPYRIGHT	1.1 (NRL) 17 January 1995
 *
 * NRL grants permission for redistribution and use in source and binary
 * forms, with or without modification, of the software and documentation
 * created at NRL provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgements:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 *	This product includes software developed at the Information
 *	Technology Division, US Naval Research Laboratory.
 * 4. Neither the name of the NRL nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
 * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of the US Naval
 * Research Laboratory (NRL).
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tcp_congctl.c,v 1.18 2013/11/12 09:02:05 kefren Exp $");

#include "opt_inet.h"
#include "opt_tcp_debug.h"
#include "opt_tcp_congctl.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/pool.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/mutex.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#ifdef INET6
#ifndef INET
#include <netinet/in.h>
#endif
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#endif

#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_congctl.h>
#ifdef TCP_DEBUG
#include <netinet/tcp_debug.h>
#endif

/*
 * TODO:
 * consider separating the actual implementations in another file.
 */

static void tcp_common_congestion_exp(struct tcpcb *, int, int);

static int  tcp_reno_do_fast_retransmit(struct tcpcb *, const struct tcphdr *);
static int  tcp_reno_fast_retransmit(struct tcpcb *, const struct tcphdr *);
static void tcp_reno_slow_retransmit(struct tcpcb *);
static void tcp_reno_fast_retransmit_newack(struct tcpcb *,
    const struct tcphdr *);
static void tcp_reno_newack(struct tcpcb *, const struct tcphdr *);
static void tcp_reno_congestion_exp(struct tcpcb *tp);

static int  tcp_newreno_fast_retransmit(struct tcpcb *, const struct tcphdr *);
static void tcp_newreno_fast_retransmit_newack(struct tcpcb *,
    const struct tcphdr *);
static void tcp_newreno_newack(struct tcpcb *, const struct tcphdr *);

static int  tcp_cubic_fast_retransmit(struct tcpcb *, const struct tcphdr *);
static void tcp_cubic_slow_retransmit(struct tcpcb *tp);
static void tcp_cubic_newack(struct tcpcb *, const struct tcphdr *);
static void tcp_cubic_congestion_exp(struct tcpcb *);

static void tcp_congctl_fillnames(void);

extern int tcprexmtthresh;

MALLOC_DEFINE(M_TCPCONGCTL, "tcpcongctl", "TCP congestion control structures");

/* currently selected global congestion control */
char tcp_congctl_global_name[TCPCC_MAXLEN];

/* available global congestion control algorithms */
char tcp_congctl_avail[10 * TCPCC_MAXLEN];

/*
 * Used to list the available congestion control algorithms.
 */
TAILQ_HEAD(, tcp_congctlent) tcp_congctlhd =
    TAILQ_HEAD_INITIALIZER(tcp_congctlhd);

static struct tcp_congctlent *tcp_congctl_global;

static kmutex_t tcp_congctl_mtx;

void
tcp_congctl_init(void)
{
	int r __diagused;

	mutex_init(&tcp_congctl_mtx, MUTEX_DEFAULT, IPL_NONE);

	/* Base algorithms. */
	r = tcp_congctl_register("reno", &tcp_reno_ctl);
	KASSERT(r == 0);
	r = tcp_congctl_register("newreno", &tcp_newreno_ctl);
	KASSERT(r == 0);
	r = tcp_congctl_register("cubic", &tcp_cubic_ctl);
	KASSERT(r == 0);

	/* NewReno is the default. */
#ifndef TCP_CONGCTL_DEFAULT
#define TCP_CONGCTL_DEFAULT "newreno"
#endif

	r = tcp_congctl_select(NULL, TCP_CONGCTL_DEFAULT);
	KASSERT(r == 0);
}

/*
 * Register a congestion algorithm and select it if we have none.
 */
int
tcp_congctl_register(const char *name, const struct tcp_congctl *tcc)
{
	struct tcp_congctlent *ntcc, *tccp;

	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent)
		if (!strcmp(name, tccp->congctl_name)) {
			/* name already registered */
			return EEXIST;
		}

	ntcc = malloc(sizeof(*ntcc), M_TCPCONGCTL, M_WAITOK|M_ZERO);

	strlcpy(ntcc->congctl_name, name, sizeof(ntcc->congctl_name));
	ntcc->congctl_ctl = tcc;

	TAILQ_INSERT_TAIL(&tcp_congctlhd, ntcc, congctl_ent);
	tcp_congctl_fillnames();

	if (TAILQ_FIRST(&tcp_congctlhd) == ntcc)
		tcp_congctl_select(NULL, name);

	return 0;
}

int
tcp_congctl_unregister(const char *name)
{
	struct tcp_congctlent *tccp, *rtccp;
	unsigned int size;

	rtccp = NULL;
	size = 0;
	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
		if (!strcmp(name, tccp->congctl_name))
			rtccp = tccp;
		size++;
	}

	if (!rtccp)
		return ENOENT;

	if (size <= 1 || tcp_congctl_global == rtccp || rtccp->congctl_refcnt)
		return EBUSY;

	TAILQ_REMOVE(&tcp_congctlhd, rtccp, congctl_ent);
	free(rtccp, M_TCPCONGCTL);
	tcp_congctl_fillnames();

	return 0;
}

/*
 * Select a congestion algorithm by name.
 */
int
tcp_congctl_select(struct tcpcb *tp, const char *name)
{
	struct tcp_congctlent *tccp, *old_tccp, *new_tccp;
	bool old_found, new_found;

	KASSERT(name);

	old_found = (tp == NULL || tp->t_congctl == NULL);
	old_tccp = NULL;
	new_found = false;
	new_tccp = NULL;

	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
		if (!old_found && tccp->congctl_ctl == tp->t_congctl) {
			old_tccp = tccp;
			old_found = true;
		}

		if (!new_found && !strcmp(name, tccp->congctl_name)) {
			new_tccp = tccp;
			new_found = true;
		}

		if (new_found && old_found) {
			if (tp) {
				mutex_enter(&tcp_congctl_mtx);
				if (old_tccp)
					old_tccp->congctl_refcnt--;
				tp->t_congctl = new_tccp->congctl_ctl;
				new_tccp->congctl_refcnt++;
				mutex_exit(&tcp_congctl_mtx);
			} else {
				tcp_congctl_global = new_tccp;
				strlcpy(tcp_congctl_global_name,
				    new_tccp->congctl_name,
				    sizeof(tcp_congctl_global_name));
			}
			return 0;
		}
	}

	return EINVAL;
}

void
tcp_congctl_release(struct tcpcb *tp)
{
	struct tcp_congctlent *tccp;

	KASSERT(tp->t_congctl);

	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
		if (tccp->congctl_ctl == tp->t_congctl) {
			tccp->congctl_refcnt--;
			return;
		}
	}
}

/*
 * Returns the name of a congestion algorithm.
 */
const char *
tcp_congctl_bystruct(const struct tcp_congctl *tcc)
{
	struct tcp_congctlent *tccp;

	KASSERT(tcc);

	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent)
		if (tccp->congctl_ctl == tcc)
			return tccp->congctl_name;

	return NULL;
}

static void
tcp_congctl_fillnames(void)
{
	struct tcp_congctlent *tccp;
	const char *delim = " ";

	tcp_congctl_avail[0] = '\0';
	TAILQ_FOREACH(tccp, &tcp_congctlhd, congctl_ent) {
		strlcat(tcp_congctl_avail, tccp->congctl_name,
		    sizeof(tcp_congctl_avail));
		if (TAILQ_NEXT(tccp, congctl_ent))
			strlcat(tcp_congctl_avail, delim,
			    sizeof(tcp_congctl_avail));
	}
}

/* ------------------------------------------------------------------------ */

/*
 * Common stuff
 */

/* Window reduction (1-beta) for [New]Reno: 0.5 */
#define RENO_BETAA 1
#define RENO_BETAB 2
/* Window reduction (1-beta) for Cubic: 0.8 */
#define CUBIC_BETAA 4
#define CUBIC_BETAB 5
/* Draft Rhee Section 4.1 */
#define CUBIC_CA 4
#define CUBIC_CB 10
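
/*
 * These pairs encode fractional constants in integer arithmetic: a
 * factor is stored as a (numerator, denominator) pair and applied as
 * x * BETAA / BETAB.  Reno halves the window (1/2), CUBIC scales it
 * by 4/5, and the cubic "C" constant 0.4 from the draft is carried
 * as CA/CB = 4/10.  For example, scaling a 14600-byte window by
 * CUBIC's pair gives 14600 * 4 / 5 = 11680 bytes.
 */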

static void
tcp_common_congestion_exp(struct tcpcb *tp, int betaa, int betab)
{
	u_int win;

	/*
	 * Reduce the congestion window and the slow start threshold.
	 */
	win = min(tp->snd_wnd, tp->snd_cwnd) * betaa / betab / tp->t_segsz;
	if (win < 2)
		win = 2;

	tp->snd_ssthresh = win * tp->t_segsz;
	tp->snd_recover = tp->snd_max;
	tp->snd_cwnd = tp->snd_ssthresh;

	/*
	 * When using TCP ECN, notify the peer that
	 * we reduced the cwnd.
	 */
	if (TCP_ECN_ALLOWED(tp))
		tp->t_flags |= TF_ECN_SND_CWR;
}
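
/*
 * Worked example (illustrative): with t_segsz = 1460 and
 * min(snd_wnd, snd_cwnd) = 14600 bytes (10 segments), Reno's 1/2
 * factor gives win = 5, so ssthresh and cwnd both drop to 7300
 * bytes; CUBIC's 4/5 factor would give win = 8, i.e. 11680 bytes.
 */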


/* ------------------------------------------------------------------------ */

/*
 * TCP/Reno congestion control.
 */
static void
tcp_reno_congestion_exp(struct tcpcb *tp)
{

	tcp_common_congestion_exp(tp, RENO_BETAA, RENO_BETAB);
}

static int
tcp_reno_do_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{
	/*
	 * We know we're losing at the current
	 * window size so do congestion avoidance
	 * (set ssthresh to half the current window
	 * and pull our congestion window back to
	 * the new ssthresh).
	 *
	 * Dup acks mean that packets have left the
	 * network (they're now cached at the receiver)
	 * so bump cwnd by the amount in the receiver
	 * to keep a constant cwnd packets in the
	 * network.
	 *
	 * If we are using TCP/SACK, then enter
	 * Fast Recovery if the receiver SACKs
	 * data that is tcprexmtthresh * MSS
	 * bytes past the last ACKed segment,
	 * irrespective of the number of DupAcks.
	 */

	tcp_seq onxt = tp->snd_nxt;

	tp->t_partialacks = 0;
	TCP_TIMER_DISARM(tp, TCPT_REXMT);
	tp->t_rtttime = 0;
	if (TCP_SACK_ENABLED(tp)) {
		tp->t_dupacks = tcprexmtthresh;
		tp->sack_newdata = tp->snd_nxt;
		tp->snd_cwnd = tp->t_segsz;
		(void) tcp_output(tp);
		return 0;
	}
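	/*
	 * Without SACK: retransmit the segment the duplicate ACKs
	 * point at by aiming snd_nxt at th_ack with a one-segment
	 * window, then reopen the window to ssthresh plus one
	 * segment per duplicate ACK received (window inflation).
	 */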
	tp->snd_nxt = th->th_ack;
	tp->snd_cwnd = tp->t_segsz;
	(void) tcp_output(tp);
	tp->snd_cwnd = tp->snd_ssthresh + tp->t_segsz * tp->t_dupacks;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;

	return 0;
}

static int
tcp_reno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{

	tcp_reno_congestion_exp(tp);
	return tcp_reno_do_fast_retransmit(tp, th);
}

static void
tcp_reno_slow_retransmit(struct tcpcb *tp)
{
	u_int win;

	/*
	 * Close the congestion window down to one segment
	 * (we'll open it by one segment for each ack we get).
	 * Since we probably have a window's worth of unacked
	 * data accumulated, this "slow start" keeps us from
	 * dumping all that data as back-to-back packets (which
	 * might overwhelm an intermediate gateway).
	 *
	 * There are two phases to the opening: Initially we
	 * open by one mss on each ack.  This makes the window
	 * size increase exponentially with time.  If the
	 * window is larger than the path can handle, this
	 * exponential growth results in dropped packet(s)
	 * almost immediately.  To get more time between
	 * drops but still "push" the network to take advantage
	 * of improving conditions, we switch from exponential
	 * to linear window opening at some threshold size.
	 * For a threshold, we use half the current window
	 * size, truncated to a multiple of the mss.
	 *
	 * (the minimum cwnd that will give us exponential
	 * growth is 2 mss.  We don't allow the threshold
	 * to go below this.)
	 */

	win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_segsz;
	if (win < 2)
		win = 2;
	/* Loss Window MUST be one segment. */
	tp->snd_cwnd = tp->t_segsz;
	tp->snd_ssthresh = win * tp->t_segsz;
	tp->t_partialacks = -1;
	tp->t_dupacks = 0;
	tp->t_bytes_acked = 0;

	if (TCP_ECN_ALLOWED(tp))
		tp->t_flags |= TF_ECN_SND_CWR;
}

static void
tcp_reno_fast_retransmit_newack(struct tcpcb *tp,
    const struct tcphdr *th)
{
	if (tp->t_partialacks < 0) {
		/*
		 * We were not in fast recovery.  Reset the duplicate ack
		 * counter.
		 */
		tp->t_dupacks = 0;
	} else {
		/*
		 * Clamp the congestion window to the crossover point and
		 * exit fast recovery.
		 */
		if (tp->snd_cwnd > tp->snd_ssthresh)
			tp->snd_cwnd = tp->snd_ssthresh;
		tp->t_partialacks = -1;
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		if (TCP_SACK_ENABLED(tp) && SEQ_GT(th->th_ack, tp->snd_fack))
			tp->snd_fack = th->th_ack;
	}
}

static void
tcp_reno_newack(struct tcpcb *tp, const struct tcphdr *th)
{
	/*
	 * When new data is acked, open the congestion window.
	 */

	u_int cw = tp->snd_cwnd;
	u_int incr = tp->t_segsz;

	if (tcp_do_abc) {

		/*
		 * RFC 3465 Appropriate Byte Counting (ABC)
		 */

		int acked = th->th_ack - tp->snd_una;

		if (cw >= tp->snd_ssthresh) {
			tp->t_bytes_acked += acked;
			if (tp->t_bytes_acked >= cw) {
				/* Time to increase the window. */
				tp->t_bytes_acked -= cw;
			} else {
				/* No need to increase yet. */
				incr = 0;
			}
		} else {
			/*
			 * use 2*SMSS or 1*SMSS for the "L" param,
			 * depending on sysctl setting.
			 *
			 * (See RFC 3465 2.3 Choosing the Limit)
			 */
			u_int abc_lim;

			abc_lim = (tcp_abc_aggressive == 0 ||
			    tp->snd_nxt != tp->snd_max) ? incr : incr * 2;
			incr = min(acked, abc_lim);
		}
	} else {

		/*
		 * If the window gives us less than ssthresh packets
		 * in flight, open exponentially (segsz per packet).
		 * Otherwise open linearly: segsz per window
		 * (segsz^2 / cwnd per packet).
		 */

		if (cw >= tp->snd_ssthresh) {
			incr = incr * incr / cw;
		}
	}

	tp->snd_cwnd = min(cw + incr, TCP_MAXWIN << tp->snd_scale);
}
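
/*
 * Growth-rate note: in congestion avoidance each ACK adds
 * segsz^2/cwnd bytes, so a full window of ACKs grows cwnd by about
 * one segment per RTT; in slow start each ACK adds a whole segment,
 * roughly doubling cwnd per RTT.
 */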

const struct tcp_congctl tcp_reno_ctl = {
	.fast_retransmit = tcp_reno_fast_retransmit,
	.slow_retransmit = tcp_reno_slow_retransmit,
	.fast_retransmit_newack = tcp_reno_fast_retransmit_newack,
	.newack = tcp_reno_newack,
	.cong_exp = tcp_reno_congestion_exp,
};

/*
 * TCP/NewReno Congestion control.
 */
static int
tcp_newreno_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{

	if (SEQ_LT(th->th_ack, tp->snd_high)) {
		/*
		 * False fast retransmit after timeout.
		 * Do not enter fast recovery.
		 */
		tp->t_dupacks = 0;
		return 1;
	}
	/*
	 * Fast retransmit is the same as in Reno.
	 */
	return tcp_reno_fast_retransmit(tp, th);
}

/*
 * Implement the NewReno response to a new ack, checking for partial acks in
 * fast recovery.
 */
static void
tcp_newreno_fast_retransmit_newack(struct tcpcb *tp, const struct tcphdr *th)
{
	if (tp->t_partialacks < 0) {
		/*
		 * We were not in fast recovery.  Reset the duplicate ack
		 * counter.
		 */
		tp->t_dupacks = 0;
	} else if (SEQ_LT(th->th_ack, tp->snd_recover)) {
		/*
		 * This is a partial ack.  Retransmit the first unacknowledged
		 * segment and deflate the congestion window by the amount of
		 * acknowledged data.  Do not exit fast recovery.
		 */
		tcp_seq onxt = tp->snd_nxt;
		u_long ocwnd = tp->snd_cwnd;
		int sack_num_segs = 1, sack_bytes_rxmt = 0;

		/*
		 * snd_una has not yet been updated and the socket's send
		 * buffer has not yet drained off the ACK'd data, so we
		 * have to leave snd_una as it was to get the correct data
		 * offset in tcp_output().
		 */
		tp->t_partialacks++;
		TCP_TIMER_DISARM(tp, TCPT_REXMT);
		tp->t_rtttime = 0;
		tp->snd_nxt = th->th_ack;

		if (TCP_SACK_ENABLED(tp)) {
			/*
			 * Partial ack handling within a sack recovery episode.
			 * Keeping this very simple for now.  When a partial ack
			 * is received, force snd_cwnd to a value that will
			 * allow the sender to transmit no more than 2 segments.
			 * If necessary, a fancier scheme can be adopted at a
			 * later point, but for now, the goal is to prevent the
			 * sender from bursting a large amount of data in the
			 * midst of sack recovery.
			 */

			/*
			 * Send one or two segments based on how much
			 * new data was acked.
			 */
			if (((th->th_ack - tp->snd_una) / tp->t_segsz) > 2)
				sack_num_segs = 2;
			(void)tcp_sack_output(tp, &sack_bytes_rxmt);
			tp->snd_cwnd = sack_bytes_rxmt +
			    (tp->snd_nxt - tp->sack_newdata) +
			    sack_num_segs * tp->t_segsz;
			tp->t_flags |= TF_ACKNOW;
			(void) tcp_output(tp);
		} else {
			/*
			 * Set snd_cwnd to one segment beyond the ACK'd offset.
			 * snd_una is not yet updated when we're called.
			 */
			tp->snd_cwnd = tp->t_segsz + (th->th_ack - tp->snd_una);
			(void) tcp_output(tp);
			tp->snd_cwnd = ocwnd;
			if (SEQ_GT(onxt, tp->snd_nxt))
				tp->snd_nxt = onxt;
			/*
			 * Partial window deflation.  Relies on fact that
			 * tp->snd_una not updated yet.
			 */
			tp->snd_cwnd -= (th->th_ack - tp->snd_una -
			    tp->t_segsz);
		}
	} else {
		/*
		 * Complete ack.  Inflate the congestion window to ssthresh
		 * and exit fast recovery.
		 *
		 * Window inflation should have left us with approx.
		 * snd_ssthresh outstanding data.  But in case we
		 * would be inclined to send a burst, better to do
		 * it via the slow start mechanism.
		 */
		if (SEQ_SUB(tp->snd_max, th->th_ack) < tp->snd_ssthresh)
			tp->snd_cwnd = SEQ_SUB(tp->snd_max, th->th_ack)
			    + tp->t_segsz;
		else
			tp->snd_cwnd = tp->snd_ssthresh;
		tp->t_partialacks = -1;
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		if (TCP_SACK_ENABLED(tp) && SEQ_GT(th->th_ack, tp->snd_fack))
			tp->snd_fack = th->th_ack;
	}
}
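
/*
 * Partial-ack example (non-SACK branch above): if ocwnd was 10
 * segments and the partial ACK covers 3 segments of new data, one
 * segment is retransmitted and snd_cwnd ends up deflated by
 * acked - segsz, i.e. 2 segments, keeping the amount of data in
 * flight roughly constant while recovery continues.
 */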

static void
tcp_newreno_newack(struct tcpcb *tp, const struct tcphdr *th)
{
	/*
	 * If we are still in fast recovery (meaning we are using
	 * NewReno and we have only received partial acks), do not
	 * inflate the window yet.
	 */
	if (tp->t_partialacks < 0)
		tcp_reno_newack(tp, th);
}


const struct tcp_congctl tcp_newreno_ctl = {
	.fast_retransmit = tcp_newreno_fast_retransmit,
	.slow_retransmit = tcp_reno_slow_retransmit,
	.fast_retransmit_newack = tcp_newreno_fast_retransmit_newack,
	.newack = tcp_newreno_newack,
	.cong_exp = tcp_reno_congestion_exp,
};

/*
 * CUBIC - http://tools.ietf.org/html/draft-rhee-tcpm-cubic-02
 */

/* Cubic prototypes */
static void	tcp_cubic_update_ctime(struct tcpcb *tp);
static uint32_t	tcp_cubic_diff_ctime(struct tcpcb *);
static uint32_t	tcp_cubic_cbrt(uint32_t);
static uint32_t	tcp_cubic_getW(struct tcpcb *);

/* Cubic TIME functions - XXX I don't like using timevals and microuptime */
/*
 * Set congestion timer to now
 */
static void
tcp_cubic_update_ctime(struct tcpcb *tp)
{
	struct timeval now_timeval;

	getmicrouptime(&now_timeval);
	tp->snd_cubic_ctime = now_timeval.tv_sec * 1000 +
	    now_timeval.tv_usec / 1000;
}

/*
 * Milliseconds since the last congestion event
 */
static uint32_t
tcp_cubic_diff_ctime(struct tcpcb *tp)
{
	struct timeval now_timeval;

	getmicrouptime(&now_timeval);
	return now_timeval.tv_sec * 1000 + now_timeval.tv_usec / 1000 -
	    tp->snd_cubic_ctime;
}

/*
 * Approximate cubic root
 */
#define CBRT_ROUNDS 30
static uint32_t
tcp_cubic_cbrt(uint32_t v)
{
	int i, rounds = CBRT_ROUNDS;
	uint64_t x = v / 3;

	/* The iteration does not converge correctly for small values */
	if (v == 0)
		return 0;
	else if (v < 4)
		return 1;

	/*
	 * 2097151 (2^21 - 1) is the largest x for which 2*x^3 still
	 * fits in 64 bits; above it, use the slower overflow-avoiding
	 * form of the iteration and add extra rounds to compensate.
	 */
	if (x > 2097151)
		rounds += 10;

	for (i = 0; i < rounds; i++)
		if (rounds == CBRT_ROUNDS)
			x = (v + 2 * x * x * x) / (3 * x * x);
		else
			/* Avoid overflow */
			x = v / (3 * x * x) + 2 * x / 3;

	return (uint32_t)x;
}
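
/*
 * The loop above is the Newton-Raphson iteration for f(x) = x^3 - v:
 *
 *	x' = x - f(x)/f'(x) = (v + 2*x^3) / (3*x^2)
 *
 * rewritten as v/(3*x^2) + 2*x/3 when the cubic term could overflow.
 * E.g. v = 27000 converges to x = 30.
 */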

/* Draft Rhee Section 3.1 - get W(t) */
static uint32_t
tcp_cubic_getW(struct tcpcb *tp)
{
	uint32_t ms_elapsed = tcp_cubic_diff_ctime(tp);
	uint32_t K, CtK;

	K = tcp_cubic_cbrt(tp->snd_cubic_wmax * CUBIC_BETAA / CUBIC_BETAB *
	    CUBIC_CB / CUBIC_CA);
	/* C*(t-K) */
	CtK = CUBIC_CA * (ms_elapsed - K) / CUBIC_CB;

	return CtK * CtK * CtK + tp->snd_cubic_wmax;
}
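
/*
 * Per the draft, W(t) = C*(t - K)^3 + W_max, where K is the time the
 * cubic curve takes to climb back from the reduced window to W_max.
 * Here t is milliseconds since the last congestion event and the
 * constants are folded in through the fixed-point BETA and C pairs
 * defined above.
 */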

static void
tcp_cubic_congestion_exp(struct tcpcb *tp)
{

	tcp_cubic_update_ctime(tp);

	/* Section 3.6 - Fast Convergence */
	if (tp->snd_cubic_wmax < tp->snd_cubic_wmax_last) {
		tp->snd_cubic_wmax_last = tp->snd_cubic_wmax;
		tp->snd_cubic_wmax = tp->snd_cubic_wmax / 2 +
		    tp->snd_cubic_wmax * CUBIC_BETAA / CUBIC_BETAB / 2;
	} else {
		tp->snd_cubic_wmax_last = tp->snd_cubic_wmax;
		tp->snd_cubic_wmax = tp->snd_cwnd;
	}
	tcp_common_congestion_exp(tp, CUBIC_BETAA, CUBIC_BETAB);
}
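
/*
 * Fast-convergence example: when a congestion event arrives before
 * the window has regained its previous peak, the remembered W_max is
 * scaled by (1 + BETAA/BETAB)/2 = 9/10 above, releasing bandwidth to
 * competing flows; e.g. a snd_cubic_wmax of 10000 becomes 9000.
 */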

static int
tcp_cubic_fast_retransmit(struct tcpcb *tp, const struct tcphdr *th)
{

	if (SEQ_LT(th->th_ack, tp->snd_high)) {
		/* See newreno */
		tp->t_dupacks = 0;
		return 1;
	}

	/*
	 * do CUBIC if not in fast recovery
	 */
	if (tp->t_partialacks < 0) {
		/* Adjust W_max, W_max_last, cwnd and ssthresh */
		tcp_cubic_congestion_exp(tp);
		/* Reno and NewReno FR */
		return tcp_reno_do_fast_retransmit(tp, th);
	} else
		return tcp_reno_fast_retransmit(tp, th);
}

static void
tcp_cubic_newack(struct tcpcb *tp, const struct tcphdr *th)
{
	uint32_t ms_elapsed, rtt;
	u_long w_tcp;

	/* Congestion avoidance and not in fast recovery */
	if (tp->snd_cwnd > tp->snd_ssthresh && tp->t_partialacks < 0) {
		ms_elapsed = tcp_cubic_diff_ctime(tp);

		rtt = max(hztoms(1), hztoms(tp->t_srtt >> TCP_RTT_SHIFT));

		/* Compute W_tcp(t) - XXX should use BETA defines */
		w_tcp = tp->snd_cubic_wmax * 4 / 5 +
		    ms_elapsed / rtt / 3;

		if (tp->snd_cwnd > w_tcp) {
			/* Not in TCP mode */
			tp->snd_cwnd += (tcp_cubic_getW(tp) - tp->snd_cwnd) /
			    tp->snd_cwnd;
		} else {
			/* friendly TCP mode */
			tp->snd_cwnd = w_tcp;
		}

		/* Make sure we are within limits */
		tp->snd_cwnd = max(tp->snd_cwnd, tp->t_segsz);
		tp->snd_cwnd = min(tp->snd_cwnd, TCP_MAXWIN << tp->snd_scale);
	} else {
		/* Use New Reno */
		tcp_newreno_newack(tp, th);
	}
}
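
/*
 * The w_tcp term above is the draft's TCP-friendly region: it
 * estimates the window a standard TCP flow would have reached by
 * now, and cwnd is pinned to it whenever the cubic curve falls
 * below, so CUBIC is never less aggressive than plain TCP.
 */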

static void
tcp_cubic_slow_retransmit(struct tcpcb *tp)
{

	/* Reset */
	tp->snd_cubic_wmax = tp->snd_cubic_wmax_last = tp->snd_cubic_ctime = 0;

	tcp_reno_slow_retransmit(tp);
}

const struct tcp_congctl tcp_cubic_ctl = {
	.fast_retransmit = tcp_cubic_fast_retransmit,
	.slow_retransmit = tcp_cubic_slow_retransmit,
	.fast_retransmit_newack = tcp_newreno_fast_retransmit_newack,
	.newack = tcp_cubic_newack,
	.cong_exp = tcp_cubic_congestion_exp,
};