1 /*	$NetBSD: tcp_subr.c,v 1.287 2021/03/08 18:17:27 christos Exp $	*/
2
3 /*
4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1997, 1998, 2000, 2001, 2008 The NetBSD Foundation, Inc.
34 * All rights reserved.
35 *
36 * This code is derived from software contributed to The NetBSD Foundation
37 * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
38 * Facility, NASA Ames Research Center.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 */
61
62 /*
63 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
64 * The Regents of the University of California. All rights reserved.
65 *
66 * Redistribution and use in source and binary forms, with or without
67 * modification, are permitted provided that the following conditions
68 * are met:
69 * 1. Redistributions of source code must retain the above copyright
70 * notice, this list of conditions and the following disclaimer.
71 * 2. Redistributions in binary form must reproduce the above copyright
72 * notice, this list of conditions and the following disclaimer in the
73 * documentation and/or other materials provided with the distribution.
74 * 3. Neither the name of the University nor the names of its contributors
75 * may be used to endorse or promote products derived from this software
76 * without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
79 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
80 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
81 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
82 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
83 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
84 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
86 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
87 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
88 * SUCH DAMAGE.
89 *
90 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
91 */
92
93 #include <sys/cdefs.h>
94 __KERNEL_RCSID(0, "$NetBSD: tcp_subr.c,v 1.287 2021/03/08 18:17:27 christos Exp $");
95
96 #ifdef _KERNEL_OPT
97 #include "opt_inet.h"
98 #include "opt_ipsec.h"
99 #include "opt_inet_csum.h"
100 #include "opt_mbuftrace.h"
101 #endif
102
103 #include <sys/param.h>
104 #include <sys/atomic.h>
105 #include <sys/proc.h>
106 #include <sys/systm.h>
107 #include <sys/mbuf.h>
108 #include <sys/once.h>
109 #include <sys/socket.h>
110 #include <sys/socketvar.h>
111 #include <sys/protosw.h>
112 #include <sys/errno.h>
113 #include <sys/kernel.h>
114 #include <sys/pool.h>
115 #include <sys/md5.h>
116 #include <sys/cprng.h>
117
118 #include <net/route.h>
119 #include <net/if.h>
120
121 #include <netinet/in.h>
122 #include <netinet/in_systm.h>
123 #include <netinet/ip.h>
124 #include <netinet/in_pcb.h>
125 #include <netinet/ip_var.h>
126 #include <netinet/ip_icmp.h>
127
128 #ifdef INET6
129 #include <netinet/ip6.h>
130 #include <netinet6/in6_pcb.h>
131 #include <netinet6/ip6_var.h>
132 #include <netinet6/in6_var.h>
133 #include <netinet6/ip6protosw.h>
134 #include <netinet/icmp6.h>
135 #include <netinet6/nd6.h>
136 #endif
137
138 #include <netinet/tcp.h>
139 #include <netinet/tcp_fsm.h>
140 #include <netinet/tcp_seq.h>
141 #include <netinet/tcp_timer.h>
142 #include <netinet/tcp_var.h>
143 #include <netinet/tcp_vtw.h>
144 #include <netinet/tcp_private.h>
145 #include <netinet/tcp_congctl.h>
146
147 #ifdef IPSEC
148 #include <netipsec/ipsec.h>
149 #ifdef INET6
150 #include <netipsec/ipsec6.h>
151 #endif
152 #include <netipsec/key.h>
153 #endif
154
155
156 struct inpcbtable tcbtable; /* head of queue of active tcpcb's */
157 u_int32_t tcp_now; /* slow ticks, for RFC 1323 timestamps */
158
159 percpu_t *tcpstat_percpu;
160
161 /* patchable/settable parameters for tcp */
162 int tcp_mssdflt = TCP_MSS;
163 int tcp_minmss = TCP_MINMSS;
164 int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
165 int tcp_do_rfc1323 = 1; /* window scaling / timestamps (obsolete) */
166 int tcp_do_rfc1948 = 0; /* ISS by cryptographic hash */
167 int tcp_do_sack = 1; /* selective acknowledgement */
168 int tcp_do_win_scale = 1; /* RFC1323 window scaling */
169 int tcp_do_timestamps = 1; /* RFC1323 timestamps */
170 int tcp_ack_on_push = 0; /* set to enable immediate ACK-on-PUSH */
171 int tcp_do_ecn = 0; /* Explicit Congestion Notification */
172 #ifndef TCP_INIT_WIN
173 #define TCP_INIT_WIN 4 /* initial slow start window */
174 #endif
175 #ifndef TCP_INIT_WIN_LOCAL
176 #define TCP_INIT_WIN_LOCAL 4 /* initial slow start window for local nets */
177 #endif
178 /*
179 * Up to iw == 5 the cap grows roughly linearly, reaching 3 * 1460 at iw == 4;
180 * from then on it is simply iw * 1460. This preserves the previous behavior for iw == 4.
181 */
182 int tcp_init_win_max[] = {
183 1 * 1460,
184 1 * 1460,
185 2 * 1460,
186 2 * 1460,
187 3 * 1460,
188 5 * 1460,
189 6 * 1460,
190 7 * 1460,
191 8 * 1460,
192 9 * 1460,
193 10 * 1460
194 };
195 int tcp_init_win = TCP_INIT_WIN;
196 int tcp_init_win_local = TCP_INIT_WIN_LOCAL;
197 int tcp_mss_ifmtu = 0;
198 int tcp_rst_ppslim = 100; /* 100pps */
199 int tcp_ackdrop_ppslim = 100; /* 100pps */
200 int tcp_do_loopback_cksum = 0;
201 int tcp_do_abc = 1; /* RFC3465 Appropriate byte counting. */
202 int tcp_abc_aggressive = 1; /* 1: L=2*SMSS 0: L=1*SMSS */
203 int tcp_sack_tp_maxholes = 32;
204 int tcp_sack_globalmaxholes = 1024;
205 int tcp_sack_globalholes = 0;
206 int tcp_ecn_maxretries = 1;
207 int tcp_msl_enable = 1; /* enable TIME_WAIT truncation */
208 int tcp_msl_loop = PR_SLOWHZ; /* MSL for loopback */
209 int tcp_msl_local = 5 * PR_SLOWHZ; /* MSL for 'local' */
210 int tcp_msl_remote = TCPTV_MSL; /* MSL otherwise */
211 int tcp_msl_remote_threshold = TCPTV_SRTTDFLT; /* RTT threshold */
212 int tcp_rttlocal = 0; /* Use RTT to decide who's 'local' */
213
214 int tcp4_vtw_enable = 0; /* 1 to enable */
215 int tcp6_vtw_enable = 0; /* 1 to enable */
216 int tcp_vtw_was_enabled = 0;
217 int tcp_vtw_entries = 1 << 4; /* 16 vestigial TIME_WAIT entries */
218
219 /* tcb hash */
220 #ifndef TCBHASHSIZE
221 #define TCBHASHSIZE 128
222 #endif
223 int tcbhashsize = TCBHASHSIZE;
224
225 /* syn hash parameters */
226 #define TCP_SYN_HASH_SIZE 293
227 #define TCP_SYN_BUCKET_SIZE 35
228 int tcp_syn_cache_size = TCP_SYN_HASH_SIZE;
229 int tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
230 int tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
231 struct syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
232
233 int tcp_freeq(struct tcpcb *);
234 static int tcp_iss_secret_init(void);
235
236 static void tcp_mtudisc_callback(struct in_addr);
237
238 #ifdef INET6
239 static void tcp6_mtudisc(struct in6pcb *, int);
240 #endif
241
242 static struct pool tcpcb_pool;
243
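/* Set when a protocol drain has been requested; serviced in tcp_fasttimo(). */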
244 static int tcp_drainwanted;
245
246 #ifdef TCP_CSUM_COUNTERS
247 #include <sys/device.h>
248
249 struct evcnt tcp_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
250 NULL, "tcp", "hwcsum bad");
251 struct evcnt tcp_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
252 NULL, "tcp", "hwcsum ok");
253 struct evcnt tcp_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
254 NULL, "tcp", "hwcsum data");
255 struct evcnt tcp_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
256 NULL, "tcp", "swcsum");
257
258 EVCNT_ATTACH_STATIC(tcp_hwcsum_bad);
259 EVCNT_ATTACH_STATIC(tcp_hwcsum_ok);
260 EVCNT_ATTACH_STATIC(tcp_hwcsum_data);
261 EVCNT_ATTACH_STATIC(tcp_swcsum);
262
263 #if defined(INET6)
264 struct evcnt tcp6_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
265 NULL, "tcp6", "hwcsum bad");
266 struct evcnt tcp6_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
267 NULL, "tcp6", "hwcsum ok");
268 struct evcnt tcp6_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
269 NULL, "tcp6", "hwcsum data");
270 struct evcnt tcp6_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
271 NULL, "tcp6", "swcsum");
272
273 EVCNT_ATTACH_STATIC(tcp6_hwcsum_bad);
274 EVCNT_ATTACH_STATIC(tcp6_hwcsum_ok);
275 EVCNT_ATTACH_STATIC(tcp6_hwcsum_data);
276 EVCNT_ATTACH_STATIC(tcp6_swcsum);
277 #endif /* defined(INET6) */
278 #endif /* TCP_CSUM_COUNTERS */
279
280
281 #ifdef TCP_OUTPUT_COUNTERS
282 #include <sys/device.h>
283
284 struct evcnt tcp_output_bigheader = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
285 NULL, "tcp", "output big header");
286 struct evcnt tcp_output_predict_hit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
287 NULL, "tcp", "output predict hit");
288 struct evcnt tcp_output_predict_miss = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
289 NULL, "tcp", "output predict miss");
290 struct evcnt tcp_output_copysmall = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
291 NULL, "tcp", "output copy small");
292 struct evcnt tcp_output_copybig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
293 NULL, "tcp", "output copy big");
294 struct evcnt tcp_output_refbig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
295 NULL, "tcp", "output reference big");
296
297 EVCNT_ATTACH_STATIC(tcp_output_bigheader);
298 EVCNT_ATTACH_STATIC(tcp_output_predict_hit);
299 EVCNT_ATTACH_STATIC(tcp_output_predict_miss);
300 EVCNT_ATTACH_STATIC(tcp_output_copysmall);
301 EVCNT_ATTACH_STATIC(tcp_output_copybig);
302 EVCNT_ATTACH_STATIC(tcp_output_refbig);
303
304 #endif /* TCP_OUTPUT_COUNTERS */
305
306 #ifdef TCP_REASS_COUNTERS
307 #include <sys/device.h>
308
309 struct evcnt tcp_reass_ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
310 NULL, "tcp_reass", "calls");
311 struct evcnt tcp_reass_empty = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
312 &tcp_reass_, "tcp_reass", "insert into empty queue");
313 struct evcnt tcp_reass_iteration[8] = {
314 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", ">7 iterations"),
315 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "1 iteration"),
316 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "2 iterations"),
317 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "3 iterations"),
318 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "4 iterations"),
319 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "5 iterations"),
320 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "6 iterations"),
321 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "7 iterations"),
322 };
323 struct evcnt tcp_reass_prependfirst = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
324 &tcp_reass_, "tcp_reass", "prepend to first");
325 struct evcnt tcp_reass_prepend = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
326 &tcp_reass_, "tcp_reass", "prepend");
327 struct evcnt tcp_reass_insert = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
328 &tcp_reass_, "tcp_reass", "insert");
329 struct evcnt tcp_reass_inserttail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
330 &tcp_reass_, "tcp_reass", "insert at tail");
331 struct evcnt tcp_reass_append = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
332 &tcp_reass_, "tcp_reass", "append");
333 struct evcnt tcp_reass_appendtail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
334 &tcp_reass_, "tcp_reass", "append to tail fragment");
335 struct evcnt tcp_reass_overlaptail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
336 &tcp_reass_, "tcp_reass", "overlap at end");
337 struct evcnt tcp_reass_overlapfront = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
338 &tcp_reass_, "tcp_reass", "overlap at start");
339 struct evcnt tcp_reass_segdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
340 &tcp_reass_, "tcp_reass", "duplicate segment");
341 struct evcnt tcp_reass_fragdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
342 &tcp_reass_, "tcp_reass", "duplicate fragment");
343
344 EVCNT_ATTACH_STATIC(tcp_reass_);
345 EVCNT_ATTACH_STATIC(tcp_reass_empty);
346 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 0);
347 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 1);
348 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 2);
349 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 3);
350 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 4);
351 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 5);
352 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 6);
353 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 7);
354 EVCNT_ATTACH_STATIC(tcp_reass_prependfirst);
355 EVCNT_ATTACH_STATIC(tcp_reass_prepend);
356 EVCNT_ATTACH_STATIC(tcp_reass_insert);
357 EVCNT_ATTACH_STATIC(tcp_reass_inserttail);
358 EVCNT_ATTACH_STATIC(tcp_reass_append);
359 EVCNT_ATTACH_STATIC(tcp_reass_appendtail);
360 EVCNT_ATTACH_STATIC(tcp_reass_overlaptail);
361 EVCNT_ATTACH_STATIC(tcp_reass_overlapfront);
362 EVCNT_ATTACH_STATIC(tcp_reass_segdup);
363 EVCNT_ATTACH_STATIC(tcp_reass_fragdup);
364
365 #endif /* TCP_REASS_COUNTERS */
366
367 #ifdef MBUFTRACE
368 struct mowner tcp_mowner = MOWNER_INIT("tcp", "");
369 struct mowner tcp_rx_mowner = MOWNER_INIT("tcp", "rx");
370 struct mowner tcp_tx_mowner = MOWNER_INIT("tcp", "tx");
371 struct mowner tcp_sock_mowner = MOWNER_INIT("tcp", "sock");
372 struct mowner tcp_sock_rx_mowner = MOWNER_INIT("tcp", "sock rx");
373 struct mowner tcp_sock_tx_mowner = MOWNER_INIT("tcp", "sock tx");
374 #endif
375
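/*
 * One-time TCP subsystem initialization: set up the PCB table and tcpcb
 * pool, the timers, the syn cache, congestion control, the TCPCB template,
 * the reassembly queue, SACK state and the per-cpu statistics.  Run exactly
 * once via RUN_ONCE() from tcp_init_common().
 */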
376 static int
377 do_tcpinit(void)
378 {
379
380 in_pcbinit(&tcbtable, tcbhashsize, tcbhashsize);
381 pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
382 NULL, IPL_SOFTNET);
383
384 tcp_usrreq_init();
385
386 /* Initialize timer state. */
387 tcp_timer_init();
388
389 /* Initialize the compressed state engine. */
390 syn_cache_init();
391
392 /* Initialize the congestion control algorithms. */
393 tcp_congctl_init();
394
395 /* Initialize the TCPCB template. */
396 tcp_tcpcb_template();
397
398 /* Initialize reassembly queue */
399 tcpipqent_init();
400
401 /* SACK */
402 tcp_sack_init();
403
404 MOWNER_ATTACH(&tcp_tx_mowner);
405 MOWNER_ATTACH(&tcp_rx_mowner);
406 MOWNER_ATTACH(&tcp_reass_mowner);
407 MOWNER_ATTACH(&tcp_sock_mowner);
408 MOWNER_ATTACH(&tcp_sock_tx_mowner);
409 MOWNER_ATTACH(&tcp_sock_rx_mowner);
410 MOWNER_ATTACH(&tcp_mowner);
411
412 tcpstat_percpu = percpu_alloc(sizeof(uint64_t) * TCP_NSTATS);
413
414 vtw_earlyinit();
415
416 tcp_slowtimo_init();
417
418 return 0;
419 }
420
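/*
 * Common initialization, called with the size of the network-level header
 * in use (tcp_init() passes sizeof(struct ip)): grow max_protohdr to cover
 * that header plus a TCP header, panic if the result no longer fits in an
 * mbuf header, then run do_tcpinit() exactly once.
 */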
421 void
422 tcp_init_common(unsigned basehlen)
423 {
424 static ONCE_DECL(dotcpinit);
425 unsigned hlen = basehlen + sizeof(struct tcphdr);
426 unsigned oldhlen;
427
428 if (max_linkhdr + hlen > MHLEN)
429 panic("tcp_init");
430 while ((oldhlen = max_protohdr) < hlen)
431 atomic_cas_uint(&max_protohdr, oldhlen, hlen);
432
433 RUN_ONCE(&dotcpinit, do_tcpinit);
434 }
435
436 /*
437 * TCP initialization
438 */
439 void
440 tcp_init(void)
441 {
442
443 icmp_mtudisc_callback_register(tcp_mtudisc_callback);
444
445 tcp_init_common(sizeof(struct ip));
446 }
447
448 /*
449 * Create template to be used to send tcp packets on a connection.
450 * Call after host entry created, allocates an mbuf and fills
451 * in a skeletal tcp/ip header, minimizing the amount of work
452 * necessary when the connection is used.
453 */
454 struct mbuf *
455 tcp_template(struct tcpcb *tp)
456 {
457 struct inpcb *inp = tp->t_inpcb;
458 #ifdef INET6
459 struct in6pcb *in6p = tp->t_in6pcb;
460 #endif
461 struct tcphdr *n;
462 struct mbuf *m;
463 int hlen;
464
465 switch (tp->t_family) {
466 case AF_INET:
467 hlen = sizeof(struct ip);
468 if (inp)
469 break;
470 #ifdef INET6
471 if (in6p) {
472 /* mapped addr case */
473 if (IN6_IS_ADDR_V4MAPPED(&in6p->in6p_laddr)
474 && IN6_IS_ADDR_V4MAPPED(&in6p->in6p_faddr))
475 break;
476 }
477 #endif
478 return NULL; /*EINVAL*/
479 #ifdef INET6
480 case AF_INET6:
481 hlen = sizeof(struct ip6_hdr);
482 if (in6p) {
483 /* more sanity checks? */
484 break;
485 }
486 return NULL; /*EINVAL*/
487 #endif
488 default:
489 return NULL; /*EAFNOSUPPORT*/
490 }
491
492 KASSERT(hlen + sizeof(struct tcphdr) <= MCLBYTES);
493
494 m = tp->t_template;
495 if (m && m->m_len == hlen + sizeof(struct tcphdr)) {
496 ;
497 } else {
498 if (m)
499 m_freem(m);
500 m = tp->t_template = NULL;
501 MGETHDR(m, M_DONTWAIT, MT_HEADER);
502 if (m && hlen + sizeof(struct tcphdr) > MHLEN) {
503 MCLGET(m, M_DONTWAIT);
504 if ((m->m_flags & M_EXT) == 0) {
505 m_free(m);
506 m = NULL;
507 }
508 }
509 if (m == NULL)
510 return NULL;
511 MCLAIM(m, &tcp_mowner);
512 m->m_pkthdr.len = m->m_len = hlen + sizeof(struct tcphdr);
513 }
514
515 memset(mtod(m, void *), 0, m->m_len);
516
517 n = (struct tcphdr *)(mtod(m, char *) + hlen);
518
519 switch (tp->t_family) {
520 case AF_INET:
521 {
522 struct ipovly *ipov;
523 mtod(m, struct ip *)->ip_v = 4;
524 mtod(m, struct ip *)->ip_hl = hlen >> 2;
525 ipov = mtod(m, struct ipovly *);
526 ipov->ih_pr = IPPROTO_TCP;
527 ipov->ih_len = htons(sizeof(struct tcphdr));
528 if (inp) {
529 ipov->ih_src = inp->inp_laddr;
530 ipov->ih_dst = inp->inp_faddr;
531 }
532 #ifdef INET6
533 else if (in6p) {
534 /* mapped addr case */
535 bcopy(&in6p->in6p_laddr.s6_addr32[3], &ipov->ih_src,
536 sizeof(ipov->ih_src));
537 bcopy(&in6p->in6p_faddr.s6_addr32[3], &ipov->ih_dst,
538 sizeof(ipov->ih_dst));
539 }
540 #endif
541
542 /*
543 * Compute the pseudo-header portion of the checksum
544 * now. We incrementally add in the TCP option and
545 * payload lengths later, and then compute the TCP
546 * checksum right before the packet is sent off onto
547 * the wire.
548 */
549 n->th_sum = in_cksum_phdr(ipov->ih_src.s_addr,
550 ipov->ih_dst.s_addr,
551 htons(sizeof(struct tcphdr) + IPPROTO_TCP));
552 break;
553 }
554 #ifdef INET6
555 case AF_INET6:
556 {
557 struct ip6_hdr *ip6;
558 mtod(m, struct ip *)->ip_v = 6;
559 ip6 = mtod(m, struct ip6_hdr *);
560 ip6->ip6_nxt = IPPROTO_TCP;
561 ip6->ip6_plen = htons(sizeof(struct tcphdr));
562 ip6->ip6_src = in6p->in6p_laddr;
563 ip6->ip6_dst = in6p->in6p_faddr;
564 ip6->ip6_flow = in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK;
565 if (ip6_auto_flowlabel) {
566 ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
567 ip6->ip6_flow |=
568 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
569 }
570 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
571 ip6->ip6_vfc |= IPV6_VERSION;
572
573 /*
574 * Compute the pseudo-header portion of the checksum
575 * now. We incrementally add in the TCP option and
576 * payload lengths later, and then compute the TCP
577 * checksum right before the packet is sent off onto
578 * the wire.
579 */
580 n->th_sum = in6_cksum_phdr(&in6p->in6p_laddr,
581 &in6p->in6p_faddr, htonl(sizeof(struct tcphdr)),
582 htonl(IPPROTO_TCP));
583 break;
584 }
585 #endif
586 }
587
588 if (inp) {
589 n->th_sport = inp->inp_lport;
590 n->th_dport = inp->inp_fport;
591 }
592 #ifdef INET6
593 else if (in6p) {
594 n->th_sport = in6p->in6p_lport;
595 n->th_dport = in6p->in6p_fport;
596 }
597 #endif
598
599 n->th_seq = 0;
600 n->th_ack = 0;
601 n->th_x2 = 0;
602 n->th_off = 5;
603 n->th_flags = 0;
604 n->th_win = 0;
605 n->th_urp = 0;
606 return m;
607 }
608
609 /*
610 * Send a single message to the TCP at address specified by
611 * the given TCP/IP header. If m == NULL, then we make a copy
612 * of the header at mtemplate and send directly to the addressed host.
613 * This is used to force keep alive messages out using the TCP
614 * template for a connection tp->t_template. If flags are given
615 * then we send a message back to the TCP which originated the
616 * segment th0 (carried in m), and discard the mbuf containing it
617 * and any other attached mbufs.
618 *
619 * In any case the ack and sequence number of the transmitted
620 * segment are as specified by the parameters.
621 */
622 int
623 tcp_respond(struct tcpcb *tp, struct mbuf *mtemplate, struct mbuf *m,
624 struct tcphdr *th0, tcp_seq ack, tcp_seq seq, int flags)
625 {
626 struct route *ro;
627 int error, tlen, win = 0;
628 int hlen;
629 struct ip *ip;
630 #ifdef INET6
631 struct ip6_hdr *ip6;
632 #endif
633 int family; /* family on packet, not inpcb/in6pcb! */
634 struct tcphdr *th;
635
636 if (tp != NULL && (flags & TH_RST) == 0) {
637 KASSERT(!(tp->t_inpcb && tp->t_in6pcb));
638
639 if (tp->t_inpcb)
640 win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
641 #ifdef INET6
642 if (tp->t_in6pcb)
643 win = sbspace(&tp->t_in6pcb->in6p_socket->so_rcv);
644 #endif
645 }
646
647 th = NULL; /* Quell uninitialized warning */
648 ip = NULL;
649 #ifdef INET6
650 ip6 = NULL;
651 #endif
652 if (m == NULL) {
653 if (!mtemplate)
654 return EINVAL;
655
656 /* get family information from template */
657 switch (mtod(mtemplate, struct ip *)->ip_v) {
658 case 4:
659 family = AF_INET;
660 hlen = sizeof(struct ip);
661 break;
662 #ifdef INET6
663 case 6:
664 family = AF_INET6;
665 hlen = sizeof(struct ip6_hdr);
666 break;
667 #endif
668 default:
669 return EAFNOSUPPORT;
670 }
671
672 MGETHDR(m, M_DONTWAIT, MT_HEADER);
673 if (m) {
674 MCLAIM(m, &tcp_tx_mowner);
675 MCLGET(m, M_DONTWAIT);
676 if ((m->m_flags & M_EXT) == 0) {
677 m_free(m);
678 m = NULL;
679 }
680 }
681 if (m == NULL)
682 return ENOBUFS;
683
684 tlen = 0;
685
686 m->m_data += max_linkhdr;
687 bcopy(mtod(mtemplate, void *), mtod(m, void *),
688 mtemplate->m_len);
689 switch (family) {
690 case AF_INET:
691 ip = mtod(m, struct ip *);
692 th = (struct tcphdr *)(ip + 1);
693 break;
694 #ifdef INET6
695 case AF_INET6:
696 ip6 = mtod(m, struct ip6_hdr *);
697 th = (struct tcphdr *)(ip6 + 1);
698 break;
699 #endif
700 }
701 flags = TH_ACK;
702 } else {
703 if ((m->m_flags & M_PKTHDR) == 0) {
704 m_freem(m);
705 return EINVAL;
706 }
707 KASSERT(th0 != NULL);
708
709 /* get family information from m */
710 switch (mtod(m, struct ip *)->ip_v) {
711 case 4:
712 family = AF_INET;
713 hlen = sizeof(struct ip);
714 ip = mtod(m, struct ip *);
715 break;
716 #ifdef INET6
717 case 6:
718 family = AF_INET6;
719 hlen = sizeof(struct ip6_hdr);
720 ip6 = mtod(m, struct ip6_hdr *);
721 break;
722 #endif
723 default:
724 m_freem(m);
725 return EAFNOSUPPORT;
726 }
727 /* clear h/w csum flags inherited from rx packet */
728 m->m_pkthdr.csum_flags = 0;
729
730 if ((flags & TH_SYN) == 0 || sizeof(*th0) > (th0->th_off << 2))
731 tlen = sizeof(*th0);
732 else
733 tlen = th0->th_off << 2;
734
735 if (m->m_len > hlen + tlen && (m->m_flags & M_EXT) == 0 &&
736 mtod(m, char *) + hlen == (char *)th0) {
737 m->m_len = hlen + tlen;
738 m_freem(m->m_next);
739 m->m_next = NULL;
740 } else {
741 struct mbuf *n;
742
743 KASSERT(max_linkhdr + hlen + tlen <= MCLBYTES);
744
745 MGETHDR(n, M_DONTWAIT, MT_HEADER);
746 if (n && max_linkhdr + hlen + tlen > MHLEN) {
747 MCLGET(n, M_DONTWAIT);
748 if ((n->m_flags & M_EXT) == 0) {
749 m_freem(n);
750 n = NULL;
751 }
752 }
753 if (!n) {
754 m_freem(m);
755 return ENOBUFS;
756 }
757
758 MCLAIM(n, &tcp_tx_mowner);
759 n->m_data += max_linkhdr;
760 n->m_len = hlen + tlen;
761 m_copyback(n, 0, hlen, mtod(m, void *));
762 m_copyback(n, hlen, tlen, (void *)th0);
763
764 m_freem(m);
765 m = n;
766 n = NULL;
767 }
768
769 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
770 switch (family) {
771 case AF_INET:
772 ip = mtod(m, struct ip *);
773 th = (struct tcphdr *)(ip + 1);
774 ip->ip_p = IPPROTO_TCP;
775 xchg(ip->ip_dst, ip->ip_src, struct in_addr);
776 ip->ip_p = IPPROTO_TCP;
777 break;
778 #ifdef INET6
779 case AF_INET6:
780 ip6 = mtod(m, struct ip6_hdr *);
781 th = (struct tcphdr *)(ip6 + 1);
782 ip6->ip6_nxt = IPPROTO_TCP;
783 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
784 ip6->ip6_nxt = IPPROTO_TCP;
785 break;
786 #endif
787 }
788 xchg(th->th_dport, th->th_sport, u_int16_t);
789 #undef xchg
790 tlen = 0; /* be friendly with the following code */
791 }
792 th->th_seq = htonl(seq);
793 th->th_ack = htonl(ack);
794 th->th_x2 = 0;
795 if ((flags & TH_SYN) == 0) {
796 if (tp)
797 win >>= tp->rcv_scale;
798 if (win > TCP_MAXWIN)
799 win = TCP_MAXWIN;
800 th->th_win = htons((u_int16_t)win);
801 th->th_off = sizeof (struct tcphdr) >> 2;
802 tlen += sizeof(*th);
803 } else {
804 tlen += th->th_off << 2;
805 }
806 m->m_len = hlen + tlen;
807 m->m_pkthdr.len = hlen + tlen;
808 m_reset_rcvif(m);
809 th->th_flags = flags;
810 th->th_urp = 0;
811
812 switch (family) {
813 case AF_INET:
814 {
815 struct ipovly *ipov = (struct ipovly *)ip;
816 memset(ipov->ih_x1, 0, sizeof ipov->ih_x1);
817 ipov->ih_len = htons((u_int16_t)tlen);
818
819 th->th_sum = 0;
820 th->th_sum = in_cksum(m, hlen + tlen);
821 ip->ip_len = htons(hlen + tlen);
822 ip->ip_ttl = ip_defttl;
823 break;
824 }
825 #ifdef INET6
826 case AF_INET6:
827 {
828 th->th_sum = 0;
829 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
830 tlen);
831 ip6->ip6_plen = htons(tlen);
832 if (tp && tp->t_in6pcb)
833 ip6->ip6_hlim = in6_selecthlim_rt(tp->t_in6pcb);
834 else
835 ip6->ip6_hlim = ip6_defhlim;
836 ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK;
837 if (ip6_auto_flowlabel) {
838 ip6->ip6_flow |=
839 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
840 }
841 break;
842 }
843 #endif
844 }
845
846 if (tp != NULL && tp->t_inpcb != NULL) {
847 ro = &tp->t_inpcb->inp_route;
848 KASSERT(family == AF_INET);
849 KASSERT(in_hosteq(ip->ip_dst, tp->t_inpcb->inp_faddr));
850 }
851 #ifdef INET6
852 else if (tp != NULL && tp->t_in6pcb != NULL) {
853 ro = (struct route *)&tp->t_in6pcb->in6p_route;
854
855 #ifdef DIAGNOSTIC
856 if (family == AF_INET) {
857 if (!IN6_IS_ADDR_V4MAPPED(&tp->t_in6pcb->in6p_faddr))
858 panic("tcp_respond: not mapped addr");
859 if (memcmp(&ip->ip_dst,
860 &tp->t_in6pcb->in6p_faddr.s6_addr32[3],
861 sizeof(ip->ip_dst)) != 0) {
862 panic("tcp_respond: ip_dst != in6p_faddr");
863 }
864 } else if (family == AF_INET6) {
865 if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
866 &tp->t_in6pcb->in6p_faddr))
867 panic("tcp_respond: ip6_dst != in6p_faddr");
868 } else
869 panic("tcp_respond: address family mismatch");
870 #endif
871 }
872 #endif
873 else
874 ro = NULL;
875
876 switch (family) {
877 case AF_INET:
878 error = ip_output(m, NULL, ro,
879 (tp && tp->t_mtudisc ? IP_MTUDISC : 0), NULL,
880 tp ? tp->t_inpcb : NULL);
881 break;
882 #ifdef INET6
883 case AF_INET6:
884 error = ip6_output(m, NULL, ro, 0, NULL,
885 tp ? tp->t_in6pcb : NULL, NULL);
886 break;
887 #endif
888 default:
889 error = EAFNOSUPPORT;
890 break;
891 }
892
893 return error;
894 }
895
896 /*
897 * Template TCPCB. Rather than zeroing a new TCPCB and initializing
898 * a bunch of members individually, we maintain this template for the
899 * static and mostly-static components of the TCPCB, and copy it into
900 * the new TCPCB instead.
901 */
902 static struct tcpcb tcpcb_template = {
903 .t_srtt = TCPTV_SRTTBASE,
904 .t_rttmin = TCPTV_MIN,
905
906 .snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT,
907 .snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT,
908 .snd_numholes = 0,
909 .snd_cubic_wmax = 0,
910 .snd_cubic_wmax_last = 0,
911 .snd_cubic_ctime = 0,
912
913 .t_partialacks = -1,
914 .t_bytes_acked = 0,
915 .t_sndrexmitpack = 0,
916 .t_rcvoopack = 0,
917 .t_sndzerowin = 0,
918 };
919
920 /*
921 * Updates the TCPCB template whenever a parameter that would affect
922 * the template is changed.
923 */
924 void
925 tcp_tcpcb_template(void)
926 {
927 struct tcpcb *tp = &tcpcb_template;
928 int flags;
929
930 tp->t_peermss = tcp_mssdflt;
931 tp->t_ourmss = tcp_mssdflt;
932 tp->t_segsz = tcp_mssdflt;
933
934 flags = 0;
935 if (tcp_do_rfc1323 && tcp_do_win_scale)
936 flags |= TF_REQ_SCALE;
937 if (tcp_do_rfc1323 && tcp_do_timestamps)
938 flags |= TF_REQ_TSTMP;
939 tp->t_flags = flags;
940
941 /*
942 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
943 * rtt estimate. Set rttvar so that srtt + 2 * rttvar gives a
944 * reasonable initial retransmit time.
945 */
946 tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
947 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
948 TCPTV_MIN, TCPTV_REXMTMAX);
949
950 /* Keep Alive */
951 tp->t_keepinit = MIN(tcp_keepinit, TCP_TIMER_MAXTICKS);
952 tp->t_keepidle = MIN(tcp_keepidle, TCP_TIMER_MAXTICKS);
953 tp->t_keepintvl = MIN(tcp_keepintvl, TCP_TIMER_MAXTICKS);
954 tp->t_keepcnt = MAX(1, MIN(tcp_keepcnt, TCP_TIMER_MAXTICKS));
955 tp->t_maxidle = tp->t_keepcnt * MIN(tp->t_keepintvl,
956 TCP_TIMER_MAXTICKS/tp->t_keepcnt);
957
958 /* MSL */
959 tp->t_msl = TCPTV_MSL;
960 }
961
962 /*
963 * Create a new TCP control block, making an
964 * empty reassembly queue and hooking it to the argument
965 * protocol control block.
966 */
967 /* family selects inpcb, or in6pcb */
968 struct tcpcb *
969 tcp_newtcpcb(int family, void *aux)
970 {
971 struct tcpcb *tp;
972 int i;
973
974 /* XXX Consider using a pool_cache for speed. */
975 tp = pool_get(&tcpcb_pool, PR_NOWAIT); /* splsoftnet via tcp_usrreq */
976 if (tp == NULL)
977 return NULL;
978 memcpy(tp, &tcpcb_template, sizeof(*tp));
979 TAILQ_INIT(&tp->segq);
980 TAILQ_INIT(&tp->timeq);
981 tp->t_family = family; /* may be overridden later on */
982 TAILQ_INIT(&tp->snd_holes);
983 LIST_INIT(&tp->t_sc); /* XXX can template this */
984
985 /* Don't sweat this loop; hopefully the compiler will unroll it. */
986 for (i = 0; i < TCPT_NTIMERS; i++) {
987 callout_init(&tp->t_timer[i], CALLOUT_MPSAFE);
988 TCP_TIMER_INIT(tp, i);
989 }
990 callout_init(&tp->t_delack_ch, CALLOUT_MPSAFE);
991
992 switch (family) {
993 case AF_INET:
994 {
995 struct inpcb *inp = (struct inpcb *)aux;
996
997 inp->inp_ip.ip_ttl = ip_defttl;
998 inp->inp_ppcb = (void *)tp;
999
1000 tp->t_inpcb = inp;
1001 tp->t_mtudisc = ip_mtudisc;
1002 break;
1003 }
1004 #ifdef INET6
1005 case AF_INET6:
1006 {
1007 struct in6pcb *in6p = (struct in6pcb *)aux;
1008
1009 in6p->in6p_ip6.ip6_hlim = in6_selecthlim_rt(in6p);
1010 in6p->in6p_ppcb = (void *)tp;
1011
1012 tp->t_in6pcb = in6p;
1013 /* for IPv6, always try to run path MTU discovery */
1014 tp->t_mtudisc = 1;
1015 break;
1016 }
1017 #endif /* INET6 */
1018 default:
1019 for (i = 0; i < TCPT_NTIMERS; i++)
1020 callout_destroy(&tp->t_timer[i]);
1021 callout_destroy(&tp->t_delack_ch);
1022 pool_put(&tcpcb_pool, tp); /* splsoftnet via tcp_usrreq */
1023 return NULL;
1024 }
1025
1026 /*
1027 * Initialize our timebase. When we send timestamps, we take
1028 * the delta from tcp_now -- this means each connection always
1029 * gets a timebase of 1, which makes it, among other things,
1030 * more difficult to determine how long a system has been up,
1031 * and thus how many TCP sequence increments have occurred.
1032 *
1033 * We start with 1, because 0 doesn't work with linux, which
1034 * considers timestamp 0 in a SYN packet as a bug and disables
1035 * timestamps.
1036 */
1037 tp->ts_timebase = tcp_now - 1;
1038
1039 tcp_congctl_select(tp, tcp_congctl_global_name);
1040
1041 return tp;
1042 }
1043
1044 /*
1045 * Drop a TCP connection, reporting
1046 * the specified error. If connection is synchronized,
1047 * then send a RST to peer.
1048 */
1049 struct tcpcb *
1050 tcp_drop(struct tcpcb *tp, int errno)
1051 {
1052 struct socket *so = NULL;
1053
1054 KASSERT(!(tp->t_inpcb && tp->t_in6pcb));
1055
1056 if (tp->t_inpcb)
1057 so = tp->t_inpcb->inp_socket;
1058 #ifdef INET6
1059 if (tp->t_in6pcb)
1060 so = tp->t_in6pcb->in6p_socket;
1061 #endif
1062 if (!so)
1063 return NULL;
1064
1065 if (TCPS_HAVERCVDSYN(tp->t_state)) {
1066 tp->t_state = TCPS_CLOSED;
1067 (void) tcp_output(tp);
1068 TCP_STATINC(TCP_STAT_DROPS);
1069 } else
1070 TCP_STATINC(TCP_STAT_CONNDROPS);
1071 if (errno == ETIMEDOUT && tp->t_softerror)
1072 errno = tp->t_softerror;
1073 so->so_error = errno;
1074 return (tcp_close(tp));
1075 }
1076
1077 /*
1078 * Close a TCP control block:
1079 * discard all space held by the tcp
1080 * discard internet protocol block
1081 * wake up any sleepers
1082 */
1083 struct tcpcb *
1084 tcp_close(struct tcpcb *tp)
1085 {
1086 struct inpcb *inp;
1087 #ifdef INET6
1088 struct in6pcb *in6p;
1089 #endif
1090 struct socket *so;
1091 #ifdef RTV_RTT
1092 struct rtentry *rt = NULL;
1093 #endif
1094 struct route *ro;
1095 int j;
1096
1097 inp = tp->t_inpcb;
1098 #ifdef INET6
1099 in6p = tp->t_in6pcb;
1100 #endif
1101 so = NULL;
1102 ro = NULL;
1103 if (inp) {
1104 so = inp->inp_socket;
1105 ro = &inp->inp_route;
1106 }
1107 #ifdef INET6
1108 else if (in6p) {
1109 so = in6p->in6p_socket;
1110 ro = (struct route *)&in6p->in6p_route;
1111 }
1112 #endif
1113
1114 #ifdef RTV_RTT
1115 /*
1116 * If we sent enough data to get some meaningful characteristics,
1117 * save them in the routing entry. 'Enough' is arbitrarily
1118 * defined as the sendpipesize (default 4K) * 16. This would
1119 * give us 16 rtt samples assuming we only get one sample per
1120 * window (the usual case on a long haul net). 16 samples is
1121 * enough for the srtt filter to converge to within 5% of the correct
1122 * value; fewer samples and we could save a very bogus rtt.
1123 *
1124 * Don't update the default route's characteristics and don't
1125 * update anything that the user "locked".
1126 */
1127 if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
1128 ro && (rt = rtcache_validate(ro)) != NULL &&
1129 !in_nullhost(satocsin(rt_getkey(rt))->sin_addr)) {
1130 u_long i = 0;
1131
1132 if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
1133 i = tp->t_srtt *
1134 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
1135 if (rt->rt_rmx.rmx_rtt && i)
1136 /*
1137 * filter this update to half the old & half
1138 * the new values, converting scale.
1139 * See route.h and tcp_var.h for a
1140 * description of the scaling constants.
1141 */
1142 rt->rt_rmx.rmx_rtt =
1143 (rt->rt_rmx.rmx_rtt + i) / 2;
1144 else
1145 rt->rt_rmx.rmx_rtt = i;
1146 }
1147 if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
1148 i = tp->t_rttvar *
1149 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTTVAR_SHIFT + 2));
1150 if (rt->rt_rmx.rmx_rttvar && i)
1151 rt->rt_rmx.rmx_rttvar =
1152 (rt->rt_rmx.rmx_rttvar + i) / 2;
1153 else
1154 rt->rt_rmx.rmx_rttvar = i;
1155 }
1156 /*
1157 * update the pipelimit (ssthresh) if it has been updated
1158 * already or if a pipesize was specified & the threshold
1159 * got below half the pipesize. I.e., wait for bad news
1160 * before we start updating, then update on both good
1161 * and bad news.
1162 */
1163 if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
1164 (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) ||
1165 i < (rt->rt_rmx.rmx_sendpipe / 2)) {
1166 /*
1167 * convert the limit from user data bytes to
1168 * packets then to packet data bytes.
1169 */
1170 i = (i + tp->t_segsz / 2) / tp->t_segsz;
1171 if (i < 2)
1172 i = 2;
1173 i *= (u_long)(tp->t_segsz + sizeof (struct tcpiphdr));
1174 if (rt->rt_rmx.rmx_ssthresh)
1175 rt->rt_rmx.rmx_ssthresh =
1176 (rt->rt_rmx.rmx_ssthresh + i) / 2;
1177 else
1178 rt->rt_rmx.rmx_ssthresh = i;
1179 }
1180 }
1181 rtcache_unref(rt, ro);
1182 #endif /* RTV_RTT */
1183 /* free the reassembly queue, if any */
1184 TCP_REASS_LOCK(tp);
1185 (void) tcp_freeq(tp);
1186 TCP_REASS_UNLOCK(tp);
1187
1188 /* free the SACK holes list. */
1189 tcp_free_sackholes(tp);
1190 tcp_congctl_release(tp);
1191 syn_cache_cleanup(tp);
1192
1193 if (tp->t_template) {
1194 m_free(tp->t_template);
1195 tp->t_template = NULL;
1196 }
1197
1198 /*
1199 * Detaching the pcb will unlock the socket/tcpcb, and stopping
1200 * the timers can also drop the lock. We need to prevent access
1201 * to the tcpcb as it's half torn down. Flag the pcb as dead
1202 * (prevents access by timers) and only then detach it.
1203 */
1204 tp->t_flags |= TF_DEAD;
1205 if (inp) {
1206 inp->inp_ppcb = 0;
1207 soisdisconnected(so);
1208 in_pcbdetach(inp);
1209 }
1210 #ifdef INET6
1211 else if (in6p) {
1212 in6p->in6p_ppcb = 0;
1213 soisdisconnected(so);
1214 in6_pcbdetach(in6p);
1215 }
1216 #endif
1217 /*
1218 * pcb is no longer visible elsewhere, so we can safely release
1219 * the lock in callout_halt() if needed.
1220 */
1221 TCP_STATINC(TCP_STAT_CLOSED);
1222 for (j = 0; j < TCPT_NTIMERS; j++) {
1223 callout_halt(&tp->t_timer[j], softnet_lock);
1224 callout_destroy(&tp->t_timer[j]);
1225 }
1226 callout_halt(&tp->t_delack_ch, softnet_lock);
1227 callout_destroy(&tp->t_delack_ch);
1228 pool_put(&tcpcb_pool, tp);
1229
1230 return NULL;
1231 }
1232
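/*
 * Free the reassembly queue of the given tcpcb.  The caller must hold
 * the reassembly lock (TCP_REASS_LOCK).  Returns nonzero if any queued
 * segments were freed.
 */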
1233 int
1234 tcp_freeq(struct tcpcb *tp)
1235 {
1236 struct ipqent *qe;
1237 int rv = 0;
1238
1239 TCP_REASS_LOCK_CHECK(tp);
1240
1241 while ((qe = TAILQ_FIRST(&tp->segq)) != NULL) {
1242 TAILQ_REMOVE(&tp->segq, qe, ipqe_q);
1243 TAILQ_REMOVE(&tp->timeq, qe, ipqe_timeq);
1244 m_freem(qe->ipqe_m);
1245 tcpipqent_free(qe);
1246 rv = 1;
1247 }
1248 tp->t_segqlen = 0;
1249 KASSERT(TAILQ_EMPTY(&tp->timeq));
1250 return (rv);
1251 }
1252
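/*
 * Fast timeout routine: perform a deferred protocol drain if one was
 * requested via tcp_drainstub().
 */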
1253 void
1254 tcp_fasttimo(void)
1255 {
1256 if (tcp_drainwanted) {
1257 tcp_drain();
1258 tcp_drainwanted = 0;
1259 }
1260 }
1261
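/*
 * Drain stub: record that a drain is wanted and defer the actual work
 * to tcp_fasttimo().
 */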
1262 void
1263 tcp_drainstub(void)
1264 {
1265 tcp_drainwanted = 1;
1266 }
1267
1268 /*
1269 * Protocol drain routine. Called when memory is in short supply.
1270 * Called from pr_fasttimo, and thus in a callout context.
1271 */
1272 void
1273 tcp_drain(void)
1274 {
1275 struct inpcb_hdr *inph;
1276 struct tcpcb *tp;
1277
1278 mutex_enter(softnet_lock);
1279 KERNEL_LOCK(1, NULL);
1280
1281 /*
1282 * Free the sequence queue of all TCP connections.
1283 */
1284 TAILQ_FOREACH(inph, &tcbtable.inpt_queue, inph_queue) {
1285 switch (inph->inph_af) {
1286 case AF_INET:
1287 tp = intotcpcb((struct inpcb *)inph);
1288 break;
1289 #ifdef INET6
1290 case AF_INET6:
1291 tp = in6totcpcb((struct in6pcb *)inph);
1292 break;
1293 #endif
1294 default:
1295 tp = NULL;
1296 break;
1297 }
1298 if (tp != NULL) {
1299 /*
1300 * We may be called from a device's interrupt
1301 * context. If the tcpcb is already busy,
1302 * just bail out now.
1303 */
1304 if (tcp_reass_lock_try(tp) == 0)
1305 continue;
1306 if (tcp_freeq(tp))
1307 TCP_STATINC(TCP_STAT_CONNSDRAINED);
1308 TCP_REASS_UNLOCK(tp);
1309 }
1310 }
1311
1312 KERNEL_UNLOCK_ONE(NULL);
1313 mutex_exit(softnet_lock);
1314 }
1315
1316 /*
1317 * Notify a tcp user of an asynchronous error;
1318 * store error as soft error, but wake up user
1319 * (for now, won't do anything until can select for soft error).
1320 */
1321 void
1322 tcp_notify(struct inpcb *inp, int error)
1323 {
1324 struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
1325 struct socket *so = inp->inp_socket;
1326
1327 /*
1328 * Ignore some errors if we are hooked up.
1329 * If connection hasn't completed, has retransmitted several times,
1330 * and receives a second error, give up now. This is better
1331 * than waiting a long time to establish a connection that
1332 * can never complete.
1333 */
1334 if (tp->t_state == TCPS_ESTABLISHED &&
1335 (error == EHOSTUNREACH || error == ENETUNREACH ||
1336 error == EHOSTDOWN)) {
1337 return;
1338 } else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
1339 tp->t_rxtshift > 3 && tp->t_softerror)
1340 so->so_error = error;
1341 else
1342 tp->t_softerror = error;
1343 cv_broadcast(&so->so_cv);
1344 sorwakeup(so);
1345 sowwakeup(so);
1346 }
1347
1348 #ifdef INET6
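/* IPv6 counterpart of tcp_notify() above. */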
1349 void
1350 tcp6_notify(struct in6pcb *in6p, int error)
1351 {
1352 struct tcpcb *tp = (struct tcpcb *)in6p->in6p_ppcb;
1353 struct socket *so = in6p->in6p_socket;
1354
1355 /*
1356 * Ignore some errors if we are hooked up.
1357 * If connection hasn't completed, has retransmitted several times,
1358 * and receives a second error, give up now. This is better
1359 * than waiting a long time to establish a connection that
1360 * can never complete.
1361 */
1362 if (tp->t_state == TCPS_ESTABLISHED &&
1363 (error == EHOSTUNREACH || error == ENETUNREACH ||
1364 error == EHOSTDOWN)) {
1365 return;
1366 } else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
1367 tp->t_rxtshift > 3 && tp->t_softerror)
1368 so->so_error = error;
1369 else
1370 tp->t_softerror = error;
1371 cv_broadcast(&so->so_cv);
1372 sorwakeup(so);
1373 sowwakeup(so);
1374 }
1375 #endif
1376
1377 #ifdef INET6
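/*
 * Control input for IPv6: handle ICMPv6 errors (including PRC_MSGSIZE
 * path MTU updates) for TCP connections; counterpart of tcp_ctlinput().
 */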
1378 void *
1379 tcp6_ctlinput(int cmd, const struct sockaddr *sa, void *d)
1380 {
1381 struct tcphdr th;
1382 void (*notify)(struct in6pcb *, int) = tcp6_notify;
1383 int nmatch;
1384 struct ip6_hdr *ip6;
1385 const struct sockaddr_in6 *sa6_src = NULL;
1386 const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *)sa;
1387 struct mbuf *m;
1388 int off;
1389
1390 if (sa->sa_family != AF_INET6 ||
1391 sa->sa_len != sizeof(struct sockaddr_in6))
1392 return NULL;
1393 if ((unsigned)cmd >= PRC_NCMDS)
1394 return NULL;
1395 else if (cmd == PRC_QUENCH) {
1396 /*
1397 * Don't honor ICMP Source Quench messages meant for
1398 * TCP connections.
1399 */
1400 return NULL;
1401 } else if (PRC_IS_REDIRECT(cmd))
1402 notify = in6_rtchange, d = NULL;
1403 else if (cmd == PRC_MSGSIZE)
1404 ; /* special code is present, see below */
1405 else if (cmd == PRC_HOSTDEAD)
1406 d = NULL;
1407 else if (inet6ctlerrmap[cmd] == 0)
1408 return NULL;
1409
1410 /* if the parameter is from icmp6, decode it. */
1411 if (d != NULL) {
1412 struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
1413 m = ip6cp->ip6c_m;
1414 ip6 = ip6cp->ip6c_ip6;
1415 off = ip6cp->ip6c_off;
1416 sa6_src = ip6cp->ip6c_src;
1417 } else {
1418 m = NULL;
1419 ip6 = NULL;
1420 sa6_src = &sa6_any;
1421 off = 0;
1422 }
1423
1424 if (ip6) {
1425 /* check if we can safely examine src and dst ports */
1426 if (m->m_pkthdr.len < off + sizeof(th)) {
1427 if (cmd == PRC_MSGSIZE)
1428 icmp6_mtudisc_update((struct ip6ctlparam *)d, 0);
1429 return NULL;
1430 }
1431
1432 memset(&th, 0, sizeof(th));
1433 m_copydata(m, off, sizeof(th), (void *)&th);
1434
1435 if (cmd == PRC_MSGSIZE) {
1436 int valid = 0;
1437
1438 /*
1439 * Check to see if we have a valid TCP connection
1440 * corresponding to the address in the ICMPv6 message
1441 * payload.
1442 */
1443 if (in6_pcblookup_connect(&tcbtable, &sa6->sin6_addr,
1444 th.th_dport,
1445 (const struct in6_addr *)&sa6_src->sin6_addr,
1446 th.th_sport, 0, 0))
1447 valid++;
1448
1449 /*
1450 * Depending on the value of "valid" and routing table
1451 * size (mtudisc_{hi,lo}wat), we will:
1452 * - recalculate the new MTU and create the
1453 * corresponding routing entry, or
1454 * - ignore the MTU change notification.
1455 */
1456 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1457
1458 /*
1459 * no need to call in6_pcbnotify, it should have been
1460 * called via callback if necessary
1461 */
1462 return NULL;
1463 }
1464
1465 nmatch = in6_pcbnotify(&tcbtable, sa, th.th_dport,
1466 (const struct sockaddr *)sa6_src, th.th_sport, cmd, NULL, notify);
1467 if (nmatch == 0 && syn_cache_count &&
1468 (inet6ctlerrmap[cmd] == EHOSTUNREACH ||
1469 inet6ctlerrmap[cmd] == ENETUNREACH ||
1470 inet6ctlerrmap[cmd] == EHOSTDOWN))
1471 syn_cache_unreach((const struct sockaddr *)sa6_src,
1472 sa, &th);
1473 } else {
1474 (void) in6_pcbnotify(&tcbtable, sa, 0,
1475 (const struct sockaddr *)sa6_src, 0, cmd, NULL, notify);
1476 }
1477
1478 return NULL;
1479 }
1480 #endif
1481
1482 /* assumes that ip header and tcp header are contiguous on mbuf */
1483 void *
1484 tcp_ctlinput(int cmd, const struct sockaddr *sa, void *v)
1485 {
1486 struct ip *ip = v;
1487 struct tcphdr *th;
1488 struct icmp *icp;
1489 extern const int inetctlerrmap[];
1490 void (*notify)(struct inpcb *, int) = tcp_notify;
1491 int errno;
1492 int nmatch;
1493 struct tcpcb *tp;
1494 u_int mtu;
1495 tcp_seq seq;
1496 struct inpcb *inp;
1497 #ifdef INET6
1498 struct in6pcb *in6p;
1499 struct in6_addr src6, dst6;
1500 #endif
1501
1502 if (sa->sa_family != AF_INET ||
1503 sa->sa_len != sizeof(struct sockaddr_in))
1504 return NULL;
1505 if ((unsigned)cmd >= PRC_NCMDS)
1506 return NULL;
1507 errno = inetctlerrmap[cmd];
1508 if (cmd == PRC_QUENCH)
1509 /*
1510 * Don't honor ICMP Source Quench messages meant for
1511 * TCP connections.
1512 */
1513 return NULL;
1514 else if (PRC_IS_REDIRECT(cmd))
1515 notify = in_rtchange, ip = 0;
1516 else if (cmd == PRC_MSGSIZE && ip && ip->ip_v == 4) {
1517 /*
1518 * Check to see if we have a valid TCP connection
1519 * corresponding to the address in the ICMP message
1520 * payload.
1521 *
1522 * Boundary check is made in icmp_input(), with ICMP_ADVLENMIN.
1523 */
1524 th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
1525 #ifdef INET6
1526 in6_in_2_v4mapin6(&ip->ip_src, &src6);
1527 in6_in_2_v4mapin6(&ip->ip_dst, &dst6);
1528 #endif
1529 if ((inp = in_pcblookup_connect(&tcbtable, ip->ip_dst,
1530 th->th_dport, ip->ip_src, th->th_sport, 0)) != NULL)
1531 #ifdef INET6
1532 in6p = NULL;
1533 #else
1534 ;
1535 #endif
1536 #ifdef INET6
1537 else if ((in6p = in6_pcblookup_connect(&tcbtable, &dst6,
1538 th->th_dport, &src6, th->th_sport, 0, 0)) != NULL)
1539 ;
1540 #endif
1541 else
1542 return NULL;
1543
1544 /*
1545 * Now that we've validated that we are actually communicating
1546 * with the host indicated in the ICMP message, locate the
1547 * ICMP header, recalculate the new MTU, and create the
1548 * corresponding routing entry.
1549 */
1550 icp = (struct icmp *)((char *)ip -
1551 offsetof(struct icmp, icmp_ip));
1552 if (inp) {
1553 if ((tp = intotcpcb(inp)) == NULL)
1554 return NULL;
1555 }
1556 #ifdef INET6
1557 else if (in6p) {
1558 if ((tp = in6totcpcb(in6p)) == NULL)
1559 return NULL;
1560 }
1561 #endif
1562 else
1563 return NULL;
1564 seq = ntohl(th->th_seq);
1565 if (SEQ_LT(seq, tp->snd_una) || SEQ_GT(seq, tp->snd_max))
1566 return NULL;
1567 /*
1568 * If the ICMP message advertises a Next-Hop MTU
1569 * equal to or larger than the maximum packet size we have
1570 * ever sent, drop the message.
1571 */
1572 mtu = (u_int)ntohs(icp->icmp_nextmtu);
1573 if (mtu >= tp->t_pmtud_mtu_sent)
1574 return NULL;
1575 if (mtu >= tcp_hdrsz(tp) + tp->t_pmtud_mss_acked) {
1576 /*
1577 * Calculate new MTU, and create corresponding
1578 * route (traditional PMTUD).
1579 */
1580 tp->t_flags &= ~TF_PMTUD_PEND;
1581 icmp_mtudisc(icp, ip->ip_dst);
1582 } else {
1583 /*
1584 * Record the information received in the ICMP
1585 * message; act on it later.
1586 * If we had already recorded an ICMP message,
1587 * replace the old one only if the new message
1588 * refers to an older TCP segment
1589 */
1590 if (tp->t_flags & TF_PMTUD_PEND) {
1591 if (SEQ_LT(tp->t_pmtud_th_seq, seq))
1592 return NULL;
1593 } else
1594 tp->t_flags |= TF_PMTUD_PEND;
1595 tp->t_pmtud_th_seq = seq;
1596 tp->t_pmtud_nextmtu = icp->icmp_nextmtu;
1597 tp->t_pmtud_ip_len = icp->icmp_ip.ip_len;
1598 tp->t_pmtud_ip_hl = icp->icmp_ip.ip_hl;
1599 }
1600 return NULL;
1601 } else if (cmd == PRC_HOSTDEAD)
1602 ip = 0;
1603 else if (errno == 0)
1604 return NULL;
1605 if (ip && ip->ip_v == 4 && sa->sa_family == AF_INET) {
1606 th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
1607 nmatch = in_pcbnotify(&tcbtable, satocsin(sa)->sin_addr,
1608 th->th_dport, ip->ip_src, th->th_sport, errno, notify);
1609 if (nmatch == 0 && syn_cache_count &&
1610 (inetctlerrmap[cmd] == EHOSTUNREACH ||
1611 inetctlerrmap[cmd] == ENETUNREACH ||
1612 inetctlerrmap[cmd] == EHOSTDOWN)) {
1613 struct sockaddr_in sin;
1614 memset(&sin, 0, sizeof(sin));
1615 sin.sin_len = sizeof(sin);
1616 sin.sin_family = AF_INET;
1617 sin.sin_port = th->th_sport;
1618 sin.sin_addr = ip->ip_src;
1619 syn_cache_unreach((struct sockaddr *)&sin, sa, th);
1620 }
1621
1622 /* XXX mapped address case */
1623 } else
1624 in_pcbnotifyall(&tcbtable, satocsin(sa)->sin_addr, errno,
1625 notify);
1626 return NULL;
1627 }
1628
1629 /*
1630 * When a source quench is received, we are being notified of congestion.
1631 * Close the congestion window down to the Loss Window (one segment).
1632 * We will gradually open it again as we proceed.
1633 */
1634 void
1635 tcp_quench(struct inpcb *inp)
1636 {
1637 struct tcpcb *tp = intotcpcb(inp);
1638
1639 if (tp) {
1640 tp->snd_cwnd = tp->t_segsz;
1641 tp->t_bytes_acked = 0;
1642 }
1643 }
1644
1645 #ifdef INET6
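/* IPv6 counterpart of tcp_quench() above. */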
1646 void
1647 tcp6_quench(struct in6pcb *in6p)
1648 {
1649 struct tcpcb *tp = in6totcpcb(in6p);
1650
1651 if (tp) {
1652 tp->snd_cwnd = tp->t_segsz;
1653 tp->t_bytes_acked = 0;
1654 }
1655 }
1656 #endif
1657
1658 /*
1659 * Path MTU Discovery handlers.
1660 */
1661 void
1662 tcp_mtudisc_callback(struct in_addr faddr)
1663 {
1664 #ifdef INET6
1665 struct in6_addr in6;
1666 #endif
1667
1668 in_pcbnotifyall(&tcbtable, faddr, EMSGSIZE, tcp_mtudisc);
1669 #ifdef INET6
1670 in6_in_2_v4mapin6(&faddr, &in6);
1671 tcp6_mtudisc_callback(&in6);
1672 #endif
1673 }
1674
1675 /*
1676 * On receipt of path MTU corrections, flush old route and replace it
1677 * with the new one. Retransmit all unacknowledged packets, to ensure
1678 * that all packets will be received.
1679 */
1680 void
1681 tcp_mtudisc(struct inpcb *inp, int errno)
1682 {
1683 struct tcpcb *tp = intotcpcb(inp);
1684 struct rtentry *rt;
1685
1686 if (tp == NULL)
1687 return;
1688
1689 rt = in_pcbrtentry(inp);
1690 if (rt != NULL) {
1691 /*
1692 * If this was not a host route, remove and realloc.
1693 */
1694 if ((rt->rt_flags & RTF_HOST) == 0) {
1695 in_pcbrtentry_unref(rt, inp);
1696 in_rtchange(inp, errno);
1697 if ((rt = in_pcbrtentry(inp)) == NULL)
1698 return;
1699 }
1700
1701 /*
1702 * Slow start out of the error condition. We
1703 * use the MTU because we know it's smaller
1704 * than the previously transmitted segment.
1705 *
1706 * Note: This is more conservative than the
1707 * suggestion in draft-floyd-incr-init-win-03.
1708 */
1709 if (rt->rt_rmx.rmx_mtu != 0)
1710 tp->snd_cwnd =
1711 TCP_INITIAL_WINDOW(tcp_init_win,
1712 rt->rt_rmx.rmx_mtu);
1713 in_pcbrtentry_unref(rt, inp);
1714 }
1715
1716 /*
1717 * Resend unacknowledged packets.
1718 */
1719 tp->snd_nxt = tp->sack_newdata = tp->snd_una;
1720 tcp_output(tp);
1721 }
1722
1723 #ifdef INET6
1724 /*
1725 * Path MTU Discovery handlers.
1726 */
1727 void
1728 tcp6_mtudisc_callback(struct in6_addr *faddr)
1729 {
1730 struct sockaddr_in6 sin6;
1731
1732 memset(&sin6, 0, sizeof(sin6));
1733 sin6.sin6_family = AF_INET6;
1734 sin6.sin6_len = sizeof(struct sockaddr_in6);
1735 sin6.sin6_addr = *faddr;
1736 (void) in6_pcbnotify(&tcbtable, (struct sockaddr *)&sin6, 0,
1737 (const struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp6_mtudisc);
1738 }
1739
1740 void
1741 tcp6_mtudisc(struct in6pcb *in6p, int errno)
1742 {
1743 struct tcpcb *tp = in6totcpcb(in6p);
1744 struct rtentry *rt;
1745
1746 if (tp == NULL)
1747 return;
1748
1749 rt = in6_pcbrtentry(in6p);
1750 if (rt != NULL) {
1751 /*
1752 * If this was not a host route, remove and realloc.
1753 */
1754 if ((rt->rt_flags & RTF_HOST) == 0) {
1755 in6_pcbrtentry_unref(rt, in6p);
1756 in6_rtchange(in6p, errno);
1757 rt = in6_pcbrtentry(in6p);
1758 if (rt == NULL)
1759 return;
1760 }
1761
1762 /*
1763 * Slow start out of the error condition. We
1764 * use the MTU because we know it's smaller
1765 * than the previously transmitted segment.
1766 *
1767 * Note: This is more conservative than the
1768 * suggestion in draft-floyd-incr-init-win-03.
1769 */
1770 if (rt->rt_rmx.rmx_mtu != 0) {
1771 tp->snd_cwnd = TCP_INITIAL_WINDOW(tcp_init_win,
1772 rt->rt_rmx.rmx_mtu);
1773 }
1774 in6_pcbrtentry_unref(rt, in6p);
1775 }
1776
1777 /*
1778 * Resend unacknowledged packets.
1779 */
1780 tp->snd_nxt = tp->sack_newdata = tp->snd_una;
1781 tcp_output(tp);
1782 }
1783 #endif /* INET6 */
1784
1785 /*
1786 * Compute the MSS to advertise to the peer. Called only during
1787 * the 3-way handshake. If we are the server (peer initiated
1788 * connection), we are called with a pointer to the interface
1789 * on which the SYN packet arrived. If we are the client (we
1790 * initiated connection), we are called with a pointer to the
1791 * interface out which this connection should go.
1792 *
1793 * NOTE: Do not subtract IP option/extension header size nor IPsec
1794 * header size from MSS advertisement. MSS option must hold the maximum
1795 * segment size we can accept, so it must always be:
1796 * max(if mtu) - ip header - tcp header
1797 */
1798 u_long
1799 tcp_mss_to_advertise(const struct ifnet *ifp, int af)
1800 {
1801 extern u_long in_maxmtu;
1802 u_long mss = 0;
1803 u_long hdrsiz;
1804
1805 /*
1806 * In order to avoid defeating path MTU discovery on the peer,
1807 * we advertise the max MTU of all attached networks as our MSS,
1808 * per RFC 1191, section 3.1.
1809 *
1810 * We provide the option to advertise just the MTU of
1811 * the interface on which we hope this connection will
1812 * be receiving. If we are responding to a SYN, we
1813 * will have a pretty good idea about this, but when
1814 * initiating a connection there is a bit more doubt.
1815 *
1816 * We also need to ensure that loopback has a large enough
1817 * MSS, as the loopback MTU is never included in in_maxmtu.
1818 */
1819
1820 if (ifp != NULL)
1821 switch (af) {
1822 #ifdef INET6
1823 case AF_INET6: /* FALLTHROUGH */
1824 #endif
1825 case AF_INET:
1826 mss = ifp->if_mtu;
1827 break;
1828 }
1829
1830 if (tcp_mss_ifmtu == 0)
1831 switch (af) {
1832 #ifdef INET6
1833 case AF_INET6: /* FALLTHROUGH */
1834 #endif
1835 case AF_INET:
1836 mss = uimax(in_maxmtu, mss);
1837 break;
1838 }
1839
1840 switch (af) {
1841 case AF_INET:
1842 hdrsiz = sizeof(struct ip);
1843 break;
1844 #ifdef INET6
1845 case AF_INET6:
1846 hdrsiz = sizeof(struct ip6_hdr);
1847 break;
1848 #endif
1849 default:
1850 hdrsiz = 0;
1851 break;
1852 }
1853 hdrsiz += sizeof(struct tcphdr);
1854 if (mss > hdrsiz)
1855 mss -= hdrsiz;
1856
1857 mss = uimax(tcp_mssdflt, mss);
1858 return (mss);
1859 }
1860
1861 /*
1862 * Set connection variables based on the peer's advertised MSS.
1863 * We are passed the TCPCB for the actual connection. If we
1864 * are the server, we are called by the compressed state engine
1865 * when the 3-way handshake is complete. If we are the client,
1866 * we are called when we receive the SYN,ACK from the server.
1867 *
1868 * NOTE: Our advertised MSS value must be initialized in the TCPCB
1869 * before this routine is called!
1870 */
1871 void
1872 tcp_mss_from_peer(struct tcpcb *tp, int offer)
1873 {
1874 struct socket *so;
1875 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1876 struct rtentry *rt;
1877 #endif
1878 u_long bufsize;
1879 int mss;
1880
1881 KASSERT(!(tp->t_inpcb && tp->t_in6pcb));
1882
1883 so = NULL;
1884 rt = NULL;
1885
1886 if (tp->t_inpcb) {
1887 so = tp->t_inpcb->inp_socket;
1888 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1889 rt = in_pcbrtentry(tp->t_inpcb);
1890 #endif
1891 }
1892
1893 #ifdef INET6
1894 if (tp->t_in6pcb) {
1895 so = tp->t_in6pcb->in6p_socket;
1896 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1897 rt = in6_pcbrtentry(tp->t_in6pcb);
1898 #endif
1899 }
1900 #endif
1901
1902 /*
1903 * As per RFC1122, use the default MSS value, unless they
1904 * sent us an offer. Do not accept offers less than 256 bytes.
1905 */
1906 mss = tcp_mssdflt;
1907 if (offer)
1908 mss = offer;
1909 mss = uimax(mss, 256); /* sanity */
1910 tp->t_peermss = mss;
1911 mss -= tcp_optlen(tp);
1912 if (tp->t_inpcb)
1913 mss -= ip_optlen(tp->t_inpcb);
1914 #ifdef INET6
1915 if (tp->t_in6pcb)
1916 mss -= ip6_optlen(tp->t_in6pcb);
1917 #endif
1918 /*
1919 * XXX XXX What if mss goes to zero or negative? This can happen if a
1920 * socket has large IPv6 options; at zero, the roundup() below divides by zero.
1921 */
1922
1923 /*
1924 * If there's a pipesize, change the socket buffer to that size.
1925 * Make the socket buffer an integral number of MSS units. If
1926 * the MSS is larger than the socket buffer, artificially decrease
1927 * the MSS.
1928 */
1929 #ifdef RTV_SPIPE
1930 if (rt != NULL && rt->rt_rmx.rmx_sendpipe != 0)
1931 bufsize = rt->rt_rmx.rmx_sendpipe;
1932 else
1933 #endif
1934 {
1935 KASSERT(so != NULL);
1936 bufsize = so->so_snd.sb_hiwat;
1937 }
1938 if (bufsize < mss)
1939 mss = bufsize;
1940 else {
1941 bufsize = roundup(bufsize, mss);
1942 if (bufsize > sb_max)
1943 bufsize = sb_max;
1944 (void) sbreserve(&so->so_snd, bufsize, so);
1945 }
1946 tp->t_segsz = mss;
1947
1948 #ifdef RTV_SSTHRESH
1949 if (rt != NULL && rt->rt_rmx.rmx_ssthresh) {
1950 /*
1951 * There's some sort of gateway or interface buffer
1952 * limit on the path. Use this to set the slow
1953 * start threshold, but set the threshold to no less
1954 * than 2 * MSS.
1955 */
1956 tp->snd_ssthresh = uimax(2 * mss, rt->rt_rmx.rmx_ssthresh);
1957 }
1958 #endif
1959 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1960 if (tp->t_inpcb)
1961 in_pcbrtentry_unref(rt, tp->t_inpcb);
1962 #ifdef INET6
1963 if (tp->t_in6pcb)
1964 in6_pcbrtentry_unref(rt, tp->t_in6pcb);
1965 #endif
1966 #endif
1967 }
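
/*
 * For example, if the peer offers an MSS of 1460 and timestamps were
 * negotiated, t_peermss is set to 1460 and tcp_optlen() returns
 * TCPOLEN_TSTAMP_APPA (12), so the working mss becomes 1448 (assuming
 * no IP options).  With, say, a 32 KB send buffer (sb_hiwat = 32768),
 * roundup(32768, 1448) = 33304 bytes (23 full-sized segments) is
 * reserved, subject to the sb_max cap, and t_segsz becomes 1448.
 */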
1968
1969 /*
1970 * Processing necessary when a TCP connection is established.
1971 */
1972 void
1973 tcp_established(struct tcpcb *tp)
1974 {
1975 struct socket *so;
1976 #ifdef RTV_RPIPE
1977 struct rtentry *rt;
1978 #endif
1979 u_long bufsize;
1980
1981 KASSERT(!(tp->t_inpcb && tp->t_in6pcb));
1982
1983 so = NULL;
1984 rt = NULL;
1985
1986 /* This is a while() to reduce the dreadful stairstepping below */
1987 while (tp->t_inpcb) {
1988 so = tp->t_inpcb->inp_socket;
1989 #if defined(RTV_RPIPE)
1990 rt = in_pcbrtentry(tp->t_inpcb);
1991 #endif
1992 if (__predict_true(tcp_msl_enable)) {
1993 if (tp->t_inpcb->inp_laddr.s_addr == INADDR_LOOPBACK) {
1994 tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
1995 break;
1996 }
1997
1998 if (__predict_false(tcp_rttlocal)) {
1999 /* This may be adjusted by tcp_input */
2000 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2001 break;
2002 }
2003 if (in_localaddr(tp->t_inpcb->inp_faddr)) {
2004 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2005 break;
2006 }
2007 }
2008 tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
2009 break;
2010 }
2011
2012 /* Clamp to a reasonable range. */
2013 tp->t_msl = MIN(tp->t_msl, TCP_MAXMSL);
2014
2015 #ifdef INET6
2016 /* The !tp->t_inpcb lets the compiler know it can't be v4 *and* v6 */
2017 while (!tp->t_inpcb && tp->t_in6pcb) {
2018 so = tp->t_in6pcb->in6p_socket;
2019 #if defined(RTV_RPIPE)
2020 rt = in6_pcbrtentry(tp->t_in6pcb);
2021 #endif
2022 if (__predict_true(tcp_msl_enable)) {
2023 extern const struct in6_addr in6addr_loopback;
2024
2025 if (IN6_ARE_ADDR_EQUAL(&tp->t_in6pcb->in6p_laddr,
2026 &in6addr_loopback)) {
2027 tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
2028 break;
2029 }
2030
2031 if (__predict_false(tcp_rttlocal)) {
2032 /* This may be adjusted by tcp_input */
2033 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2034 break;
2035 }
2036 if (in6_localaddr(&tp->t_in6pcb->in6p_faddr)) {
2037 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2038 break;
2039 }
2040 }
2041 tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
2042 break;
2043 }
2044
2045 /* Clamp to a reasonable range. */
2046 tp->t_msl = MIN(tp->t_msl, TCP_MAXMSL);
2047 #endif
2048
2049 tp->t_state = TCPS_ESTABLISHED;
2050 TCP_TIMER_ARM(tp, TCPT_KEEP, tp->t_keepidle);
2051
2052 #ifdef RTV_RPIPE
2053 if (rt != NULL && rt->rt_rmx.rmx_recvpipe != 0)
2054 bufsize = rt->rt_rmx.rmx_recvpipe;
2055 else
2056 #endif
2057 {
2058 KASSERT(so != NULL);
2059 bufsize = so->so_rcv.sb_hiwat;
2060 }
2061 if (bufsize > tp->t_ourmss) {
2062 bufsize = roundup(bufsize, tp->t_ourmss);
2063 if (bufsize > sb_max)
2064 bufsize = sb_max;
2065 (void) sbreserve(&so->so_rcv, bufsize, so);
2066 }
2067 #ifdef RTV_RPIPE
2068 if (tp->t_inpcb)
2069 in_pcbrtentry_unref(rt, tp->t_inpcb);
2070 #ifdef INET6
2071 if (tp->t_in6pcb)
2072 in6_pcbrtentry_unref(rt, tp->t_in6pcb);
2073 #endif
2074 #endif
2075 }
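
/*
 * In summary, the MSL chosen above falls back to TCPTV_MSL/4 for
 * loopback peers, TCPTV_MSL/2 for peers on a local network (or when
 * tcp_rttlocal is set), and the full TCPTV_MSL otherwise, unless the
 * corresponding tcp_msl_* sysctl supplies an explicit value; the result
 * is then clamped to TCP_MAXMSL.  With TCPTV_MSL nominally 30 seconds,
 * the fallbacks correspond to roughly 7.5, 15 and 30 seconds.
 */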
2076
2077 /*
2078 * Check if there's an initial rtt or rttvar. Convert from the
2079 * route-table units to scaled multiples of the slow timeout timer.
2080 * Called only during the 3-way handshake.
2081 */
2082 void
2083 tcp_rmx_rtt(struct tcpcb *tp)
2084 {
2085 #ifdef RTV_RTT
2086 struct rtentry *rt = NULL;
2087 int rtt;
2088
2089 KASSERT(!(tp->t_inpcb && tp->t_in6pcb));
2090
2091 if (tp->t_inpcb)
2092 rt = in_pcbrtentry(tp->t_inpcb);
2093 #ifdef INET6
2094 if (tp->t_in6pcb)
2095 rt = in6_pcbrtentry(tp->t_in6pcb);
2096 #endif
2097 if (rt == NULL)
2098 return;
2099
2100 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
2101 /*
2102 * XXX The lock bit for RTT indicates that the value
2103 * is also a minimum value; this is subject to time.
2104 */
2105 if (rt->rt_rmx.rmx_locks & RTV_RTT)
2106 TCPT_RANGESET(tp->t_rttmin,
2107 rtt / (RTM_RTTUNIT / PR_SLOWHZ),
2108 TCPTV_MIN, TCPTV_REXMTMAX);
2109 tp->t_srtt = rtt /
2110 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
2111 if (rt->rt_rmx.rmx_rttvar) {
2112 tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
2113 ((RTM_RTTUNIT / PR_SLOWHZ) >>
2114 (TCP_RTTVAR_SHIFT + 2));
2115 } else {
2116 /* Default variation is +- 1 rtt */
2117 tp->t_rttvar =
2118 tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
2119 }
2120 TCPT_RANGESET(tp->t_rxtcur,
2121 ((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
2122 tp->t_rttmin, TCPTV_REXMTMAX);
2123 }
2124 if (tp->t_inpcb)
2125 in_pcbrtentry_unref(rt, tp->t_inpcb);
2126 #ifdef INET6
2127 if (tp->t_in6pcb)
2128 in6_pcbrtentry_unref(rt, tp->t_in6pcb);
2129 #endif
2130 #endif
2131 }
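
/*
 * The conversion above works because rmx_rtt and rmx_rttvar are kept in
 * units of RTM_RTTUNIT per second (microseconds), whereas t_srtt and
 * t_rttvar are kept in slow-timer ticks scaled up by TCP_RTT_SHIFT + 2
 * and TCP_RTTVAR_SHIFT + 2 bits respectively; dividing by
 * (RTM_RTTUNIT / PR_SLOWHZ) shifted right by the scale factor does the
 * unit change and the scaling in one step.  t_rttmin, by contrast, is
 * kept in plain ticks, hence the unshifted divisor.
 */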
2132
2133 tcp_seq tcp_iss_seq = 0; /* tcp initial seq # */
2134
2135 /*
2136 * Get a new initial sequence number (ISS) for the given TCP control block
2137 */
2138 tcp_seq
2139 tcp_new_iss(struct tcpcb *tp)
2140 {
2141
2142 if (tp->t_inpcb != NULL) {
2143 return tcp_new_iss1(&tp->t_inpcb->inp_laddr,
2144 &tp->t_inpcb->inp_faddr, tp->t_inpcb->inp_lport,
2145 tp->t_inpcb->inp_fport, sizeof(tp->t_inpcb->inp_laddr));
2146 }
2147 #ifdef INET6
2148 if (tp->t_in6pcb != NULL) {
2149 return tcp_new_iss1(&tp->t_in6pcb->in6p_laddr,
2150 &tp->t_in6pcb->in6p_faddr, tp->t_in6pcb->in6p_lport,
2151 tp->t_in6pcb->in6p_fport, sizeof(tp->t_in6pcb->in6p_laddr));
2152 }
2153 #endif
2154
2155 panic("tcp_new_iss: unreachable");
2156 }
2157
2158 static u_int8_t tcp_iss_secret[16]; /* 128 bits; should be plenty */
2159
2160 /*
2161 * Initialize RFC 1948 ISS Secret
2162 */
2163 static int
2164 tcp_iss_secret_init(void)
2165 {
2166 cprng_strong(kern_cprng,
2167 tcp_iss_secret, sizeof(tcp_iss_secret), 0);
2168
2169 return 0;
2170 }
2171
2172 /*
2173 * This routine actually generates a new TCP initial sequence number.
2174 */
2175 tcp_seq
2176 tcp_new_iss1(void *laddr, void *faddr, u_int16_t lport, u_int16_t fport,
2177 size_t addrsz)
2178 {
2179 tcp_seq tcp_iss;
2180
2181 if (tcp_do_rfc1948) {
2182 MD5_CTX ctx;
2183 u_int8_t hash[16]; /* XXX MD5 knowledge */
2184 static ONCE_DECL(tcp_iss_secret_control);
2185
2186 /*
2187 * If we haven't been here before, initialize our cryptographic
2188 * hash secret.
2189 */
2190 RUN_ONCE(&tcp_iss_secret_control, tcp_iss_secret_init);
2191
2192 /*
2193 * Compute the base value of the ISS. It is a hash
2194 * of (saddr, sport, daddr, dport, secret).
2195 */
2196 MD5Init(&ctx);
2197
2198 MD5Update(&ctx, (u_char *) laddr, addrsz);
2199 MD5Update(&ctx, (u_char *) &lport, sizeof(lport));
2200
2201 MD5Update(&ctx, (u_char *) faddr, addrsz);
2202 MD5Update(&ctx, (u_char *) &fport, sizeof(fport));
2203
2204 MD5Update(&ctx, tcp_iss_secret, sizeof(tcp_iss_secret));
2205
2206 MD5Final(hash, &ctx);
2207
2208 memcpy(&tcp_iss, hash, sizeof(tcp_iss));
2209
2210 #ifdef TCPISS_DEBUG
2211 printf("ISS hash 0x%08x, ", tcp_iss);
2212 #endif
2213 /*
2214 * Add the offset in to the computed value.
2215 */
2216 tcp_iss += tcp_iss_seq;
2217 #ifdef TCPISS_DEBUG
2218 printf("ISS %08x\n", tcp_iss);
2219 #endif
2220 } else {
2221 /*
2222 * Randomize.
2223 */
2224 tcp_iss = cprng_fast32();
2225 #ifdef TCPISS_DEBUG
2226 printf("ISS random 0x%08x, ", tcp_iss);
2227 #endif
2228 }
2229
2230 return tcp_iss;
2231 }
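
/*
 * This follows RFC 1948: the ISS is the sum of a connection-specific
 * component, MD5(laddr, lport, faddr, fport, secret), and the global
 * offset tcp_iss_seq declared above.  The hash places unrelated
 * connections in unrelated parts of the sequence space, while the
 * offset is what keeps successive incarnations of the same connection
 * from reusing recent sequence numbers.
 */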
2232
2233 #if defined(IPSEC)
2234 /* compute ESP/AH header size for TCP, including outer IP header. */
2235 size_t
2236 ipsec4_hdrsiz_tcp(struct tcpcb *tp)
2237 {
2238 struct inpcb *inp;
2239 size_t hdrsiz;
2240
2241 /* XXX mapped addr case (tp->t_in6pcb) */
2242 if (!tp || !tp->t_template || !(inp = tp->t_inpcb))
2243 return 0;
2244 switch (tp->t_family) {
2245 case AF_INET:
2246 /* XXX: should use correct direction. */
2247 hdrsiz = ipsec_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, inp);
2248 break;
2249 default:
2250 hdrsiz = 0;
2251 break;
2252 }
2253
2254 return hdrsiz;
2255 }
2256
2257 #ifdef INET6
2258 size_t
2259 ipsec6_hdrsiz_tcp(struct tcpcb *tp)
2260 {
2261 struct in6pcb *in6p;
2262 size_t hdrsiz;
2263
2264 if (!tp || !tp->t_template || !(in6p = tp->t_in6pcb))
2265 return 0;
2266 switch (tp->t_family) {
2267 case AF_INET6:
2268 /* XXX: should use correct direction. */
2269 hdrsiz = ipsec_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, in6p);
2270 break;
2271 case AF_INET:
2272 /* mapped address case - tricky */
2273 default:
2274 hdrsiz = 0;
2275 break;
2276 }
2277
2278 return hdrsiz;
2279 }
2280 #endif
2281 #endif /*IPSEC*/
2282
2283 /*
2284 * Determine the length of the TCP options for this connection.
2285 *
2286 * XXX: What do we do for SACK, when we add that? Just reserve
2287 * all of the space? Otherwise we can't exactly be incrementing
2288 * cwnd by an amount that varies depending on the amount we last
2289 * had to SACK!
2290 */
2291
2292 u_int
2293 tcp_optlen(struct tcpcb *tp)
2294 {
2295 u_int optlen;
2296
2297 optlen = 0;
2298 if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
2299 (TF_REQ_TSTMP | TF_RCVD_TSTMP))
2300 optlen += TCPOLEN_TSTAMP_APPA;
2301
2302 #ifdef TCP_SIGNATURE
2303 if (tp->t_flags & TF_SIGNATURE)
2304 optlen += TCPOLEN_SIGLEN;
2305 #endif
2306
2307 return optlen;
2308 }
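
/*
 * For example, when both sides have agreed to use timestamps
 * (TF_REQ_TSTMP and TF_RCVD_TSTMP set, TF_NOOPT clear), tcp_optlen()
 * returns TCPOLEN_TSTAMP_APPA: 12 bytes, i.e. the 10-byte timestamp
 * option preceded by two NOPs as laid out in RFC 1323 appendix A.
 * With TCP_SIGNATURE in use, TCPOLEN_SIGLEN is added on top of that.
 */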
2309
2310 u_int
2311 tcp_hdrsz(struct tcpcb *tp)
2312 {
2313 u_int hlen;
2314
2315 switch (tp->t_family) {
2316 #ifdef INET6
2317 case AF_INET6:
2318 hlen = sizeof(struct ip6_hdr);
2319 break;
2320 #endif
2321 case AF_INET:
2322 hlen = sizeof(struct ip);
2323 break;
2324 default:
2325 hlen = 0;
2326 break;
2327 }
2328 hlen += sizeof(struct tcphdr);
2329
2330 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2331 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
2332 hlen += TCPOLEN_TSTAMP_APPA;
2333 #ifdef TCP_SIGNATURE
2334 if (tp->t_flags & TF_SIGNATURE)
2335 hlen += TCPOLEN_SIGLEN;
2336 #endif
2337 return hlen;
2338 }
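
/*
 * For example, an AF_INET connection using timestamps has a header size
 * of sizeof(struct ip) + sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA =
 * 20 + 20 + 12 = 52 bytes; the AF_INET6 equivalent is 40 + 20 + 12 = 72.
 */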
2339
2340 void
2341 tcp_statinc(u_int stat)
2342 {
2343
2344 KASSERT(stat < TCP_NSTATS);
2345 TCP_STATINC(stat);
2346 }
2347
2348 void
2349 tcp_statadd(u_int stat, uint64_t val)
2350 {
2351
2352 KASSERT(stat < TCP_NSTATS);
2353 TCP_STATADD(stat, val);
2354 }
2355