1 /* $NetBSD: tcp_subr.c,v 1.268 2016/12/08 05:16:33 ozaki-r Exp $ */
2
3 /*
4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*-
33 * Copyright (c) 1997, 1998, 2000, 2001, 2008 The NetBSD Foundation, Inc.
34 * All rights reserved.
35 *
36 * This code is derived from software contributed to The NetBSD Foundation
37 * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
38 * Facility, NASA Ames Research Center.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 */
61
62 /*
63 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
64 * The Regents of the University of California. All rights reserved.
65 *
66 * Redistribution and use in source and binary forms, with or without
67 * modification, are permitted provided that the following conditions
68 * are met:
69 * 1. Redistributions of source code must retain the above copyright
70 * notice, this list of conditions and the following disclaimer.
71 * 2. Redistributions in binary form must reproduce the above copyright
72 * notice, this list of conditions and the following disclaimer in the
73 * documentation and/or other materials provided with the distribution.
74 * 3. Neither the name of the University nor the names of its contributors
75 * may be used to endorse or promote products derived from this software
76 * without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
79 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
80 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
81 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
82 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
83 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
84 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
86 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
87 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
88 * SUCH DAMAGE.
89 *
90 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
91 */
92
93 #include <sys/cdefs.h>
94 __KERNEL_RCSID(0, "$NetBSD: tcp_subr.c,v 1.268 2016/12/08 05:16:33 ozaki-r Exp $");
95
96 #ifdef _KERNEL_OPT
97 #include "opt_inet.h"
98 #include "opt_ipsec.h"
99 #include "opt_tcp_compat_42.h"
100 #include "opt_inet_csum.h"
101 #include "opt_mbuftrace.h"
102 #endif
103
104 #include <sys/param.h>
105 #include <sys/atomic.h>
106 #include <sys/proc.h>
107 #include <sys/systm.h>
108 #include <sys/mbuf.h>
109 #include <sys/once.h>
110 #include <sys/socket.h>
111 #include <sys/socketvar.h>
112 #include <sys/protosw.h>
113 #include <sys/errno.h>
114 #include <sys/kernel.h>
115 #include <sys/pool.h>
116 #include <sys/md5.h>
117 #include <sys/cprng.h>
118
119 #include <net/route.h>
120 #include <net/if.h>
121
122 #include <netinet/in.h>
123 #include <netinet/in_systm.h>
124 #include <netinet/ip.h>
125 #include <netinet/in_pcb.h>
126 #include <netinet/ip_var.h>
127 #include <netinet/ip_icmp.h>
128
129 #ifdef INET6
130 #ifndef INET
131 #include <netinet/in.h>
132 #endif
133 #include <netinet/ip6.h>
134 #include <netinet6/in6_pcb.h>
135 #include <netinet6/ip6_var.h>
136 #include <netinet6/in6_var.h>
137 #include <netinet6/ip6protosw.h>
138 #include <netinet/icmp6.h>
139 #include <netinet6/nd6.h>
140 #endif
141
142 #include <netinet/tcp.h>
143 #include <netinet/tcp_fsm.h>
144 #include <netinet/tcp_seq.h>
145 #include <netinet/tcp_timer.h>
146 #include <netinet/tcp_var.h>
147 #include <netinet/tcp_vtw.h>
148 #include <netinet/tcp_private.h>
149 #include <netinet/tcp_congctl.h>
150 #include <netinet/tcpip.h>
151
152 #ifdef IPSEC
153 #include <netipsec/ipsec.h>
154 #include <netipsec/xform.h>
155 #ifdef INET6
156 #include <netipsec/ipsec6.h>
157 #endif
158 #include <netipsec/key.h>
159 #endif /* IPSEC */
160
161
162 struct inpcbtable tcbtable; /* head of queue of active tcpcb's */
163 u_int32_t tcp_now; /* slow ticks, for RFC 1323 timestamps */
164
165 percpu_t *tcpstat_percpu;
166
167 /* patchable/settable parameters for tcp */
168 int tcp_mssdflt = TCP_MSS;
169 int tcp_minmss = TCP_MINMSS;
170 int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
171 int tcp_do_rfc1323 = 1; /* window scaling / timestamps (obsolete) */
172 int tcp_do_rfc1948 = 0; /* ISS by cryptographic hash */
173 int tcp_do_sack = 1; /* selective acknowledgement */
174 int tcp_do_win_scale = 1; /* RFC1323 window scaling */
175 int tcp_do_timestamps = 1; /* RFC1323 timestamps */
176 int tcp_ack_on_push = 0; /* set to enable immediate ACK-on-PUSH */
177 int tcp_do_ecn = 0; /* Explicit Congestion Notification */
178 #ifndef TCP_INIT_WIN
179 #define TCP_INIT_WIN 4 /* initial slow start window */
180 #endif
181 #ifndef TCP_INIT_WIN_LOCAL
182 #define TCP_INIT_WIN_LOCAL 4 /* initial slow start window for local nets */
183 #endif
184 /*
185  * For iw up to 4 the cap grows sub-linearly, reaching 3 * 1460 at iw == 4;
186  * from iw == 5 on it is simply iw * 1460, preserving current behavior for iw == 4.
187 */
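/*
 * Worked example (values taken from the table below): the default
 * tcp_init_win == 4 caps the initial window at 3 * 1460 == 4380 bytes,
 * while tcp_init_win == 10 allows 10 * 1460 == 14600 bytes.
 */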
188 int tcp_init_win_max[] = {
189 1 * 1460,
190 1 * 1460,
191 2 * 1460,
192 2 * 1460,
193 3 * 1460,
194 5 * 1460,
195 6 * 1460,
196 7 * 1460,
197 8 * 1460,
198 9 * 1460,
199 10 * 1460
200 };
201 int tcp_init_win = TCP_INIT_WIN;
202 int tcp_init_win_local = TCP_INIT_WIN_LOCAL;
203 int tcp_mss_ifmtu = 0;
204 #ifdef TCP_COMPAT_42
205 int tcp_compat_42 = 1;
206 #else
207 int tcp_compat_42 = 0;
208 #endif
209 int tcp_rst_ppslim = 100; /* 100pps */
210 int tcp_ackdrop_ppslim = 100; /* 100pps */
211 int tcp_do_loopback_cksum = 0;
212 int tcp_do_abc = 1; /* RFC3465 Appropriate byte counting. */
213 int tcp_abc_aggressive = 1; /* 1: L=2*SMSS 0: L=1*SMSS */
214 int tcp_sack_tp_maxholes = 32;
215 int tcp_sack_globalmaxholes = 1024;
216 int tcp_sack_globalholes = 0;
217 int tcp_ecn_maxretries = 1;
218 int tcp_msl_enable = 1; /* enable TIME_WAIT truncation */
219 int tcp_msl_loop = PR_SLOWHZ; /* MSL for loopback */
220 int tcp_msl_local = 5 * PR_SLOWHZ; /* MSL for 'local' */
221 int tcp_msl_remote = TCPTV_MSL; /* MSL otherwise */
222 int tcp_msl_remote_threshold = TCPTV_SRTTDFLT; /* RTT threshold */
223 int tcp_rttlocal = 0; /* Use RTT to decide who's 'local' */
224
225 int tcp4_vtw_enable = 0; /* 1 to enable */
226 int tcp6_vtw_enable = 0; /* 1 to enable */
227 int tcp_vtw_was_enabled = 0;
228 int tcp_vtw_entries = 1 << 4; /* 16 vestigial TIME_WAIT entries */
229
230 /* tcb hash */
231 #ifndef TCBHASHSIZE
232 #define TCBHASHSIZE 128
233 #endif
234 int tcbhashsize = TCBHASHSIZE;
235
236 /* syn hash parameters */
237 #define TCP_SYN_HASH_SIZE 293
238 #define TCP_SYN_BUCKET_SIZE 35
239 int tcp_syn_cache_size = TCP_SYN_HASH_SIZE;
240 int tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
241 int tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
242 struct syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
243
244 int tcp_freeq(struct tcpcb *);
245 static int tcp_iss_secret_init(void);
246
247 #ifdef INET
248 static void tcp_mtudisc_callback(struct in_addr);
249 #endif
250
251 #ifdef INET6
252 void tcp6_mtudisc(struct in6pcb *, int);
253 #endif
254
255 static struct pool tcpcb_pool;
256
257 static int tcp_drainwanted;
258
259 #ifdef TCP_CSUM_COUNTERS
260 #include <sys/device.h>
261
262 #if defined(INET)
263 struct evcnt tcp_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
264 NULL, "tcp", "hwcsum bad");
265 struct evcnt tcp_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
266 NULL, "tcp", "hwcsum ok");
267 struct evcnt tcp_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
268 NULL, "tcp", "hwcsum data");
269 struct evcnt tcp_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
270 NULL, "tcp", "swcsum");
271
272 EVCNT_ATTACH_STATIC(tcp_hwcsum_bad);
273 EVCNT_ATTACH_STATIC(tcp_hwcsum_ok);
274 EVCNT_ATTACH_STATIC(tcp_hwcsum_data);
275 EVCNT_ATTACH_STATIC(tcp_swcsum);
276 #endif /* defined(INET) */
277
278 #if defined(INET6)
279 struct evcnt tcp6_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
280 NULL, "tcp6", "hwcsum bad");
281 struct evcnt tcp6_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
282 NULL, "tcp6", "hwcsum ok");
283 struct evcnt tcp6_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
284 NULL, "tcp6", "hwcsum data");
285 struct evcnt tcp6_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
286 NULL, "tcp6", "swcsum");
287
288 EVCNT_ATTACH_STATIC(tcp6_hwcsum_bad);
289 EVCNT_ATTACH_STATIC(tcp6_hwcsum_ok);
290 EVCNT_ATTACH_STATIC(tcp6_hwcsum_data);
291 EVCNT_ATTACH_STATIC(tcp6_swcsum);
292 #endif /* defined(INET6) */
293 #endif /* TCP_CSUM_COUNTERS */
294
295
296 #ifdef TCP_OUTPUT_COUNTERS
297 #include <sys/device.h>
298
299 struct evcnt tcp_output_bigheader = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
300 NULL, "tcp", "output big header");
301 struct evcnt tcp_output_predict_hit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
302 NULL, "tcp", "output predict hit");
303 struct evcnt tcp_output_predict_miss = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
304 NULL, "tcp", "output predict miss");
305 struct evcnt tcp_output_copysmall = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
306 NULL, "tcp", "output copy small");
307 struct evcnt tcp_output_copybig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
308 NULL, "tcp", "output copy big");
309 struct evcnt tcp_output_refbig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
310 NULL, "tcp", "output reference big");
311
312 EVCNT_ATTACH_STATIC(tcp_output_bigheader);
313 EVCNT_ATTACH_STATIC(tcp_output_predict_hit);
314 EVCNT_ATTACH_STATIC(tcp_output_predict_miss);
315 EVCNT_ATTACH_STATIC(tcp_output_copysmall);
316 EVCNT_ATTACH_STATIC(tcp_output_copybig);
317 EVCNT_ATTACH_STATIC(tcp_output_refbig);
318
319 #endif /* TCP_OUTPUT_COUNTERS */
320
321 #ifdef TCP_REASS_COUNTERS
322 #include <sys/device.h>
323
324 struct evcnt tcp_reass_ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
325 NULL, "tcp_reass", "calls");
326 struct evcnt tcp_reass_empty = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
327 &tcp_reass_, "tcp_reass", "insert into empty queue");
328 struct evcnt tcp_reass_iteration[8] = {
329 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", ">7 iterations"),
330 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "1 iteration"),
331 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "2 iterations"),
332 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "3 iterations"),
333 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "4 iterations"),
334 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "5 iterations"),
335 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "6 iterations"),
336 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "7 iterations"),
337 };
338 struct evcnt tcp_reass_prependfirst = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
339 &tcp_reass_, "tcp_reass", "prepend to first");
340 struct evcnt tcp_reass_prepend = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
341 &tcp_reass_, "tcp_reass", "prepend");
342 struct evcnt tcp_reass_insert = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
343 &tcp_reass_, "tcp_reass", "insert");
344 struct evcnt tcp_reass_inserttail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
345 &tcp_reass_, "tcp_reass", "insert at tail");
346 struct evcnt tcp_reass_append = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
347 &tcp_reass_, "tcp_reass", "append");
348 struct evcnt tcp_reass_appendtail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
349 &tcp_reass_, "tcp_reass", "append to tail fragment");
350 struct evcnt tcp_reass_overlaptail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
351 &tcp_reass_, "tcp_reass", "overlap at end");
352 struct evcnt tcp_reass_overlapfront = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
353 &tcp_reass_, "tcp_reass", "overlap at start");
354 struct evcnt tcp_reass_segdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
355 &tcp_reass_, "tcp_reass", "duplicate segment");
356 struct evcnt tcp_reass_fragdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
357 &tcp_reass_, "tcp_reass", "duplicate fragment");
358
359 EVCNT_ATTACH_STATIC(tcp_reass_);
360 EVCNT_ATTACH_STATIC(tcp_reass_empty);
361 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 0);
362 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 1);
363 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 2);
364 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 3);
365 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 4);
366 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 5);
367 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 6);
368 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 7);
369 EVCNT_ATTACH_STATIC(tcp_reass_prependfirst);
370 EVCNT_ATTACH_STATIC(tcp_reass_prepend);
371 EVCNT_ATTACH_STATIC(tcp_reass_insert);
372 EVCNT_ATTACH_STATIC(tcp_reass_inserttail);
373 EVCNT_ATTACH_STATIC(tcp_reass_append);
374 EVCNT_ATTACH_STATIC(tcp_reass_appendtail);
375 EVCNT_ATTACH_STATIC(tcp_reass_overlaptail);
376 EVCNT_ATTACH_STATIC(tcp_reass_overlapfront);
377 EVCNT_ATTACH_STATIC(tcp_reass_segdup);
378 EVCNT_ATTACH_STATIC(tcp_reass_fragdup);
379
380 #endif /* TCP_REASS_COUNTERS */
381
382 #ifdef MBUFTRACE
383 struct mowner tcp_mowner = MOWNER_INIT("tcp", "");
384 struct mowner tcp_rx_mowner = MOWNER_INIT("tcp", "rx");
385 struct mowner tcp_tx_mowner = MOWNER_INIT("tcp", "tx");
386 struct mowner tcp_sock_mowner = MOWNER_INIT("tcp", "sock");
387 struct mowner tcp_sock_rx_mowner = MOWNER_INIT("tcp", "sock rx");
388 struct mowner tcp_sock_tx_mowner = MOWNER_INIT("tcp", "sock tx");
389 #endif
390
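/*
 * Callout that drives tcp_slowtimo(); it is armed in do_tcpinit() below
 * and provides the slow-timer tick behind tcp_now and the TCP timers.
 */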
391 callout_t tcp_slowtimo_ch;
392
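/*
 * One-time TCP initialization, run via RUN_ONCE() from tcp_init_common():
 * sets up the PCB table, the tcpcb pool, the timer, syn-cache, congestion
 * control and reassembly subsystems, per-CPU statistics, and the slow
 * timer callout.
 */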
393 static int
394 do_tcpinit(void)
395 {
396
397 in_pcbinit(&tcbtable, tcbhashsize, tcbhashsize);
398 pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
399 NULL, IPL_SOFTNET);
400
401 tcp_usrreq_init();
402
403 /* Initialize timer state. */
404 tcp_timer_init();
405
406 /* Initialize the compressed state engine. */
407 syn_cache_init();
408
409 /* Initialize the congestion control algorithms. */
410 tcp_congctl_init();
411
412 /* Initialize the TCPCB template. */
413 tcp_tcpcb_template();
414
415 /* Initialize reassembly queue */
416 tcpipqent_init();
417
418 /* SACK */
419 tcp_sack_init();
420
421 MOWNER_ATTACH(&tcp_tx_mowner);
422 MOWNER_ATTACH(&tcp_rx_mowner);
423 MOWNER_ATTACH(&tcp_reass_mowner);
424 MOWNER_ATTACH(&tcp_sock_mowner);
425 MOWNER_ATTACH(&tcp_sock_tx_mowner);
426 MOWNER_ATTACH(&tcp_sock_rx_mowner);
427 MOWNER_ATTACH(&tcp_mowner);
428
429 tcpstat_percpu = percpu_alloc(sizeof(uint64_t) * TCP_NSTATS);
430
431 vtw_earlyinit();
432
433 callout_init(&tcp_slowtimo_ch, CALLOUT_MPSAFE);
434 callout_reset(&tcp_slowtimo_ch, 1, tcp_slowtimo, NULL);
435
436 return 0;
437 }
438
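/*
 * Common initialization shared by the IPv4 and IPv6 entry points.
 * basehlen is the network-layer header size (struct ip or struct ip6_hdr);
 * max_protohdr is raised with an atomic CAS loop so concurrent callers are
 * safe, and the heavy lifting in do_tcpinit() runs exactly once.
 */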
439 void
440 tcp_init_common(unsigned basehlen)
441 {
442 static ONCE_DECL(dotcpinit);
443 unsigned hlen = basehlen + sizeof(struct tcphdr);
444 unsigned oldhlen;
445
446 if (max_linkhdr + hlen > MHLEN)
447 panic("tcp_init");
448 while ((oldhlen = max_protohdr) < hlen)
449 atomic_cas_uint(&max_protohdr, oldhlen, hlen);
450
451 RUN_ONCE(&dotcpinit, do_tcpinit);
452 }
453
454 /*
455  * TCP initialization
456 */
457 void
458 tcp_init(void)
459 {
460
461 icmp_mtudisc_callback_register(tcp_mtudisc_callback);
462
463 tcp_init_common(sizeof(struct ip));
464 }
465
466 /*
467 * Create template to be used to send tcp packets on a connection.
468 * Call after host entry created, allocates an mbuf and fills
469 * in a skeletal tcp/ip header, minimizing the amount of work
470 * necessary when the connection is used.
471 */
472 struct mbuf *
473 tcp_template(struct tcpcb *tp)
474 {
475 struct inpcb *inp = tp->t_inpcb;
476 #ifdef INET6
477 struct in6pcb *in6p = tp->t_in6pcb;
478 #endif
479 struct tcphdr *n;
480 struct mbuf *m;
481 int hlen;
482
483 switch (tp->t_family) {
484 case AF_INET:
485 hlen = sizeof(struct ip);
486 if (inp)
487 break;
488 #ifdef INET6
489 if (in6p) {
490 /* mapped addr case */
491 if (IN6_IS_ADDR_V4MAPPED(&in6p->in6p_laddr)
492 && IN6_IS_ADDR_V4MAPPED(&in6p->in6p_faddr))
493 break;
494 }
495 #endif
496 return NULL; /*EINVAL*/
497 #ifdef INET6
498 case AF_INET6:
499 hlen = sizeof(struct ip6_hdr);
500 if (in6p) {
501 /* more sanity checks? */
502 break;
503 }
504 return NULL; /*EINVAL*/
505 #endif
506 default:
507 hlen = 0; /*pacify gcc*/
508 return NULL; /*EAFNOSUPPORT*/
509 }
510 #ifdef DIAGNOSTIC
511 if (hlen + sizeof(struct tcphdr) > MCLBYTES)
512 panic("mclbytes too small for t_template");
513 #endif
514 m = tp->t_template;
515 if (m && m->m_len == hlen + sizeof(struct tcphdr))
516 ;
517 else {
518 if (m)
519 m_freem(m);
520 m = tp->t_template = NULL;
521 MGETHDR(m, M_DONTWAIT, MT_HEADER);
522 if (m && hlen + sizeof(struct tcphdr) > MHLEN) {
523 MCLGET(m, M_DONTWAIT);
524 if ((m->m_flags & M_EXT) == 0) {
525 m_free(m);
526 m = NULL;
527 }
528 }
529 if (m == NULL)
530 return NULL;
531 MCLAIM(m, &tcp_mowner);
532 m->m_pkthdr.len = m->m_len = hlen + sizeof(struct tcphdr);
533 }
534
535 memset(mtod(m, void *), 0, m->m_len);
536
537 n = (struct tcphdr *)(mtod(m, char *) + hlen);
538
539 switch (tp->t_family) {
540 case AF_INET:
541 {
542 struct ipovly *ipov;
543 mtod(m, struct ip *)->ip_v = 4;
544 mtod(m, struct ip *)->ip_hl = hlen >> 2;
545 ipov = mtod(m, struct ipovly *);
546 ipov->ih_pr = IPPROTO_TCP;
547 ipov->ih_len = htons(sizeof(struct tcphdr));
548 if (inp) {
549 ipov->ih_src = inp->inp_laddr;
550 ipov->ih_dst = inp->inp_faddr;
551 }
552 #ifdef INET6
553 else if (in6p) {
554 /* mapped addr case */
555 bcopy(&in6p->in6p_laddr.s6_addr32[3], &ipov->ih_src,
556 sizeof(ipov->ih_src));
557 bcopy(&in6p->in6p_faddr.s6_addr32[3], &ipov->ih_dst,
558 sizeof(ipov->ih_dst));
559 }
560 #endif
561 /*
562 * Compute the pseudo-header portion of the checksum
563 * now. We incrementally add in the TCP option and
564 * payload lengths later, and then compute the TCP
565 * checksum right before the packet is sent off onto
566 * the wire.
567 */
568 n->th_sum = in_cksum_phdr(ipov->ih_src.s_addr,
569 ipov->ih_dst.s_addr,
570 htons(sizeof(struct tcphdr) + IPPROTO_TCP));
571 break;
572 }
573 #ifdef INET6
574 case AF_INET6:
575 {
576 struct ip6_hdr *ip6;
577 mtod(m, struct ip *)->ip_v = 6;
578 ip6 = mtod(m, struct ip6_hdr *);
579 ip6->ip6_nxt = IPPROTO_TCP;
580 ip6->ip6_plen = htons(sizeof(struct tcphdr));
581 ip6->ip6_src = in6p->in6p_laddr;
582 ip6->ip6_dst = in6p->in6p_faddr;
583 ip6->ip6_flow = in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK;
584 if (ip6_auto_flowlabel) {
585 ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
586 ip6->ip6_flow |=
587 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
588 }
589 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
590 ip6->ip6_vfc |= IPV6_VERSION;
591
592 /*
593 * Compute the pseudo-header portion of the checksum
594 * now. We incrementally add in the TCP option and
595 * payload lengths later, and then compute the TCP
596 * checksum right before the packet is sent off onto
597 * the wire.
598 */
599 n->th_sum = in6_cksum_phdr(&in6p->in6p_laddr,
600 &in6p->in6p_faddr, htonl(sizeof(struct tcphdr)),
601 htonl(IPPROTO_TCP));
602 break;
603 }
604 #endif
605 }
606 if (inp) {
607 n->th_sport = inp->inp_lport;
608 n->th_dport = inp->inp_fport;
609 }
610 #ifdef INET6
611 else if (in6p) {
612 n->th_sport = in6p->in6p_lport;
613 n->th_dport = in6p->in6p_fport;
614 }
615 #endif
616 n->th_seq = 0;
617 n->th_ack = 0;
618 n->th_x2 = 0;
619 n->th_off = 5;
620 n->th_flags = 0;
621 n->th_win = 0;
622 n->th_urp = 0;
623 return (m);
624 }
625
626 /*
627 * Send a single message to the TCP at address specified by
628 * the given TCP/IP header. If m == 0, then we make a copy
629  * of the header template mtemplate and send directly to the addressed host.
630 * This is used to force keep alive messages out using the TCP
631 * template for a connection tp->t_template. If flags are given
632 * then we send a message back to the TCP which originated the
633  * segment described by th0, and discard the mbuf containing it and any other
634 * attached mbufs.
635 *
636 * In any case the ack and sequence number of the transmitted
637 * segment are as specified by the parameters.
638 */
639 int
640 tcp_respond(struct tcpcb *tp, struct mbuf *mtemplate, struct mbuf *m,
641 struct tcphdr *th0, tcp_seq ack, tcp_seq seq, int flags)
642 {
643 struct route *ro;
644 int error, tlen, win = 0;
645 int hlen;
646 struct ip *ip;
647 #ifdef INET6
648 struct ip6_hdr *ip6;
649 #endif
650 int family; /* family on packet, not inpcb/in6pcb! */
651 struct tcphdr *th;
652 struct socket *so;
653
654 if (tp != NULL && (flags & TH_RST) == 0) {
655 #ifdef DIAGNOSTIC
656 if (tp->t_inpcb && tp->t_in6pcb)
657 panic("tcp_respond: both t_inpcb and t_in6pcb are set");
658 #endif
659 #ifdef INET
660 if (tp->t_inpcb)
661 win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
662 #endif
663 #ifdef INET6
664 if (tp->t_in6pcb)
665 win = sbspace(&tp->t_in6pcb->in6p_socket->so_rcv);
666 #endif
667 }
668
669 th = NULL; /* Quell uninitialized warning */
670 ip = NULL;
671 #ifdef INET6
672 ip6 = NULL;
673 #endif
674 if (m == 0) {
675 if (!mtemplate)
676 return EINVAL;
677
678 /* get family information from template */
679 switch (mtod(mtemplate, struct ip *)->ip_v) {
680 case 4:
681 family = AF_INET;
682 hlen = sizeof(struct ip);
683 break;
684 #ifdef INET6
685 case 6:
686 family = AF_INET6;
687 hlen = sizeof(struct ip6_hdr);
688 break;
689 #endif
690 default:
691 return EAFNOSUPPORT;
692 }
693
694 MGETHDR(m, M_DONTWAIT, MT_HEADER);
695 if (m) {
696 MCLAIM(m, &tcp_tx_mowner);
697 MCLGET(m, M_DONTWAIT);
698 if ((m->m_flags & M_EXT) == 0) {
699 m_free(m);
700 m = NULL;
701 }
702 }
703 if (m == NULL)
704 return (ENOBUFS);
705
706 if (tcp_compat_42)
707 tlen = 1;
708 else
709 tlen = 0;
710
711 m->m_data += max_linkhdr;
712 bcopy(mtod(mtemplate, void *), mtod(m, void *),
713 mtemplate->m_len);
714 switch (family) {
715 case AF_INET:
716 ip = mtod(m, struct ip *);
717 th = (struct tcphdr *)(ip + 1);
718 break;
719 #ifdef INET6
720 case AF_INET6:
721 ip6 = mtod(m, struct ip6_hdr *);
722 th = (struct tcphdr *)(ip6 + 1);
723 break;
724 #endif
725 #if 0
726 default:
727 /* no one will visit here */
728 m_freem(m);
729 return EAFNOSUPPORT;
730 #endif
731 }
732 flags = TH_ACK;
733 } else {
734
735 if ((m->m_flags & M_PKTHDR) == 0) {
736 #if 0
737 printf("non PKTHDR to tcp_respond\n");
738 #endif
739 m_freem(m);
740 return EINVAL;
741 }
742 #ifdef DIAGNOSTIC
743 if (!th0)
744 panic("th0 == NULL in tcp_respond");
745 #endif
746
747 /* get family information from m */
748 switch (mtod(m, struct ip *)->ip_v) {
749 case 4:
750 family = AF_INET;
751 hlen = sizeof(struct ip);
752 ip = mtod(m, struct ip *);
753 break;
754 #ifdef INET6
755 case 6:
756 family = AF_INET6;
757 hlen = sizeof(struct ip6_hdr);
758 ip6 = mtod(m, struct ip6_hdr *);
759 break;
760 #endif
761 default:
762 m_freem(m);
763 return EAFNOSUPPORT;
764 }
765 /* clear h/w csum flags inherited from rx packet */
766 m->m_pkthdr.csum_flags = 0;
767
768 if ((flags & TH_SYN) == 0 || sizeof(*th0) > (th0->th_off << 2))
769 tlen = sizeof(*th0);
770 else
771 tlen = th0->th_off << 2;
772
773 if (m->m_len > hlen + tlen && (m->m_flags & M_EXT) == 0 &&
774 mtod(m, char *) + hlen == (char *)th0) {
775 m->m_len = hlen + tlen;
776 m_freem(m->m_next);
777 m->m_next = NULL;
778 } else {
779 struct mbuf *n;
780
781 #ifdef DIAGNOSTIC
782 if (max_linkhdr + hlen + tlen > MCLBYTES) {
783 m_freem(m);
784 return EMSGSIZE;
785 }
786 #endif
787 MGETHDR(n, M_DONTWAIT, MT_HEADER);
788 if (n && max_linkhdr + hlen + tlen > MHLEN) {
789 MCLGET(n, M_DONTWAIT);
790 if ((n->m_flags & M_EXT) == 0) {
791 m_freem(n);
792 n = NULL;
793 }
794 }
795 if (!n) {
796 m_freem(m);
797 return ENOBUFS;
798 }
799
800 MCLAIM(n, &tcp_tx_mowner);
801 n->m_data += max_linkhdr;
802 n->m_len = hlen + tlen;
803 m_copyback(n, 0, hlen, mtod(m, void *));
804 m_copyback(n, hlen, tlen, (void *)th0);
805
806 m_freem(m);
807 m = n;
808 n = NULL;
809 }
810
811 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
812 switch (family) {
813 case AF_INET:
814 ip = mtod(m, struct ip *);
815 th = (struct tcphdr *)(ip + 1);
816 ip->ip_p = IPPROTO_TCP;
817 xchg(ip->ip_dst, ip->ip_src, struct in_addr);
818 ip->ip_p = IPPROTO_TCP;
819 break;
820 #ifdef INET6
821 case AF_INET6:
822 ip6 = mtod(m, struct ip6_hdr *);
823 th = (struct tcphdr *)(ip6 + 1);
824 ip6->ip6_nxt = IPPROTO_TCP;
825 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
826 ip6->ip6_nxt = IPPROTO_TCP;
827 break;
828 #endif
829 #if 0
830 default:
831 /* no one will visit here */
832 m_freem(m);
833 return EAFNOSUPPORT;
834 #endif
835 }
836 xchg(th->th_dport, th->th_sport, u_int16_t);
837 #undef xchg
838 tlen = 0; /* be friendly with the following code */
839 }
840 th->th_seq = htonl(seq);
841 th->th_ack = htonl(ack);
842 th->th_x2 = 0;
843 if ((flags & TH_SYN) == 0) {
844 if (tp)
845 win >>= tp->rcv_scale;
846 if (win > TCP_MAXWIN)
847 win = TCP_MAXWIN;
848 th->th_win = htons((u_int16_t)win);
849 th->th_off = sizeof (struct tcphdr) >> 2;
850 tlen += sizeof(*th);
851 } else
852 tlen += th->th_off << 2;
853 m->m_len = hlen + tlen;
854 m->m_pkthdr.len = hlen + tlen;
855 m_reset_rcvif(m);
856 th->th_flags = flags;
857 th->th_urp = 0;
858
859 switch (family) {
860 #ifdef INET
861 case AF_INET:
862 {
863 struct ipovly *ipov = (struct ipovly *)ip;
864 memset(ipov->ih_x1, 0, sizeof ipov->ih_x1);
865 ipov->ih_len = htons((u_int16_t)tlen);
866
867 th->th_sum = 0;
868 th->th_sum = in_cksum(m, hlen + tlen);
869 ip->ip_len = htons(hlen + tlen);
870 ip->ip_ttl = ip_defttl;
871 break;
872 }
873 #endif
874 #ifdef INET6
875 case AF_INET6:
876 {
877 th->th_sum = 0;
878 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
879 tlen);
880 ip6->ip6_plen = htons(tlen);
881 if (tp && tp->t_in6pcb)
882 ip6->ip6_hlim = in6_selecthlim_rt(tp->t_in6pcb);
883 else
884 ip6->ip6_hlim = ip6_defhlim;
885 ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK;
886 if (ip6_auto_flowlabel) {
887 ip6->ip6_flow |=
888 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
889 }
890 break;
891 }
892 #endif
893 }
894
895 if (tp && tp->t_inpcb)
896 so = tp->t_inpcb->inp_socket;
897 #ifdef INET6
898 else if (tp && tp->t_in6pcb)
899 so = tp->t_in6pcb->in6p_socket;
900 #endif
901 else
902 so = NULL;
903
904 if (tp != NULL && tp->t_inpcb != NULL) {
905 ro = &tp->t_inpcb->inp_route;
906 #ifdef DIAGNOSTIC
907 if (family != AF_INET)
908 panic("tcp_respond: address family mismatch");
909 if (!in_hosteq(ip->ip_dst, tp->t_inpcb->inp_faddr)) {
910 panic("tcp_respond: ip_dst %x != inp_faddr %x",
911 ntohl(ip->ip_dst.s_addr),
912 ntohl(tp->t_inpcb->inp_faddr.s_addr));
913 }
914 #endif
915 }
916 #ifdef INET6
917 else if (tp != NULL && tp->t_in6pcb != NULL) {
918 ro = (struct route *)&tp->t_in6pcb->in6p_route;
919 #ifdef DIAGNOSTIC
920 if (family == AF_INET) {
921 if (!IN6_IS_ADDR_V4MAPPED(&tp->t_in6pcb->in6p_faddr))
922 panic("tcp_respond: not mapped addr");
923 if (memcmp(&ip->ip_dst,
924 &tp->t_in6pcb->in6p_faddr.s6_addr32[3],
925 sizeof(ip->ip_dst)) != 0) {
926 panic("tcp_respond: ip_dst != in6p_faddr");
927 }
928 } else if (family == AF_INET6) {
929 if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
930 &tp->t_in6pcb->in6p_faddr))
931 panic("tcp_respond: ip6_dst != in6p_faddr");
932 } else
933 panic("tcp_respond: address family mismatch");
934 #endif
935 }
936 #endif
937 else
938 ro = NULL;
939
940 switch (family) {
941 #ifdef INET
942 case AF_INET:
943 error = ip_output(m, NULL, ro,
944 (tp && tp->t_mtudisc ? IP_MTUDISC : 0), NULL, so);
945 break;
946 #endif
947 #ifdef INET6
948 case AF_INET6:
949 error = ip6_output(m, NULL, ro, 0, NULL, so, NULL);
950 break;
951 #endif
952 default:
953 error = EAFNOSUPPORT;
954 break;
955 }
956
957 return (error);
958 }
959
960 /*
961 * Template TCPCB. Rather than zeroing a new TCPCB and initializing
962 * a bunch of members individually, we maintain this template for the
963 * static and mostly-static components of the TCPCB, and copy it into
964 * the new TCPCB instead.
965 */
966 static struct tcpcb tcpcb_template = {
967 .t_srtt = TCPTV_SRTTBASE,
968 .t_rttmin = TCPTV_MIN,
969
970 .snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT,
971 .snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT,
972 .snd_numholes = 0,
973 .snd_cubic_wmax = 0,
974 .snd_cubic_wmax_last = 0,
975 .snd_cubic_ctime = 0,
976
977 .t_partialacks = -1,
978 .t_bytes_acked = 0,
979 .t_sndrexmitpack = 0,
980 .t_rcvoopack = 0,
981 .t_sndzerowin = 0,
982 };
983
984 /*
985 * Updates the TCPCB template whenever a parameter that would affect
986 * the template is changed.
987 */
988 void
989 tcp_tcpcb_template(void)
990 {
991 struct tcpcb *tp = &tcpcb_template;
992 int flags;
993
994 tp->t_peermss = tcp_mssdflt;
995 tp->t_ourmss = tcp_mssdflt;
996 tp->t_segsz = tcp_mssdflt;
997
998 flags = 0;
999 if (tcp_do_rfc1323 && tcp_do_win_scale)
1000 flags |= TF_REQ_SCALE;
1001 if (tcp_do_rfc1323 && tcp_do_timestamps)
1002 flags |= TF_REQ_TSTMP;
1003 tp->t_flags = flags;
1004
1005 /*
1006 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
1007 * rtt estimate. Set rttvar so that srtt + 2 * rttvar gives
1008 * reasonable initial retransmit time.
1009 */
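	/*
	 * In other words, until the first RTT sample arrives the initial
	 * retransmit timeout computed below is derived purely from
	 * tcp_rttdflt, clamped to the [TCPTV_MIN, TCPTV_REXMTMAX] range.
	 */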
1010 tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
1011 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1012 TCPTV_MIN, TCPTV_REXMTMAX);
1013
1014 /* Keep Alive */
1015 tp->t_keepinit = tcp_keepinit;
1016 tp->t_keepidle = tcp_keepidle;
1017 tp->t_keepintvl = tcp_keepintvl;
1018 tp->t_keepcnt = tcp_keepcnt;
1019 tp->t_maxidle = tp->t_keepcnt * tp->t_keepintvl;
1020
1021 /* MSL */
1022 tp->t_msl = TCPTV_MSL;
1023 }
1024
1025 /*
1026 * Create a new TCP control block, making an
1027 * empty reassembly queue and hooking it to the argument
1028 * protocol control block.
1029 */
1030 /* family selects inpcb, or in6pcb */
1031 struct tcpcb *
1032 tcp_newtcpcb(int family, void *aux)
1033 {
1034 struct tcpcb *tp;
1035 int i;
1036
1037 /* XXX Consider using a pool_cache for speed. */
1038 tp = pool_get(&tcpcb_pool, PR_NOWAIT); /* splsoftnet via tcp_usrreq */
1039 if (tp == NULL)
1040 return (NULL);
1041 memcpy(tp, &tcpcb_template, sizeof(*tp));
1042 TAILQ_INIT(&tp->segq);
1043 TAILQ_INIT(&tp->timeq);
1044 tp->t_family = family; /* may be overridden later on */
1045 TAILQ_INIT(&tp->snd_holes);
1046 LIST_INIT(&tp->t_sc); /* XXX can template this */
1047
1048 /* Don't sweat this loop; hopefully the compiler will unroll it. */
1049 for (i = 0; i < TCPT_NTIMERS; i++) {
1050 callout_init(&tp->t_timer[i], CALLOUT_MPSAFE);
1051 TCP_TIMER_INIT(tp, i);
1052 }
1053 callout_init(&tp->t_delack_ch, CALLOUT_MPSAFE);
1054
1055 switch (family) {
1056 case AF_INET:
1057 {
1058 struct inpcb *inp = (struct inpcb *)aux;
1059
1060 inp->inp_ip.ip_ttl = ip_defttl;
1061 inp->inp_ppcb = (void *)tp;
1062
1063 tp->t_inpcb = inp;
1064 tp->t_mtudisc = ip_mtudisc;
1065 break;
1066 }
1067 #ifdef INET6
1068 case AF_INET6:
1069 {
1070 struct in6pcb *in6p = (struct in6pcb *)aux;
1071
1072 in6p->in6p_ip6.ip6_hlim = in6_selecthlim_rt(in6p);
1073 in6p->in6p_ppcb = (void *)tp;
1074
1075 tp->t_in6pcb = in6p;
1076 /* for IPv6, always try to run path MTU discovery */
1077 tp->t_mtudisc = 1;
1078 break;
1079 }
1080 #endif /* INET6 */
1081 default:
1082 for (i = 0; i < TCPT_NTIMERS; i++)
1083 callout_destroy(&tp->t_timer[i]);
1084 callout_destroy(&tp->t_delack_ch);
1085 pool_put(&tcpcb_pool, tp); /* splsoftnet via tcp_usrreq */
1086 return (NULL);
1087 }
1088
1089 /*
1090 * Initialize our timebase. When we send timestamps, we take
1091 * the delta from tcp_now -- this means each connection always
1092 * gets a timebase of 1, which makes it, among other things,
1093 * more difficult to determine how long a system has been up,
1094 * and thus how many TCP sequence increments have occurred.
1095 *
1096  * We start with 1, because 0 doesn't work with Linux, which
1097 * considers timestamp 0 in a SYN packet as a bug and disables
1098 * timestamps.
1099 */
1100 tp->ts_timebase = tcp_now - 1;
1101
1102 tcp_congctl_select(tp, tcp_congctl_global_name);
1103
1104 return (tp);
1105 }
1106
1107 /*
1108 * Drop a TCP connection, reporting
1109 * the specified error. If connection is synchronized,
1110 * then send a RST to peer.
1111 */
1112 struct tcpcb *
1113 tcp_drop(struct tcpcb *tp, int errno)
1114 {
1115 struct socket *so = NULL;
1116
1117 #ifdef DIAGNOSTIC
1118 if (tp->t_inpcb && tp->t_in6pcb)
1119 panic("tcp_drop: both t_inpcb and t_in6pcb are set");
1120 #endif
1121 #ifdef INET
1122 if (tp->t_inpcb)
1123 so = tp->t_inpcb->inp_socket;
1124 #endif
1125 #ifdef INET6
1126 if (tp->t_in6pcb)
1127 so = tp->t_in6pcb->in6p_socket;
1128 #endif
1129 if (!so)
1130 return NULL;
1131
1132 if (TCPS_HAVERCVDSYN(tp->t_state)) {
1133 tp->t_state = TCPS_CLOSED;
1134 (void) tcp_output(tp);
1135 TCP_STATINC(TCP_STAT_DROPS);
1136 } else
1137 TCP_STATINC(TCP_STAT_CONNDROPS);
1138 if (errno == ETIMEDOUT && tp->t_softerror)
1139 errno = tp->t_softerror;
1140 so->so_error = errno;
1141 return (tcp_close(tp));
1142 }
1143
1144 /*
1145 * Close a TCP control block:
1146 * discard all space held by the tcp
1147 * discard internet protocol block
1148 * wake up any sleepers
1149 */
1150 struct tcpcb *
1151 tcp_close(struct tcpcb *tp)
1152 {
1153 struct inpcb *inp;
1154 #ifdef INET6
1155 struct in6pcb *in6p;
1156 #endif
1157 struct socket *so;
1158 #ifdef RTV_RTT
1159 struct rtentry *rt = NULL;
1160 #endif
1161 struct route *ro;
1162 int j;
1163
1164 inp = tp->t_inpcb;
1165 #ifdef INET6
1166 in6p = tp->t_in6pcb;
1167 #endif
1168 so = NULL;
1169 ro = NULL;
1170 if (inp) {
1171 so = inp->inp_socket;
1172 ro = &inp->inp_route;
1173 }
1174 #ifdef INET6
1175 else if (in6p) {
1176 so = in6p->in6p_socket;
1177 ro = (struct route *)&in6p->in6p_route;
1178 }
1179 #endif
1180
1181 #ifdef RTV_RTT
1182 /*
1183 * If we sent enough data to get some meaningful characteristics,
1184 * save them in the routing entry. 'Enough' is arbitrarily
1185 * defined as the sendpipesize (default 4K) * 16. This would
1186 * give us 16 rtt samples assuming we only get one sample per
1187 * window (the usual case on a long haul net). 16 samples is
1188 * enough for the srtt filter to converge to within 5% of the correct
1189 * value; fewer samples and we could save a very bogus rtt.
1190 *
1191 * Don't update the default route's characteristics and don't
1192 * update anything that the user "locked".
1193 */
1194 if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
1195 ro && (rt = rtcache_validate(ro)) != NULL &&
1196 !in_nullhost(satocsin(rt_getkey(rt))->sin_addr)) {
1197 u_long i = 0;
1198
1199 if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
1200 i = tp->t_srtt *
1201 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
1202 if (rt->rt_rmx.rmx_rtt && i)
1203 /*
1204 * filter this update to half the old & half
1205 * the new values, converting scale.
1206 * See route.h and tcp_var.h for a
1207 * description of the scaling constants.
1208 */
1209 rt->rt_rmx.rmx_rtt =
1210 (rt->rt_rmx.rmx_rtt + i) / 2;
1211 else
1212 rt->rt_rmx.rmx_rtt = i;
1213 }
1214 if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
1215 i = tp->t_rttvar *
1216 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTTVAR_SHIFT + 2));
1217 if (rt->rt_rmx.rmx_rttvar && i)
1218 rt->rt_rmx.rmx_rttvar =
1219 (rt->rt_rmx.rmx_rttvar + i) / 2;
1220 else
1221 rt->rt_rmx.rmx_rttvar = i;
1222 }
1223 /*
1224 * update the pipelimit (ssthresh) if it has been updated
1225  * already or if a pipesize was specified & the threshold
1226 * got below half the pipesize. I.e., wait for bad news
1227 * before we start updating, then update on both good
1228 * and bad news.
1229 */
1230 if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
1231 (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) ||
1232 i < (rt->rt_rmx.rmx_sendpipe / 2)) {
1233 /*
1234 * convert the limit from user data bytes to
1235 * packets then to packet data bytes.
1236 */
1237 i = (i + tp->t_segsz / 2) / tp->t_segsz;
1238 if (i < 2)
1239 i = 2;
1240 i *= (u_long)(tp->t_segsz + sizeof (struct tcpiphdr));
1241 if (rt->rt_rmx.rmx_ssthresh)
1242 rt->rt_rmx.rmx_ssthresh =
1243 (rt->rt_rmx.rmx_ssthresh + i) / 2;
1244 else
1245 rt->rt_rmx.rmx_ssthresh = i;
1246 }
1247 }
1248 rtcache_unref(rt, ro);
1249 #endif /* RTV_RTT */
1250 /* free the reassembly queue, if any */
1251 TCP_REASS_LOCK(tp);
1252 (void) tcp_freeq(tp);
1253 TCP_REASS_UNLOCK(tp);
1254
1255 /* free the SACK holes list. */
1256 tcp_free_sackholes(tp);
1257 tcp_congctl_release(tp);
1258 syn_cache_cleanup(tp);
1259
1260 if (tp->t_template) {
1261 m_free(tp->t_template);
1262 tp->t_template = NULL;
1263 }
1264
1265 /*
1266 * Detaching the pcb will unlock the socket/tcpcb, and stopping
1267 * the timers can also drop the lock. We need to prevent access
1268 * to the tcpcb as it's half torn down. Flag the pcb as dead
1269 * (prevents access by timers) and only then detach it.
1270 */
1271 tp->t_flags |= TF_DEAD;
1272 if (inp) {
1273 inp->inp_ppcb = 0;
1274 soisdisconnected(so);
1275 in_pcbdetach(inp);
1276 }
1277 #ifdef INET6
1278 else if (in6p) {
1279 in6p->in6p_ppcb = 0;
1280 soisdisconnected(so);
1281 in6_pcbdetach(in6p);
1282 }
1283 #endif
1284 /*
1285  * pcb is no longer visible elsewhere, so we can safely release
1286 * the lock in callout_halt() if needed.
1287 */
1288 TCP_STATINC(TCP_STAT_CLOSED);
1289 for (j = 0; j < TCPT_NTIMERS; j++) {
1290 callout_halt(&tp->t_timer[j], softnet_lock);
1291 callout_destroy(&tp->t_timer[j]);
1292 }
1293 callout_halt(&tp->t_delack_ch, softnet_lock);
1294 callout_destroy(&tp->t_delack_ch);
1295 pool_put(&tcpcb_pool, tp);
1296
1297 return NULL;
1298 }
1299
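/*
 * Free the reassembly queue of a connection.  The caller must hold the
 * reassembly lock (TCP_REASS_LOCK).  Returns non-zero if any queued
 * segment was freed.
 */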
1300 int
1301 tcp_freeq(struct tcpcb *tp)
1302 {
1303 struct ipqent *qe;
1304 int rv = 0;
1305 #ifdef TCPREASS_DEBUG
1306 int i = 0;
1307 #endif
1308
1309 TCP_REASS_LOCK_CHECK(tp);
1310
1311 while ((qe = TAILQ_FIRST(&tp->segq)) != NULL) {
1312 #ifdef TCPREASS_DEBUG
1313 printf("tcp_freeq[%p,%d]: %u:%u(%u) 0x%02x\n",
1314 tp, i++, qe->ipqe_seq, qe->ipqe_seq + qe->ipqe_len,
1315 qe->ipqe_len, qe->ipqe_flags & (TH_SYN|TH_FIN|TH_RST));
1316 #endif
1317 TAILQ_REMOVE(&tp->segq, qe, ipqe_q);
1318 TAILQ_REMOVE(&tp->timeq, qe, ipqe_timeq);
1319 m_freem(qe->ipqe_m);
1320 tcpipqent_free(qe);
1321 rv = 1;
1322 }
1323 tp->t_segqlen = 0;
1324 KASSERT(TAILQ_EMPTY(&tp->timeq));
1325 return (rv);
1326 }
1327
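/*
 * Fast timeout routine: perform a deferred tcp_drain() if one was
 * scheduled by tcp_drainstub(), the cheap hook invoked when the system
 * runs short of mbufs.
 */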
1328 void
1329 tcp_fasttimo(void)
1330 {
1331 if (tcp_drainwanted) {
1332 tcp_drain();
1333 tcp_drainwanted = 0;
1334 }
1335 }
1336
1337 void
1338 tcp_drainstub(void)
1339 {
1340 tcp_drainwanted = 1;
1341 }
1342
1343 /*
1344 * Protocol drain routine. Called when memory is in short supply.
1345 * Called from pr_fasttimo thus a callout context.
1346 */
1347 void
1348 tcp_drain(void)
1349 {
1350 struct inpcb_hdr *inph;
1351 struct tcpcb *tp;
1352
1353 mutex_enter(softnet_lock);
1354 KERNEL_LOCK(1, NULL);
1355
1356 /*
1357 * Free the sequence queue of all TCP connections.
1358 */
1359 TAILQ_FOREACH(inph, &tcbtable.inpt_queue, inph_queue) {
1360 switch (inph->inph_af) {
1361 case AF_INET:
1362 tp = intotcpcb((struct inpcb *)inph);
1363 break;
1364 #ifdef INET6
1365 case AF_INET6:
1366 tp = in6totcpcb((struct in6pcb *)inph);
1367 break;
1368 #endif
1369 default:
1370 tp = NULL;
1371 break;
1372 }
1373 if (tp != NULL) {
1374 /*
1375 * We may be called from a device's interrupt
1376 * context. If the tcpcb is already busy,
1377 * just bail out now.
1378 */
1379 if (tcp_reass_lock_try(tp) == 0)
1380 continue;
1381 if (tcp_freeq(tp))
1382 TCP_STATINC(TCP_STAT_CONNSDRAINED);
1383 TCP_REASS_UNLOCK(tp);
1384 }
1385 }
1386
1387 KERNEL_UNLOCK_ONE(NULL);
1388 mutex_exit(softnet_lock);
1389 }
1390
1391 /*
1392 * Notify a tcp user of an asynchronous error;
1393 * store error as soft error, but wake up user
1394 * (for now, won't do anything until can select for soft error).
1395 */
1396 void
1397 tcp_notify(struct inpcb *inp, int error)
1398 {
1399 struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
1400 struct socket *so = inp->inp_socket;
1401
1402 /*
1403 * Ignore some errors if we are hooked up.
1404 * If connection hasn't completed, has retransmitted several times,
1405 * and receives a second error, give up now. This is better
1406 * than waiting a long time to establish a connection that
1407 * can never complete.
1408 */
1409 if (tp->t_state == TCPS_ESTABLISHED &&
1410 (error == EHOSTUNREACH || error == ENETUNREACH ||
1411 error == EHOSTDOWN)) {
1412 return;
1413 } else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
1414 tp->t_rxtshift > 3 && tp->t_softerror)
1415 so->so_error = error;
1416 else
1417 tp->t_softerror = error;
1418 cv_broadcast(&so->so_cv);
1419 sorwakeup(so);
1420 sowwakeup(so);
1421 }
1422
1423 #ifdef INET6
1424 void
1425 tcp6_notify(struct in6pcb *in6p, int error)
1426 {
1427 struct tcpcb *tp = (struct tcpcb *)in6p->in6p_ppcb;
1428 struct socket *so = in6p->in6p_socket;
1429
1430 /*
1431 * Ignore some errors if we are hooked up.
1432 * If connection hasn't completed, has retransmitted several times,
1433 * and receives a second error, give up now. This is better
1434 * than waiting a long time to establish a connection that
1435 * can never complete.
1436 */
1437 if (tp->t_state == TCPS_ESTABLISHED &&
1438 (error == EHOSTUNREACH || error == ENETUNREACH ||
1439 error == EHOSTDOWN)) {
1440 return;
1441 } else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
1442 tp->t_rxtshift > 3 && tp->t_softerror)
1443 so->so_error = error;
1444 else
1445 tp->t_softerror = error;
1446 cv_broadcast(&so->so_cv);
1447 sorwakeup(so);
1448 sowwakeup(so);
1449 }
1450 #endif
1451
1452 #ifdef INET6
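/*
 * ICMPv6 control input for TCP: PRC_MSGSIZE feeds path MTU discovery via
 * icmp6_mtudisc_update(), redirects flush the cached route, and other
 * errors are reported to matching PCBs (and the syn cache) via tcp6_notify().
 */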
1453 void *
1454 tcp6_ctlinput(int cmd, const struct sockaddr *sa, void *d)
1455 {
1456 struct tcphdr th;
1457 void (*notify)(struct in6pcb *, int) = tcp6_notify;
1458 int nmatch;
1459 struct ip6_hdr *ip6;
1460 const struct sockaddr_in6 *sa6_src = NULL;
1461 const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *)sa;
1462 struct mbuf *m;
1463 int off;
1464
1465 if (sa->sa_family != AF_INET6 ||
1466 sa->sa_len != sizeof(struct sockaddr_in6))
1467 return NULL;
1468 if ((unsigned)cmd >= PRC_NCMDS)
1469 return NULL;
1470 else if (cmd == PRC_QUENCH) {
1471 /*
1472 * Don't honor ICMP Source Quench messages meant for
1473 * TCP connections.
1474 */
1475 return NULL;
1476 } else if (PRC_IS_REDIRECT(cmd))
1477 notify = in6_rtchange, d = NULL;
1478 else if (cmd == PRC_MSGSIZE)
1479 ; /* special code is present, see below */
1480 else if (cmd == PRC_HOSTDEAD)
1481 d = NULL;
1482 else if (inet6ctlerrmap[cmd] == 0)
1483 return NULL;
1484
1485 /* if the parameter is from icmp6, decode it. */
1486 if (d != NULL) {
1487 struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
1488 m = ip6cp->ip6c_m;
1489 ip6 = ip6cp->ip6c_ip6;
1490 off = ip6cp->ip6c_off;
1491 sa6_src = ip6cp->ip6c_src;
1492 } else {
1493 m = NULL;
1494 ip6 = NULL;
1495 sa6_src = &sa6_any;
1496 off = 0;
1497 }
1498
1499 if (ip6) {
1500 /*
1501  * XXX: We assume that when ip6 is non-NULL,
1502 * M and OFF are valid.
1503 */
1504
1505 /* check if we can safely examine src and dst ports */
1506 if (m->m_pkthdr.len < off + sizeof(th)) {
1507 if (cmd == PRC_MSGSIZE)
1508 icmp6_mtudisc_update((struct ip6ctlparam *)d, 0);
1509 return NULL;
1510 }
1511
1512 memset(&th, 0, sizeof(th));
1513 m_copydata(m, off, sizeof(th), (void *)&th);
1514
1515 if (cmd == PRC_MSGSIZE) {
1516 int valid = 0;
1517
1518 /*
1519 * Check to see if we have a valid TCP connection
1520 * corresponding to the address in the ICMPv6 message
1521 * payload.
1522 */
1523 if (in6_pcblookup_connect(&tcbtable, &sa6->sin6_addr,
1524 th.th_dport,
1525 (const struct in6_addr *)&sa6_src->sin6_addr,
1526 th.th_sport, 0, 0))
1527 valid++;
1528
1529 /*
1530 * Depending on the value of "valid" and routing table
1531 * size (mtudisc_{hi,lo}wat), we will:
1532  *   - recalculate the new MTU and create the
1533 * corresponding routing entry, or
1534 * - ignore the MTU change notification.
1535 */
1536 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1537
1538 /*
1539 * no need to call in6_pcbnotify, it should have been
1540 * called via callback if necessary
1541 */
1542 return NULL;
1543 }
1544
1545 nmatch = in6_pcbnotify(&tcbtable, sa, th.th_dport,
1546 (const struct sockaddr *)sa6_src, th.th_sport, cmd, NULL, notify);
1547 if (nmatch == 0 && syn_cache_count &&
1548 (inet6ctlerrmap[cmd] == EHOSTUNREACH ||
1549 inet6ctlerrmap[cmd] == ENETUNREACH ||
1550 inet6ctlerrmap[cmd] == EHOSTDOWN))
1551 syn_cache_unreach((const struct sockaddr *)sa6_src,
1552 sa, &th);
1553 } else {
1554 (void) in6_pcbnotify(&tcbtable, sa, 0,
1555 (const struct sockaddr *)sa6_src, 0, cmd, NULL, notify);
1556 }
1557
1558 return NULL;
1559 }
1560 #endif
1561
1562 #ifdef INET
1563 /* assumes that ip header and tcp header are contiguous on mbuf */
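/*
 * ICMP control input for TCP: PRC_MSGSIZE drives path MTU discovery
 * (either immediately via icmp_mtudisc() or deferred through the
 * TF_PMTUD_PEND machinery), redirects flush the cached route, and other
 * errors are dispatched to matching PCBs via tcp_notify().
 */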
1564 void *
1565 tcp_ctlinput(int cmd, const struct sockaddr *sa, void *v)
1566 {
1567 struct ip *ip = v;
1568 struct tcphdr *th;
1569 struct icmp *icp;
1570 extern const int inetctlerrmap[];
1571 void (*notify)(struct inpcb *, int) = tcp_notify;
1572 int errno;
1573 int nmatch;
1574 struct tcpcb *tp;
1575 u_int mtu;
1576 tcp_seq seq;
1577 struct inpcb *inp;
1578 #ifdef INET6
1579 struct in6pcb *in6p;
1580 struct in6_addr src6, dst6;
1581 #endif
1582
1583 if (sa->sa_family != AF_INET ||
1584 sa->sa_len != sizeof(struct sockaddr_in))
1585 return NULL;
1586 if ((unsigned)cmd >= PRC_NCMDS)
1587 return NULL;
1588 errno = inetctlerrmap[cmd];
1589 if (cmd == PRC_QUENCH)
1590 /*
1591 * Don't honor ICMP Source Quench messages meant for
1592 * TCP connections.
1593 */
1594 return NULL;
1595 else if (PRC_IS_REDIRECT(cmd))
1596 notify = in_rtchange, ip = 0;
1597 else if (cmd == PRC_MSGSIZE && ip && ip->ip_v == 4) {
1598 /*
1599 * Check to see if we have a valid TCP connection
1600 * corresponding to the address in the ICMP message
1601 * payload.
1602 *
1603 * Boundary check is made in icmp_input(), with ICMP_ADVLENMIN.
1604 */
1605 th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
1606 #ifdef INET6
1607 in6_in_2_v4mapin6(&ip->ip_src, &src6);
1608 in6_in_2_v4mapin6(&ip->ip_dst, &dst6);
1609 #endif
1610 if ((inp = in_pcblookup_connect(&tcbtable, ip->ip_dst,
1611 th->th_dport, ip->ip_src, th->th_sport, 0)) != NULL)
1612 #ifdef INET6
1613 in6p = NULL;
1614 #else
1615 ;
1616 #endif
1617 #ifdef INET6
1618 else if ((in6p = in6_pcblookup_connect(&tcbtable, &dst6,
1619 th->th_dport, &src6, th->th_sport, 0, 0)) != NULL)
1620 ;
1621 #endif
1622 else
1623 return NULL;
1624
1625 /*
1626 * Now that we've validated that we are actually communicating
1627 * with the host indicated in the ICMP message, locate the
1628 * ICMP header, recalculate the new MTU, and create the
1629 * corresponding routing entry.
1630 */
1631 icp = (struct icmp *)((char *)ip -
1632 offsetof(struct icmp, icmp_ip));
1633 if (inp) {
1634 if ((tp = intotcpcb(inp)) == NULL)
1635 return NULL;
1636 }
1637 #ifdef INET6
1638 else if (in6p) {
1639 if ((tp = in6totcpcb(in6p)) == NULL)
1640 return NULL;
1641 }
1642 #endif
1643 else
1644 return NULL;
1645 seq = ntohl(th->th_seq);
1646 if (SEQ_LT(seq, tp->snd_una) || SEQ_GT(seq, tp->snd_max))
1647 return NULL;
1648 /*
1649 * If the ICMP message advertises a Next-Hop MTU
1650  * equal to or larger than the maximum packet size we have
1651 * ever sent, drop the message.
1652 */
1653 mtu = (u_int)ntohs(icp->icmp_nextmtu);
1654 if (mtu >= tp->t_pmtud_mtu_sent)
1655 return NULL;
1656 if (mtu >= tcp_hdrsz(tp) + tp->t_pmtud_mss_acked) {
1657 /*
1658 * Calculate new MTU, and create corresponding
1659 * route (traditional PMTUD).
1660 */
1661 tp->t_flags &= ~TF_PMTUD_PEND;
1662 icmp_mtudisc(icp, ip->ip_dst);
1663 } else {
1664 /*
1665 * Record the information got in the ICMP
1666 * message; act on it later.
1667 * If we had already recorded an ICMP message,
1668 * replace the old one only if the new message
1669 * refers to an older TCP segment
1670 */
1671 if (tp->t_flags & TF_PMTUD_PEND) {
1672 if (SEQ_LT(tp->t_pmtud_th_seq, seq))
1673 return NULL;
1674 } else
1675 tp->t_flags |= TF_PMTUD_PEND;
1676 tp->t_pmtud_th_seq = seq;
1677 tp->t_pmtud_nextmtu = icp->icmp_nextmtu;
1678 tp->t_pmtud_ip_len = icp->icmp_ip.ip_len;
1679 tp->t_pmtud_ip_hl = icp->icmp_ip.ip_hl;
1680 }
1681 return NULL;
1682 } else if (cmd == PRC_HOSTDEAD)
1683 ip = 0;
1684 else if (errno == 0)
1685 return NULL;
1686 if (ip && ip->ip_v == 4 && sa->sa_family == AF_INET) {
1687 th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
1688 nmatch = in_pcbnotify(&tcbtable, satocsin(sa)->sin_addr,
1689 th->th_dport, ip->ip_src, th->th_sport, errno, notify);
1690 if (nmatch == 0 && syn_cache_count &&
1691 (inetctlerrmap[cmd] == EHOSTUNREACH ||
1692 inetctlerrmap[cmd] == ENETUNREACH ||
1693 inetctlerrmap[cmd] == EHOSTDOWN)) {
1694 struct sockaddr_in sin;
1695 memset(&sin, 0, sizeof(sin));
1696 sin.sin_len = sizeof(sin);
1697 sin.sin_family = AF_INET;
1698 sin.sin_port = th->th_sport;
1699 sin.sin_addr = ip->ip_src;
1700 syn_cache_unreach((struct sockaddr *)&sin, sa, th);
1701 }
1702
1703 /* XXX mapped address case */
1704 } else
1705 in_pcbnotifyall(&tcbtable, satocsin(sa)->sin_addr, errno,
1706 notify);
1707 return NULL;
1708 }
1709
1710 /*
1711 * When a source quench is received, we are being notified of congestion.
1712 * Close the congestion window down to the Loss Window (one segment).
1713 * We will gradually open it again as we proceed.
1714 */
1715 void
1716 tcp_quench(struct inpcb *inp, int errno)
1717 {
1718 struct tcpcb *tp = intotcpcb(inp);
1719
1720 if (tp) {
1721 tp->snd_cwnd = tp->t_segsz;
1722 tp->t_bytes_acked = 0;
1723 }
1724 }
1725 #endif
1726
1727 #ifdef INET6
1728 void
1729 tcp6_quench(struct in6pcb *in6p, int errno)
1730 {
1731 struct tcpcb *tp = in6totcpcb(in6p);
1732
1733 if (tp) {
1734 tp->snd_cwnd = tp->t_segsz;
1735 tp->t_bytes_acked = 0;
1736 }
1737 }
1738 #endif
1739
1740 #ifdef INET
1741 /*
1742 * Path MTU Discovery handlers.
1743 */
1744 void
1745 tcp_mtudisc_callback(struct in_addr faddr)
1746 {
1747 #ifdef INET6
1748 struct in6_addr in6;
1749 #endif
1750
1751 in_pcbnotifyall(&tcbtable, faddr, EMSGSIZE, tcp_mtudisc);
1752 #ifdef INET6
1753 in6_in_2_v4mapin6(&faddr, &in6);
1754 tcp6_mtudisc_callback(&in6);
1755 #endif
1756 }
1757
1758 /*
1759 * On receipt of path MTU corrections, flush old route and replace it
1760 * with the new one. Retransmit all unacknowledged packets, to ensure
1761 * that all packets will be received.
1762 */
1763 void
1764 tcp_mtudisc(struct inpcb *inp, int errno)
1765 {
1766 struct tcpcb *tp = intotcpcb(inp);
1767 struct rtentry *rt;
1768
1769 if (tp == NULL)
1770 return;
1771
1772 rt = in_pcbrtentry(inp);
1773 if (rt != NULL) {
1774 /*
1775 * If this was not a host route, remove and realloc.
1776 */
1777 if ((rt->rt_flags & RTF_HOST) == 0) {
1778 in_pcbrtentry_unref(rt, inp);
1779 in_rtchange(inp, errno);
1780 if ((rt = in_pcbrtentry(inp)) == NULL)
1781 return;
1782 }
1783
1784 /*
1785 * Slow start out of the error condition. We
1786 * use the MTU because we know it's smaller
1787 * than the previously transmitted segment.
1788 *
1789 * Note: This is more conservative than the
1790 * suggestion in draft-floyd-incr-init-win-03.
1791 */
1792 if (rt->rt_rmx.rmx_mtu != 0)
1793 tp->snd_cwnd =
1794 TCP_INITIAL_WINDOW(tcp_init_win,
1795 rt->rt_rmx.rmx_mtu);
1796 in_pcbrtentry_unref(rt, inp);
1797 }
1798
1799 /*
1800 * Resend unacknowledged packets.
1801 */
1802 tp->snd_nxt = tp->sack_newdata = tp->snd_una;
1803 tcp_output(tp);
1804 }
1805 #endif /* INET */
1806
1807 #ifdef INET6
1808 /*
1809 * Path MTU Discovery handlers.
1810 */
1811 void
1812 tcp6_mtudisc_callback(struct in6_addr *faddr)
1813 {
1814 struct sockaddr_in6 sin6;
1815
1816 memset(&sin6, 0, sizeof(sin6));
1817 sin6.sin6_family = AF_INET6;
1818 sin6.sin6_len = sizeof(struct sockaddr_in6);
1819 sin6.sin6_addr = *faddr;
1820 (void) in6_pcbnotify(&tcbtable, (struct sockaddr *)&sin6, 0,
1821 (const struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp6_mtudisc);
1822 }
1823
1824 void
1825 tcp6_mtudisc(struct in6pcb *in6p, int errno)
1826 {
1827 struct tcpcb *tp = in6totcpcb(in6p);
1828 struct rtentry *rt;
1829
1830 if (tp == NULL)
1831 return;
1832
1833 rt = in6_pcbrtentry(in6p);
1834 if (rt != NULL) {
1835 /*
1836 * If this was not a host route, remove and realloc.
1837 */
1838 if ((rt->rt_flags & RTF_HOST) == 0) {
1839 in6_pcbrtentry_unref(rt, in6p);
1840 in6_rtchange(in6p, errno);
1841 rt = in6_pcbrtentry(in6p);
1842 if (rt == NULL)
1843 return;
1844 }
1845
1846 /*
1847 * Slow start out of the error condition. We
1848 * use the MTU because we know it's smaller
1849 * than the previously transmitted segment.
1850 *
1851 * Note: This is more conservative than the
1852 * suggestion in draft-floyd-incr-init-win-03.
1853 */
1854 if (rt->rt_rmx.rmx_mtu != 0) {
1855 tp->snd_cwnd = TCP_INITIAL_WINDOW(tcp_init_win,
1856 rt->rt_rmx.rmx_mtu);
1857 }
1858 in6_pcbrtentry_unref(rt, in6p);
1859 }
1860
1861 /*
1862 * Resend unacknowledged packets.
1863 */
1864 tp->snd_nxt = tp->sack_newdata = tp->snd_una;
1865 tcp_output(tp);
1866 }
1867 #endif /* INET6 */
1868
1869 /*
1870 * Compute the MSS to advertise to the peer. Called only during
1871 * the 3-way handshake. If we are the server (peer initiated
1872 * connection), we are called with a pointer to the interface
1873 * on which the SYN packet arrived. If we are the client (we
1874 * initiated connection), we are called with a pointer to the
1875 * interface out which this connection should go.
1876 *
1877 * NOTE: Do not subtract IP option/extension header size nor IPsec
1878 * header size from MSS advertisement. MSS option must hold the maximum
1879 * segment size we can accept, so it must always be:
1880 * max(if mtu) - ip header - tcp header
1881 */
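/*
 * For example, over a standard 1500-byte Ethernet MTU the IPv4 value
 * advertised here is 1500 - 20 (IP header) - 20 (TCP header) = 1460.
 */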
1882 u_long
1883 tcp_mss_to_advertise(const struct ifnet *ifp, int af)
1884 {
1885 extern u_long in_maxmtu;
1886 u_long mss = 0;
1887 u_long hdrsiz;
1888
1889 /*
1890 * In order to avoid defeating path MTU discovery on the peer,
1891 * we advertise the max MTU of all attached networks as our MSS,
1892 * per RFC 1191, section 3.1.
1893 *
1894 * We provide the option to advertise just the MTU of
1895 * the interface on which we hope this connection will
1896 * be receiving. If we are responding to a SYN, we
1897 * will have a pretty good idea about this, but when
1898 * initiating a connection there is a bit more doubt.
1899 *
1900 * We also need to ensure that loopback has a large enough
1901 * MSS, as the loopback MTU is never included in in_maxmtu.
1902 */
1903
1904 if (ifp != NULL)
1905 switch (af) {
1906 case AF_INET:
1907 mss = ifp->if_mtu;
1908 break;
1909 #ifdef INET6
1910 case AF_INET6:
1911 mss = IN6_LINKMTU(ifp);
1912 break;
1913 #endif
1914 }
1915
1916 if (tcp_mss_ifmtu == 0)
1917 switch (af) {
1918 case AF_INET:
1919 mss = max(in_maxmtu, mss);
1920 break;
1921 #ifdef INET6
1922 case AF_INET6:
1923 mss = max(in6_maxmtu, mss);
1924 break;
1925 #endif
1926 }
1927
1928 switch (af) {
1929 case AF_INET:
1930 hdrsiz = sizeof(struct ip);
1931 break;
1932 #ifdef INET6
1933 case AF_INET6:
1934 hdrsiz = sizeof(struct ip6_hdr);
1935 break;
1936 #endif
1937 default:
1938 hdrsiz = 0;
1939 break;
1940 }
1941 hdrsiz += sizeof(struct tcphdr);
1942 if (mss > hdrsiz)
1943 mss -= hdrsiz;
1944
1945 mss = max(tcp_mssdflt, mss);
1946 return (mss);
1947 }
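
/*
 * Worked example for the above, assuming the selected MTU comes out to
 * the common Ethernet value of 1500 bytes:
 *
 *	AF_INET:  1500 - sizeof(struct ip)      - sizeof(struct tcphdr)
 *		= 1500 - 20 - 20 = 1460
 *	AF_INET6: 1500 - sizeof(struct ip6_hdr) - sizeof(struct tcphdr)
 *		= 1500 - 40 - 20 = 1440
 *
 * Per the NOTE above, TCP options, IP options/extension headers and
 * IPsec overhead are deliberately not subtracted from the advertised
 * value.
 */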
1948
1949 /*
1950 * Set connection variables based on the peer's advertised MSS.
1951 * We are passed the TCPCB for the actual connection. If we
1952 * are the server, we are called by the compressed state engine
1953 * when the 3-way handshake is complete. If we are the client,
1954 * we are called when we receive the SYN,ACK from the server.
1955 *
1956 * NOTE: Our advertised MSS value must be initialized in the TCPCB
1957 * before this routine is called!
1958 */
1959 void
1960 tcp_mss_from_peer(struct tcpcb *tp, int offer)
1961 {
1962 struct socket *so;
1963 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1964 struct rtentry *rt;
1965 #endif
1966 u_long bufsize;
1967 int mss;
1968
1969 #ifdef DIAGNOSTIC
1970 if (tp->t_inpcb && tp->t_in6pcb)
1971 panic("tcp_mss_from_peer: both t_inpcb and t_in6pcb are set");
1972 #endif
1973 so = NULL;
1974 rt = NULL;
1975 #ifdef INET
1976 if (tp->t_inpcb) {
1977 so = tp->t_inpcb->inp_socket;
1978 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1979 rt = in_pcbrtentry(tp->t_inpcb);
1980 #endif
1981 }
1982 #endif
1983 #ifdef INET6
1984 if (tp->t_in6pcb) {
1985 so = tp->t_in6pcb->in6p_socket;
1986 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1987 rt = in6_pcbrtentry(tp->t_in6pcb);
1988 #endif
1989 }
1990 #endif
1991
1992 /*
1993 	 * As per RFC 1122, use the default MSS value, unless they
1994 * sent us an offer. Do not accept offers less than 256 bytes.
1995 */
1996 mss = tcp_mssdflt;
1997 if (offer)
1998 mss = offer;
1999 mss = max(mss, 256); /* sanity */
2000 tp->t_peermss = mss;
2001 mss -= tcp_optlen(tp);
2002 #ifdef INET
2003 if (tp->t_inpcb)
2004 mss -= ip_optlen(tp->t_inpcb);
2005 #endif
2006 #ifdef INET6
2007 if (tp->t_in6pcb)
2008 mss -= ip6_optlen(tp->t_in6pcb);
2009 #endif
2010
2011 /*
2012 * If there's a pipesize, change the socket buffer to that size.
2013 * Make the socket buffer an integral number of MSS units. If
2014 * the MSS is larger than the socket buffer, artificially decrease
2015 * the MSS.
2016 */
2017 #ifdef RTV_SPIPE
2018 if (rt != NULL && rt->rt_rmx.rmx_sendpipe != 0)
2019 bufsize = rt->rt_rmx.rmx_sendpipe;
2020 else
2021 #endif
2022 {
2023 KASSERT(so != NULL);
2024 bufsize = so->so_snd.sb_hiwat;
2025 }
2026 if (bufsize < mss)
2027 mss = bufsize;
2028 else {
2029 bufsize = roundup(bufsize, mss);
2030 if (bufsize > sb_max)
2031 bufsize = sb_max;
2032 (void) sbreserve(&so->so_snd, bufsize, so);
2033 }
2034 tp->t_segsz = mss;
2035
2036 #ifdef RTV_SSTHRESH
2037 if (rt != NULL && rt->rt_rmx.rmx_ssthresh) {
2038 /*
2039 * There's some sort of gateway or interface buffer
2040 * limit on the path. Use this to set the slow
2041 * start threshold, but set the threshold to no less
2042 * than 2 * MSS.
2043 */
2044 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
2045 }
2046 #endif
2047 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
2048 #ifdef INET
2049 if (tp->t_inpcb)
2050 in_pcbrtentry_unref(rt, tp->t_inpcb);
2051 #endif
2052 #ifdef INET6
2053 if (tp->t_in6pcb)
2054 in6_pcbrtentry_unref(rt, tp->t_in6pcb);
2055 #endif
2056 #endif
2057 }
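
/*
 * Worked example for the above, assuming the peer offered the usual
 * Ethernet-derived MSS of 1460, RFC 1323 timestamps were negotiated,
 * and no IP options or extension headers are in use:
 *
 *	t_peermss = 1460
 *	t_segsz   = 1460 - TCPOLEN_TSTAMP_APPA (12) = 1448
 *
 * possibly reduced further if the send buffer is smaller than one
 * segment.  Otherwise the send buffer is rounded up to a whole number
 * of segments (capped at sb_max) so that full-sized segments fit
 * exactly.
 */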
2058
2059 /*
2060 * Processing necessary when a TCP connection is established.
2061 */
2062 void
2063 tcp_established(struct tcpcb *tp)
2064 {
2065 struct socket *so;
2066 #ifdef RTV_RPIPE
2067 struct rtentry *rt;
2068 #endif
2069 u_long bufsize;
2070
2071 #ifdef DIAGNOSTIC
2072 if (tp->t_inpcb && tp->t_in6pcb)
2073 panic("tcp_established: both t_inpcb and t_in6pcb are set");
2074 #endif
2075 so = NULL;
2076 rt = NULL;
2077 #ifdef INET
2078 /* This is a while() to reduce the dreadful stairstepping below */
2079 while (tp->t_inpcb) {
2080 so = tp->t_inpcb->inp_socket;
2081 #if defined(RTV_RPIPE)
2082 rt = in_pcbrtentry(tp->t_inpcb);
2083 #endif
2084 if (__predict_true(tcp_msl_enable)) {
2085 			if (tp->t_inpcb->inp_laddr.s_addr == htonl(INADDR_LOOPBACK)) {
2086 tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
2087 break;
2088 }
2089
2090 if (__predict_false(tcp_rttlocal)) {
2091 /* This may be adjusted by tcp_input */
2092 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2093 break;
2094 }
2095 if (in_localaddr(tp->t_inpcb->inp_faddr)) {
2096 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2097 break;
2098 }
2099 }
2100 tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
2101 break;
2102 }
2103 #endif
2104 #ifdef INET6
2105 /* The !tp->t_inpcb lets the compiler know it can't be v4 *and* v6 */
2106 while (!tp->t_inpcb && tp->t_in6pcb) {
2107 so = tp->t_in6pcb->in6p_socket;
2108 #if defined(RTV_RPIPE)
2109 rt = in6_pcbrtentry(tp->t_in6pcb);
2110 #endif
2111 if (__predict_true(tcp_msl_enable)) {
2112 extern const struct in6_addr in6addr_loopback;
2113
2114 if (IN6_ARE_ADDR_EQUAL(&tp->t_in6pcb->in6p_laddr,
2115 &in6addr_loopback)) {
2116 tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
2117 break;
2118 }
2119
2120 if (__predict_false(tcp_rttlocal)) {
2121 /* This may be adjusted by tcp_input */
2122 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2123 break;
2124 }
2125 if (in6_localaddr(&tp->t_in6pcb->in6p_faddr)) {
2126 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2127 break;
2128 }
2129 }
2130 tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
2131 break;
2132 }
2133 #endif
2134
2135 tp->t_state = TCPS_ESTABLISHED;
2136 TCP_TIMER_ARM(tp, TCPT_KEEP, tp->t_keepidle);
2137
2138 #ifdef RTV_RPIPE
2139 if (rt != NULL && rt->rt_rmx.rmx_recvpipe != 0)
2140 bufsize = rt->rt_rmx.rmx_recvpipe;
2141 else
2142 #endif
2143 {
2144 KASSERT(so != NULL);
2145 bufsize = so->so_rcv.sb_hiwat;
2146 }
2147 if (bufsize > tp->t_ourmss) {
2148 bufsize = roundup(bufsize, tp->t_ourmss);
2149 if (bufsize > sb_max)
2150 bufsize = sb_max;
2151 (void) sbreserve(&so->so_rcv, bufsize, so);
2152 }
2153 #ifdef RTV_RPIPE
2154 #ifdef INET
2155 if (tp->t_inpcb)
2156 in_pcbrtentry_unref(rt, tp->t_inpcb);
2157 #endif
2158 #ifdef INET6
2159 if (tp->t_in6pcb)
2160 in6_pcbrtentry_unref(rt, tp->t_in6pcb);
2161 #endif
2162 #endif
2163 }
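
/*
 * To summarize the MSL selection above (the v4 and v6 cases are
 * identical), where each default may be overridden by the matching
 * tcp_msl_* setting:
 *
 *	local address is loopback		-> TCPTV_MSL >> 2
 *	tcp_rttlocal, or peer on a local net	-> TCPTV_MSL >> 1
 *	otherwise, or tcp_msl_enable is off	-> TCPTV_MSL
 *
 * Once the connection is marked ESTABLISHED, the keepalive timer is
 * armed with t_keepidle, and the receive buffer is resized to a whole
 * number of our MSS (bounded by sb_max), like the send side in
 * tcp_mss_from_peer().
 */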
2164
2165 /*
2166 * Check if there's an initial rtt or rttvar. Convert from the
2167 * route-table units to scaled multiples of the slow timeout timer.
2168 * Called only during the 3-way handshake.
2169 */
2170 void
2171 tcp_rmx_rtt(struct tcpcb *tp)
2172 {
2173 #ifdef RTV_RTT
2174 struct rtentry *rt = NULL;
2175 int rtt;
2176
2177 #ifdef DIAGNOSTIC
2178 if (tp->t_inpcb && tp->t_in6pcb)
2179 panic("tcp_rmx_rtt: both t_inpcb and t_in6pcb are set");
2180 #endif
2181 #ifdef INET
2182 if (tp->t_inpcb)
2183 rt = in_pcbrtentry(tp->t_inpcb);
2184 #endif
2185 #ifdef INET6
2186 if (tp->t_in6pcb)
2187 rt = in6_pcbrtentry(tp->t_in6pcb);
2188 #endif
2189 if (rt == NULL)
2190 return;
2191
2192 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
2193 /*
2194 		 * XXX The lock bit for RTT indicates that the value
2195 		 * is also a minimum value; this is subject to time.
2196 */
2197 if (rt->rt_rmx.rmx_locks & RTV_RTT)
2198 TCPT_RANGESET(tp->t_rttmin,
2199 rtt / (RTM_RTTUNIT / PR_SLOWHZ),
2200 TCPTV_MIN, TCPTV_REXMTMAX);
2201 tp->t_srtt = rtt /
2202 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
2203 if (rt->rt_rmx.rmx_rttvar) {
2204 tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
2205 ((RTM_RTTUNIT / PR_SLOWHZ) >>
2206 (TCP_RTTVAR_SHIFT + 2));
2207 } else {
2208 /* Default variation is +- 1 rtt */
2209 tp->t_rttvar =
2210 tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
2211 }
2212 TCPT_RANGESET(tp->t_rxtcur,
2213 ((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
2214 tp->t_rttmin, TCPTV_REXMTMAX);
2215 }
2216 #ifdef INET
2217 if (tp->t_inpcb)
2218 in_pcbrtentry_unref(rt, tp->t_inpcb);
2219 #endif
2220 #ifdef INET6
2221 if (tp->t_in6pcb)
2222 in6_pcbrtentry_unref(rt, tp->t_in6pcb);
2223 #endif
2224 #endif
2225 }
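
/*
 * A note on the conversion above: the route metrics express RTT in
 * units of 1/RTM_RTTUNIT of a second, while t_srtt and t_rttvar are
 * kept in PR_SLOWHZ ticks as fixed-point values scaled by
 * 2^(TCP_RTT_SHIFT + 2) and 2^(TCP_RTTVAR_SHIFT + 2) respectively (as
 * the divisors above imply).  Folding the shift into the divisor does
 * the unit conversion and the fixed-point scaling in one division:
 *
 *	t_srtt = rmx_rtt / ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2))
 *	       ~ (rmx_rtt / (RTM_RTTUNIT / PR_SLOWHZ)) << (TCP_RTT_SHIFT + 2)
 *
 * modulo integer truncation.
 */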
2226
2227 tcp_seq tcp_iss_seq = 0; /* tcp initial seq # */
2228
2229 /*
2230 * Get a new sequence value given a tcp control block
2231 */
2232 tcp_seq
2233 tcp_new_iss(struct tcpcb *tp, tcp_seq addin)
2234 {
2235
2236 #ifdef INET
2237 if (tp->t_inpcb != NULL) {
2238 return (tcp_new_iss1(&tp->t_inpcb->inp_laddr,
2239 &tp->t_inpcb->inp_faddr, tp->t_inpcb->inp_lport,
2240 tp->t_inpcb->inp_fport, sizeof(tp->t_inpcb->inp_laddr),
2241 addin));
2242 }
2243 #endif
2244 #ifdef INET6
2245 if (tp->t_in6pcb != NULL) {
2246 return (tcp_new_iss1(&tp->t_in6pcb->in6p_laddr,
2247 &tp->t_in6pcb->in6p_faddr, tp->t_in6pcb->in6p_lport,
2248 tp->t_in6pcb->in6p_fport, sizeof(tp->t_in6pcb->in6p_laddr),
2249 addin));
2250 }
2251 #endif
2252 /* Not possible. */
2253 panic("tcp_new_iss");
2254 }
2255
2256 static u_int8_t tcp_iss_secret[16]; /* 128 bits; should be plenty */
2257
2258 /*
2259 * Initialize RFC 1948 ISS Secret
2260 */
2261 static int
2262 tcp_iss_secret_init(void)
2263 {
2264 cprng_strong(kern_cprng,
2265 tcp_iss_secret, sizeof(tcp_iss_secret), 0);
2266
2267 return 0;
2268 }
2269
2270 /*
2271 * This routine actually generates a new TCP initial sequence number.
2272 */
2273 tcp_seq
2274 tcp_new_iss1(void *laddr, void *faddr, u_int16_t lport, u_int16_t fport,
2275 size_t addrsz, tcp_seq addin)
2276 {
2277 tcp_seq tcp_iss;
2278
2279 if (tcp_do_rfc1948) {
2280 MD5_CTX ctx;
2281 u_int8_t hash[16]; /* XXX MD5 knowledge */
2282 static ONCE_DECL(tcp_iss_secret_control);
2283
2284 /*
2285 * If we haven't been here before, initialize our cryptographic
2286 * hash secret.
2287 */
2288 RUN_ONCE(&tcp_iss_secret_control, tcp_iss_secret_init);
2289
2290 /*
2291 * Compute the base value of the ISS. It is a hash
2292 * of (saddr, sport, daddr, dport, secret).
2293 */
2294 MD5Init(&ctx);
2295
2296 MD5Update(&ctx, (u_char *) laddr, addrsz);
2297 MD5Update(&ctx, (u_char *) &lport, sizeof(lport));
2298
2299 MD5Update(&ctx, (u_char *) faddr, addrsz);
2300 MD5Update(&ctx, (u_char *) &fport, sizeof(fport));
2301
2302 MD5Update(&ctx, tcp_iss_secret, sizeof(tcp_iss_secret));
2303
2304 MD5Final(hash, &ctx);
2305
2306 memcpy(&tcp_iss, hash, sizeof(tcp_iss));
2307
2308 /*
2309 * Now increment our "timer", and add it in to
2310 * the computed value.
2311 *
2312 * XXX Use `addin'?
2313 * XXX TCP_ISSINCR too large to use?
2314 */
2315 tcp_iss_seq += TCP_ISSINCR;
2316 #ifdef TCPISS_DEBUG
2317 printf("ISS hash 0x%08x, ", tcp_iss);
2318 #endif
2319 tcp_iss += tcp_iss_seq + addin;
2320 #ifdef TCPISS_DEBUG
2321 printf("new ISS 0x%08x\n", tcp_iss);
2322 #endif
2323 } else {
2324 /*
2325 * Randomize.
2326 */
2327 tcp_iss = cprng_fast32();
2328
2329 /*
2330 * If we were asked to add some amount to a known value,
2331 * we will take a random value obtained above, mask off
2332 * the upper bits, and add in the known value. We also
2333 * add in a constant to ensure that we are at least a
2334 * certain distance from the original value.
2335 *
2336 * This is used when an old connection is in timed wait
2337 * and we have a new one coming in, for instance.
2338 */
2339 if (addin != 0) {
2340 #ifdef TCPISS_DEBUG
2341 printf("Random %08x, ", tcp_iss);
2342 #endif
2343 tcp_iss &= TCP_ISS_RANDOM_MASK;
2344 tcp_iss += addin + TCP_ISSINCR;
2345 #ifdef TCPISS_DEBUG
2346 printf("Old ISS %08x, ISS %08x\n", addin, tcp_iss);
2347 #endif
2348 } else {
2349 tcp_iss &= TCP_ISS_RANDOM_MASK;
2350 tcp_iss += tcp_iss_seq;
2351 tcp_iss_seq += TCP_ISSINCR;
2352 #ifdef TCPISS_DEBUG
2353 printf("ISS %08x\n", tcp_iss);
2354 #endif
2355 }
2356 }
2357
2358 if (tcp_compat_42) {
2359 /*
2360 * Limit it to the positive range for really old TCP
2361 * implementations.
2362 		 * Just AND off the top bit instead of checking if it
2363 		 * is set first - saves a branch 50% of the time.
2364 */
2365 tcp_iss &= 0x7fffffff; /* XXX */
2366 }
2367
2368 return (tcp_iss);
2369 }
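
/*
 * In other words, in the RFC 1948 case the ISS is formed as
 *
 *	ISS = low 32 bits of MD5(laddr, lport, faddr, fport, secret)
 *	      + tcp_iss_seq + addin
 *
 * with tcp_iss_seq bumped by TCP_ISSINCR on every call.  The keyed hash
 * keeps the sequence spaces of distinct connection 4-tuples
 * uncorrelated, while the global increment (plus `addin', when the
 * caller needs to stay clear of an earlier connection's sequence space)
 * keeps successive ISS values for the same 4-tuple moving forward.
 */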
2370
2371 #if defined(IPSEC)
2372 /* compute ESP/AH header size for TCP, including outer IP header. */
2373 size_t
2374 ipsec4_hdrsiz_tcp(struct tcpcb *tp)
2375 {
2376 struct inpcb *inp;
2377 size_t hdrsiz;
2378
2379 /* XXX mapped addr case (tp->t_in6pcb) */
2380 if (!tp || !tp->t_template || !(inp = tp->t_inpcb))
2381 return 0;
2382 switch (tp->t_family) {
2383 case AF_INET:
2384 		/* XXX: should use correct direction. */
2385 hdrsiz = ipsec4_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, inp);
2386 break;
2387 default:
2388 hdrsiz = 0;
2389 break;
2390 }
2391
2392 return hdrsiz;
2393 }
2394
2395 #ifdef INET6
2396 size_t
2397 ipsec6_hdrsiz_tcp(struct tcpcb *tp)
2398 {
2399 struct in6pcb *in6p;
2400 size_t hdrsiz;
2401
2402 if (!tp || !tp->t_template || !(in6p = tp->t_in6pcb))
2403 return 0;
2404 switch (tp->t_family) {
2405 case AF_INET6:
2406 		/* XXX: should use correct direction. */
2407 hdrsiz = ipsec6_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, in6p);
2408 break;
2409 case AF_INET:
2410 /* mapped address case - tricky */
2411 default:
2412 hdrsiz = 0;
2413 break;
2414 }
2415
2416 return hdrsiz;
2417 }
2418 #endif
2419 #endif /*IPSEC*/
2420
2421 /*
2422 * Determine the length of the TCP options for this connection.
2423 *
2424 * XXX: What do we do for SACK, when we add that? Just reserve
2425 * all of the space? Otherwise we can't exactly be incrementing
2426 * cwnd by an amount that varies depending on the amount we last
2427 * had to SACK!
2428 */
2429
2430 u_int
2431 tcp_optlen(struct tcpcb *tp)
2432 {
2433 u_int optlen;
2434
2435 optlen = 0;
2436 if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
2437 (TF_REQ_TSTMP | TF_RCVD_TSTMP))
2438 optlen += TCPOLEN_TSTAMP_APPA;
2439
2440 #ifdef TCP_SIGNATURE
2441 if (tp->t_flags & TF_SIGNATURE)
2442 optlen += TCPOLEN_SIGNATURE + 2;
2443 #endif /* TCP_SIGNATURE */
2444
2445 return optlen;
2446 }
2447
2448 u_int
2449 tcp_hdrsz(struct tcpcb *tp)
2450 {
2451 u_int hlen;
2452
2453 switch (tp->t_family) {
2454 #ifdef INET6
2455 case AF_INET6:
2456 hlen = sizeof(struct ip6_hdr);
2457 break;
2458 #endif
2459 case AF_INET:
2460 hlen = sizeof(struct ip);
2461 break;
2462 default:
2463 hlen = 0;
2464 break;
2465 }
2466 hlen += sizeof(struct tcphdr);
2467
2468 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2469 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
2470 hlen += TCPOLEN_TSTAMP_APPA;
2471 #ifdef TCP_SIGNATURE
2472 if (tp->t_flags & TF_SIGNATURE)
2473 hlen += TCPOLEN_SIGLEN;
2474 #endif
2475 return hlen;
2476 }
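
/*
 * Example: for an IPv4 connection that negotiated RFC 1323 timestamps
 * and uses no TCP-MD5 signature, tcp_hdrsz() reports
 *
 *	sizeof(struct ip) + sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA
 *	= 20 + 20 + 12 = 52
 *
 * bytes of per-segment header overhead, and tcp_optlen() contributes
 * the matching 12 bytes to the MSS bookkeeping above.
 */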
2477
2478 void
2479 tcp_statinc(u_int stat)
2480 {
2481
2482 KASSERT(stat < TCP_NSTATS);
2483 TCP_STATINC(stat);
2484 }
2485
2486 void
2487 tcp_statadd(u_int stat, uint64_t val)
2488 {
2489
2490 KASSERT(stat < TCP_NSTATS);
2491 TCP_STATADD(stat, val);
2492 }
2493