1 /* $NetBSD: tcp_subr.c,v 1.258 2015/02/14 12:57:53 he Exp $ */
2
3 /*
4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*-
33 * Copyright (c) 1997, 1998, 2000, 2001, 2008 The NetBSD Foundation, Inc.
34 * All rights reserved.
35 *
36 * This code is derived from software contributed to The NetBSD Foundation
37 * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
38 * Facility, NASA Ames Research Center.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
50 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
51 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
52 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
53 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
54 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
55 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
56 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
57 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
58 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 */
61
62 /*
63 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
64 * The Regents of the University of California. All rights reserved.
65 *
66 * Redistribution and use in source and binary forms, with or without
67 * modification, are permitted provided that the following conditions
68 * are met:
69 * 1. Redistributions of source code must retain the above copyright
70 * notice, this list of conditions and the following disclaimer.
71 * 2. Redistributions in binary form must reproduce the above copyright
72 * notice, this list of conditions and the following disclaimer in the
73 * documentation and/or other materials provided with the distribution.
74 * 3. Neither the name of the University nor the names of its contributors
75 * may be used to endorse or promote products derived from this software
76 * without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
79 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
80 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
81 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
82 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
83 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
84 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
86 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
87 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
88 * SUCH DAMAGE.
89 *
90 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
91 */
92
93 #include <sys/cdefs.h>
94 __KERNEL_RCSID(0, "$NetBSD: tcp_subr.c,v 1.258 2015/02/14 12:57:53 he Exp $");
95
96 #include "opt_inet.h"
97 #include "opt_ipsec.h"
98 #include "opt_tcp_compat_42.h"
99 #include "opt_inet_csum.h"
100 #include "opt_mbuftrace.h"
101
102 #include <sys/param.h>
103 #include <sys/atomic.h>
104 #include <sys/proc.h>
105 #include <sys/systm.h>
106 #include <sys/mbuf.h>
107 #include <sys/once.h>
108 #include <sys/socket.h>
109 #include <sys/socketvar.h>
110 #include <sys/protosw.h>
111 #include <sys/errno.h>
112 #include <sys/kernel.h>
113 #include <sys/pool.h>
114 #include <sys/md5.h>
115 #include <sys/cprng.h>
116
117 #include <net/route.h>
118 #include <net/if.h>
119
120 #include <netinet/in.h>
121 #include <netinet/in_systm.h>
122 #include <netinet/ip.h>
123 #include <netinet/in_pcb.h>
124 #include <netinet/ip_var.h>
125 #include <netinet/ip_icmp.h>
126
127 #ifdef INET6
128 #ifndef INET
129 #include <netinet/in.h>
130 #endif
131 #include <netinet/ip6.h>
132 #include <netinet6/in6_pcb.h>
133 #include <netinet6/ip6_var.h>
134 #include <netinet6/in6_var.h>
135 #include <netinet6/ip6protosw.h>
136 #include <netinet/icmp6.h>
137 #include <netinet6/nd6.h>
138 #endif
139
140 #include <netinet/tcp.h>
141 #include <netinet/tcp_fsm.h>
142 #include <netinet/tcp_seq.h>
143 #include <netinet/tcp_timer.h>
144 #include <netinet/tcp_var.h>
145 #include <netinet/tcp_vtw.h>
146 #include <netinet/tcp_private.h>
147 #include <netinet/tcp_congctl.h>
148 #include <netinet/tcpip.h>
149
150 #ifdef IPSEC
151 #include <netipsec/ipsec.h>
152 #include <netipsec/xform.h>
153 #ifdef INET6
154 #include <netipsec/ipsec6.h>
155 #endif
156 #include <netipsec/key.h>
157 #endif /* IPSEC */
158
159
160 struct inpcbtable tcbtable; /* head of queue of active tcpcb's */
161 u_int32_t tcp_now; /* slow ticks, for RFC 1323 timestamps */
162
163 percpu_t *tcpstat_percpu;
164
165 /* patchable/settable parameters for tcp */
166 int tcp_mssdflt = TCP_MSS;
167 int tcp_minmss = TCP_MINMSS;
168 int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
169 int tcp_do_rfc1323 = 1; /* window scaling / timestamps (obsolete) */
170 int tcp_do_rfc1948 = 0; /* ISS by cryptographic hash */
171 int tcp_do_sack = 1; /* selective acknowledgement */
172 int tcp_do_win_scale = 1; /* RFC1323 window scaling */
173 int tcp_do_timestamps = 1; /* RFC1323 timestamps */
174 int tcp_ack_on_push = 0; /* set to enable immediate ACK-on-PUSH */
175 int tcp_do_ecn = 0; /* Explicit Congestion Notification */
176 #ifndef TCP_INIT_WIN
177 #define TCP_INIT_WIN 4 /* initial slow start window */
178 #endif
179 #ifndef TCP_INIT_WIN_LOCAL
180 #define TCP_INIT_WIN_LOCAL 4 /* initial slow start window for local nets */
181 #endif
182 /*
183 * Below 5 the cap grows roughly linearly, reaching 3 * 1460 at iw == 4;
184 * from 5 on it is (iw) * 1460. This preserves the current behavior for iw == 4
185 */
186 int tcp_init_win_max[] = {
187 1 * 1460,
188 1 * 1460,
189 2 * 1460,
190 2 * 1460,
191 3 * 1460,
192 5 * 1460,
193 6 * 1460,
194 7 * 1460,
195 8 * 1460,
196 9 * 1460,
197 10 * 1460
198 };
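/*
 * A sketch of how this table is consumed (the exact clamp lives in the
 * TCP_INITIAL_WINDOW() macro, used e.g. in tcp_mtudisc() below): the
 * initial window is roughly tcp_init_win segments, capped at about
 * tcp_init_win_max[tcp_init_win] bytes, so iw == 4 stays at 3 * 1460
 * while iw == 10 may open up to 10 * 1460.
 */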
199 int tcp_init_win = TCP_INIT_WIN;
200 int tcp_init_win_local = TCP_INIT_WIN_LOCAL;
201 int tcp_mss_ifmtu = 0;
202 #ifdef TCP_COMPAT_42
203 int tcp_compat_42 = 1;
204 #else
205 int tcp_compat_42 = 0;
206 #endif
207 int tcp_rst_ppslim = 100; /* 100pps */
208 int tcp_ackdrop_ppslim = 100; /* 100pps */
209 int tcp_do_loopback_cksum = 0;
210 int tcp_do_abc = 1; /* RFC3465 Appropriate byte counting. */
211 int tcp_abc_aggressive = 1; /* 1: L=2*SMSS 0: L=1*SMSS */
212 int tcp_sack_tp_maxholes = 32;
213 int tcp_sack_globalmaxholes = 1024;
214 int tcp_sack_globalholes = 0;
215 int tcp_ecn_maxretries = 1;
216 int tcp_msl_enable = 1; /* enable TIME_WAIT truncation */
217 int tcp_msl_loop = PR_SLOWHZ; /* MSL for loopback */
218 int tcp_msl_local = 5 * PR_SLOWHZ; /* MSL for 'local' */
219 int tcp_msl_remote = TCPTV_MSL; /* MSL otherwise */
220 int tcp_msl_remote_threshold = TCPTV_SRTTDFLT; /* RTT threshold */
221 int tcp_rttlocal = 0; /* Use RTT to decide who's 'local' */
222
223 int tcp4_vtw_enable = 0; /* 1 to enable */
224 int tcp6_vtw_enable = 0; /* 1 to enable */
225 int tcp_vtw_was_enabled = 0;
226 int tcp_vtw_entries = 1 << 4; /* 16 vestigial TIME_WAIT entries */
227
228 /* tcb hash */
229 #ifndef TCBHASHSIZE
230 #define TCBHASHSIZE 128
231 #endif
232 int tcbhashsize = TCBHASHSIZE;
233
234 /* syn hash parameters */
235 #define TCP_SYN_HASH_SIZE 293
236 #define TCP_SYN_BUCKET_SIZE 35
237 int tcp_syn_cache_size = TCP_SYN_HASH_SIZE;
238 int tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
239 int tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
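/*
 * With the defaults above the syn cache is limited to
 * 293 * 35 == 10255 entries overall and 3 * 35 == 105 per bucket.
 */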
240 struct syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
241
242 int tcp_freeq(struct tcpcb *);
243
244 #ifdef INET
245 static void tcp_mtudisc_callback(struct in_addr);
246 #endif
247
248 #ifdef INET6
249 void tcp6_mtudisc(struct in6pcb *, int);
250 #endif
251
252 static struct pool tcpcb_pool;
253
254 static int tcp_drainwanted;
255
256 #ifdef TCP_CSUM_COUNTERS
257 #include <sys/device.h>
258
259 #if defined(INET)
260 struct evcnt tcp_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
261 NULL, "tcp", "hwcsum bad");
262 struct evcnt tcp_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
263 NULL, "tcp", "hwcsum ok");
264 struct evcnt tcp_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
265 NULL, "tcp", "hwcsum data");
266 struct evcnt tcp_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
267 NULL, "tcp", "swcsum");
268
269 EVCNT_ATTACH_STATIC(tcp_hwcsum_bad);
270 EVCNT_ATTACH_STATIC(tcp_hwcsum_ok);
271 EVCNT_ATTACH_STATIC(tcp_hwcsum_data);
272 EVCNT_ATTACH_STATIC(tcp_swcsum);
273 #endif /* defined(INET) */
274
275 #if defined(INET6)
276 struct evcnt tcp6_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
277 NULL, "tcp6", "hwcsum bad");
278 struct evcnt tcp6_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
279 NULL, "tcp6", "hwcsum ok");
280 struct evcnt tcp6_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
281 NULL, "tcp6", "hwcsum data");
282 struct evcnt tcp6_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
283 NULL, "tcp6", "swcsum");
284
285 EVCNT_ATTACH_STATIC(tcp6_hwcsum_bad);
286 EVCNT_ATTACH_STATIC(tcp6_hwcsum_ok);
287 EVCNT_ATTACH_STATIC(tcp6_hwcsum_data);
288 EVCNT_ATTACH_STATIC(tcp6_swcsum);
289 #endif /* defined(INET6) */
290 #endif /* TCP_CSUM_COUNTERS */
291
292
293 #ifdef TCP_OUTPUT_COUNTERS
294 #include <sys/device.h>
295
296 struct evcnt tcp_output_bigheader = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
297 NULL, "tcp", "output big header");
298 struct evcnt tcp_output_predict_hit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
299 NULL, "tcp", "output predict hit");
300 struct evcnt tcp_output_predict_miss = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
301 NULL, "tcp", "output predict miss");
302 struct evcnt tcp_output_copysmall = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
303 NULL, "tcp", "output copy small");
304 struct evcnt tcp_output_copybig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
305 NULL, "tcp", "output copy big");
306 struct evcnt tcp_output_refbig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
307 NULL, "tcp", "output reference big");
308
309 EVCNT_ATTACH_STATIC(tcp_output_bigheader);
310 EVCNT_ATTACH_STATIC(tcp_output_predict_hit);
311 EVCNT_ATTACH_STATIC(tcp_output_predict_miss);
312 EVCNT_ATTACH_STATIC(tcp_output_copysmall);
313 EVCNT_ATTACH_STATIC(tcp_output_copybig);
314 EVCNT_ATTACH_STATIC(tcp_output_refbig);
315
316 #endif /* TCP_OUTPUT_COUNTERS */
317
318 #ifdef TCP_REASS_COUNTERS
319 #include <sys/device.h>
320
321 struct evcnt tcp_reass_ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
322 NULL, "tcp_reass", "calls");
323 struct evcnt tcp_reass_empty = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
324 &tcp_reass_, "tcp_reass", "insert into empty queue");
325 struct evcnt tcp_reass_iteration[8] = {
326 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", ">7 iterations"),
327 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "1 iteration"),
328 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "2 iterations"),
329 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "3 iterations"),
330 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "4 iterations"),
331 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "5 iterations"),
332 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "6 iterations"),
333 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "7 iterations"),
334 };
335 struct evcnt tcp_reass_prependfirst = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
336 &tcp_reass_, "tcp_reass", "prepend to first");
337 struct evcnt tcp_reass_prepend = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
338 &tcp_reass_, "tcp_reass", "prepend");
339 struct evcnt tcp_reass_insert = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
340 &tcp_reass_, "tcp_reass", "insert");
341 struct evcnt tcp_reass_inserttail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
342 &tcp_reass_, "tcp_reass", "insert at tail");
343 struct evcnt tcp_reass_append = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
344 &tcp_reass_, "tcp_reass", "append");
345 struct evcnt tcp_reass_appendtail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
346 &tcp_reass_, "tcp_reass", "append to tail fragment");
347 struct evcnt tcp_reass_overlaptail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
348 &tcp_reass_, "tcp_reass", "overlap at end");
349 struct evcnt tcp_reass_overlapfront = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
350 &tcp_reass_, "tcp_reass", "overlap at start");
351 struct evcnt tcp_reass_segdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
352 &tcp_reass_, "tcp_reass", "duplicate segment");
353 struct evcnt tcp_reass_fragdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
354 &tcp_reass_, "tcp_reass", "duplicate fragment");
355
356 EVCNT_ATTACH_STATIC(tcp_reass_);
357 EVCNT_ATTACH_STATIC(tcp_reass_empty);
358 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 0);
359 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 1);
360 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 2);
361 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 3);
362 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 4);
363 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 5);
364 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 6);
365 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 7);
366 EVCNT_ATTACH_STATIC(tcp_reass_prependfirst);
367 EVCNT_ATTACH_STATIC(tcp_reass_prepend);
368 EVCNT_ATTACH_STATIC(tcp_reass_insert);
369 EVCNT_ATTACH_STATIC(tcp_reass_inserttail);
370 EVCNT_ATTACH_STATIC(tcp_reass_append);
371 EVCNT_ATTACH_STATIC(tcp_reass_appendtail);
372 EVCNT_ATTACH_STATIC(tcp_reass_overlaptail);
373 EVCNT_ATTACH_STATIC(tcp_reass_overlapfront);
374 EVCNT_ATTACH_STATIC(tcp_reass_segdup);
375 EVCNT_ATTACH_STATIC(tcp_reass_fragdup);
376
377 #endif /* TCP_REASS_COUNTERS */
378
379 #ifdef MBUFTRACE
380 struct mowner tcp_mowner = MOWNER_INIT("tcp", "");
381 struct mowner tcp_rx_mowner = MOWNER_INIT("tcp", "rx");
382 struct mowner tcp_tx_mowner = MOWNER_INIT("tcp", "tx");
383 struct mowner tcp_sock_mowner = MOWNER_INIT("tcp", "sock");
384 struct mowner tcp_sock_rx_mowner = MOWNER_INIT("tcp", "sock rx");
385 struct mowner tcp_sock_tx_mowner = MOWNER_INIT("tcp", "sock tx");
386 #endif
387
388 callout_t tcp_slowtimo_ch;
389
390 static int
391 do_tcpinit(void)
392 {
393
394 in_pcbinit(&tcbtable, tcbhashsize, tcbhashsize);
395 pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
396 NULL, IPL_SOFTNET);
397
398 tcp_usrreq_init();
399
400 /* Initialize timer state. */
401 tcp_timer_init();
402
403 /* Initialize the compressed state engine. */
404 syn_cache_init();
405
406 /* Initialize the congestion control algorithms. */
407 tcp_congctl_init();
408
409 /* Initialize the TCPCB template. */
410 tcp_tcpcb_template();
411
412 /* Initialize reassembly queue */
413 tcpipqent_init();
414
415 /* SACK */
416 tcp_sack_init();
417
418 MOWNER_ATTACH(&tcp_tx_mowner);
419 MOWNER_ATTACH(&tcp_rx_mowner);
420 MOWNER_ATTACH(&tcp_reass_mowner);
421 MOWNER_ATTACH(&tcp_sock_mowner);
422 MOWNER_ATTACH(&tcp_sock_tx_mowner);
423 MOWNER_ATTACH(&tcp_sock_rx_mowner);
424 MOWNER_ATTACH(&tcp_mowner);
425
426 tcpstat_percpu = percpu_alloc(sizeof(uint64_t) * TCP_NSTATS);
427
428 vtw_earlyinit();
429
430 callout_init(&tcp_slowtimo_ch, CALLOUT_MPSAFE);
431 callout_reset(&tcp_slowtimo_ch, 1, tcp_slowtimo, NULL);
432
433 return 0;
434 }
435
436 void
437 tcp_init_common(unsigned basehlen)
438 {
439 static ONCE_DECL(dotcpinit);
440 unsigned hlen = basehlen + sizeof(struct tcphdr);
441 unsigned oldhlen;
442
443 if (max_linkhdr + hlen > MHLEN)
444 panic("tcp_init");
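/*
 * Raise max_protohdr to at least hlen without a lock: re-read and
 * retry the compare-and-swap until the global value is large enough,
 * in case another caller races us with a different header size.
 */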
445 while ((oldhlen = max_protohdr) < hlen)
446 atomic_cas_uint(&max_protohdr, oldhlen, hlen);
447
448 RUN_ONCE(&dotcpinit, do_tcpinit);
449 }
450
451 /*
452 * Tcp initialization
453 */
454 void
455 tcp_init(void)
456 {
457
458 icmp_mtudisc_callback_register(tcp_mtudisc_callback);
459
460 tcp_init_common(sizeof(struct ip));
461 }
462
463 /*
464 * Create template to be used to send tcp packets on a connection.
465 * Call after host entry created, allocates an mbuf and fills
466 * in a skeletal tcp/ip header, minimizing the amount of work
467 * necessary when the connection is used.
468 */
469 struct mbuf *
470 tcp_template(struct tcpcb *tp)
471 {
472 struct inpcb *inp = tp->t_inpcb;
473 #ifdef INET6
474 struct in6pcb *in6p = tp->t_in6pcb;
475 #endif
476 struct tcphdr *n;
477 struct mbuf *m;
478 int hlen;
479
480 switch (tp->t_family) {
481 case AF_INET:
482 hlen = sizeof(struct ip);
483 if (inp)
484 break;
485 #ifdef INET6
486 if (in6p) {
487 /* mapped addr case */
488 if (IN6_IS_ADDR_V4MAPPED(&in6p->in6p_laddr)
489 && IN6_IS_ADDR_V4MAPPED(&in6p->in6p_faddr))
490 break;
491 }
492 #endif
493 return NULL; /*EINVAL*/
494 #ifdef INET6
495 case AF_INET6:
496 hlen = sizeof(struct ip6_hdr);
497 if (in6p) {
498 /* more sanity checks? */
499 break;
500 }
501 return NULL; /*EINVAL*/
502 #endif
503 default:
504 hlen = 0; /*pacify gcc*/
505 return NULL; /*EAFNOSUPPORT*/
506 }
507 #ifdef DIAGNOSTIC
508 if (hlen + sizeof(struct tcphdr) > MCLBYTES)
509 panic("mclbytes too small for t_template");
510 #endif
511 m = tp->t_template;
512 if (m && m->m_len == hlen + sizeof(struct tcphdr))
513 ;
514 else {
515 if (m)
516 m_freem(m);
517 m = tp->t_template = NULL;
518 MGETHDR(m, M_DONTWAIT, MT_HEADER);
519 if (m && hlen + sizeof(struct tcphdr) > MHLEN) {
520 MCLGET(m, M_DONTWAIT);
521 if ((m->m_flags & M_EXT) == 0) {
522 m_free(m);
523 m = NULL;
524 }
525 }
526 if (m == NULL)
527 return NULL;
528 MCLAIM(m, &tcp_mowner);
529 m->m_pkthdr.len = m->m_len = hlen + sizeof(struct tcphdr);
530 }
531
532 memset(mtod(m, void *), 0, m->m_len);
533
534 n = (struct tcphdr *)(mtod(m, char *) + hlen);
535
536 switch (tp->t_family) {
537 case AF_INET:
538 {
539 struct ipovly *ipov;
540 mtod(m, struct ip *)->ip_v = 4;
541 mtod(m, struct ip *)->ip_hl = hlen >> 2;
542 ipov = mtod(m, struct ipovly *);
543 ipov->ih_pr = IPPROTO_TCP;
544 ipov->ih_len = htons(sizeof(struct tcphdr));
545 if (inp) {
546 ipov->ih_src = inp->inp_laddr;
547 ipov->ih_dst = inp->inp_faddr;
548 }
549 #ifdef INET6
550 else if (in6p) {
551 /* mapped addr case */
552 bcopy(&in6p->in6p_laddr.s6_addr32[3], &ipov->ih_src,
553 sizeof(ipov->ih_src));
554 bcopy(&in6p->in6p_faddr.s6_addr32[3], &ipov->ih_dst,
555 sizeof(ipov->ih_dst));
556 }
557 #endif
558 /*
559 * Compute the pseudo-header portion of the checksum
560 * now. We incrementally add in the TCP option and
561 * payload lengths later, and then compute the TCP
562 * checksum right before the packet is sent off onto
563 * the wire.
564 */
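/*
 * Note: the call below folds the TCP length and the protocol number
 * into in_cksum_phdr()'s last argument (their sum fits comfortably in
 * 16 bits, so the one's-complement contribution is unchanged); the
 * IPv6 branch hands them to in6_cksum_phdr() separately.
 */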
565 n->th_sum = in_cksum_phdr(ipov->ih_src.s_addr,
566 ipov->ih_dst.s_addr,
567 htons(sizeof(struct tcphdr) + IPPROTO_TCP));
568 break;
569 }
570 #ifdef INET6
571 case AF_INET6:
572 {
573 struct ip6_hdr *ip6;
574 mtod(m, struct ip *)->ip_v = 6;
575 ip6 = mtod(m, struct ip6_hdr *);
576 ip6->ip6_nxt = IPPROTO_TCP;
577 ip6->ip6_plen = htons(sizeof(struct tcphdr));
578 ip6->ip6_src = in6p->in6p_laddr;
579 ip6->ip6_dst = in6p->in6p_faddr;
580 ip6->ip6_flow = in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK;
581 if (ip6_auto_flowlabel) {
582 ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
583 ip6->ip6_flow |=
584 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
585 }
586 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
587 ip6->ip6_vfc |= IPV6_VERSION;
588
589 /*
590 * Compute the pseudo-header portion of the checksum
591 * now. We incrementally add in the TCP option and
592 * payload lengths later, and then compute the TCP
593 * checksum right before the packet is sent off onto
594 * the wire.
595 */
596 n->th_sum = in6_cksum_phdr(&in6p->in6p_laddr,
597 &in6p->in6p_faddr, htonl(sizeof(struct tcphdr)),
598 htonl(IPPROTO_TCP));
599 break;
600 }
601 #endif
602 }
603 if (inp) {
604 n->th_sport = inp->inp_lport;
605 n->th_dport = inp->inp_fport;
606 }
607 #ifdef INET6
608 else if (in6p) {
609 n->th_sport = in6p->in6p_lport;
610 n->th_dport = in6p->in6p_fport;
611 }
612 #endif
613 n->th_seq = 0;
614 n->th_ack = 0;
615 n->th_x2 = 0;
616 n->th_off = 5;
617 n->th_flags = 0;
618 n->th_win = 0;
619 n->th_urp = 0;
620 return (m);
621 }
622
623 /*
624 * Send a single message to the TCP at address specified by
625 * the given TCP/IP header. If m == 0, then we make a copy
626 * of the tcpiphdr at ti and send directly to the addressed host.
627 * This is used to force keep alive messages out using the TCP
628 * template for a connection tp->t_template. If flags are given
629 * then we send a message back to the TCP which originated the
630 * segment ti, and discard the mbuf containing it and any other
631 * attached mbufs.
632 *
633 * In any case the ack and sequence number of the transmitted
634 * segment are as specified by the parameters.
635 */
636 int
637 tcp_respond(struct tcpcb *tp, struct mbuf *mtemplate, struct mbuf *m,
638 struct tcphdr *th0, tcp_seq ack, tcp_seq seq, int flags)
639 {
640 #ifdef INET6
641 struct rtentry *rt;
642 #endif
643 struct route *ro;
644 int error, tlen, win = 0;
645 int hlen;
646 struct ip *ip;
647 #ifdef INET6
648 struct ip6_hdr *ip6;
649 #endif
650 int family; /* family on packet, not inpcb/in6pcb! */
651 struct tcphdr *th;
652 struct socket *so;
653
654 if (tp != NULL && (flags & TH_RST) == 0) {
655 #ifdef DIAGNOSTIC
656 if (tp->t_inpcb && tp->t_in6pcb)
657 panic("tcp_respond: both t_inpcb and t_in6pcb are set");
658 #endif
659 #ifdef INET
660 if (tp->t_inpcb)
661 win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
662 #endif
663 #ifdef INET6
664 if (tp->t_in6pcb)
665 win = sbspace(&tp->t_in6pcb->in6p_socket->so_rcv);
666 #endif
667 }
668
669 th = NULL; /* Quell uninitialized warning */
670 ip = NULL;
671 #ifdef INET6
672 ip6 = NULL;
673 #endif
674 if (m == 0) {
675 if (!mtemplate)
676 return EINVAL;
677
678 /* get family information from template */
679 switch (mtod(mtemplate, struct ip *)->ip_v) {
680 case 4:
681 family = AF_INET;
682 hlen = sizeof(struct ip);
683 break;
684 #ifdef INET6
685 case 6:
686 family = AF_INET6;
687 hlen = sizeof(struct ip6_hdr);
688 break;
689 #endif
690 default:
691 return EAFNOSUPPORT;
692 }
693
694 MGETHDR(m, M_DONTWAIT, MT_HEADER);
695 if (m) {
696 MCLAIM(m, &tcp_tx_mowner);
697 MCLGET(m, M_DONTWAIT);
698 if ((m->m_flags & M_EXT) == 0) {
699 m_free(m);
700 m = NULL;
701 }
702 }
703 if (m == NULL)
704 return (ENOBUFS);
705
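/*
 * With tcp_compat_42, segments built from the template carry one
 * garbage octet of payload, the traditional trick for getting
 * keep-alive probes answered by ancient 4.2BSD stacks that ignore
 * empty segments.
 */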
706 if (tcp_compat_42)
707 tlen = 1;
708 else
709 tlen = 0;
710
711 m->m_data += max_linkhdr;
712 bcopy(mtod(mtemplate, void *), mtod(m, void *),
713 mtemplate->m_len);
714 switch (family) {
715 case AF_INET:
716 ip = mtod(m, struct ip *);
717 th = (struct tcphdr *)(ip + 1);
718 break;
719 #ifdef INET6
720 case AF_INET6:
721 ip6 = mtod(m, struct ip6_hdr *);
722 th = (struct tcphdr *)(ip6 + 1);
723 break;
724 #endif
725 #if 0
726 default:
727 /* no one will visit here */
728 m_freem(m);
729 return EAFNOSUPPORT;
730 #endif
731 }
732 flags = TH_ACK;
733 } else {
734
735 if ((m->m_flags & M_PKTHDR) == 0) {
736 #if 0
737 printf("non PKTHDR to tcp_respond\n");
738 #endif
739 m_freem(m);
740 return EINVAL;
741 }
742 #ifdef DIAGNOSTIC
743 if (!th0)
744 panic("th0 == NULL in tcp_respond");
745 #endif
746
747 /* get family information from m */
748 switch (mtod(m, struct ip *)->ip_v) {
749 case 4:
750 family = AF_INET;
751 hlen = sizeof(struct ip);
752 ip = mtod(m, struct ip *);
753 break;
754 #ifdef INET6
755 case 6:
756 family = AF_INET6;
757 hlen = sizeof(struct ip6_hdr);
758 ip6 = mtod(m, struct ip6_hdr *);
759 break;
760 #endif
761 default:
762 m_freem(m);
763 return EAFNOSUPPORT;
764 }
765 /* clear h/w csum flags inherited from rx packet */
766 m->m_pkthdr.csum_flags = 0;
767
768 if ((flags & TH_SYN) == 0 || sizeof(*th0) > (th0->th_off << 2))
769 tlen = sizeof(*th0);
770 else
771 tlen = th0->th_off << 2;
772
773 if (m->m_len > hlen + tlen && (m->m_flags & M_EXT) == 0 &&
774 mtod(m, char *) + hlen == (char *)th0) {
775 m->m_len = hlen + tlen;
776 m_freem(m->m_next);
777 m->m_next = NULL;
778 } else {
779 struct mbuf *n;
780
781 #ifdef DIAGNOSTIC
782 if (max_linkhdr + hlen + tlen > MCLBYTES) {
783 m_freem(m);
784 return EMSGSIZE;
785 }
786 #endif
787 MGETHDR(n, M_DONTWAIT, MT_HEADER);
788 if (n && max_linkhdr + hlen + tlen > MHLEN) {
789 MCLGET(n, M_DONTWAIT);
790 if ((n->m_flags & M_EXT) == 0) {
791 m_freem(n);
792 n = NULL;
793 }
794 }
795 if (!n) {
796 m_freem(m);
797 return ENOBUFS;
798 }
799
800 MCLAIM(n, &tcp_tx_mowner);
801 n->m_data += max_linkhdr;
802 n->m_len = hlen + tlen;
803 m_copyback(n, 0, hlen, mtod(m, void *));
804 m_copyback(n, hlen, tlen, (void *)th0);
805
806 m_freem(m);
807 m = n;
808 n = NULL;
809 }
810
811 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
812 switch (family) {
813 case AF_INET:
814 ip = mtod(m, struct ip *);
815 th = (struct tcphdr *)(ip + 1);
816 ip->ip_p = IPPROTO_TCP;
817 xchg(ip->ip_dst, ip->ip_src, struct in_addr);
818 ip->ip_p = IPPROTO_TCP;
819 break;
820 #ifdef INET6
821 case AF_INET6:
822 ip6 = mtod(m, struct ip6_hdr *);
823 th = (struct tcphdr *)(ip6 + 1);
824 ip6->ip6_nxt = IPPROTO_TCP;
825 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
826 ip6->ip6_nxt = IPPROTO_TCP;
827 break;
828 #endif
829 #if 0
830 default:
831 /* no one will visit here */
832 m_freem(m);
833 return EAFNOSUPPORT;
834 #endif
835 }
836 xchg(th->th_dport, th->th_sport, u_int16_t);
837 #undef xchg
838 tlen = 0; /*be friendly with the following code*/
839 }
840 th->th_seq = htonl(seq);
841 th->th_ack = htonl(ack);
842 th->th_x2 = 0;
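/*
 * For non-SYN segments th_win is a bare 16-bit field and no window
 * scale option accompanies it, so pre-shift the advertised space by
 * our receive scale and clamp it to TCP_MAXWIN.
 */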
843 if ((flags & TH_SYN) == 0) {
844 if (tp)
845 win >>= tp->rcv_scale;
846 if (win > TCP_MAXWIN)
847 win = TCP_MAXWIN;
848 th->th_win = htons((u_int16_t)win);
849 th->th_off = sizeof (struct tcphdr) >> 2;
850 tlen += sizeof(*th);
851 } else
852 tlen += th->th_off << 2;
853 m->m_len = hlen + tlen;
854 m->m_pkthdr.len = hlen + tlen;
855 m->m_pkthdr.rcvif = NULL;
856 th->th_flags = flags;
857 th->th_urp = 0;
858
859 switch (family) {
860 #ifdef INET
861 case AF_INET:
862 {
863 struct ipovly *ipov = (struct ipovly *)ip;
864 memset(ipov->ih_x1, 0, sizeof ipov->ih_x1);
865 ipov->ih_len = htons((u_int16_t)tlen);
866
867 th->th_sum = 0;
868 th->th_sum = in_cksum(m, hlen + tlen);
869 ip->ip_len = htons(hlen + tlen);
870 ip->ip_ttl = ip_defttl;
871 break;
872 }
873 #endif
874 #ifdef INET6
875 case AF_INET6:
876 {
877 th->th_sum = 0;
878 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
879 tlen);
880 ip6->ip6_plen = htons(tlen);
881 if (tp && tp->t_in6pcb) {
882 struct ifnet *oifp;
883 ro = &tp->t_in6pcb->in6p_route;
884 oifp = (rt = rtcache_validate(ro)) != NULL ? rt->rt_ifp
885 : NULL;
886 ip6->ip6_hlim = in6_selecthlim(tp->t_in6pcb, oifp);
887 } else
888 ip6->ip6_hlim = ip6_defhlim;
889 ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK;
890 if (ip6_auto_flowlabel) {
891 ip6->ip6_flow |=
892 (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
893 }
894 break;
895 }
896 #endif
897 }
898
899 if (tp && tp->t_inpcb)
900 so = tp->t_inpcb->inp_socket;
901 #ifdef INET6
902 else if (tp && tp->t_in6pcb)
903 so = tp->t_in6pcb->in6p_socket;
904 #endif
905 else
906 so = NULL;
907
908 if (tp != NULL && tp->t_inpcb != NULL) {
909 ro = &tp->t_inpcb->inp_route;
910 #ifdef DIAGNOSTIC
911 if (family != AF_INET)
912 panic("tcp_respond: address family mismatch");
913 if (!in_hosteq(ip->ip_dst, tp->t_inpcb->inp_faddr)) {
914 panic("tcp_respond: ip_dst %x != inp_faddr %x",
915 ntohl(ip->ip_dst.s_addr),
916 ntohl(tp->t_inpcb->inp_faddr.s_addr));
917 }
918 #endif
919 }
920 #ifdef INET6
921 else if (tp != NULL && tp->t_in6pcb != NULL) {
922 ro = (struct route *)&tp->t_in6pcb->in6p_route;
923 #ifdef DIAGNOSTIC
924 if (family == AF_INET) {
925 if (!IN6_IS_ADDR_V4MAPPED(&tp->t_in6pcb->in6p_faddr))
926 panic("tcp_respond: not mapped addr");
927 if (memcmp(&ip->ip_dst,
928 &tp->t_in6pcb->in6p_faddr.s6_addr32[3],
929 sizeof(ip->ip_dst)) != 0) {
930 panic("tcp_respond: ip_dst != in6p_faddr");
931 }
932 } else if (family == AF_INET6) {
933 if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
934 &tp->t_in6pcb->in6p_faddr))
935 panic("tcp_respond: ip6_dst != in6p_faddr");
936 } else
937 panic("tcp_respond: address family mismatch");
938 #endif
939 }
940 #endif
941 else
942 ro = NULL;
943
944 switch (family) {
945 #ifdef INET
946 case AF_INET:
947 error = ip_output(m, NULL, ro,
948 (tp && tp->t_mtudisc ? IP_MTUDISC : 0), NULL, so);
949 break;
950 #endif
951 #ifdef INET6
952 case AF_INET6:
953 error = ip6_output(m, NULL, ro, 0, NULL, so, NULL);
954 break;
955 #endif
956 default:
957 error = EAFNOSUPPORT;
958 break;
959 }
960
961 return (error);
962 }
963
964 /*
965 * Template TCPCB. Rather than zeroing a new TCPCB and initializing
966 * a bunch of members individually, we maintain this template for the
967 * static and mostly-static components of the TCPCB, and copy it into
968 * the new TCPCB instead.
969 */
970 static struct tcpcb tcpcb_template = {
971 .t_srtt = TCPTV_SRTTBASE,
972 .t_rttmin = TCPTV_MIN,
973
974 .snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT,
975 .snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT,
976 .snd_numholes = 0,
977 .snd_cubic_wmax = 0,
978 .snd_cubic_wmax_last = 0,
979 .snd_cubic_ctime = 0,
980
981 .t_partialacks = -1,
982 .t_bytes_acked = 0,
983 .t_sndrexmitpack = 0,
984 .t_rcvoopack = 0,
985 .t_sndzerowin = 0,
986 };
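/*
 * Members not listed above start out zero/NULL courtesy of C static
 * initialization; the tunable defaults (MSS, RFC1323 flags, keepalive
 * and MSL values) are filled in by tcp_tcpcb_template() below.
 */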
987
988 /*
989 * Updates the TCPCB template whenever a parameter that would affect
990 * the template is changed.
991 */
992 void
993 tcp_tcpcb_template(void)
994 {
995 struct tcpcb *tp = &tcpcb_template;
996 int flags;
997
998 tp->t_peermss = tcp_mssdflt;
999 tp->t_ourmss = tcp_mssdflt;
1000 tp->t_segsz = tcp_mssdflt;
1001
1002 flags = 0;
1003 if (tcp_do_rfc1323 && tcp_do_win_scale)
1004 flags |= TF_REQ_SCALE;
1005 if (tcp_do_rfc1323 && tcp_do_timestamps)
1006 flags |= TF_REQ_TSTMP;
1007 tp->t_flags = flags;
1008
1009 /*
1010 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
1011 * rtt estimate. Set rttvar so that srtt + 2 * rttvar gives
1012 * reasonable initial retransmit time.
1013 */
1014 tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
1015 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1016 TCPTV_MIN, TCPTV_REXMTMAX);
1017
1018 /* Keep Alive */
1019 tp->t_keepinit = tcp_keepinit;
1020 tp->t_keepidle = tcp_keepidle;
1021 tp->t_keepintvl = tcp_keepintvl;
1022 tp->t_keepcnt = tcp_keepcnt;
1023 tp->t_maxidle = tp->t_keepcnt * tp->t_keepintvl;
1024
1025 /* MSL */
1026 tp->t_msl = TCPTV_MSL;
1027 }
1028
1029 /*
1030 * Create a new TCP control block, making an
1031 * empty reassembly queue and hooking it to the argument
1032 * protocol control block.
1033 */
1034 /* family selects inpcb, or in6pcb */
1035 struct tcpcb *
1036 tcp_newtcpcb(int family, void *aux)
1037 {
1038 #ifdef INET6
1039 struct rtentry *rt;
1040 #endif
1041 struct tcpcb *tp;
1042 int i;
1043
1044 /* XXX Consider using a pool_cache for speed. */
1045 tp = pool_get(&tcpcb_pool, PR_NOWAIT); /* splsoftnet via tcp_usrreq */
1046 if (tp == NULL)
1047 return (NULL);
1048 memcpy(tp, &tcpcb_template, sizeof(*tp));
1049 TAILQ_INIT(&tp->segq);
1050 TAILQ_INIT(&tp->timeq);
1051 tp->t_family = family; /* may be overridden later on */
1052 TAILQ_INIT(&tp->snd_holes);
1053 LIST_INIT(&tp->t_sc); /* XXX can template this */
1054
1055 /* Don't sweat this loop; hopefully the compiler will unroll it. */
1056 for (i = 0; i < TCPT_NTIMERS; i++) {
1057 callout_init(&tp->t_timer[i], CALLOUT_MPSAFE);
1058 TCP_TIMER_INIT(tp, i);
1059 }
1060 callout_init(&tp->t_delack_ch, CALLOUT_MPSAFE);
1061
1062 switch (family) {
1063 case AF_INET:
1064 {
1065 struct inpcb *inp = (struct inpcb *)aux;
1066
1067 inp->inp_ip.ip_ttl = ip_defttl;
1068 inp->inp_ppcb = (void *)tp;
1069
1070 tp->t_inpcb = inp;
1071 tp->t_mtudisc = ip_mtudisc;
1072 break;
1073 }
1074 #ifdef INET6
1075 case AF_INET6:
1076 {
1077 struct in6pcb *in6p = (struct in6pcb *)aux;
1078
1079 in6p->in6p_ip6.ip6_hlim = in6_selecthlim(in6p,
1080 (rt = rtcache_validate(&in6p->in6p_route)) != NULL
1081 ? rt->rt_ifp
1082 : NULL);
1083 in6p->in6p_ppcb = (void *)tp;
1084
1085 tp->t_in6pcb = in6p;
1086 /* for IPv6, always try to run path MTU discovery */
1087 tp->t_mtudisc = 1;
1088 break;
1089 }
1090 #endif /* INET6 */
1091 default:
1092 for (i = 0; i < TCPT_NTIMERS; i++)
1093 callout_destroy(&tp->t_timer[i]);
1094 callout_destroy(&tp->t_delack_ch);
1095 pool_put(&tcpcb_pool, tp); /* splsoftnet via tcp_usrreq */
1096 return (NULL);
1097 }
1098
1099 /*
1100 * Initialize our timebase. When we send timestamps, we take
1101 * the delta from tcp_now -- this means each connection always
1102 * gets a timebase of 1, which makes it, among other things,
1103 * more difficult to determine how long a system has been up,
1104 * and thus how many TCP sequence increments have occurred.
1105 *
1106 * We start with 1, because 0 doesn't work with linux, which
1107 * considers timestamp 0 in a SYN packet as a bug and disables
1108 * timestamps.
1109 */
1110 tp->ts_timebase = tcp_now - 1;
1111
1112 tcp_congctl_select(tp, tcp_congctl_global_name);
1113
1114 return (tp);
1115 }
1116
1117 /*
1118 * Drop a TCP connection, reporting
1119 * the specified error. If connection is synchronized,
1120 * then send a RST to peer.
1121 */
1122 struct tcpcb *
1123 tcp_drop(struct tcpcb *tp, int errno)
1124 {
1125 struct socket *so = NULL;
1126
1127 #ifdef DIAGNOSTIC
1128 if (tp->t_inpcb && tp->t_in6pcb)
1129 panic("tcp_drop: both t_inpcb and t_in6pcb are set");
1130 #endif
1131 #ifdef INET
1132 if (tp->t_inpcb)
1133 so = tp->t_inpcb->inp_socket;
1134 #endif
1135 #ifdef INET6
1136 if (tp->t_in6pcb)
1137 so = tp->t_in6pcb->in6p_socket;
1138 #endif
1139 if (!so)
1140 return NULL;
1141
1142 if (TCPS_HAVERCVDSYN(tp->t_state)) {
1143 tp->t_state = TCPS_CLOSED;
1144 (void) tcp_output(tp);
1145 TCP_STATINC(TCP_STAT_DROPS);
1146 } else
1147 TCP_STATINC(TCP_STAT_CONNDROPS);
1148 if (errno == ETIMEDOUT && tp->t_softerror)
1149 errno = tp->t_softerror;
1150 so->so_error = errno;
1151 return (tcp_close(tp));
1152 }
1153
1154 /*
1155 * Close a TCP control block:
1156 * discard all space held by the tcp
1157 * discard internet protocol block
1158 * wake up any sleepers
1159 */
1160 struct tcpcb *
1161 tcp_close(struct tcpcb *tp)
1162 {
1163 struct inpcb *inp;
1164 #ifdef INET6
1165 struct in6pcb *in6p;
1166 #endif
1167 struct socket *so;
1168 #ifdef RTV_RTT
1169 struct rtentry *rt;
1170 #endif
1171 struct route *ro;
1172 int j;
1173
1174 inp = tp->t_inpcb;
1175 #ifdef INET6
1176 in6p = tp->t_in6pcb;
1177 #endif
1178 so = NULL;
1179 ro = NULL;
1180 if (inp) {
1181 so = inp->inp_socket;
1182 ro = &inp->inp_route;
1183 }
1184 #ifdef INET6
1185 else if (in6p) {
1186 so = in6p->in6p_socket;
1187 ro = (struct route *)&in6p->in6p_route;
1188 }
1189 #endif
1190
1191 #ifdef RTV_RTT
1192 /*
1193 * If we sent enough data to get some meaningful characteristics,
1194 * save them in the routing entry. 'Enough' is arbitrarily
1195 * defined as the sendpipesize (default 4K) * 16. This would
1196 * give us 16 rtt samples assuming we only get one sample per
1197 * window (the usual case on a long haul net). 16 samples is
1198 * enough for the srtt filter to converge to within 5% of the correct
1199 * value; fewer samples and we could save a very bogus rtt.
1200 *
1201 * Don't update the default route's characteristics and don't
1202 * update anything that the user "locked".
1203 */
1204 if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
1205 ro && (rt = rtcache_validate(ro)) != NULL &&
1206 !in_nullhost(satocsin(rt_getkey(rt))->sin_addr)) {
1207 u_long i = 0;
1208
1209 if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
1210 i = tp->t_srtt *
1211 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
1212 if (rt->rt_rmx.rmx_rtt && i)
1213 /*
1214 * filter this update to half the old & half
1215 * the new values, converting scale.
1216 * See route.h and tcp_var.h for a
1217 * description of the scaling constants.
1218 */
1219 rt->rt_rmx.rmx_rtt =
1220 (rt->rt_rmx.rmx_rtt + i) / 2;
1221 else
1222 rt->rt_rmx.rmx_rtt = i;
1223 }
1224 if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
1225 i = tp->t_rttvar *
1226 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTTVAR_SHIFT + 2));
1227 if (rt->rt_rmx.rmx_rttvar && i)
1228 rt->rt_rmx.rmx_rttvar =
1229 (rt->rt_rmx.rmx_rttvar + i) / 2;
1230 else
1231 rt->rt_rmx.rmx_rttvar = i;
1232 }
1233 /*
1234 * update the pipelimit (ssthresh) if it has been updated
1235 * already or if a pipesize was specified & the threshold
1236 * got below half the pipesize. I.e., wait for bad news
1237 * before we start updating, then update on both good
1238 * and bad news.
1239 */
1240 if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
1241 (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) ||
1242 i < (rt->rt_rmx.rmx_sendpipe / 2)) {
1243 /*
1244 * convert the limit from user data bytes to
1245 * packets then to packet data bytes.
1246 */
1247 i = (i + tp->t_segsz / 2) / tp->t_segsz;
1248 if (i < 2)
1249 i = 2;
1250 i *= (u_long)(tp->t_segsz + sizeof (struct tcpiphdr));
1251 if (rt->rt_rmx.rmx_ssthresh)
1252 rt->rt_rmx.rmx_ssthresh =
1253 (rt->rt_rmx.rmx_ssthresh + i) / 2;
1254 else
1255 rt->rt_rmx.rmx_ssthresh = i;
1256 }
1257 }
1258 #endif /* RTV_RTT */
1259 /* free the reassembly queue, if any */
1260 TCP_REASS_LOCK(tp);
1261 (void) tcp_freeq(tp);
1262 TCP_REASS_UNLOCK(tp);
1263
1264 /* free the SACK holes list. */
1265 tcp_free_sackholes(tp);
1266 tcp_congctl_release(tp);
1267 syn_cache_cleanup(tp);
1268
1269 if (tp->t_template) {
1270 m_free(tp->t_template);
1271 tp->t_template = NULL;
1272 }
1273
1274 /*
1275 * Detaching the pcb will unlock the socket/tcpcb, and stopping
1276 * the timers can also drop the lock. We need to prevent access
1277 * to the tcpcb as it's half torn down. Flag the pcb as dead
1278 * (prevents access by timers) and only then detach it.
1279 */
1280 tp->t_flags |= TF_DEAD;
1281 if (inp) {
1282 inp->inp_ppcb = 0;
1283 soisdisconnected(so);
1284 in_pcbdetach(inp);
1285 }
1286 #ifdef INET6
1287 else if (in6p) {
1288 in6p->in6p_ppcb = 0;
1289 soisdisconnected(so);
1290 in6_pcbdetach(in6p);
1291 }
1292 #endif
1293 /*
1294 * pcb is no longer visible elsewhere, so we can safely release
1295 * the lock in callout_halt() if needed.
1296 */
1297 TCP_STATINC(TCP_STAT_CLOSED);
1298 for (j = 0; j < TCPT_NTIMERS; j++) {
1299 callout_halt(&tp->t_timer[j], softnet_lock);
1300 callout_destroy(&tp->t_timer[j]);
1301 }
1302 callout_halt(&tp->t_delack_ch, softnet_lock);
1303 callout_destroy(&tp->t_delack_ch);
1304 pool_put(&tcpcb_pool, tp);
1305
1306 return NULL;
1307 }
1308
1309 int
1310 tcp_freeq(struct tcpcb *tp)
1311 {
1312 struct ipqent *qe;
1313 int rv = 0;
1314 #ifdef TCPREASS_DEBUG
1315 int i = 0;
1316 #endif
1317
1318 TCP_REASS_LOCK_CHECK(tp);
1319
1320 while ((qe = TAILQ_FIRST(&tp->segq)) != NULL) {
1321 #ifdef TCPREASS_DEBUG
1322 printf("tcp_freeq[%p,%d]: %u:%u(%u) 0x%02x\n",
1323 tp, i++, qe->ipqe_seq, qe->ipqe_seq + qe->ipqe_len,
1324 qe->ipqe_len, qe->ipqe_flags & (TH_SYN|TH_FIN|TH_RST));
1325 #endif
1326 TAILQ_REMOVE(&tp->segq, qe, ipqe_q);
1327 TAILQ_REMOVE(&tp->timeq, qe, ipqe_timeq);
1328 m_freem(qe->ipqe_m);
1329 tcpipqent_free(qe);
1330 rv = 1;
1331 }
1332 tp->t_segqlen = 0;
1333 KASSERT(TAILQ_EMPTY(&tp->timeq));
1334 return (rv);
1335 }
1336
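/*
 * tcp_fasttimo() runs in callout context from the protocol fast
 * timeout and performs any drain deferred by tcp_drainstub(); the
 * stub only sets a flag, so low-memory callers, possibly in interrupt
 * context, never do the heavy work directly.
 */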
1337 void
1338 tcp_fasttimo(void)
1339 {
1340 if (tcp_drainwanted) {
1341 tcp_drain();
1342 tcp_drainwanted = 0;
1343 }
1344 }
1345
1346 void
1347 tcp_drainstub(void)
1348 {
1349 tcp_drainwanted = 1;
1350 }
1351
1352 /*
1353 * Protocol drain routine. Called when memory is in short supply.
1354 * Called from pr_fasttimo thus a callout context.
1355 */
1356 void
1357 tcp_drain(void)
1358 {
1359 struct inpcb_hdr *inph;
1360 struct tcpcb *tp;
1361
1362 mutex_enter(softnet_lock);
1363 KERNEL_LOCK(1, NULL);
1364
1365 /*
1366 * Free the sequence queue of all TCP connections.
1367 */
1368 TAILQ_FOREACH(inph, &tcbtable.inpt_queue, inph_queue) {
1369 switch (inph->inph_af) {
1370 case AF_INET:
1371 tp = intotcpcb((struct inpcb *)inph);
1372 break;
1373 #ifdef INET6
1374 case AF_INET6:
1375 tp = in6totcpcb((struct in6pcb *)inph);
1376 break;
1377 #endif
1378 default:
1379 tp = NULL;
1380 break;
1381 }
1382 if (tp != NULL) {
1383 /*
1384 * We may be called from a device's interrupt
1385 * context. If the tcpcb is already busy,
1386 * just bail out now.
1387 */
1388 if (tcp_reass_lock_try(tp) == 0)
1389 continue;
1390 if (tcp_freeq(tp))
1391 TCP_STATINC(TCP_STAT_CONNSDRAINED);
1392 TCP_REASS_UNLOCK(tp);
1393 }
1394 }
1395
1396 KERNEL_UNLOCK_ONE(NULL);
1397 mutex_exit(softnet_lock);
1398 }
1399
1400 /*
1401 * Notify a tcp user of an asynchronous error;
1402 * store the error as a soft error, but wake up the user
1403 * (for now this does little, since soft errors cannot yet be selected for).
1404 */
1405 void
1406 tcp_notify(struct inpcb *inp, int error)
1407 {
1408 struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
1409 struct socket *so = inp->inp_socket;
1410
1411 /*
1412 * Ignore some errors if we are hooked up.
1413 * If connection hasn't completed, has retransmitted several times,
1414 * and receives a second error, give up now. This is better
1415 * than waiting a long time to establish a connection that
1416 * can never complete.
1417 */
1418 if (tp->t_state == TCPS_ESTABLISHED &&
1419 (error == EHOSTUNREACH || error == ENETUNREACH ||
1420 error == EHOSTDOWN)) {
1421 return;
1422 } else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
1423 tp->t_rxtshift > 3 && tp->t_softerror)
1424 so->so_error = error;
1425 else
1426 tp->t_softerror = error;
1427 cv_broadcast(&so->so_cv);
1428 sorwakeup(so);
1429 sowwakeup(so);
1430 }
1431
1432 #ifdef INET6
1433 void
1434 tcp6_notify(struct in6pcb *in6p, int error)
1435 {
1436 struct tcpcb *tp = (struct tcpcb *)in6p->in6p_ppcb;
1437 struct socket *so = in6p->in6p_socket;
1438
1439 /*
1440 * Ignore some errors if we are hooked up.
1441 * If connection hasn't completed, has retransmitted several times,
1442 * and receives a second error, give up now. This is better
1443 * than waiting a long time to establish a connection that
1444 * can never complete.
1445 */
1446 if (tp->t_state == TCPS_ESTABLISHED &&
1447 (error == EHOSTUNREACH || error == ENETUNREACH ||
1448 error == EHOSTDOWN)) {
1449 return;
1450 } else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
1451 tp->t_rxtshift > 3 && tp->t_softerror)
1452 so->so_error = error;
1453 else
1454 tp->t_softerror = error;
1455 cv_broadcast(&so->so_cv);
1456 sorwakeup(so);
1457 sowwakeup(so);
1458 }
1459 #endif
1460
1461 #ifdef INET6
1462 void *
1463 tcp6_ctlinput(int cmd, const struct sockaddr *sa, void *d)
1464 {
1465 struct tcphdr th;
1466 void (*notify)(struct in6pcb *, int) = tcp6_notify;
1467 int nmatch;
1468 struct ip6_hdr *ip6;
1469 const struct sockaddr_in6 *sa6_src = NULL;
1470 const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *)sa;
1471 struct mbuf *m;
1472 int off;
1473
1474 if (sa->sa_family != AF_INET6 ||
1475 sa->sa_len != sizeof(struct sockaddr_in6))
1476 return NULL;
1477 if ((unsigned)cmd >= PRC_NCMDS)
1478 return NULL;
1479 else if (cmd == PRC_QUENCH) {
1480 /*
1481 * Don't honor ICMP Source Quench messages meant for
1482 * TCP connections.
1483 */
1484 return NULL;
1485 } else if (PRC_IS_REDIRECT(cmd))
1486 notify = in6_rtchange, d = NULL;
1487 else if (cmd == PRC_MSGSIZE)
1488 ; /* special code is present, see below */
1489 else if (cmd == PRC_HOSTDEAD)
1490 d = NULL;
1491 else if (inet6ctlerrmap[cmd] == 0)
1492 return NULL;
1493
1494 /* if the parameter is from icmp6, decode it. */
1495 if (d != NULL) {
1496 struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
1497 m = ip6cp->ip6c_m;
1498 ip6 = ip6cp->ip6c_ip6;
1499 off = ip6cp->ip6c_off;
1500 sa6_src = ip6cp->ip6c_src;
1501 } else {
1502 m = NULL;
1503 ip6 = NULL;
1504 sa6_src = &sa6_any;
1505 off = 0;
1506 }
1507
1508 if (ip6) {
1509 /*
1510 * XXX: We assume that when ip6 is non NULL,
1511 * M and OFF are valid.
1512 */
1513
1514 /* check if we can safely examine src and dst ports */
1515 if (m->m_pkthdr.len < off + sizeof(th)) {
1516 if (cmd == PRC_MSGSIZE)
1517 icmp6_mtudisc_update((struct ip6ctlparam *)d, 0);
1518 return NULL;
1519 }
1520
1521 memset(&th, 0, sizeof(th));
1522 m_copydata(m, off, sizeof(th), (void *)&th);
1523
1524 if (cmd == PRC_MSGSIZE) {
1525 int valid = 0;
1526
1527 /*
1528 * Check to see if we have a valid TCP connection
1529 * corresponding to the address in the ICMPv6 message
1530 * payload.
1531 */
1532 if (in6_pcblookup_connect(&tcbtable, &sa6->sin6_addr,
1533 th.th_dport,
1534 (const struct in6_addr *)&sa6_src->sin6_addr,
1535 th.th_sport, 0, 0))
1536 valid++;
1537
1538 /*
1539 * Depending on the value of "valid" and routing table
1540 * size (mtudisc_{hi,lo}wat), we will:
1541 * - recalculate the new MTU and create the
1542 * corresponding routing entry, or
1543 * - ignore the MTU change notification.
1544 */
1545 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1546
1547 /*
1548 * no need to call in6_pcbnotify, it should have been
1549 * called via callback if necessary
1550 */
1551 return NULL;
1552 }
1553
1554 nmatch = in6_pcbnotify(&tcbtable, sa, th.th_dport,
1555 (const struct sockaddr *)sa6_src, th.th_sport, cmd, NULL, notify);
1556 if (nmatch == 0 && syn_cache_count &&
1557 (inet6ctlerrmap[cmd] == EHOSTUNREACH ||
1558 inet6ctlerrmap[cmd] == ENETUNREACH ||
1559 inet6ctlerrmap[cmd] == EHOSTDOWN))
1560 syn_cache_unreach((const struct sockaddr *)sa6_src,
1561 sa, &th);
1562 } else {
1563 (void) in6_pcbnotify(&tcbtable, sa, 0,
1564 (const struct sockaddr *)sa6_src, 0, cmd, NULL, notify);
1565 }
1566
1567 return NULL;
1568 }
1569 #endif
1570
1571 #ifdef INET
1572 /* assumes that ip header and tcp header are contiguous on mbuf */
1573 void *
1574 tcp_ctlinput(int cmd, const struct sockaddr *sa, void *v)
1575 {
1576 struct ip *ip = v;
1577 struct tcphdr *th;
1578 struct icmp *icp;
1579 extern const int inetctlerrmap[];
1580 void (*notify)(struct inpcb *, int) = tcp_notify;
1581 int errno;
1582 int nmatch;
1583 struct tcpcb *tp;
1584 u_int mtu;
1585 tcp_seq seq;
1586 struct inpcb *inp;
1587 #ifdef INET6
1588 struct in6pcb *in6p;
1589 struct in6_addr src6, dst6;
1590 #endif
1591
1592 if (sa->sa_family != AF_INET ||
1593 sa->sa_len != sizeof(struct sockaddr_in))
1594 return NULL;
1595 if ((unsigned)cmd >= PRC_NCMDS)
1596 return NULL;
1597 errno = inetctlerrmap[cmd];
1598 if (cmd == PRC_QUENCH)
1599 /*
1600 * Don't honor ICMP Source Quench messages meant for
1601 * TCP connections.
1602 */
1603 return NULL;
1604 else if (PRC_IS_REDIRECT(cmd))
1605 notify = in_rtchange, ip = 0;
1606 else if (cmd == PRC_MSGSIZE && ip && ip->ip_v == 4) {
1607 /*
1608 * Check to see if we have a valid TCP connection
1609 * corresponding to the address in the ICMP message
1610 * payload.
1611 *
1612 * Boundary check is made in icmp_input(), with ICMP_ADVLENMIN.
1613 */
1614 th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
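/*
 * Also build IPv4-mapped forms (::ffff:a.b.c.d) of the addresses so
 * the lookup below can find an AF_INET6 socket serving this IPv4
 * connection.
 */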
1615 #ifdef INET6
1616 memset(&src6, 0, sizeof(src6));
1617 memset(&dst6, 0, sizeof(dst6));
1618 src6.s6_addr16[5] = dst6.s6_addr16[5] = 0xffff;
1619 memcpy(&src6.s6_addr32[3], &ip->ip_src, sizeof(struct in_addr));
1620 memcpy(&dst6.s6_addr32[3], &ip->ip_dst, sizeof(struct in_addr));
1621 #endif
1622 if ((inp = in_pcblookup_connect(&tcbtable, ip->ip_dst,
1623 th->th_dport, ip->ip_src, th->th_sport, 0)) != NULL)
1624 #ifdef INET6
1625 in6p = NULL;
1626 #else
1627 ;
1628 #endif
1629 #ifdef INET6
1630 else if ((in6p = in6_pcblookup_connect(&tcbtable, &dst6,
1631 th->th_dport, &src6, th->th_sport, 0, 0)) != NULL)
1632 ;
1633 #endif
1634 else
1635 return NULL;
1636
1637 /*
1638 * Now that we've validated that we are actually communicating
1639 * with the host indicated in the ICMP message, locate the
1640 * ICMP header, recalculate the new MTU, and create the
1641 * corresponding routing entry.
1642 */
1643 icp = (struct icmp *)((char *)ip -
1644 offsetof(struct icmp, icmp_ip));
1645 if (inp) {
1646 if ((tp = intotcpcb(inp)) == NULL)
1647 return NULL;
1648 }
1649 #ifdef INET6
1650 else if (in6p) {
1651 if ((tp = in6totcpcb(in6p)) == NULL)
1652 return NULL;
1653 }
1654 #endif
1655 else
1656 return NULL;
1657 seq = ntohl(th->th_seq);
1658 if (SEQ_LT(seq, tp->snd_una) || SEQ_GT(seq, tp->snd_max))
1659 return NULL;
1660 /*
1661 * If the ICMP message advertises a Next-Hop MTU
1662 * equal or larger than the maximum packet size we have
1663 * ever sent, drop the message.
1664 */
1665 mtu = (u_int)ntohs(icp->icmp_nextmtu);
1666 if (mtu >= tp->t_pmtud_mtu_sent)
1667 return NULL;
1668 if (mtu >= tcp_hdrsz(tp) + tp->t_pmtud_mss_acked) {
1669 /*
1670 * Calculate new MTU, and create corresponding
1671 * route (traditional PMTUD).
1672 */
1673 tp->t_flags &= ~TF_PMTUD_PEND;
1674 icmp_mtudisc(icp, ip->ip_dst);
1675 } else {
1676 /*
1677 * Record the information got in the ICMP
1678 * message; act on it later.
1679 * If we had already recorded an ICMP message,
1680 * replace the old one only if the new message
1681 * refers to an older TCP segment
1682 */
1683 if (tp->t_flags & TF_PMTUD_PEND) {
1684 if (SEQ_LT(tp->t_pmtud_th_seq, seq))
1685 return NULL;
1686 } else
1687 tp->t_flags |= TF_PMTUD_PEND;
1688 tp->t_pmtud_th_seq = seq;
1689 tp->t_pmtud_nextmtu = icp->icmp_nextmtu;
1690 tp->t_pmtud_ip_len = icp->icmp_ip.ip_len;
1691 tp->t_pmtud_ip_hl = icp->icmp_ip.ip_hl;
1692 }
1693 return NULL;
1694 } else if (cmd == PRC_HOSTDEAD)
1695 ip = 0;
1696 else if (errno == 0)
1697 return NULL;
1698 if (ip && ip->ip_v == 4 && sa->sa_family == AF_INET) {
1699 th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
1700 nmatch = in_pcbnotify(&tcbtable, satocsin(sa)->sin_addr,
1701 th->th_dport, ip->ip_src, th->th_sport, errno, notify);
1702 if (nmatch == 0 && syn_cache_count &&
1703 (inetctlerrmap[cmd] == EHOSTUNREACH ||
1704 inetctlerrmap[cmd] == ENETUNREACH ||
1705 inetctlerrmap[cmd] == EHOSTDOWN)) {
1706 struct sockaddr_in sin;
1707 memset(&sin, 0, sizeof(sin));
1708 sin.sin_len = sizeof(sin);
1709 sin.sin_family = AF_INET;
1710 sin.sin_port = th->th_sport;
1711 sin.sin_addr = ip->ip_src;
1712 syn_cache_unreach((struct sockaddr *)&sin, sa, th);
1713 }
1714
1715 /* XXX mapped address case */
1716 } else
1717 in_pcbnotifyall(&tcbtable, satocsin(sa)->sin_addr, errno,
1718 notify);
1719 return NULL;
1720 }
1721
1722 /*
1723 * When a source quench is received, we are being notified of congestion.
1724 * Close the congestion window down to the Loss Window (one segment).
1725 * We will gradually open it again as we proceed.
1726 */
1727 void
1728 tcp_quench(struct inpcb *inp, int errno)
1729 {
1730 struct tcpcb *tp = intotcpcb(inp);
1731
1732 if (tp) {
1733 tp->snd_cwnd = tp->t_segsz;
1734 tp->t_bytes_acked = 0;
1735 }
1736 }
1737 #endif
1738
1739 #ifdef INET6
1740 void
1741 tcp6_quench(struct in6pcb *in6p, int errno)
1742 {
1743 struct tcpcb *tp = in6totcpcb(in6p);
1744
1745 if (tp) {
1746 tp->snd_cwnd = tp->t_segsz;
1747 tp->t_bytes_acked = 0;
1748 }
1749 }
1750 #endif
1751
1752 #ifdef INET
1753 /*
1754 * Path MTU Discovery handlers.
1755 */
1756 void
1757 tcp_mtudisc_callback(struct in_addr faddr)
1758 {
1759 #ifdef INET6
1760 struct in6_addr in6;
1761 #endif
1762
1763 in_pcbnotifyall(&tcbtable, faddr, EMSGSIZE, tcp_mtudisc);
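/*
 * Repeat the notification for the IPv4-mapped form of the address,
 * in case the connection is handled by an AF_INET6 socket.
 */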
1764 #ifdef INET6
1765 memset(&in6, 0, sizeof(in6));
1766 in6.s6_addr16[5] = 0xffff;
1767 memcpy(&in6.s6_addr32[3], &faddr, sizeof(struct in_addr));
1768 tcp6_mtudisc_callback(&in6);
1769 #endif
1770 }
1771
1772 /*
1773 * On receipt of path MTU corrections, flush old route and replace it
1774 * with the new one. Retransmit all unacknowledged packets, to ensure
1775 * that all packets will be received.
1776 */
1777 void
1778 tcp_mtudisc(struct inpcb *inp, int errno)
1779 {
1780 struct tcpcb *tp = intotcpcb(inp);
1781 struct rtentry *rt = in_pcbrtentry(inp);
1782
1783 if (tp != 0) {
1784 if (rt != 0) {
1785 /*
1786 * If this was not a host route, remove and realloc.
1787 */
1788 if ((rt->rt_flags & RTF_HOST) == 0) {
1789 in_rtchange(inp, errno);
1790 if ((rt = in_pcbrtentry(inp)) == 0)
1791 return;
1792 }
1793
1794 /*
1795 * Slow start out of the error condition. We
1796 * use the MTU because we know it's smaller
1797 * than the previously transmitted segment.
1798 *
1799 * Note: This is more conservative than the
1800 * suggestion in draft-floyd-incr-init-win-03.
1801 */
1802 if (rt->rt_rmx.rmx_mtu != 0)
1803 tp->snd_cwnd =
1804 TCP_INITIAL_WINDOW(tcp_init_win,
1805 rt->rt_rmx.rmx_mtu);
1806 }
1807
1808 /*
1809 * Resend unacknowledged packets.
1810 */
1811 tp->snd_nxt = tp->sack_newdata = tp->snd_una;
1812 tcp_output(tp);
1813 }
1814 }
1815 #endif
1816
1817 #ifdef INET6
1818 /*
1819 * Path MTU Discovery handlers.
1820 */
1821 void
1822 tcp6_mtudisc_callback(struct in6_addr *faddr)
1823 {
1824 struct sockaddr_in6 sin6;
1825
1826 memset(&sin6, 0, sizeof(sin6));
1827 sin6.sin6_family = AF_INET6;
1828 sin6.sin6_len = sizeof(struct sockaddr_in6);
1829 sin6.sin6_addr = *faddr;
1830 (void) in6_pcbnotify(&tcbtable, (struct sockaddr *)&sin6, 0,
1831 (const struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp6_mtudisc);
1832 }
1833
1834 void
1835 tcp6_mtudisc(struct in6pcb *in6p, int errno)
1836 {
1837 struct tcpcb *tp = in6totcpcb(in6p);
1838 struct rtentry *rt = in6_pcbrtentry(in6p);
1839
1840 if (tp != 0) {
1841 if (rt != 0) {
1842 /*
1843 * If this was not a host route, remove and realloc.
1844 */
1845 if ((rt->rt_flags & RTF_HOST) == 0) {
1846 in6_rtchange(in6p, errno);
1847 if ((rt = in6_pcbrtentry(in6p)) == 0)
1848 return;
1849 }
1850
1851 /*
1852 * Slow start out of the error condition. We
1853 * use the MTU because we know it's smaller
1854 * than the previously transmitted segment.
1855 *
1856 * Note: This is more conservative than the
1857 * suggestion in draft-floyd-incr-init-win-03.
1858 */
1859 if (rt->rt_rmx.rmx_mtu != 0)
1860 tp->snd_cwnd =
1861 TCP_INITIAL_WINDOW(tcp_init_win,
1862 rt->rt_rmx.rmx_mtu);
1863 }
1864
1865 /*
1866 * Resend unacknowledged packets.
1867 */
1868 tp->snd_nxt = tp->sack_newdata = tp->snd_una;
1869 tcp_output(tp);
1870 }
1871 }
1872 #endif /* INET6 */
1873
1874 /*
1875 * Compute the MSS to advertise to the peer. Called only during
1876 * the 3-way handshake. If we are the server (peer initiated
1877 * connection), we are called with a pointer to the interface
1878 * on which the SYN packet arrived. If we are the client (we
1879 * initiated connection), we are called with a pointer to the
1880 * interface out which this connection should go.
1881 *
1882 * NOTE: Do not subtract IP option/extension header size nor IPsec
1883 * header size from MSS advertisement. MSS option must hold the maximum
1884 * segment size we can accept, so it must always be:
1885 * max(if mtu) - ip header - tcp header
1886 */
1887 u_long
1888 tcp_mss_to_advertise(const struct ifnet *ifp, int af)
1889 {
1890 extern u_long in_maxmtu;
1891 u_long mss = 0;
1892 u_long hdrsiz;
1893
1894 /*
1895 * In order to avoid defeating path MTU discovery on the peer,
1896 * we advertise the max MTU of all attached networks as our MSS,
1897 * per RFC 1191, section 3.1.
1898 *
1899 * We provide the option to advertise just the MTU of
1900 * the interface on which we hope this connection will
1901 * be receiving. If we are responding to a SYN, we
1902 * will have a pretty good idea about this, but when
1903 * initiating a connection there is a bit more doubt.
1904 *
1905 * We also need to ensure that loopback has a large enough
1906 * MSS, as the loopback MTU is never included in in_maxmtu.
1907 */
1908
1909 if (ifp != NULL)
1910 switch (af) {
1911 case AF_INET:
1912 mss = ifp->if_mtu;
1913 break;
1914 #ifdef INET6
1915 case AF_INET6:
1916 mss = IN6_LINKMTU(ifp);
1917 break;
1918 #endif
1919 }
1920
1921 if (tcp_mss_ifmtu == 0)
1922 switch (af) {
1923 case AF_INET:
1924 mss = max(in_maxmtu, mss);
1925 break;
1926 #ifdef INET6
1927 case AF_INET6:
1928 mss = max(in6_maxmtu, mss);
1929 break;
1930 #endif
1931 }
1932
1933 switch (af) {
1934 case AF_INET:
1935 hdrsiz = sizeof(struct ip);
1936 break;
1937 #ifdef INET6
1938 case AF_INET6:
1939 hdrsiz = sizeof(struct ip6_hdr);
1940 break;
1941 #endif
1942 default:
1943 hdrsiz = 0;
1944 break;
1945 }
1946 hdrsiz += sizeof(struct tcphdr);
1947 if (mss > hdrsiz)
1948 mss -= hdrsiz;
1949
1950 mss = max(tcp_mssdflt, mss);
1951 return (mss);
1952 }
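/*
 * Example of the advertisement arithmetic above, assuming a plain
 * 1500-byte Ethernet MTU and that no other attached network pushes
 * in_maxmtu/in6_maxmtu beyond it:
 *
 *	AF_INET:  1500 - sizeof(struct ip)      - sizeof(struct tcphdr)
 *	        = 1500 - 20 - 20 = 1460
 *	AF_INET6: 1500 - sizeof(struct ip6_hdr) - sizeof(struct tcphdr)
 *	        = 1500 - 40 - 20 = 1440
 *
 * Per the NOTE above, IP options, extension headers and IPsec overhead
 * are deliberately not subtracted from the advertised value.
 */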
1953
1954 /*
1955 * Set connection variables based on the peer's advertised MSS.
1956 * We are passed the TCPCB for the actual connection. If we
1957 * are the server, we are called by the compressed state engine
1958 * when the 3-way handshake is complete. If we are the client,
1959 * we are called when we receive the SYN,ACK from the server.
1960 *
1961 * NOTE: Our advertised MSS value must be initialized in the TCPCB
1962 * before this routine is called!
1963 */
1964 void
1965 tcp_mss_from_peer(struct tcpcb *tp, int offer)
1966 {
1967 struct socket *so;
1968 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1969 struct rtentry *rt;
1970 #endif
1971 u_long bufsize;
1972 int mss;
1973
1974 #ifdef DIAGNOSTIC
1975 if (tp->t_inpcb && tp->t_in6pcb)
1976 panic("tcp_mss_from_peer: both t_inpcb and t_in6pcb are set");
1977 #endif
1978 so = NULL;
1979 rt = NULL;
1980 #ifdef INET
1981 if (tp->t_inpcb) {
1982 so = tp->t_inpcb->inp_socket;
1983 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1984 rt = in_pcbrtentry(tp->t_inpcb);
1985 #endif
1986 }
1987 #endif
1988 #ifdef INET6
1989 if (tp->t_in6pcb) {
1990 so = tp->t_in6pcb->in6p_socket;
1991 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1992 rt = in6_pcbrtentry(tp->t_in6pcb);
1993 #endif
1994 }
1995 #endif
1996
1997 /*
1998 * As per RFC1122, use the default MSS value, unless they
1999 * sent us an offer. Do not accept offers less than 256 bytes.
2000 */
2001 mss = tcp_mssdflt;
2002 if (offer)
2003 mss = offer;
2004 mss = max(mss, 256); /* sanity */
2005 tp->t_peermss = mss;
2006 mss -= tcp_optlen(tp);
2007 #ifdef INET
2008 if (tp->t_inpcb)
2009 mss -= ip_optlen(tp->t_inpcb);
2010 #endif
2011 #ifdef INET6
2012 if (tp->t_in6pcb)
2013 mss -= ip6_optlen(tp->t_in6pcb);
2014 #endif
2015
2016 /*
2017 * If there's a pipesize, change the socket buffer to that size.
2018 * Make the socket buffer an integral number of MSS units. If
2019 * the MSS is larger than the socket buffer, artificially decrease
2020 * the MSS.
2021 */
2022 #ifdef RTV_SPIPE
2023 if (rt != NULL && rt->rt_rmx.rmx_sendpipe != 0)
2024 bufsize = rt->rt_rmx.rmx_sendpipe;
2025 else
2026 #endif
2027 {
2028 KASSERT(so != NULL);
2029 bufsize = so->so_snd.sb_hiwat;
2030 }
2031 if (bufsize < mss)
2032 mss = bufsize;
2033 else {
2034 bufsize = roundup(bufsize, mss);
2035 if (bufsize > sb_max)
2036 bufsize = sb_max;
2037 (void) sbreserve(&so->so_snd, bufsize, so);
2038 }
2039 tp->t_segsz = mss;
2040
2041 #ifdef RTV_SSTHRESH
2042 if (rt != NULL && rt->rt_rmx.rmx_ssthresh) {
2043 /*
2044 * There's some sort of gateway or interface buffer
2045 * limit on the path. Use this to set the slow
2046 * start threshold, but set the threshold to no less
2047 * than 2 * MSS.
2048 */
2049 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
2050 }
2051 #endif
2052 }
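/*
 * Worked example of the buffer sizing above (illustrative numbers, not
 * from this file): with a peer offer of 1460 and timestamps negotiated,
 * tcp_optlen() returns TCPOLEN_TSTAMP_APPA (12), so mss becomes 1448.
 * If no rmx_sendpipe metric is set and the send buffer is a traditional
 * 32768-byte default, it is rounded up to a whole number of segments:
 *
 *	roundup(32768, 1448) = 23 * 1448 = 33304
 *
 * and reserved (subject to the sb_max cap).  A send buffer smaller than
 * the mss instead clamps t_segsz down to the buffer size.  If the route
 * also carries an rmx_ssthresh metric, snd_ssthresh starts at
 * max(2 * mss, rmx_ssthresh) rather than being left at its initial
 * value.
 */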
2053
2054 /*
2055 * Processing necessary when a TCP connection is established.
2056 */
2057 void
2058 tcp_established(struct tcpcb *tp)
2059 {
2060 struct socket *so;
2061 #ifdef RTV_RPIPE
2062 struct rtentry *rt;
2063 #endif
2064 u_long bufsize;
2065
2066 #ifdef DIAGNOSTIC
2067 if (tp->t_inpcb && tp->t_in6pcb)
2068 panic("tcp_established: both t_inpcb and t_in6pcb are set");
2069 #endif
2070 so = NULL;
2071 rt = NULL;
2072 #ifdef INET
2073 /* This is a while() to reduce the dreadful stairstepping below */
2074 while (tp->t_inpcb) {
2075 so = tp->t_inpcb->inp_socket;
2076 #if defined(RTV_RPIPE)
2077 rt = in_pcbrtentry(tp->t_inpcb);
2078 #endif
2079 if (__predict_true(tcp_msl_enable)) {
2080 if (tp->t_inpcb->inp_laddr.s_addr == INADDR_LOOPBACK) {
2081 tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
2082 break;
2083 }
2084
2085 if (__predict_false(tcp_rttlocal)) {
2086 /* This may be adjusted by tcp_input */
2087 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2088 break;
2089 }
2090 if (in_localaddr(tp->t_inpcb->inp_faddr)) {
2091 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2092 break;
2093 }
2094 }
2095 tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
2096 break;
2097 }
2098 #endif
2099 #ifdef INET6
2100 /* The !tp->t_inpcb lets the compiler know it can't be v4 *and* v6 */
2101 while (!tp->t_inpcb && tp->t_in6pcb) {
2102 so = tp->t_in6pcb->in6p_socket;
2103 #if defined(RTV_RPIPE)
2104 rt = in6_pcbrtentry(tp->t_in6pcb);
2105 #endif
2106 if (__predict_true(tcp_msl_enable)) {
2107 extern const struct in6_addr in6addr_loopback;
2108
2109 if (IN6_ARE_ADDR_EQUAL(&tp->t_in6pcb->in6p_laddr,
2110 &in6addr_loopback)) {
2111 tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
2112 break;
2113 }
2114
2115 if (__predict_false(tcp_rttlocal)) {
2116 /* This may be adjusted by tcp_input */
2117 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2118 break;
2119 }
2120 if (in6_localaddr(&tp->t_in6pcb->in6p_faddr)) {
2121 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2122 break;
2123 }
2124 }
2125 tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
2126 break;
2127 }
2128 #endif
2129
2130 tp->t_state = TCPS_ESTABLISHED;
2131 TCP_TIMER_ARM(tp, TCPT_KEEP, tp->t_keepidle);
2132
2133 #ifdef RTV_RPIPE
2134 if (rt != NULL && rt->rt_rmx.rmx_recvpipe != 0)
2135 bufsize = rt->rt_rmx.rmx_recvpipe;
2136 else
2137 #endif
2138 {
2139 KASSERT(so != NULL);
2140 bufsize = so->so_rcv.sb_hiwat;
2141 }
2142 if (bufsize > tp->t_ourmss) {
2143 bufsize = roundup(bufsize, tp->t_ourmss);
2144 if (bufsize > sb_max)
2145 bufsize = sb_max;
2146 (void) sbreserve(&so->so_rcv, bufsize, so);
2147 }
2148 }
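/*
 * Summary of the MSL selection above, assuming tcp_msl_enable is on and
 * the tcp_msl_{loop,local,remote} sysctls are left at 0 so the
 * TCPTV_MSL-derived fallbacks apply (TCPTV_MSL is nominally 30 seconds
 * of slow-timer ticks):
 *
 *	peer                                     t_msl
 *	loopback address                         TCPTV_MSL >> 2  (~7.5 s)
 *	tcp_rttlocal set, or peer on a
 *	directly connected network               TCPTV_MSL >> 1  (~15 s)
 *	anything else                            TCPTV_MSL       (~30 s)
 *
 * A non-zero sysctl value overrides the corresponding fallback.
 */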
2149
2150 /*
2151 * Check if there's an initial rtt or rttvar. Convert from the
2152 * route-table units to scaled multiples of the slow timeout timer.
2153 * Called only during the 3-way handshake.
2154 */
2155 void
2156 tcp_rmx_rtt(struct tcpcb *tp)
2157 {
2158 #ifdef RTV_RTT
2159 struct rtentry *rt = NULL;
2160 int rtt;
2161
2162 #ifdef DIAGNOSTIC
2163 if (tp->t_inpcb && tp->t_in6pcb)
2164 panic("tcp_rmx_rtt: both t_inpcb and t_in6pcb are set");
2165 #endif
2166 #ifdef INET
2167 if (tp->t_inpcb)
2168 rt = in_pcbrtentry(tp->t_inpcb);
2169 #endif
2170 #ifdef INET6
2171 if (tp->t_in6pcb)
2172 rt = in6_pcbrtentry(tp->t_in6pcb);
2173 #endif
2174 if (rt == NULL)
2175 return;
2176
2177 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
2178 /*
2179 * XXX The lock bit for RTT indicates that the value
2180 * is also a minimum value; this is subject to time.
2181 */
2182 if (rt->rt_rmx.rmx_locks & RTV_RTT)
2183 TCPT_RANGESET(tp->t_rttmin,
2184 rtt / (RTM_RTTUNIT / PR_SLOWHZ),
2185 TCPTV_MIN, TCPTV_REXMTMAX);
2186 tp->t_srtt = rtt /
2187 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
2188 if (rt->rt_rmx.rmx_rttvar) {
2189 tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
2190 ((RTM_RTTUNIT / PR_SLOWHZ) >>
2191 (TCP_RTTVAR_SHIFT + 2));
2192 } else {
2193 /* Default variation is +- 1 rtt */
2194 tp->t_rttvar =
2195 tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
2196 }
2197 TCPT_RANGESET(tp->t_rxtcur,
2198 ((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
2199 tp->t_rttmin, TCPTV_REXMTMAX);
2200 }
2201 #endif
2202 }
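/*
 * Units example for the conversion above.  Route metrics keep RTT in
 * RTM_RTTUNIT units per second (microseconds), while t_srtt is a
 * fixed-point value tied to the PR_SLOWHZ timer.  Assuming the
 * traditional constants RTM_RTTUNIT = 1000000, PR_SLOWHZ = 2 and
 * TCP_RTT_SHIFT = 3, a stored rmx_rtt of 100000 (100 ms) becomes
 *
 *	t_srtt = 100000 / ((1000000 / 2) >> (3 + 2))
 *	       = 100000 / 15625 = 6
 *
 * i.e. 100 ms expressed in the same scaled units the live RTT estimator
 * maintains once real samples arrive.
 */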
2203
2204 tcp_seq tcp_iss_seq = 0; /* tcp initial seq # */
2205 u_int8_t tcp_iss_secret[16]; /* 128 bits; should be plenty */
2206
2207 /*
2208 * Get a new sequence value given a tcp control block
2209 */
2210 tcp_seq
2211 tcp_new_iss(struct tcpcb *tp, tcp_seq addin)
2212 {
2213
2214 #ifdef INET
2215 if (tp->t_inpcb != NULL) {
2216 return (tcp_new_iss1(&tp->t_inpcb->inp_laddr,
2217 &tp->t_inpcb->inp_faddr, tp->t_inpcb->inp_lport,
2218 tp->t_inpcb->inp_fport, sizeof(tp->t_inpcb->inp_laddr),
2219 addin));
2220 }
2221 #endif
2222 #ifdef INET6
2223 if (tp->t_in6pcb != NULL) {
2224 return (tcp_new_iss1(&tp->t_in6pcb->in6p_laddr,
2225 &tp->t_in6pcb->in6p_faddr, tp->t_in6pcb->in6p_lport,
2226 tp->t_in6pcb->in6p_fport, sizeof(tp->t_in6pcb->in6p_laddr),
2227 addin));
2228 }
2229 #endif
2230 /* Not possible. */
2231 panic("tcp_new_iss");
2232 }
2233
2234 /*
2235 * This routine actually generates a new TCP initial sequence number.
2236 */
2237 tcp_seq
2238 tcp_new_iss1(void *laddr, void *faddr, u_int16_t lport, u_int16_t fport,
2239 size_t addrsz, tcp_seq addin)
2240 {
2241 tcp_seq tcp_iss;
2242
2243 static bool tcp_iss_gotten_secret;
2244
2245 /*
2246 * If we haven't been here before, initialize our cryptographic
2247 * hash secret.
2248 */
2249 if (tcp_iss_gotten_secret == false) {
2250 cprng_strong(kern_cprng,
2251 tcp_iss_secret, sizeof(tcp_iss_secret), FASYNC);
2252 tcp_iss_gotten_secret = true;
2253 }
2254
2255 if (tcp_do_rfc1948) {
2256 MD5_CTX ctx;
2257 u_int8_t hash[16]; /* XXX MD5 knowledge */
2258
2259 /*
2260 * Compute the base value of the ISS. It is a hash
2261 * of (saddr, sport, daddr, dport, secret).
2262 */
2263 MD5Init(&ctx);
2264
2265 MD5Update(&ctx, (u_char *) laddr, addrsz);
2266 MD5Update(&ctx, (u_char *) &lport, sizeof(lport));
2267
2268 MD5Update(&ctx, (u_char *) faddr, addrsz);
2269 MD5Update(&ctx, (u_char *) &fport, sizeof(fport));
2270
2271 MD5Update(&ctx, tcp_iss_secret, sizeof(tcp_iss_secret));
2272
2273 MD5Final(hash, &ctx);
2274
2275 memcpy(&tcp_iss, hash, sizeof(tcp_iss));
2276
2277 /*
2278 * Now increment our "timer", and add it in to
2279 * the computed value.
2280 *
2281 * XXX Use `addin'?
2282 * XXX TCP_ISSINCR too large to use?
2283 */
2284 tcp_iss_seq += TCP_ISSINCR;
2285 #ifdef TCPISS_DEBUG
2286 printf("ISS hash 0x%08x, ", tcp_iss);
2287 #endif
2288 tcp_iss += tcp_iss_seq + addin;
2289 #ifdef TCPISS_DEBUG
2290 printf("new ISS 0x%08x\n", tcp_iss);
2291 #endif
2292 } else {
2293 /*
2294 * Randomize.
2295 */
2296 tcp_iss = cprng_fast32();
2297
2298 /*
2299 * If we were asked to add some amount to a known value,
2300 * we will take a random value obtained above, mask off
2301 * the upper bits, and add in the known value. We also
2302 * add in a constant to ensure that we are at least a
2303 * certain distance from the original value.
2304 *
2305 * This is used when an old connection is in timed wait
2306 * and we have a new one coming in, for instance.
2307 */
2308 if (addin != 0) {
2309 #ifdef TCPISS_DEBUG
2310 printf("Random %08x, ", tcp_iss);
2311 #endif
2312 tcp_iss &= TCP_ISS_RANDOM_MASK;
2313 tcp_iss += addin + TCP_ISSINCR;
2314 #ifdef TCPISS_DEBUG
2315 printf("Old ISS %08x, ISS %08x\n", addin, tcp_iss);
2316 #endif
2317 } else {
2318 tcp_iss &= TCP_ISS_RANDOM_MASK;
2319 tcp_iss += tcp_iss_seq;
2320 tcp_iss_seq += TCP_ISSINCR;
2321 #ifdef TCPISS_DEBUG
2322 printf("ISS %08x\n", tcp_iss);
2323 #endif
2324 }
2325 }
2326
2327 if (tcp_compat_42) {
2328 /*
2329 * Limit it to the positive range for really old TCP
2330 * implementations.
2331 * Just AND off the top bit instead of checking if it
2332 * is set first - saves a branch 50% of the time.
2333 */
2334 tcp_iss &= 0x7fffffff; /* XXX */
2335 }
2336
2337 return (tcp_iss);
2338 }
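/*
 * The two generation strategies above, written out as illustrative
 * formulas (not additional code paths):
 *
 *   RFC 1948 style (tcp_do_rfc1948 set):
 *	ISS = trunc32(MD5(laddr, lport, faddr, fport, secret))
 *	      + tcp_iss_seq + addin
 *     Each 4-tuple therefore advances through its own sequence space,
 *     and observing one connection's ISS reveals nothing about
 *     another's.
 *
 *   Randomized style (the default otherwise):
 *	ISS = (cprng_fast32() & TCP_ISS_RANDOM_MASK) + tcp_iss_seq
 *     or, when reusing a 4-tuple that is still in TIME_WAIT
 *     (addin != 0):
 *	ISS = (cprng_fast32() & TCP_ISS_RANDOM_MASK) + addin + TCP_ISSINCR
 *     which places the new ISS at least TCP_ISSINCR beyond the old
 *     connection's sequence numbers.
 */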
2339
2340 #if defined(IPSEC)
2341 /* compute ESP/AH header size for TCP, including outer IP header. */
2342 size_t
2343 ipsec4_hdrsiz_tcp(struct tcpcb *tp)
2344 {
2345 struct inpcb *inp;
2346 size_t hdrsiz;
2347
2348 /* XXX mapped addr case (tp->t_in6pcb) */
2349 if (!tp || !tp->t_template || !(inp = tp->t_inpcb))
2350 return 0;
2351 switch (tp->t_family) {
2352 case AF_INET:
2353 /* XXX: should use correct direction. */
2354 hdrsiz = ipsec4_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, inp);
2355 break;
2356 default:
2357 hdrsiz = 0;
2358 break;
2359 }
2360
2361 return hdrsiz;
2362 }
2363
2364 #ifdef INET6
2365 size_t
2366 ipsec6_hdrsiz_tcp(struct tcpcb *tp)
2367 {
2368 struct in6pcb *in6p;
2369 size_t hdrsiz;
2370
2371 if (!tp || !tp->t_template || !(in6p = tp->t_in6pcb))
2372 return 0;
2373 switch (tp->t_family) {
2374 case AF_INET6:
2375 /* XXX: should use correct direction. */
2376 hdrsiz = ipsec6_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, in6p);
2377 break;
2378 case AF_INET:
2379 /* mapped address case - tricky */
2380 default:
2381 hdrsiz = 0;
2382 break;
2383 }
2384
2385 return hdrsiz;
2386 }
2387 #endif
2388 #endif /*IPSEC*/
2389
2390 /*
2391 * Determine the length of the TCP options for this connection.
2392 *
2393 * XXX: What do we do for SACK, when we add that? Just reserve
2394 * all of the space? Otherwise we can't exactly be incrementing
2395 * cwnd by an amount that varies depending on the amount we last
2396 * had to SACK!
2397 */
2398
2399 u_int
2400 tcp_optlen(struct tcpcb *tp)
2401 {
2402 u_int optlen;
2403
2404 optlen = 0;
2405 if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
2406 (TF_REQ_TSTMP | TF_RCVD_TSTMP))
2407 optlen += TCPOLEN_TSTAMP_APPA;
2408
2409 #ifdef TCP_SIGNATURE
2410 if (tp->t_flags & TF_SIGNATURE)
2411 optlen += TCPOLEN_SIGNATURE + 2;
2412 #endif /* TCP_SIGNATURE */
2413
2414 return optlen;
2415 }
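/*
 * Example: with timestamps negotiated in both directions (TF_REQ_TSTMP
 * and TF_RCVD_TSTMP set, TF_NOOPT clear) the per-segment option
 * overhead is TCPOLEN_TSTAMP_APPA = 12 bytes; with TCP_SIGNATURE
 * (TCP-MD5, RFC 2385) also in use it grows by TCPOLEN_SIGNATURE + 2 =
 * 20 bytes, for 32 bytes of options in every segment.  This is the
 * amount tcp_mss_from_peer() subtracts from the peer's offer before
 * setting t_segsz.
 */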
2416
2417 u_int
2418 tcp_hdrsz(struct tcpcb *tp)
2419 {
2420 u_int hlen;
2421
2422 switch (tp->t_family) {
2423 #ifdef INET6
2424 case AF_INET6:
2425 hlen = sizeof(struct ip6_hdr);
2426 break;
2427 #endif
2428 case AF_INET:
2429 hlen = sizeof(struct ip);
2430 break;
2431 default:
2432 hlen = 0;
2433 break;
2434 }
2435 hlen += sizeof(struct tcphdr);
2436
2437 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2438 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
2439 hlen += TCPOLEN_TSTAMP_APPA;
2440 #ifdef TCP_SIGNATURE
2441 if (tp->t_flags & TF_SIGNATURE)
2442 hlen += TCPOLEN_SIGLEN;
2443 #endif
2444 return hlen;
2445 }
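/*
 * Example totals from the computation above, with timestamps in use
 * and no signature option:
 *
 *	AF_INET:  sizeof(struct ip)      + sizeof(struct tcphdr) + 12 = 52
 *	AF_INET6: sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + 12 = 72
 */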
2446
2447 void
2448 tcp_statinc(u_int stat)
2449 {
2450
2451 KASSERT(stat < TCP_NSTATS);
2452 TCP_STATINC(stat);
2453 }
2454
2455 void
2456 tcp_statadd(u_int stat, uint64_t val)
2457 {
2458
2459 KASSERT(stat < TCP_NSTATS);
2460 TCP_STATADD(stat, val);
2461 }
2462