      1 /*	$NetBSD: tcp_subr.c,v 1.298 2025/02/26 04:49:45 andvar Exp $	*/
      2 
      3 /*
      4  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the project nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1997, 1998, 2000, 2001, 2008 The NetBSD Foundation, Inc.
     34  * All rights reserved.
     35  *
     36  * This code is derived from software contributed to The NetBSD Foundation
     37  * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
     38  * Facility, NASA Ames Research Center.
     39  *
     40  * Redistribution and use in source and binary forms, with or without
     41  * modification, are permitted provided that the following conditions
     42  * are met:
     43  * 1. Redistributions of source code must retain the above copyright
     44  *    notice, this list of conditions and the following disclaimer.
     45  * 2. Redistributions in binary form must reproduce the above copyright
     46  *    notice, this list of conditions and the following disclaimer in the
     47  *    documentation and/or other materials provided with the distribution.
     48  *
     49  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     50  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     51  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     52  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     53  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     54  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     55  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     56  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     57  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     58  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     59  * POSSIBILITY OF SUCH DAMAGE.
     60  */
     61 
     62 /*
     63  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
     64  *	The Regents of the University of California.  All rights reserved.
     65  *
     66  * Redistribution and use in source and binary forms, with or without
     67  * modification, are permitted provided that the following conditions
     68  * are met:
     69  * 1. Redistributions of source code must retain the above copyright
     70  *    notice, this list of conditions and the following disclaimer.
     71  * 2. Redistributions in binary form must reproduce the above copyright
     72  *    notice, this list of conditions and the following disclaimer in the
     73  *    documentation and/or other materials provided with the distribution.
     74  * 3. Neither the name of the University nor the names of its contributors
     75  *    may be used to endorse or promote products derived from this software
     76  *    without specific prior written permission.
     77  *
     78  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     79  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     80  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     81  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     82  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     83  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     84  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     85  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     86  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     87  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     88  * SUCH DAMAGE.
     89  *
     90  *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
     91  */
     92 
     93 #include <sys/cdefs.h>
     94 __KERNEL_RCSID(0, "$NetBSD: tcp_subr.c,v 1.298 2025/02/26 04:49:45 andvar Exp $");
     95 
     96 #ifdef _KERNEL_OPT
     97 #include "opt_inet.h"
     98 #include "opt_ipsec.h"
     99 #include "opt_inet_csum.h"
    100 #include "opt_mbuftrace.h"
    101 #endif
    102 
    103 #include <sys/param.h>
    104 #include <sys/atomic.h>
    105 #include <sys/proc.h>
    106 #include <sys/systm.h>
    107 #include <sys/mbuf.h>
    108 #include <sys/once.h>
    109 #include <sys/socket.h>
    110 #include <sys/socketvar.h>
    111 #include <sys/protosw.h>
    112 #include <sys/errno.h>
    113 #include <sys/kernel.h>
    114 #include <sys/pool.h>
    115 #include <sys/md5.h>
    116 #include <sys/cprng.h>
    117 
    118 #include <net/route.h>
    119 #include <net/if.h>
    120 
    121 #include <netinet/in.h>
    122 #include <netinet/in_systm.h>
    123 #include <netinet/ip.h>
    124 #include <netinet/in_pcb.h>
    125 #include <netinet/ip_var.h>
    126 #include <netinet/ip_icmp.h>
    127 
    128 #ifdef INET6
    129 #include <netinet/ip6.h>
    130 #include <netinet6/in6_pcb.h>
    131 #include <netinet6/ip6_var.h>
    132 #include <netinet6/in6_var.h>
    133 #include <netinet6/ip6protosw.h>
    134 #include <netinet/icmp6.h>
    135 #include <netinet6/nd6.h>
    136 #endif
    137 
    138 #include <netinet/tcp.h>
    139 #include <netinet/tcp_fsm.h>
    140 #include <netinet/tcp_seq.h>
    141 #include <netinet/tcp_timer.h>
    142 #include <netinet/tcp_var.h>
    143 #include <netinet/tcp_vtw.h>
    144 #include <netinet/tcp_private.h>
    145 #include <netinet/tcp_congctl.h>
    146 #include <netinet/tcp_syncache.h>
    147 
    148 #ifdef IPSEC
    149 #include <netipsec/ipsec.h>
    150 #ifdef INET6
    151 #include <netipsec/ipsec6.h>
    152 #endif
    153 #include <netipsec/key.h>
    154 #endif
    155 
    156 
    157 struct	inpcbtable tcbtable;	/* head of queue of active tcpcb's */
    158 u_int32_t tcp_now;		/* slow ticks, for RFC 1323 timestamps */
    159 
    160 percpu_t *tcpstat_percpu;
    161 
    162 /* patchable/settable parameters for tcp */
    163 int 	tcp_mssdflt = TCP_MSS;
    164 int	tcp_minmss = TCP_MINMSS;
    165 int 	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
    166 int	tcp_do_rfc1323 = 1;	/* window scaling / timestamps (obsolete) */
    167 int	tcp_do_rfc1948 = 0;	/* ISS by cryptographic hash */
    168 int	tcp_do_sack = 1;	/* selective acknowledgement */
    169 int	tcp_do_win_scale = 1;	/* RFC1323 window scaling */
    170 int	tcp_do_timestamps = 1;	/* RFC1323 timestamps */
    171 int	tcp_ack_on_push = 0;	/* set to enable immediate ACK-on-PUSH */
    172 int	tcp_do_ecn = 0;		/* Explicit Congestion Notification */
    173 #ifndef TCP_INIT_WIN
    174 #define	TCP_INIT_WIN	4	/* initial slow start window */
    175 #endif
    176 #ifndef TCP_INIT_WIN_LOCAL
    177 #define	TCP_INIT_WIN_LOCAL 4	/* initial slow start window for local nets */
    178 #endif
    179 /*
     180  * Up to iw == 4 the cap grows slowly, reaching 3 * 1460; from iw == 5
     181  * on it is simply (iw) * 1460.  This simulates current behavior for iw == 4.
    182  */
    183 int tcp_init_win_max[] = {
    184 	 1 * 1460,
    185 	 1 * 1460,
    186 	 2 * 1460,
    187 	 2 * 1460,
    188 	 3 * 1460,
    189 	 5 * 1460,
    190 	 6 * 1460,
    191 	 7 * 1460,
    192 	 8 * 1460,
    193 	 9 * 1460,
    194 	10 * 1460
    195 };
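         /*
          * Illustrative sketch, never compiled: how the table above is meant
          * to interact with the TCP_INITIAL_WINDOW() macro from tcp_var.h.
          * The macro's exact definition is assumed here, not quoted; it is
          * taken to clamp iw * segsz by tcp_init_win_max[iw].
          */
         #if 0
         static u_long
         tcp_init_win_example(void)
         {

         	/*
         	 * With the default tcp_init_win of 4 and a 1460-byte segment,
         	 * the table caps the window at the traditional 3 segments.
         	 */
         	return MIN(4 * 1460, tcp_init_win_max[4]);	/* == 3 * 1460 */
         }
         #endif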
    196 int	tcp_init_win = TCP_INIT_WIN;
    197 int	tcp_init_win_local = TCP_INIT_WIN_LOCAL;
    198 int	tcp_mss_ifmtu = 0;
    199 int	tcp_rst_ppslim = 100;	/* 100pps */
    200 int	tcp_ackdrop_ppslim = 100;	/* 100pps */
    201 int	tcp_do_loopback_cksum = 0;
    202 int	tcp_do_abc = 1;		/* RFC3465 Appropriate byte counting. */
    203 int	tcp_abc_aggressive = 1;	/* 1: L=2*SMSS  0: L=1*SMSS */
    204 int	tcp_sack_tp_maxholes = 32;
    205 int	tcp_sack_globalmaxholes = 1024;
    206 int	tcp_sack_globalholes = 0;
    207 int	tcp_ecn_maxretries = 1;
    208 int	tcp_msl_enable = 1;		/* enable TIME_WAIT truncation	*/
    209 int	tcp_msl_loop   = PR_SLOWHZ;	/* MSL for loopback		*/
    210 int	tcp_msl_local  = 5 * PR_SLOWHZ;	/* MSL for 'local'		*/
    211 int	tcp_msl_remote = TCPTV_MSL;	/* MSL otherwise		*/
    212 int	tcp_msl_remote_threshold = TCPTV_SRTTDFLT;	/* RTT threshold */
    213 int	tcp_rttlocal = 0;		/* Use RTT to decide who's 'local' */
    214 
    215 int	tcp4_vtw_enable = 0;		/* 1 to enable */
    216 int	tcp6_vtw_enable = 0;		/* 1 to enable */
    217 int	tcp_vtw_was_enabled = 0;
    218 int	tcp_vtw_entries = 1 << 4;	/* 16 vestigial TIME_WAIT entries */
    219 
    220 /* tcb hash */
    221 #ifndef TCBHASHSIZE
    222 #define	TCBHASHSIZE	128
    223 #endif
    224 int	tcbhashsize = TCBHASHSIZE;
    225 
    226 int	tcp_freeq(struct tcpcb *);
    227 static int	tcp_iss_secret_init(void);
    228 
    229 static void	tcp_mtudisc_callback(struct in_addr);
    230 
    231 #ifdef INET6
    232 static void	tcp6_mtudisc(struct inpcb *, int);
    233 #endif
    234 
    235 static struct pool tcpcb_pool;
    236 
    237 static int tcp_drainwanted;
    238 
    239 #ifdef TCP_CSUM_COUNTERS
    240 #include <sys/device.h>
    241 
    242 struct evcnt tcp_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    243     NULL, "tcp", "hwcsum bad");
    244 struct evcnt tcp_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    245     NULL, "tcp", "hwcsum ok");
    246 struct evcnt tcp_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    247     NULL, "tcp", "hwcsum data");
    248 struct evcnt tcp_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    249     NULL, "tcp", "swcsum");
    250 
    251 EVCNT_ATTACH_STATIC(tcp_hwcsum_bad);
    252 EVCNT_ATTACH_STATIC(tcp_hwcsum_ok);
    253 EVCNT_ATTACH_STATIC(tcp_hwcsum_data);
    254 EVCNT_ATTACH_STATIC(tcp_swcsum);
    255 
    256 #if defined(INET6)
    257 struct evcnt tcp6_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    258     NULL, "tcp6", "hwcsum bad");
    259 struct evcnt tcp6_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    260     NULL, "tcp6", "hwcsum ok");
    261 struct evcnt tcp6_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    262     NULL, "tcp6", "hwcsum data");
    263 struct evcnt tcp6_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    264     NULL, "tcp6", "swcsum");
    265 
    266 EVCNT_ATTACH_STATIC(tcp6_hwcsum_bad);
    267 EVCNT_ATTACH_STATIC(tcp6_hwcsum_ok);
    268 EVCNT_ATTACH_STATIC(tcp6_hwcsum_data);
    269 EVCNT_ATTACH_STATIC(tcp6_swcsum);
    270 #endif /* defined(INET6) */
    271 #endif /* TCP_CSUM_COUNTERS */
    272 
    273 
    274 #ifdef TCP_OUTPUT_COUNTERS
    275 #include <sys/device.h>
    276 
    277 struct evcnt tcp_output_bigheader = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    278     NULL, "tcp", "output big header");
    279 struct evcnt tcp_output_predict_hit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    280     NULL, "tcp", "output predict hit");
    281 struct evcnt tcp_output_predict_miss = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    282     NULL, "tcp", "output predict miss");
    283 struct evcnt tcp_output_copysmall = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    284     NULL, "tcp", "output copy small");
    285 struct evcnt tcp_output_copybig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    286     NULL, "tcp", "output copy big");
    287 struct evcnt tcp_output_refbig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    288     NULL, "tcp", "output reference big");
    289 
    290 EVCNT_ATTACH_STATIC(tcp_output_bigheader);
    291 EVCNT_ATTACH_STATIC(tcp_output_predict_hit);
    292 EVCNT_ATTACH_STATIC(tcp_output_predict_miss);
    293 EVCNT_ATTACH_STATIC(tcp_output_copysmall);
    294 EVCNT_ATTACH_STATIC(tcp_output_copybig);
    295 EVCNT_ATTACH_STATIC(tcp_output_refbig);
    296 
    297 #endif /* TCP_OUTPUT_COUNTERS */
    298 
    299 #ifdef TCP_REASS_COUNTERS
    300 #include <sys/device.h>
    301 
    302 struct evcnt tcp_reass_ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    303     NULL, "tcp_reass", "calls");
    304 struct evcnt tcp_reass_empty = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    305     &tcp_reass_, "tcp_reass", "insert into empty queue");
    306 struct evcnt tcp_reass_iteration[8] = {
    307     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", ">7 iterations"),
    308     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "1 iteration"),
    309     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "2 iterations"),
    310     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "3 iterations"),
    311     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "4 iterations"),
    312     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "5 iterations"),
    313     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "6 iterations"),
    314     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "7 iterations"),
    315 };
    316 struct evcnt tcp_reass_prependfirst = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    317     &tcp_reass_, "tcp_reass", "prepend to first");
    318 struct evcnt tcp_reass_prepend = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    319     &tcp_reass_, "tcp_reass", "prepend");
    320 struct evcnt tcp_reass_insert = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    321     &tcp_reass_, "tcp_reass", "insert");
    322 struct evcnt tcp_reass_inserttail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    323     &tcp_reass_, "tcp_reass", "insert at tail");
    324 struct evcnt tcp_reass_append = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    325     &tcp_reass_, "tcp_reass", "append");
    326 struct evcnt tcp_reass_appendtail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    327     &tcp_reass_, "tcp_reass", "append to tail fragment");
    328 struct evcnt tcp_reass_overlaptail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    329     &tcp_reass_, "tcp_reass", "overlap at end");
    330 struct evcnt tcp_reass_overlapfront = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    331     &tcp_reass_, "tcp_reass", "overlap at start");
    332 struct evcnt tcp_reass_segdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    333     &tcp_reass_, "tcp_reass", "duplicate segment");
    334 struct evcnt tcp_reass_fragdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    335     &tcp_reass_, "tcp_reass", "duplicate fragment");
    336 
    337 EVCNT_ATTACH_STATIC(tcp_reass_);
    338 EVCNT_ATTACH_STATIC(tcp_reass_empty);
    339 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 0);
    340 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 1);
    341 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 2);
    342 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 3);
    343 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 4);
    344 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 5);
    345 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 6);
    346 EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 7);
    347 EVCNT_ATTACH_STATIC(tcp_reass_prependfirst);
    348 EVCNT_ATTACH_STATIC(tcp_reass_prepend);
    349 EVCNT_ATTACH_STATIC(tcp_reass_insert);
    350 EVCNT_ATTACH_STATIC(tcp_reass_inserttail);
    351 EVCNT_ATTACH_STATIC(tcp_reass_append);
    352 EVCNT_ATTACH_STATIC(tcp_reass_appendtail);
    353 EVCNT_ATTACH_STATIC(tcp_reass_overlaptail);
    354 EVCNT_ATTACH_STATIC(tcp_reass_overlapfront);
    355 EVCNT_ATTACH_STATIC(tcp_reass_segdup);
    356 EVCNT_ATTACH_STATIC(tcp_reass_fragdup);
    357 
    358 #endif /* TCP_REASS_COUNTERS */
    359 
    360 #ifdef MBUFTRACE
    361 struct mowner tcp_mowner = MOWNER_INIT("tcp", "");
    362 struct mowner tcp_rx_mowner = MOWNER_INIT("tcp", "rx");
    363 struct mowner tcp_tx_mowner = MOWNER_INIT("tcp", "tx");
    364 struct mowner tcp_sock_mowner = MOWNER_INIT("tcp", "sock");
    365 struct mowner tcp_sock_rx_mowner = MOWNER_INIT("tcp", "sock rx");
    366 struct mowner tcp_sock_tx_mowner = MOWNER_INIT("tcp", "sock tx");
    367 #endif
    368 
    369 static int
    370 do_tcpinit(void)
    371 {
    372 
    373 	inpcb_init(&tcbtable, tcbhashsize, tcbhashsize);
    374 	pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
    375 	    NULL, IPL_SOFTNET);
    376 
    377 	tcp_usrreq_init();
    378 
    379 	/* Initialize timer state. */
    380 	tcp_timer_init();
    381 
    382 	/* Initialize the compressed state engine. */
    383 	syn_cache_init();
    384 
    385 	/* Initialize the congestion control algorithms. */
    386 	tcp_congctl_init();
    387 
    388 	/* Initialize the TCPCB template. */
    389 	tcp_tcpcb_template();
    390 
    391 	/* Initialize reassembly queue */
    392 	tcpipqent_init();
    393 
    394 	/* SACK */
    395 	tcp_sack_init();
    396 
    397 	MOWNER_ATTACH(&tcp_tx_mowner);
    398 	MOWNER_ATTACH(&tcp_rx_mowner);
    399 	MOWNER_ATTACH(&tcp_reass_mowner);
    400 	MOWNER_ATTACH(&tcp_sock_mowner);
    401 	MOWNER_ATTACH(&tcp_sock_tx_mowner);
    402 	MOWNER_ATTACH(&tcp_sock_rx_mowner);
    403 	MOWNER_ATTACH(&tcp_mowner);
    404 
    405 	tcpstat_percpu = percpu_alloc(sizeof(uint64_t) * TCP_NSTATS);
    406 
    407 	vtw_earlyinit();
    408 
    409 	tcp_slowtimo_init();
    410 
    411 	return 0;
    412 }
    413 
    414 void
    415 tcp_init_common(unsigned basehlen)
    416 {
    417 	static ONCE_DECL(dotcpinit);
    418 	unsigned hlen = basehlen + sizeof(struct tcphdr);
    419 	unsigned oldhlen;
    420 
    421 	if (max_linkhdr + hlen > MHLEN)
    422 		panic("tcp_init");
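         	/*
         	 * Lock-free raise of the global max_protohdr: retry the CAS
         	 * until either our (larger) hlen is published or another
         	 * caller has already stored a value at least as large.
         	 */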
    423 	while ((oldhlen = max_protohdr) < hlen)
    424 		atomic_cas_uint(&max_protohdr, oldhlen, hlen);
    425 
    426 	RUN_ONCE(&dotcpinit, do_tcpinit);
    427 }
    428 
    429 /*
    430  * Tcp initialization
    431  */
    432 void
    433 tcp_init(void)
    434 {
    435 
    436 	icmp_mtudisc_callback_register(tcp_mtudisc_callback);
    437 
    438 	tcp_init_common(sizeof(struct ip));
    439 }
    440 
    441 /*
    442  * Create template to be used to send tcp packets on a connection.
     443  * Called after the host entry is created; allocates an mbuf and fills
     444  * in a skeletal TCP/IP header, minimizing the amount of work
     445  * necessary when the connection is used.
    446  */
    447 struct mbuf *
    448 tcp_template(struct tcpcb *tp)
    449 {
    450 	struct inpcb *inp = tp->t_inpcb;
    451 	struct tcphdr *n;
    452 	struct mbuf *m;
    453 	int hlen;
    454 
    455 	switch (tp->t_family) {
    456 	case AF_INET:
    457 		hlen = sizeof(struct ip);
    458 		if (inp->inp_af == AF_INET)
    459 			break;
    460 #ifdef INET6
    461 		if (inp->inp_af == AF_INET6) {
    462 			/* mapped addr case */
    463 			if (IN6_IS_ADDR_V4MAPPED(&in6p_laddr(inp))
    464 			 && IN6_IS_ADDR_V4MAPPED(&in6p_faddr(inp)))
    465 				break;
    466 		}
    467 #endif
    468 		return NULL;	/*EINVAL*/
    469 #ifdef INET6
    470 	case AF_INET6:
    471 		hlen = sizeof(struct ip6_hdr);
    472 		if (inp != NULL) {
     473 			/* more sanity checks? */
    474 			break;
    475 		}
    476 		return NULL;	/*EINVAL*/
    477 #endif
    478 	default:
    479 		return NULL;	/*EAFNOSUPPORT*/
    480 	}
    481 
    482 	KASSERT(hlen + sizeof(struct tcphdr) <= MCLBYTES);
    483 
    484 	m = tp->t_template;
    485 	if (m && m->m_len == hlen + sizeof(struct tcphdr)) {
    486 		;
    487 	} else {
    488 		m_freem(m);
    489 		m = tp->t_template = NULL;
    490 		MGETHDR(m, M_DONTWAIT, MT_HEADER);
    491 		if (m && hlen + sizeof(struct tcphdr) > MHLEN) {
    492 			MCLGET(m, M_DONTWAIT);
    493 			if ((m->m_flags & M_EXT) == 0) {
    494 				m_free(m);
    495 				m = NULL;
    496 			}
    497 		}
    498 		if (m == NULL)
    499 			return NULL;
    500 		MCLAIM(m, &tcp_mowner);
    501 		m->m_pkthdr.len = m->m_len = hlen + sizeof(struct tcphdr);
    502 	}
    503 
    504 	memset(mtod(m, void *), 0, m->m_len);
    505 
    506 	n = (struct tcphdr *)(mtod(m, char *) + hlen);
    507 
    508 	switch (tp->t_family) {
    509 	case AF_INET:
    510 	    {
    511 		struct ipovly *ipov;
    512 		mtod(m, struct ip *)->ip_v = 4;
    513 		mtod(m, struct ip *)->ip_hl = hlen >> 2;
    514 		ipov = mtod(m, struct ipovly *);
    515 		ipov->ih_pr = IPPROTO_TCP;
    516 		ipov->ih_len = htons(sizeof(struct tcphdr));
    517 		if (inp->inp_af == AF_INET) {
    518 			ipov->ih_src = in4p_laddr(inp);
    519 			ipov->ih_dst = in4p_faddr(inp);
    520 		}
    521 #ifdef INET6
    522 		else if (inp->inp_af == AF_INET6) {
    523 			/* mapped addr case */
    524 			bcopy(&in6p_laddr(inp).s6_addr32[3], &ipov->ih_src,
    525 				sizeof(ipov->ih_src));
    526 			bcopy(&in6p_faddr(inp).s6_addr32[3], &ipov->ih_dst,
    527 				sizeof(ipov->ih_dst));
    528 		}
    529 #endif
    530 
    531 		/*
    532 		 * Compute the pseudo-header portion of the checksum
    533 		 * now.  We incrementally add in the TCP option and
    534 		 * payload lengths later, and then compute the TCP
    535 		 * checksum right before the packet is sent off onto
    536 		 * the wire.
    537 		 */
    538 		n->th_sum = in_cksum_phdr(ipov->ih_src.s_addr,
    539 		    ipov->ih_dst.s_addr,
    540 		    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
    541 		break;
    542 	    }
    543 #ifdef INET6
    544 	case AF_INET6:
    545 	    {
    546 		struct ip6_hdr *ip6;
    547 		mtod(m, struct ip *)->ip_v = 6;
    548 		ip6 = mtod(m, struct ip6_hdr *);
    549 		ip6->ip6_nxt = IPPROTO_TCP;
    550 		ip6->ip6_plen = htons(sizeof(struct tcphdr));
    551 		ip6->ip6_src = in6p_laddr(inp);
    552 		ip6->ip6_dst = in6p_faddr(inp);
    553 		ip6->ip6_flow = in6p_flowinfo(inp) & IPV6_FLOWINFO_MASK;
    554 		if (ip6_auto_flowlabel) {
    555 			ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
    556 			ip6->ip6_flow |=
    557 			    (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
    558 		}
    559 		ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
    560 		ip6->ip6_vfc |= IPV6_VERSION;
    561 
    562 		/*
    563 		 * Compute the pseudo-header portion of the checksum
    564 		 * now.  We incrementally add in the TCP option and
    565 		 * payload lengths later, and then compute the TCP
    566 		 * checksum right before the packet is sent off onto
    567 		 * the wire.
    568 		 */
    569 		n->th_sum = in6_cksum_phdr(&in6p_laddr(inp),
    570 		    &in6p_faddr(inp), htonl(sizeof(struct tcphdr)),
    571 		    htonl(IPPROTO_TCP));
    572 		break;
    573 	    }
    574 #endif
    575 	}
    576 
    577 	n->th_sport = inp->inp_lport;
    578 	n->th_dport = inp->inp_fport;
    579 
    580 	n->th_seq = 0;
    581 	n->th_ack = 0;
    582 	n->th_x2 = 0;
    583 	n->th_off = 5;
    584 	n->th_flags = 0;
    585 	n->th_win = 0;
    586 	n->th_urp = 0;
    587 	return m;
    588 }
    589 
    590 /*
    591  * Send a single message to the TCP at address specified by
     592  * the given TCP/IP header.  If m == NULL, then we make a copy
     593  * of the header template mtemplate and send directly to the addressed
     594  * host.  This is used to force keep-alive messages out using the TCP
     595  * template for a connection tp->t_template.  If flags are given
     596  * then we send a message back to the TCP which originated the
     597  * segment th0, and discard the mbuf containing it and any other
     598  * attached mbufs.
    599  *
    600  * In any case the ack and sequence number of the transmitted
    601  * segment are as specified by the parameters.
    602  */
    603 int
    604 tcp_respond(struct tcpcb *tp, struct mbuf *mtemplate, struct mbuf *m,
    605     struct tcphdr *th0, tcp_seq ack, tcp_seq seq, int flags)
    606 {
    607 	struct route *ro;
    608 	int error, tlen, win = 0;
    609 	int hlen;
    610 	struct ip *ip;
    611 #ifdef INET6
    612 	struct ip6_hdr *ip6;
    613 #endif
    614 	int family;	/* family on packet, not inpcb! */
    615 	struct tcphdr *th;
    616 
    617 	if (tp != NULL && (flags & TH_RST) == 0) {
    618 		KASSERT(tp->t_inpcb != NULL);
    619 
    620 		win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
    621 	}
    622 
    623 	th = NULL;	/* Quell uninitialized warning */
    624 	ip = NULL;
    625 #ifdef INET6
    626 	ip6 = NULL;
    627 #endif
    628 	if (m == NULL) {
    629 		if (!mtemplate)
    630 			return EINVAL;
    631 
    632 		/* get family information from template */
    633 		switch (mtod(mtemplate, struct ip *)->ip_v) {
    634 		case 4:
    635 			family = AF_INET;
    636 			hlen = sizeof(struct ip);
    637 			break;
    638 #ifdef INET6
    639 		case 6:
    640 			family = AF_INET6;
    641 			hlen = sizeof(struct ip6_hdr);
    642 			break;
    643 #endif
    644 		default:
    645 			return EAFNOSUPPORT;
    646 		}
    647 
    648 		MGETHDR(m, M_DONTWAIT, MT_HEADER);
    649 		if (m) {
    650 			MCLAIM(m, &tcp_tx_mowner);
    651 			MCLGET(m, M_DONTWAIT);
    652 			if ((m->m_flags & M_EXT) == 0) {
    653 				m_free(m);
    654 				m = NULL;
    655 			}
    656 		}
    657 		if (m == NULL)
    658 			return ENOBUFS;
    659 
    660 		tlen = 0;
    661 
    662 		m->m_data += max_linkhdr;
    663 		bcopy(mtod(mtemplate, void *), mtod(m, void *),
    664 			mtemplate->m_len);
    665 		switch (family) {
    666 		case AF_INET:
    667 			ip = mtod(m, struct ip *);
    668 			th = (struct tcphdr *)(ip + 1);
    669 			break;
    670 #ifdef INET6
    671 		case AF_INET6:
    672 			ip6 = mtod(m, struct ip6_hdr *);
    673 			th = (struct tcphdr *)(ip6 + 1);
    674 			break;
    675 #endif
    676 		}
    677 		flags = TH_ACK;
    678 	} else {
    679 		if ((m->m_flags & M_PKTHDR) == 0) {
    680 			m_freem(m);
    681 			return EINVAL;
    682 		}
    683 		KASSERT(th0 != NULL);
    684 
    685 		/* get family information from m */
    686 		switch (mtod(m, struct ip *)->ip_v) {
    687 		case 4:
    688 			family = AF_INET;
    689 			hlen = sizeof(struct ip);
    690 			ip = mtod(m, struct ip *);
    691 			break;
    692 #ifdef INET6
    693 		case 6:
    694 			family = AF_INET6;
    695 			hlen = sizeof(struct ip6_hdr);
    696 			ip6 = mtod(m, struct ip6_hdr *);
    697 			break;
    698 #endif
    699 		default:
    700 			m_freem(m);
    701 			return EAFNOSUPPORT;
    702 		}
    703 		/* clear h/w csum flags inherited from rx packet */
    704 		m->m_pkthdr.csum_flags = 0;
    705 
    706 		if ((flags & TH_SYN) == 0 || sizeof(*th0) > (th0->th_off << 2))
    707 			tlen = sizeof(*th0);
    708 		else
    709 			tlen = th0->th_off << 2;
    710 
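         		/*
         		 * If the IP and TCP headers sit contiguously in a plain
         		 * (non-cluster) first mbuf, reuse that mbuf in place and
         		 * trim everything past the headers; otherwise copy the
         		 * two headers into a freshly allocated mbuf below.
         		 */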
    711 		if (m->m_len > hlen + tlen && (m->m_flags & M_EXT) == 0 &&
    712 		    mtod(m, char *) + hlen == (char *)th0) {
    713 			m->m_len = hlen + tlen;
    714 			m_freem(m->m_next);
    715 			m->m_next = NULL;
    716 		} else {
    717 			struct mbuf *n;
    718 
    719 			KASSERT(max_linkhdr + hlen + tlen <= MCLBYTES);
    720 
    721 			MGETHDR(n, M_DONTWAIT, MT_HEADER);
    722 			if (n && max_linkhdr + hlen + tlen > MHLEN) {
    723 				MCLGET(n, M_DONTWAIT);
    724 				if ((n->m_flags & M_EXT) == 0) {
    725 					m_freem(n);
    726 					n = NULL;
    727 				}
    728 			}
    729 			if (!n) {
    730 				m_freem(m);
    731 				return ENOBUFS;
    732 			}
    733 
    734 			MCLAIM(n, &tcp_tx_mowner);
    735 			n->m_data += max_linkhdr;
    736 			n->m_len = hlen + tlen;
    737 			m_copyback(n, 0, hlen, mtod(m, void *));
    738 			m_copyback(n, hlen, tlen, (void *)th0);
    739 
    740 			m_freem(m);
    741 			m = n;
    742 			n = NULL;
    743 		}
    744 
    745 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
    746 		switch (family) {
    747 		case AF_INET:
    748 			ip = mtod(m, struct ip *);
    749 			th = (struct tcphdr *)(ip + 1);
    750 			ip->ip_p = IPPROTO_TCP;
    751 			xchg(ip->ip_dst, ip->ip_src, struct in_addr);
    752 			ip->ip_p = IPPROTO_TCP;
    753 			break;
    754 #ifdef INET6
    755 		case AF_INET6:
    756 			ip6 = mtod(m, struct ip6_hdr *);
    757 			th = (struct tcphdr *)(ip6 + 1);
    758 			ip6->ip6_nxt = IPPROTO_TCP;
    759 			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
    760 			ip6->ip6_nxt = IPPROTO_TCP;
    761 			break;
    762 #endif
    763 		}
    764 		xchg(th->th_dport, th->th_sport, u_int16_t);
    765 #undef xchg
     766 		tlen = 0;	/* so the length computation below works for this path too */
    767 	}
    768 	th->th_seq = htonl(seq);
    769 	th->th_ack = htonl(ack);
    770 	th->th_x2 = 0;
    771 	if ((flags & TH_SYN) == 0) {
    772 		if (tp)
    773 			win >>= tp->rcv_scale;
    774 		if (win > TCP_MAXWIN)
    775 			win = TCP_MAXWIN;
    776 		th->th_win = htons((u_int16_t)win);
    777 		th->th_off = sizeof (struct tcphdr) >> 2;
    778 		tlen += sizeof(*th);
    779 	} else {
    780 		tlen += th->th_off << 2;
    781 	}
    782 	m->m_len = hlen + tlen;
    783 	m->m_pkthdr.len = hlen + tlen;
    784 	m_reset_rcvif(m);
    785 	th->th_flags = flags;
    786 	th->th_urp = 0;
    787 
    788 	switch (family) {
    789 	case AF_INET:
    790 	    {
    791 		struct ipovly *ipov = (struct ipovly *)ip;
    792 		memset(ipov->ih_x1, 0, sizeof ipov->ih_x1);
    793 		ipov->ih_len = htons((u_int16_t)tlen);
    794 
    795 		th->th_sum = 0;
    796 		th->th_sum = in_cksum(m, hlen + tlen);
    797 		ip->ip_len = htons(hlen + tlen);
    798 		ip->ip_ttl = ip_defttl;
    799 		break;
    800 	    }
    801 #ifdef INET6
    802 	case AF_INET6:
    803 	    {
    804 		th->th_sum = 0;
    805 		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
    806 		    tlen);
    807 		ip6->ip6_plen = htons(tlen);
    808 		if (tp && tp->t_inpcb->inp_af == AF_INET6)
    809 			ip6->ip6_hlim = in6pcb_selecthlim_rt(tp->t_inpcb);
    810 		else
    811 			ip6->ip6_hlim = ip6_defhlim;
    812 		ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK;
    813 		if (ip6_auto_flowlabel) {
    814 			ip6->ip6_flow |=
    815 			    (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
    816 		}
    817 		break;
    818 	    }
    819 #endif
    820 	}
    821 
    822 	if (tp != NULL && tp->t_inpcb->inp_af == AF_INET) {
    823 		ro = &tp->t_inpcb->inp_route;
    824 		KASSERT(family == AF_INET);
    825 		KASSERT(in_hosteq(ip->ip_dst, in4p_faddr(tp->t_inpcb)));
    826 	}
    827 #ifdef INET6
    828 	else if (tp != NULL && tp->t_inpcb->inp_af == AF_INET6) {
    829 		ro = (struct route *)&tp->t_inpcb->inp_route;
    830 
    831 #ifdef DIAGNOSTIC
    832 		if (family == AF_INET) {
    833 			if (!IN6_IS_ADDR_V4MAPPED(&in6p_faddr(tp->t_inpcb)))
    834 				panic("tcp_respond: not mapped addr");
    835 			if (memcmp(&ip->ip_dst,
    836 			    &in6p_faddr(tp->t_inpcb).s6_addr32[3],
    837 			    sizeof(ip->ip_dst)) != 0) {
    838 				panic("tcp_respond: ip_dst != in6p_faddr");
    839 			}
    840 		} else if (family == AF_INET6) {
    841 			if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
    842 			    &in6p_faddr(tp->t_inpcb)))
    843 				panic("tcp_respond: ip6_dst != in6p_faddr");
    844 		} else
    845 			panic("tcp_respond: address family mismatch");
    846 #endif
    847 	}
    848 #endif
    849 	else
    850 		ro = NULL;
    851 
    852 	switch (family) {
    853 	case AF_INET:
    854 		error = ip_output(m, NULL, ro,
    855 		    (tp && tp->t_mtudisc ? IP_MTUDISC : 0), NULL,
    856 		    tp ? tp->t_inpcb : NULL);
    857 		break;
    858 #ifdef INET6
    859 	case AF_INET6:
    860 		error = ip6_output(m, NULL, ro, 0, NULL,
    861 		    tp ? tp->t_inpcb : NULL, NULL);
    862 		break;
    863 #endif
    864 	default:
    865 		error = EAFNOSUPPORT;
    866 		break;
    867 	}
    868 
    869 	return error;
    870 }
    871 
    872 /*
    873  * Template TCPCB.  Rather than zeroing a new TCPCB and initializing
    874  * a bunch of members individually, we maintain this template for the
    875  * static and mostly-static components of the TCPCB, and copy it into
    876  * the new TCPCB instead.
    877  */
    878 static struct tcpcb tcpcb_template = {
    879 	.t_srtt = TCPTV_SRTTBASE,
    880 	.t_rttmin = TCPTV_MIN,
    881 
    882 	.snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT,
    883 	.snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT,
    884 	.snd_numholes = 0,
    885 	.snd_cubic_wmax = 0,
    886 	.snd_cubic_wmax_last = 0,
    887 	.snd_cubic_ctime = 0,
    888 
    889 	.t_partialacks = -1,
    890 	.t_bytes_acked = 0,
    891 	.t_sndrexmitpack = 0,
    892 	.t_rcvoopack = 0,
    893 	.t_sndzerowin = 0,
    894 };
    895 
    896 /*
    897  * Updates the TCPCB template whenever a parameter that would affect
    898  * the template is changed.
    899  */
    900 void
    901 tcp_tcpcb_template(void)
    902 {
    903 	struct tcpcb *tp = &tcpcb_template;
    904 	int flags;
    905 
    906 	tp->t_peermss = tcp_mssdflt;
    907 	tp->t_ourmss = tcp_mssdflt;
    908 	tp->t_segsz = tcp_mssdflt;
    909 
    910 	flags = 0;
    911 	if (tcp_do_rfc1323 && tcp_do_win_scale)
    912 		flags |= TF_REQ_SCALE;
    913 	if (tcp_do_rfc1323 && tcp_do_timestamps)
    914 		flags |= TF_REQ_TSTMP;
    915 	tp->t_flags = flags;
    916 
    917 	/*
    918 	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
    919 	 * rtt estimate.  Set rttvar so that srtt + 2 * rttvar gives
     920 	 * a reasonable initial retransmit time.
    921 	 */
    922 	tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
    923 	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
    924 	    TCPTV_MIN, TCPTV_REXMTMAX);
    925 
    926 	/* Keep Alive */
    927 	tp->t_keepinit = MIN(tcp_keepinit, TCP_TIMER_MAXTICKS);
    928 	tp->t_keepidle = MIN(tcp_keepidle, TCP_TIMER_MAXTICKS);
    929 	tp->t_keepintvl = MIN(tcp_keepintvl, TCP_TIMER_MAXTICKS);
    930 	tp->t_keepcnt = MAX(1, MIN(tcp_keepcnt, TCP_TIMER_MAXTICKS));
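         	/*
         	 * Clamp the per-probe interval in the computation below so that
         	 * t_maxidle (t_keepcnt * interval) can never exceed
         	 * TCP_TIMER_MAXTICKS.
         	 */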
    931 	tp->t_maxidle = tp->t_keepcnt * MIN(tp->t_keepintvl,
    932 	    TCP_TIMER_MAXTICKS/tp->t_keepcnt);
    933 
    934 	/* MSL */
    935 	tp->t_msl = TCPTV_MSL;
    936 }
    937 
    938 /*
    939  * Create a new TCP control block, making an
    940  * empty reassembly queue and hooking it to the argument
    941  * protocol control block.
    942  */
    943 struct tcpcb *
    944 tcp_newtcpcb(int family, struct inpcb *inp)
    945 {
    946 	struct tcpcb *tp;
    947 	int i;
    948 
    949 	/* XXX Consider using a pool_cache for speed. */
    950 	tp = pool_get(&tcpcb_pool, PR_NOWAIT);	/* splsoftnet via tcp_usrreq */
    951 	if (tp == NULL)
    952 		return NULL;
    953 	memcpy(tp, &tcpcb_template, sizeof(*tp));
    954 	TAILQ_INIT(&tp->segq);
    955 	TAILQ_INIT(&tp->timeq);
    956 	tp->t_family = family;		/* may be overridden later on */
    957 	TAILQ_INIT(&tp->snd_holes);
    958 	LIST_INIT(&tp->t_sc);		/* XXX can template this */
    959 
    960 	/* Don't sweat this loop; hopefully the compiler will unroll it. */
    961 	for (i = 0; i < TCPT_NTIMERS; i++) {
    962 		callout_init(&tp->t_timer[i], CALLOUT_MPSAFE);
    963 		TCP_TIMER_INIT(tp, i);
    964 	}
    965 	callout_init(&tp->t_delack_ch, CALLOUT_MPSAFE);
    966 
    967 	switch (family) {
    968 	case AF_INET:
    969 		in4p_ip(inp).ip_ttl = ip_defttl;
    970 		inp->inp_ppcb = (void *)tp;
    971 
    972 		tp->t_inpcb = inp;
    973 		tp->t_mtudisc = ip_mtudisc;
    974 		break;
    975 #ifdef INET6
    976 	case AF_INET6:
    977 		in6p_ip6(inp).ip6_hlim = in6pcb_selecthlim_rt(inp);
    978 		inp->inp_ppcb = (void *)tp;
    979 
    980 		tp->t_inpcb = inp;
    981 		/* for IPv6, always try to run path MTU discovery */
    982 		tp->t_mtudisc = 1;
    983 		break;
    984 #endif /* INET6 */
    985 	default:
    986 		for (i = 0; i < TCPT_NTIMERS; i++)
    987 			callout_destroy(&tp->t_timer[i]);
    988 		callout_destroy(&tp->t_delack_ch);
    989 		pool_put(&tcpcb_pool, tp);	/* splsoftnet via tcp_usrreq */
    990 		return NULL;
    991 	}
    992 
    993 	/*
    994 	 * Initialize our timebase.  When we send timestamps, we take
    995 	 * the delta from tcp_now -- this means each connection always
    996 	 * gets a timebase of 1, which makes it, among other things,
    997 	 * more difficult to determine how long a system has been up,
    998 	 * and thus how many TCP sequence increments have occurred.
    999 	 *
    1000 	 * We start with 1, because 0 doesn't work with Linux, which
    1001 	 * treats a timestamp of 0 in a SYN packet as bogus and disables
    1002 	 * timestamps.
   1003 	 */
   1004 	tp->ts_timebase = tcp_now - 1;
   1005 
   1006 	tcp_congctl_select(tp, tcp_congctl_global_name);
   1007 
   1008 	return tp;
   1009 }
   1010 
   1011 /*
   1012  * Drop a TCP connection, reporting
   1013  * the specified error.  If connection is synchronized,
   1014  * then send a RST to peer.
   1015  */
   1016 struct tcpcb *
   1017 tcp_drop(struct tcpcb *tp, int errno)
   1018 {
   1019 	struct socket *so;
   1020 
   1021 	KASSERT(tp->t_inpcb != NULL);
   1022 
   1023 	so = tp->t_inpcb->inp_socket;
   1024 	if (so == NULL)
   1025 		return NULL;
   1026 
   1027 	if (TCPS_HAVERCVDSYN(tp->t_state)) {
   1028 		tp->t_state = TCPS_CLOSED;
   1029 		(void) tcp_output(tp);
   1030 		TCP_STATINC(TCP_STAT_DROPS);
   1031 	} else
   1032 		TCP_STATINC(TCP_STAT_CONNDROPS);
   1033 	if (errno == ETIMEDOUT && tp->t_softerror)
   1034 		errno = tp->t_softerror;
   1035 	so->so_error = errno;
   1036 	return (tcp_close(tp));
   1037 }
   1038 
   1039 /*
   1040  * Close a TCP control block:
   1041  *	discard all space held by the tcp
   1042  *	discard internet protocol block
   1043  *	wake up any sleepers
   1044  */
   1045 struct tcpcb *
   1046 tcp_close(struct tcpcb *tp)
   1047 {
   1048 	struct inpcb *inp;
   1049 	struct socket *so;
   1050 #ifdef RTV_RTT
   1051 	struct rtentry *rt = NULL;
   1052 #endif
   1053 	struct route *ro;
   1054 	int j;
   1055 
   1056 	inp = tp->t_inpcb;
   1057 	so = inp->inp_socket;
   1058 	ro = &inp->inp_route;
   1059 
   1060 #ifdef RTV_RTT
   1061 	/*
   1062 	 * If we sent enough data to get some meaningful characteristics,
   1063 	 * save them in the routing entry.  'Enough' is arbitrarily
   1064 	 * defined as the sendpipesize (default 4K) * 16.  This would
   1065 	 * give us 16 rtt samples assuming we only get one sample per
   1066 	 * window (the usual case on a long haul net).  16 samples is
   1067 	 * enough for the srtt filter to converge to within 5% of the correct
   1068 	 * value; fewer samples and we could save a very bogus rtt.
   1069 	 *
   1070 	 * Don't update the default route's characteristics and don't
   1071 	 * update anything that the user "locked".
   1072 	 */
   1073 	if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
   1074 	    ro && (rt = rtcache_validate(ro)) != NULL &&
   1075 	    !in_nullhost(satocsin(rt_getkey(rt))->sin_addr)) {
   1076 		u_long i = 0;
   1077 
   1078 		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
   1079 			i = tp->t_srtt *
   1080 			    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
   1081 			if (rt->rt_rmx.rmx_rtt && i)
   1082 				/*
   1083 				 * filter this update to half the old & half
   1084 				 * the new values, converting scale.
   1085 				 * See route.h and tcp_var.h for a
   1086 				 * description of the scaling constants.
   1087 				 */
   1088 				rt->rt_rmx.rmx_rtt =
   1089 				    (rt->rt_rmx.rmx_rtt + i) / 2;
   1090 			else
   1091 				rt->rt_rmx.rmx_rtt = i;
   1092 		}
   1093 		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
   1094 			i = tp->t_rttvar *
   1095 			    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTTVAR_SHIFT + 2));
   1096 			if (rt->rt_rmx.rmx_rttvar && i)
   1097 				rt->rt_rmx.rmx_rttvar =
   1098 				    (rt->rt_rmx.rmx_rttvar + i) / 2;
   1099 			else
   1100 				rt->rt_rmx.rmx_rttvar = i;
   1101 		}
   1102 		/*
   1103 		 * update the pipelimit (ssthresh) if it has been updated
   1104 		 * already or if a pipesize was specified & the threshold
   1105 		 * got below half the pipesize.  I.e., wait for bad news
   1106 		 * before we start updating, then update on both good
   1107 		 * and bad news.
   1108 		 */
   1109 		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
   1110 		    (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) ||
   1111 		    i < (rt->rt_rmx.rmx_sendpipe / 2)) {
   1112 			/*
   1113 			 * convert the limit from user data bytes to
   1114 			 * packets then to packet data bytes.
   1115 			 */
   1116 			i = (i + tp->t_segsz / 2) / tp->t_segsz;
   1117 			if (i < 2)
   1118 				i = 2;
   1119 			i *= (u_long)(tp->t_segsz + sizeof (struct tcpiphdr));
   1120 			if (rt->rt_rmx.rmx_ssthresh)
   1121 				rt->rt_rmx.rmx_ssthresh =
   1122 				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
   1123 			else
   1124 				rt->rt_rmx.rmx_ssthresh = i;
   1125 		}
   1126 	}
   1127 	rtcache_unref(rt, ro);
   1128 #endif /* RTV_RTT */
   1129 	/* free the reassembly queue, if any */
   1130 	TCP_REASS_LOCK(tp);
   1131 	(void) tcp_freeq(tp);
   1132 	TCP_REASS_UNLOCK(tp);
   1133 
   1134 	/* free the SACK holes list. */
   1135 	tcp_free_sackholes(tp);
   1136 	tcp_congctl_release(tp);
   1137 	syn_cache_cleanup(tp);
   1138 
   1139 	if (tp->t_template) {
   1140 		m_free(tp->t_template);
   1141 		tp->t_template = NULL;
   1142 	}
   1143 
   1144 	/*
   1145 	 * Detaching the pcb will unlock the socket/tcpcb, and stopping
   1146 	 * the timers can also drop the lock.  We need to prevent access
   1147 	 * to the tcpcb as it's half torn down.  Flag the pcb as dead
   1148 	 * (prevents access by timers) and only then detach it.
   1149 	 */
   1150 	tp->t_flags |= TF_DEAD;
   1151 	inp->inp_ppcb = NULL;
   1152 	soisdisconnected(so);
   1153 	inpcb_destroy(inp);
   1154 	/*
    1155 	 * The pcb is no longer visible elsewhere, so we can safely release
   1156 	 * the lock in callout_halt() if needed.
   1157 	 */
   1158 	TCP_STATINC(TCP_STAT_CLOSED);
   1159 	for (j = 0; j < TCPT_NTIMERS; j++) {
   1160 		callout_halt(&tp->t_timer[j], softnet_lock);
   1161 		callout_destroy(&tp->t_timer[j]);
   1162 	}
   1163 	callout_halt(&tp->t_delack_ch, softnet_lock);
   1164 	callout_destroy(&tp->t_delack_ch);
   1165 	pool_put(&tcpcb_pool, tp);
   1166 
   1167 	return NULL;
   1168 }
   1169 
   1170 int
   1171 tcp_freeq(struct tcpcb *tp)
   1172 {
   1173 	struct ipqent *qe;
   1174 	int rv = 0;
   1175 
   1176 	TCP_REASS_LOCK_CHECK(tp);
   1177 
   1178 	while ((qe = TAILQ_FIRST(&tp->segq)) != NULL) {
   1179 		TAILQ_REMOVE(&tp->segq, qe, ipqe_q);
   1180 		TAILQ_REMOVE(&tp->timeq, qe, ipqe_timeq);
   1181 		m_freem(qe->ipqe_m);
   1182 		tcpipqent_free(qe);
   1183 		rv = 1;
   1184 	}
   1185 	tp->t_segqlen = 0;
   1186 	KASSERT(TAILQ_EMPTY(&tp->timeq));
   1187 	return (rv);
   1188 }
   1189 
   1190 void
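         /*
          * Fast timeout routine.  tcp_drainstub() below only latches a drain
          * request; the actual work is done here, from fast-timeout (callout)
          * context, rather than directly from the protocol drain hook.
          */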
   1191 tcp_fasttimo(void)
   1192 {
   1193 	if (tcp_drainwanted) {
   1194 		tcp_drain();
   1195 		tcp_drainwanted = 0;
   1196 	}
   1197 }
   1198 
   1199 void
   1200 tcp_drainstub(void)
   1201 {
   1202 	tcp_drainwanted = 1;
   1203 }
   1204 
   1205 /*
   1206  * Protocol drain routine.  Called when memory is in short supply.
   1207  * Called from pr_fasttimo thus a callout context.
   1208  */
   1209 void
   1210 tcp_drain(void)
   1211 {
   1212 	struct inpcb *inp;
   1213 	struct tcpcb *tp;
   1214 
   1215 	mutex_enter(softnet_lock);
   1216 	KERNEL_LOCK(1, NULL);
   1217 
   1218 	/*
   1219 	 * Free the sequence queue of all TCP connections.
   1220 	 */
   1221 	TAILQ_FOREACH(inp, &tcbtable.inpt_queue, inp_queue) {
   1222 		tp = intotcpcb(inp);
   1223 		if (tp != NULL) {
   1224 			/*
   1225 			 * If the tcpcb is already busy,
   1226 			 * just bail out now.
   1227 			 */
   1228 			if (tcp_reass_lock_try(tp) == 0)
   1229 				continue;
   1230 			if (tcp_freeq(tp))
   1231 				TCP_STATINC(TCP_STAT_CONNSDRAINED);
   1232 			TCP_REASS_UNLOCK(tp);
   1233 		}
   1234 	}
   1235 
   1236 	KERNEL_UNLOCK_ONE(NULL);
   1237 	mutex_exit(softnet_lock);
   1238 }
   1239 
   1240 /*
   1241  * Notify a tcp user of an asynchronous error;
    1242  * store the error as a soft error, but wake up the user
    1243  * (for now this won't do anything until one can select for soft errors).
   1244  */
   1245 void
   1246 tcp_notify(struct inpcb *inp, int error)
   1247 {
   1248 	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
   1249 	struct socket *so = inp->inp_socket;
   1250 
   1251 	/*
   1252 	 * Ignore some errors if we are hooked up.
   1253 	 * If connection hasn't completed, has retransmitted several times,
   1254 	 * and receives a second error, give up now.  This is better
   1255 	 * than waiting a long time to establish a connection that
   1256 	 * can never complete.
   1257 	 */
   1258 	if (tp->t_state == TCPS_ESTABLISHED &&
   1259 	     (error == EHOSTUNREACH || error == ENETUNREACH ||
   1260 	      error == EHOSTDOWN)) {
   1261 		return;
   1262 	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
   1263 	    tp->t_rxtshift > 3 && tp->t_softerror)
   1264 		so->so_error = error;
   1265 	else
   1266 		tp->t_softerror = error;
   1267 	cv_broadcast(&so->so_cv);
   1268 	sorwakeup(so);
   1269 	sowwakeup(so);
   1270 }
   1271 
   1272 #ifdef INET6
   1273 void *
   1274 tcp6_ctlinput(int cmd, const struct sockaddr *sa, void *d)
   1275 {
   1276 	struct tcphdr th;
   1277 	void (*notify)(struct inpcb *, int) = tcp_notify;
   1278 	int nmatch;
   1279 	struct ip6_hdr *ip6;
   1280 	const struct sockaddr_in6 *sa6_src = NULL;
   1281 	const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *)sa;
   1282 	struct mbuf *m;
   1283 	int off;
   1284 
   1285 	if (sa->sa_family != AF_INET6 ||
   1286 	    sa->sa_len != sizeof(struct sockaddr_in6))
   1287 		return NULL;
   1288 	if ((unsigned)cmd >= PRC_NCMDS)
   1289 		return NULL;
   1290 	else if (cmd == PRC_QUENCH) {
   1291 		/*
   1292 		 * Don't honor ICMP Source Quench messages meant for
   1293 		 * TCP connections.
   1294 		 */
   1295 		return NULL;
   1296 	} else if (PRC_IS_REDIRECT(cmd))
   1297 		notify = in6pcb_rtchange, d = NULL;
   1298 	else if (cmd == PRC_MSGSIZE)
   1299 		; /* special code is present, see below */
   1300 	else if (cmd == PRC_HOSTDEAD)
   1301 		d = NULL;
   1302 	else if (inet6ctlerrmap[cmd] == 0)
   1303 		return NULL;
   1304 
   1305 	/* if the parameter is from icmp6, decode it. */
   1306 	if (d != NULL) {
   1307 		struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
   1308 		m = ip6cp->ip6c_m;
   1309 		ip6 = ip6cp->ip6c_ip6;
   1310 		off = ip6cp->ip6c_off;
   1311 		sa6_src = ip6cp->ip6c_src;
   1312 	} else {
   1313 		m = NULL;
   1314 		ip6 = NULL;
   1315 		sa6_src = &sa6_any;
   1316 		off = 0;
   1317 	}
   1318 
   1319 	if (ip6) {
   1320 		/* check if we can safely examine src and dst ports */
   1321 		if (m->m_pkthdr.len < off + sizeof(th)) {
   1322 			if (cmd == PRC_MSGSIZE)
   1323 				icmp6_mtudisc_update((struct ip6ctlparam *)d, 0);
   1324 			return NULL;
   1325 		}
   1326 
   1327 		memset(&th, 0, sizeof(th));
   1328 		m_copydata(m, off, sizeof(th), (void *)&th);
   1329 
   1330 		if (cmd == PRC_MSGSIZE) {
   1331 			int valid = 0;
   1332 
   1333 			/*
   1334 			 * Check to see if we have a valid TCP connection
   1335 			 * corresponding to the address in the ICMPv6 message
   1336 			 * payload.
   1337 			 */
   1338 			if (in6pcb_lookup(&tcbtable, &sa6->sin6_addr,
   1339 			    th.th_dport,
   1340 			    (const struct in6_addr *)&sa6_src->sin6_addr,
   1341 						  th.th_sport, 0, 0))
   1342 				valid++;
   1343 
   1344 			/*
   1345 			 * Depending on the value of "valid" and routing table
   1346 			 * size (mtudisc_{hi,lo}wat), we will:
   1347 			 * - recalculate the new MTU and create the
   1348 			 *   corresponding routing entry, or
   1349 			 * - ignore the MTU change notification.
   1350 			 */
   1351 			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
   1352 
   1353 			/*
   1354 			 * no need to call in6pcb_notify, it should have been
   1355 			 * called via callback if necessary
   1356 			 */
   1357 			return NULL;
   1358 		}
   1359 
   1360 		nmatch = in6pcb_notify(&tcbtable, sa, th.th_dport,
   1361 		    (const struct sockaddr *)sa6_src, th.th_sport, cmd, NULL, notify);
   1362 		if (nmatch == 0 && syn_cache_count &&
   1363 		    (inet6ctlerrmap[cmd] == EHOSTUNREACH ||
   1364 		     inet6ctlerrmap[cmd] == ENETUNREACH ||
   1365 		     inet6ctlerrmap[cmd] == EHOSTDOWN))
   1366 			syn_cache_unreach((const struct sockaddr *)sa6_src,
   1367 					  sa, &th);
   1368 	} else {
   1369 		(void) in6pcb_notify(&tcbtable, sa, 0,
   1370 		    (const struct sockaddr *)sa6_src, 0, cmd, NULL, notify);
   1371 	}
   1372 
   1373 	return NULL;
   1374 }
   1375 #endif
   1376 
   1377 /* assumes that ip header and tcp header are contiguous on mbuf */
   1378 void *
   1379 tcp_ctlinput(int cmd, const struct sockaddr *sa, void *v)
   1380 {
   1381 	struct ip *ip = v;
   1382 	struct tcphdr *th;
   1383 	struct icmp *icp;
   1384 	extern const int inetctlerrmap[];
   1385 	void (*notify)(struct inpcb *, int) = tcp_notify;
   1386 	int errno;
   1387 	int nmatch;
   1388 	struct tcpcb *tp;
   1389 	u_int mtu;
   1390 	tcp_seq seq;
   1391 	struct inpcb *inp;
   1392 #ifdef INET6
   1393 	struct in6_addr src6, dst6;
   1394 #endif
   1395 
   1396 	if (sa->sa_family != AF_INET ||
   1397 	    sa->sa_len != sizeof(struct sockaddr_in))
   1398 		return NULL;
   1399 	if ((unsigned)cmd >= PRC_NCMDS)
   1400 		return NULL;
   1401 	errno = inetctlerrmap[cmd];
   1402 	if (cmd == PRC_QUENCH)
   1403 		/*
   1404 		 * Don't honor ICMP Source Quench messages meant for
   1405 		 * TCP connections.
   1406 		 */
   1407 		return NULL;
   1408 	else if (PRC_IS_REDIRECT(cmd))
   1409 		notify = inpcb_rtchange, ip = 0;
   1410 	else if (cmd == PRC_MSGSIZE && ip && ip->ip_v == 4) {
   1411 		/*
   1412 		 * Check to see if we have a valid TCP connection
   1413 		 * corresponding to the address in the ICMP message
   1414 		 * payload.
   1415 		 *
   1416 		 * Boundary check is made in icmp_input(), with ICMP_ADVLENMIN.
   1417 		 */
   1418 		th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
   1419 #ifdef INET6
   1420 		in6_in_2_v4mapin6(&ip->ip_src, &src6);
   1421 		in6_in_2_v4mapin6(&ip->ip_dst, &dst6);
   1422 #endif
   1423 		if ((inp = inpcb_lookup(&tcbtable, ip->ip_dst,
   1424 		    th->th_dport, ip->ip_src, th->th_sport, 0)) != NULL)
   1425 			;
   1426 #ifdef INET6
   1427 		else if ((inp = in6pcb_lookup(&tcbtable, &dst6,
   1428 		    th->th_dport, &src6, th->th_sport, 0, 0)) != NULL)
   1429 			;
   1430 #endif
   1431 		else
   1432 			return NULL;
   1433 
   1434 		/*
   1435 		 * Now that we've validated that we are actually communicating
   1436 		 * with the host indicated in the ICMP message, locate the
   1437 		 * ICMP header, recalculate the new MTU, and create the
   1438 		 * corresponding routing entry.
   1439 		 */
   1440 		icp = (struct icmp *)((char *)ip -
   1441 		    offsetof(struct icmp, icmp_ip));
   1442 		tp = intotcpcb(inp);
   1443 		if (tp == NULL)
   1444 			return NULL;
   1445 		seq = ntohl(th->th_seq);
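         		/*
         		 * Ignore the ICMP error if the quoted sequence number
         		 * does not correspond to data currently outstanding;
         		 * such a report is either stale or forged.
         		 */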
   1446 		if (SEQ_LT(seq, tp->snd_una) || SEQ_GT(seq, tp->snd_max))
   1447 			return NULL;
   1448 		/*
   1449 		 * If the ICMP message advertises a Next-Hop MTU
   1450 		 * equal or larger than the maximum packet size we have
   1451 		 * ever sent, drop the message.
   1452 		 */
   1453 		mtu = (u_int)ntohs(icp->icmp_nextmtu);
   1454 		if (mtu >= tp->t_pmtud_mtu_sent)
   1455 			return NULL;
   1456 		if (mtu >= tcp_hdrsz(tp) + tp->t_pmtud_mss_acked) {
   1457 			/*
   1458 			 * Calculate new MTU, and create corresponding
   1459 			 * route (traditional PMTUD).
   1460 			 */
   1461 			tp->t_flags &= ~TF_PMTUD_PEND;
   1462 			icmp_mtudisc(icp, ip->ip_dst);
   1463 		} else {
   1464 			/*
    1465 			 * Record the information from the ICMP
   1466 			 * message; act on it later.
   1467 			 * If we had already recorded an ICMP message,
   1468 			 * replace the old one only if the new message
   1469 			 * refers to an older TCP segment
   1470 			 */
   1471 			if (tp->t_flags & TF_PMTUD_PEND) {
   1472 				if (SEQ_LT(tp->t_pmtud_th_seq, seq))
   1473 					return NULL;
   1474 			} else
   1475 				tp->t_flags |= TF_PMTUD_PEND;
   1476 			tp->t_pmtud_th_seq = seq;
   1477 			tp->t_pmtud_nextmtu = icp->icmp_nextmtu;
   1478 			tp->t_pmtud_ip_len = icp->icmp_ip.ip_len;
   1479 			tp->t_pmtud_ip_hl = icp->icmp_ip.ip_hl;
   1480 		}
   1481 		return NULL;
   1482 	} else if (cmd == PRC_HOSTDEAD)
   1483 		ip = 0;
   1484 	else if (errno == 0)
   1485 		return NULL;
   1486 	if (ip && ip->ip_v == 4 && sa->sa_family == AF_INET) {
   1487 		th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
   1488 		nmatch = inpcb_notify(&tcbtable, satocsin(sa)->sin_addr,
   1489 		    th->th_dport, ip->ip_src, th->th_sport, errno, notify);
   1490 		if (nmatch == 0 && syn_cache_count &&
   1491 		    (inetctlerrmap[cmd] == EHOSTUNREACH ||
   1492 		    inetctlerrmap[cmd] == ENETUNREACH ||
   1493 		    inetctlerrmap[cmd] == EHOSTDOWN)) {
   1494 			struct sockaddr_in sin;
   1495 			memset(&sin, 0, sizeof(sin));
   1496 			sin.sin_len = sizeof(sin);
   1497 			sin.sin_family = AF_INET;
   1498 			sin.sin_port = th->th_sport;
   1499 			sin.sin_addr = ip->ip_src;
   1500 			syn_cache_unreach((struct sockaddr *)&sin, sa, th);
   1501 		}
   1502 
   1503 		/* XXX mapped address case */
   1504 	} else
   1505 		inpcb_notifyall(&tcbtable, satocsin(sa)->sin_addr, errno,
   1506 		    notify);
   1507 	return NULL;
   1508 }
   1509 
   1510 /*
   1511  * When a source quench is received, we are being notified of congestion.
   1512  * Close the congestion window down to the Loss Window (one segment).
   1513  * We will gradually open it again as we proceed.
   1514  */
   1515 void
   1516 tcp_quench(struct inpcb *inp)
   1517 {
   1518 	struct tcpcb *tp = intotcpcb(inp);
   1519 
   1520 	if (tp) {
   1521 		tp->snd_cwnd = tp->t_segsz;
   1522 		tp->t_bytes_acked = 0;
   1523 	}
   1524 }
   1525 
   1526 /*
   1527  * Path MTU Discovery handlers.
   1528  */
   1529 void
   1530 tcp_mtudisc_callback(struct in_addr faddr)
   1531 {
   1532 #ifdef INET6
   1533 	struct in6_addr in6;
   1534 #endif
   1535 
   1536 	inpcb_notifyall(&tcbtable, faddr, EMSGSIZE, tcp_mtudisc);
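         	/*
         	 * Also notify IPv6 sockets that reach this peer through a
         	 * v4-mapped address.
         	 */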
   1537 #ifdef INET6
   1538 	in6_in_2_v4mapin6(&faddr, &in6);
   1539 	tcp6_mtudisc_callback(&in6);
   1540 #endif
   1541 }
   1542 
   1543 /*
   1544  * On receipt of path MTU corrections, flush old route and replace it
   1545  * with the new one.  Retransmit all unacknowledged packets, to ensure
   1546  * that all packets will be received.
   1547  */
   1548 void
   1549 tcp_mtudisc(struct inpcb *inp, int errno)
   1550 {
   1551 	struct tcpcb *tp = intotcpcb(inp);
   1552 	struct rtentry *rt;
   1553 
   1554 	if (tp == NULL)
   1555 		return;
   1556 
   1557 	rt = inpcb_rtentry(inp);
   1558 	if (rt != NULL) {
   1559 		/*
   1560 		 * If this was not a host route, remove and realloc.
   1561 		 */
   1562 		if ((rt->rt_flags & RTF_HOST) == 0) {
   1563 			inpcb_rtentry_unref(rt, inp);
   1564 			inpcb_rtchange(inp, errno);
   1565 			if ((rt = inpcb_rtentry(inp)) == NULL)
   1566 				return;
   1567 		}
   1568 
   1569 		/*
   1570 		 * Slow start out of the error condition.  We
   1571 		 * use the MTU because we know it's smaller
   1572 		 * than the previously transmitted segment.
   1573 		 *
   1574 		 * Note: This is more conservative than the
   1575 		 * suggestion in draft-floyd-incr-init-win-03.
   1576 		 */
   1577 		if (rt->rt_rmx.rmx_mtu != 0)
   1578 			tp->snd_cwnd =
   1579 			    TCP_INITIAL_WINDOW(tcp_init_win,
   1580 			    rt->rt_rmx.rmx_mtu);
   1581 		inpcb_rtentry_unref(rt, inp);
   1582 	}
   1583 
   1584 	/*
   1585 	 * Resend unacknowledged packets.
   1586 	 */
   1587 	tp->snd_nxt = tp->sack_newdata = tp->snd_una;
   1588 	tcp_output(tp);
   1589 }
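
/*
 * A compressed sketch of the recovery that tcp_mtudisc() performs once a
 * smaller path MTU has been learned: shrink the congestion window to an
 * initial window based on the new MTU and rewind the send pointer so that
 * everything unacknowledged is retransmitted.  ex_initial_window() is a
 * simplified stand-in for TCP_INITIAL_WINDOW(); all ex_* names are made up.
 */
#if 0	/* Illustrative sketch only; not part of the build. */
#include <stdint.h>

struct ex_conn {
	uint32_t snd_una;	/* oldest unacknowledged sequence number */
	uint32_t snd_nxt;	/* next sequence number to send */
	uint32_t snd_cwnd;	/* congestion window, in bytes */
};

/* Simplified stand-in for TCP_INITIAL_WINDOW(): iw segments worth of bytes. */
static uint32_t
ex_initial_window(unsigned iw, uint32_t segsz)
{
	return iw * segsz;
}

static void
ex_mtu_decrease(struct ex_conn *c, uint32_t new_mtu, unsigned init_win)
{
	/* Slow start again from a window sized off the (smaller) MTU. */
	c->snd_cwnd = ex_initial_window(init_win, new_mtu);

	/* Rewind so the output path resends everything not yet ACKed. */
	c->snd_nxt = c->snd_una;
}
#endif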
   1590 
   1591 #ifdef INET6
   1592 /*
   1593  * Path MTU Discovery handlers.
   1594  */
   1595 void
   1596 tcp6_mtudisc_callback(struct in6_addr *faddr)
   1597 {
   1598 	struct sockaddr_in6 sin6;
   1599 
   1600 	memset(&sin6, 0, sizeof(sin6));
   1601 	sin6.sin6_family = AF_INET6;
   1602 	sin6.sin6_len = sizeof(struct sockaddr_in6);
   1603 	sin6.sin6_addr = *faddr;
   1604 	(void) in6pcb_notify(&tcbtable, (struct sockaddr *)&sin6, 0,
   1605 	    (const struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp6_mtudisc);
   1606 }
   1607 
   1608 void
   1609 tcp6_mtudisc(struct inpcb *inp, int errno)
   1610 {
   1611 	struct tcpcb *tp = intotcpcb(inp);
   1612 	struct rtentry *rt;
   1613 
   1614 	if (tp == NULL)
   1615 		return;
   1616 
   1617 	rt = in6pcb_rtentry(inp);
   1618 	if (rt != NULL) {
   1619 		/*
   1620 		 * If this was not a host route, remove and realloc.
   1621 		 */
   1622 		if ((rt->rt_flags & RTF_HOST) == 0) {
   1623 			in6pcb_rtentry_unref(rt, inp);
   1624 			in6pcb_rtchange(inp, errno);
   1625 			rt = in6pcb_rtentry(inp);
   1626 			if (rt == NULL)
   1627 				return;
   1628 		}
   1629 
   1630 		/*
   1631 		 * Slow start out of the error condition.  We
   1632 		 * use the MTU because we know it's smaller
   1633 		 * than the previously transmitted segment.
   1634 		 *
   1635 		 * Note: This is more conservative than the
   1636 		 * suggestion in draft-floyd-incr-init-win-03.
   1637 		 */
   1638 		if (rt->rt_rmx.rmx_mtu != 0) {
   1639 			tp->snd_cwnd = TCP_INITIAL_WINDOW(tcp_init_win,
   1640 			    rt->rt_rmx.rmx_mtu);
   1641 		}
   1642 		in6pcb_rtentry_unref(rt, inp);
   1643 	}
   1644 
   1645 	/*
   1646 	 * Resend unacknowledged packets.
   1647 	 */
   1648 	tp->snd_nxt = tp->sack_newdata = tp->snd_una;
   1649 	tcp_output(tp);
   1650 }
   1651 #endif /* INET6 */
   1652 
   1653 /*
   1654  * Compute the MSS to advertise to the peer.  Called only during
    1655  * the 3-way handshake.  If we are the server (the peer initiated the
    1656  * connection), we are called with a pointer to the interface
    1657  * on which the SYN packet arrived.  If we are the client (we
    1658  * initiated the connection), we are called with a pointer to the
    1659  * interface out of which this connection should go.
    1660  *
    1661  * NOTE: Do not subtract the IP option/extension header size nor the
    1662  * IPsec header size from the MSS advertisement.  The MSS option must
    1663  * hold the maximum segment size we can accept, so it must always be:
   1664  *	 max(if mtu) - ip header - tcp header
   1665  */
   1666 u_long
   1667 tcp_mss_to_advertise(const struct ifnet *ifp, int af)
   1668 {
   1669 	extern u_long in_maxmtu;
   1670 	u_long mss = 0;
   1671 	u_long hdrsiz;
   1672 
   1673 	/*
   1674 	 * In order to avoid defeating path MTU discovery on the peer,
   1675 	 * we advertise the max MTU of all attached networks as our MSS,
   1676 	 * per RFC 1191, section 3.1.
   1677 	 *
   1678 	 * We provide the option to advertise just the MTU of
   1679 	 * the interface on which we hope this connection will
   1680 	 * be receiving.  If we are responding to a SYN, we
   1681 	 * will have a pretty good idea about this, but when
   1682 	 * initiating a connection there is a bit more doubt.
   1683 	 *
   1684 	 * We also need to ensure that loopback has a large enough
   1685 	 * MSS, as the loopback MTU is never included in in_maxmtu.
   1686 	 */
   1687 
   1688 	if (ifp != NULL)
   1689 		switch (af) {
   1690 #ifdef INET6
   1691 		case AF_INET6:	/* FALLTHROUGH */
   1692 #endif
   1693 		case AF_INET:
   1694 			mss = ifp->if_mtu;
   1695 			break;
   1696 		}
   1697 
   1698 	if (tcp_mss_ifmtu == 0)
   1699 		switch (af) {
   1700 #ifdef INET6
   1701 		case AF_INET6:	/* FALLTHROUGH */
   1702 #endif
   1703 		case AF_INET:
   1704 			mss = uimax(in_maxmtu, mss);
   1705 			break;
   1706 		}
   1707 
   1708 	switch (af) {
   1709 	case AF_INET:
   1710 		hdrsiz = sizeof(struct ip);
   1711 		break;
   1712 #ifdef INET6
   1713 	case AF_INET6:
   1714 		hdrsiz = sizeof(struct ip6_hdr);
   1715 		break;
   1716 #endif
   1717 	default:
   1718 		hdrsiz = 0;
   1719 		break;
   1720 	}
   1721 	hdrsiz += sizeof(struct tcphdr);
   1722 	if (mss > hdrsiz)
   1723 		mss -= hdrsiz;
   1724 
   1725 	mss = uimax(tcp_mssdflt, mss);
    1726 	return mss;
   1727 }
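
/*
 * A worked, user-space illustration of the arithmetic above: the advertised
 * MSS is the chosen MTU minus the fixed IP and TCP header sizes, and never
 * less than the default.  The 20/40/20 byte sizes correspond to an
 * option-less IPv4 header, the fixed IPv6 header and an option-less TCP
 * header; the 536-byte default is only an example value standing in for
 * tcp_mssdflt.
 */
#if 0	/* Illustrative sketch only; not part of the build. */
#include <stdio.h>

#define EX_IP4_HDR	20	/* sizeof(struct ip), no options */
#define EX_IP6_HDR	40	/* sizeof(struct ip6_hdr) */
#define EX_TCP_HDR	20	/* sizeof(struct tcphdr), no options */
#define EX_MSS_DFLT	536	/* example default MSS */

static unsigned long
ex_mss_to_advertise(unsigned long mtu, int ipv6)
{
	unsigned long hdrsiz = (ipv6 ? EX_IP6_HDR : EX_IP4_HDR) + EX_TCP_HDR;
	unsigned long mss = mtu;

	if (mss > hdrsiz)
		mss -= hdrsiz;
	return mss > EX_MSS_DFLT ? mss : EX_MSS_DFLT;
}

int
main(void)
{
	/* Ethernet MTU 1500: 1460 for IPv4, 1440 for IPv6. */
	printf("%lu %lu\n",
	    ex_mss_to_advertise(1500, 0), ex_mss_to_advertise(1500, 1));
	return 0;
}
#endif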
   1728 
   1729 /*
   1730  * Set connection variables based on the peer's advertised MSS.
   1731  * We are passed the TCPCB for the actual connection.  If we
   1732  * are the server, we are called by the compressed state engine
   1733  * when the 3-way handshake is complete.  If we are the client,
   1734  * we are called when we receive the SYN,ACK from the server.
   1735  *
   1736  * NOTE: Our advertised MSS value must be initialized in the TCPCB
   1737  * before this routine is called!
   1738  */
   1739 void
   1740 tcp_mss_from_peer(struct tcpcb *tp, int offer)
   1741 {
   1742 	struct socket *so;
   1743 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
   1744 	struct rtentry *rt;
   1745 #endif
   1746 	u_long bufsize;
   1747 	int mss;
   1748 
   1749 	KASSERT(tp->t_inpcb != NULL);
   1750 
   1751 	so = NULL;
   1752 	rt = NULL;
   1753 
   1754 	so = tp->t_inpcb->inp_socket;
   1755 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
   1756 	rt = inpcb_rtentry(tp->t_inpcb);
   1757 #endif
   1758 
   1759 	/*
    1760 	 * As per RFC 1122, use the default MSS value unless the peer
    1761 	 * sent us an offer.  Do not accept offers of less than 256 bytes.
   1762 	 */
   1763 	mss = tcp_mssdflt;
   1764 	if (offer)
   1765 		mss = offer;
   1766 	mss = uimax(mss, 256);		/* sanity */
   1767 	tp->t_peermss = mss;
   1768 	mss -= tcp_optlen(tp);
   1769 	if (tp->t_inpcb->inp_af == AF_INET)
   1770 		mss -= ip_optlen(tp->t_inpcb);
   1771 #ifdef INET6
   1772 	if (tp->t_inpcb->inp_af == AF_INET6)
   1773 		mss -= ip6_optlen(tp->t_inpcb);
   1774 #endif
   1775 	/*
   1776 	 * XXX XXX What if mss goes negative or zero? This can happen if a
   1777 	 * socket has large IPv6 options. We crash below.
   1778 	 */
   1779 
   1780 	/*
   1781 	 * If there's a pipesize, change the socket buffer to that size.
   1782 	 * Make the socket buffer an integral number of MSS units.  If
   1783 	 * the MSS is larger than the socket buffer, artificially decrease
   1784 	 * the MSS.
   1785 	 */
   1786 #ifdef RTV_SPIPE
   1787 	if (rt != NULL && rt->rt_rmx.rmx_sendpipe != 0)
   1788 		bufsize = rt->rt_rmx.rmx_sendpipe;
   1789 	else
   1790 #endif
   1791 	{
   1792 		KASSERT(so != NULL);
   1793 		bufsize = so->so_snd.sb_hiwat;
   1794 	}
   1795 	if (bufsize < mss)
   1796 		mss = bufsize;
   1797 	else {
   1798 		bufsize = roundup(bufsize, mss);
   1799 		if (bufsize > sb_max)
   1800 			bufsize = sb_max;
   1801 		(void) sbreserve(&so->so_snd, bufsize, so);
   1802 	}
   1803 	tp->t_segsz = mss;
   1804 
   1805 #ifdef RTV_SSTHRESH
   1806 	if (rt != NULL && rt->rt_rmx.rmx_ssthresh) {
   1807 		/*
   1808 		 * There's some sort of gateway or interface buffer
   1809 		 * limit on the path.  Use this to set the slow
   1810 		 * start threshold, but set the threshold to no less
   1811 		 * than 2 * MSS.
   1812 		 */
   1813 		tp->snd_ssthresh = uimax(2 * mss, rt->rt_rmx.rmx_ssthresh);
   1814 	}
   1815 #endif
   1816 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
   1817 	inpcb_rtentry_unref(rt, tp->t_inpcb);
   1818 #endif
   1819 }
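
/*
 * A small user-space illustration of two of the adjustments made above:
 * the peer's offer is clamped to at least 256 bytes, and the send buffer
 * either caps the segment size or is rounded up to a whole number of
 * segments.  The option-length subtraction is omitted, EX_SB_MAX stands
 * in for sb_max, and all ex_* names are made up for the example.
 */
#if 0	/* Illustrative sketch only; not part of the build. */
#include <stdio.h>

#define EX_SB_MAX	(256UL * 1024)		/* stand-in for sb_max */

/* roundup(): round x up to a multiple of y. */
#define EX_ROUNDUP(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

static void
ex_mss_from_peer(unsigned long bufsize, long offer, long dflt)
{
	long mss = offer ? offer : dflt;

	if (mss < 256)			/* sanity clamp, as above */
		mss = 256;

	if (bufsize < (unsigned long)mss) {
		/* A tiny socket buffer artificially lowers the MSS. */
		mss = bufsize;
	} else {
		/* Otherwise grow the buffer to a whole number of segments. */
		bufsize = EX_ROUNDUP(bufsize, (unsigned long)mss);
		if (bufsize > EX_SB_MAX)
			bufsize = EX_SB_MAX;
	}
	printf("segsz %ld, sndbuf %lu\n", mss, bufsize);
}

int
main(void)
{
	ex_mss_from_peer(32768, 1460, 536);	/* segsz 1460, sndbuf 33580 */
	return 0;
}
#endif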
   1820 
   1821 /*
   1822  * Processing necessary when a TCP connection is established.
   1823  */
   1824 void
   1825 tcp_established(struct tcpcb *tp)
   1826 {
   1827 	struct socket *so;
   1828 #ifdef RTV_RPIPE
   1829 	struct rtentry *rt;
   1830 #endif
   1831 	u_long bufsize;
   1832 
   1833 	KASSERT(tp->t_inpcb != NULL);
   1834 
   1835 	so = NULL;
   1836 	rt = NULL;
   1837 
   1838 	/* This is a while() to reduce the dreadful stairstepping below */
   1839 	while (tp->t_inpcb->inp_af == AF_INET) {
   1840 		so = tp->t_inpcb->inp_socket;
   1841 #if defined(RTV_RPIPE)
   1842 		rt = inpcb_rtentry(tp->t_inpcb);
   1843 #endif
   1844 		if (__predict_true(tcp_msl_enable)) {
   1845 			if (in4p_laddr(tp->t_inpcb).s_addr == INADDR_LOOPBACK) {
   1846 				tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
   1847 				break;
   1848 			}
   1849 
   1850 			if (__predict_false(tcp_rttlocal)) {
   1851 				/* This may be adjusted by tcp_input */
   1852 				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
   1853 				break;
   1854 			}
   1855 			if (in_localaddr(in4p_faddr(tp->t_inpcb))) {
   1856 				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
   1857 				break;
   1858 			}
   1859 		}
   1860 		tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
   1861 		break;
   1862 	}
   1863 
   1864 	/* Clamp to a reasonable range.  */
   1865 	tp->t_msl = MIN(tp->t_msl, TCP_MAXMSL);
   1866 
   1867 #ifdef INET6
   1868 	while (tp->t_inpcb->inp_af == AF_INET6) {
   1869 		so = tp->t_inpcb->inp_socket;
   1870 #if defined(RTV_RPIPE)
   1871 		rt = in6pcb_rtentry(tp->t_inpcb);
   1872 #endif
   1873 		if (__predict_true(tcp_msl_enable)) {
   1874 			extern const struct in6_addr in6addr_loopback;
   1875 
   1876 			if (IN6_ARE_ADDR_EQUAL(&in6p_laddr(tp->t_inpcb),
   1877 			    &in6addr_loopback)) {
   1878 				tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
   1879 				break;
   1880 			}
   1881 
   1882 			if (__predict_false(tcp_rttlocal)) {
   1883 				/* This may be adjusted by tcp_input */
   1884 				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
   1885 				break;
   1886 			}
   1887 			if (in6_localaddr(&in6p_faddr(tp->t_inpcb))) {
   1888 				tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
   1889 				break;
   1890 			}
   1891 		}
   1892 		tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
   1893 		break;
   1894 	}
   1895 
   1896 	/* Clamp to a reasonable range.  */
   1897 	tp->t_msl = MIN(tp->t_msl, TCP_MAXMSL);
   1898 #endif
   1899 
   1900 	tp->t_state = TCPS_ESTABLISHED;
   1901 	TCP_TIMER_ARM(tp, TCPT_KEEP, tp->t_keepidle);
   1902 
   1903 #ifdef RTV_RPIPE
   1904 	if (rt != NULL && rt->rt_rmx.rmx_recvpipe != 0)
   1905 		bufsize = rt->rt_rmx.rmx_recvpipe;
   1906 	else
   1907 #endif
   1908 	{
   1909 		KASSERT(so != NULL);
   1910 		bufsize = so->so_rcv.sb_hiwat;
   1911 	}
   1912 	if (bufsize > tp->t_ourmss) {
   1913 		bufsize = roundup(bufsize, tp->t_ourmss);
   1914 		if (bufsize > sb_max)
   1915 			bufsize = sb_max;
   1916 		(void) sbreserve(&so->so_rcv, bufsize, so);
   1917 	}
   1918 #ifdef RTV_RPIPE
   1919 	inpcb_rtentry_unref(rt, tp->t_inpcb);
   1920 #endif
   1921 }
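
/*
 * The MSL selection above boils down to: a quarter of the base MSL for a
 * loopback peer, half for a peer on a local network (or when tcp_rttlocal
 * forces the local case), and the full MSL otherwise, each optionally
 * overridden by its sysctl and finally clamped.  A compressed sketch of
 * that decision; the tcp_msl_enable gate is omitted and the base/maximum
 * values are passed in rather than taken from TCPTV_MSL/TCP_MAXMSL.
 */
#if 0	/* Illustrative sketch only; not part of the build. */
enum ex_peer_class { EX_PEER_LOOPBACK, EX_PEER_LOCAL, EX_PEER_REMOTE };

static unsigned
ex_select_msl(enum ex_peer_class peer, unsigned base_msl, unsigned max_msl,
    unsigned override_loop, unsigned override_local, unsigned override_remote)
{
	unsigned msl;

	switch (peer) {
	case EX_PEER_LOOPBACK:
		msl = override_loop ? override_loop : base_msl / 4;
		break;
	case EX_PEER_LOCAL:
		msl = override_local ? override_local : base_msl / 2;
		break;
	default:
		msl = override_remote ? override_remote : base_msl;
		break;
	}
	return msl < max_msl ? msl : max_msl;	/* clamp, as above */
}
#endif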
   1922 
   1923 /*
   1924  * Check if there's an initial rtt or rttvar.  Convert from the
   1925  * route-table units to scaled multiples of the slow timeout timer.
   1926  * Called only during the 3-way handshake.
   1927  */
   1928 void
   1929 tcp_rmx_rtt(struct tcpcb *tp)
   1930 {
   1931 #ifdef RTV_RTT
   1932 	struct rtentry *rt = NULL;
   1933 	int rtt;
   1934 
   1935 	KASSERT(tp->t_inpcb != NULL);
   1936 
   1937 	rt = inpcb_rtentry(tp->t_inpcb);
   1938 	if (rt == NULL)
   1939 		return;
   1940 
   1941 	if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
   1942 		/*
    1943 		 * XXX The lock bit for RTT indicates that the value
   1944 		 * is also a minimum value; this is subject to time.
   1945 		 */
   1946 		if (rt->rt_rmx.rmx_locks & RTV_RTT)
   1947 			TCPT_RANGESET(tp->t_rttmin,
   1948 			    rtt / (RTM_RTTUNIT / PR_SLOWHZ),
   1949 			    TCPTV_MIN, TCPTV_REXMTMAX);
   1950 		tp->t_srtt = rtt /
   1951 		    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
   1952 		if (rt->rt_rmx.rmx_rttvar) {
   1953 			tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
   1954 			    ((RTM_RTTUNIT / PR_SLOWHZ) >>
   1955 				(TCP_RTTVAR_SHIFT + 2));
   1956 		} else {
   1957 			/* Default variation is +- 1 rtt */
   1958 			tp->t_rttvar =
   1959 			    tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
   1960 		}
   1961 		TCPT_RANGESET(tp->t_rxtcur,
   1962 		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
   1963 		    tp->t_rttmin, TCPTV_REXMTMAX);
   1964 	}
   1965 	inpcb_rtentry_unref(rt, tp->t_inpcb);
   1966 #endif
   1967 }
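
/*
 * The conversion above turns a route-metric RTT (expressed in RTM_RTTUNIT
 * fractions of a second) into the fixed-point, slow-timer-tick value the
 * RTT estimator keeps: dividing by (RTM_RTTUNIT / PR_SLOWHZ) would give
 * whole ticks, and shrinking that divisor by the scaling shift leaves the
 * extra fractional bits in place.  A parameterized sketch; the concrete
 * shift and tick-rate values are arguments rather than kernel constants.
 */
#if 0	/* Illustrative sketch only; not part of the build. */
#include <stdint.h>

#define EX_RTTUNIT	1000000u	/* route metrics: RTT units per second */

/*
 * Convert a route-metric RTT to a scaled estimator value.  scale_shift
 * must be small enough that the divisor stays non-zero.
 */
static uint32_t
ex_rmx_to_srtt(uint32_t rmx_rtt, unsigned slowhz, unsigned scale_shift)
{
	return rmx_rtt / ((EX_RTTUNIT / slowhz) >> scale_shift);
}
#endif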
   1968 
   1969 tcp_seq	 tcp_iss_seq = 0;	/* tcp initial seq # */
   1970 
   1971 /*
    1972  * Get a new initial sequence number for the given TCP control block.
   1973  */
   1974 tcp_seq
   1975 tcp_new_iss(struct tcpcb *tp)
   1976 {
   1977 
   1978 	if (tp->t_inpcb->inp_af == AF_INET) {
   1979 		return tcp_new_iss1(&in4p_laddr(tp->t_inpcb),
   1980 		    &in4p_faddr(tp->t_inpcb), tp->t_inpcb->inp_lport,
   1981 		    tp->t_inpcb->inp_fport, sizeof(in4p_laddr(tp->t_inpcb)));
   1982 	}
   1983 #ifdef INET6
   1984 	if (tp->t_inpcb->inp_af == AF_INET6) {
   1985 		return tcp_new_iss1(&in6p_laddr(tp->t_inpcb),
   1986 		    &in6p_faddr(tp->t_inpcb), tp->t_inpcb->inp_lport,
   1987 		    tp->t_inpcb->inp_fport, sizeof(in6p_laddr(tp->t_inpcb)));
   1988 	}
   1989 #endif
   1990 
   1991 	panic("tcp_new_iss: unreachable");
   1992 }
   1993 
   1994 static u_int8_t tcp_iss_secret[16];	/* 128 bits; should be plenty */
   1995 
   1996 /*
   1997  * Initialize RFC 1948 ISS Secret
   1998  */
   1999 static int
   2000 tcp_iss_secret_init(void)
   2001 {
   2002 	cprng_strong(kern_cprng,
   2003 	    tcp_iss_secret, sizeof(tcp_iss_secret), 0);
   2004 
   2005 	return 0;
   2006 }
   2007 
   2008 /*
   2009  * This routine actually generates a new TCP initial sequence number.
   2010  */
   2011 tcp_seq
   2012 tcp_new_iss1(void *laddr, void *faddr, u_int16_t lport, u_int16_t fport,
   2013     size_t addrsz)
   2014 {
   2015 	tcp_seq tcp_iss;
   2016 
   2017 	if (tcp_do_rfc1948) {
   2018 		MD5_CTX ctx;
   2019 		u_int8_t hash[16];	/* XXX MD5 knowledge */
   2020 		static ONCE_DECL(tcp_iss_secret_control);
   2021 
   2022 		/*
   2023 		 * If we haven't been here before, initialize our cryptographic
   2024 		 * hash secret.
   2025 		 */
   2026 		RUN_ONCE(&tcp_iss_secret_control, tcp_iss_secret_init);
   2027 
   2028 		/*
   2029 		 * Compute the base value of the ISS.  It is a hash
   2030 		 * of (saddr, sport, daddr, dport, secret).
   2031 		 */
   2032 		MD5Init(&ctx);
   2033 
   2034 		MD5Update(&ctx, (u_char *) laddr, addrsz);
   2035 		MD5Update(&ctx, (u_char *) &lport, sizeof(lport));
   2036 
   2037 		MD5Update(&ctx, (u_char *) faddr, addrsz);
   2038 		MD5Update(&ctx, (u_char *) &fport, sizeof(fport));
   2039 
   2040 		MD5Update(&ctx, tcp_iss_secret, sizeof(tcp_iss_secret));
   2041 
   2042 		MD5Final(hash, &ctx);
   2043 
   2044 		memcpy(&tcp_iss, hash, sizeof(tcp_iss));
   2045 
   2046 #ifdef TCPISS_DEBUG
   2047 		printf("ISS hash 0x%08x, ", tcp_iss);
   2048 #endif
   2049 	} else {
   2050 		/*
   2051 		 * Randomize.
   2052 		 */
   2053 		tcp_iss = cprng_fast32() & TCP_ISS_RANDOM_MASK;
   2054 #ifdef TCPISS_DEBUG
   2055 		printf("ISS random 0x%08x, ", tcp_iss);
   2056 #endif
   2057 	}
   2058 
   2059 	/*
   2060 	 * Add the offset in to the computed value.
   2061 	 */
   2062 	tcp_iss += tcp_iss_seq;
   2063 #ifdef TCPISS_DEBUG
   2064 	printf("ISS %08x\n", tcp_iss);
   2065 #endif
   2066 	return tcp_iss;
   2067 }
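
/*
 * A user-space sketch of the RFC 1948 construction implemented above: the
 * ISS base is a keyed digest over (local address, local port, foreign
 * address, foreign port, secret), and a monotonically advancing offset
 * (tcp_iss_seq here) is added so sequence numbers still move forward.
 * The digest below is a trivial FNV-style stand-in for MD5, purely to
 * show the data flow; it is not cryptographic and all ex_* names are
 * made up for the example.
 */
#if 0	/* Illustrative sketch only; not part of the build. */
#include <stddef.h>
#include <stdint.h>

/* Trivial keyed mixing function standing in for MD5; NOT cryptographic. */
static uint32_t
ex_mix(uint32_t h, const void *p, size_t len)
{
	const uint8_t *b = p;

	while (len-- > 0)
		h = (h * 16777619u) ^ *b++;
	return h;
}

static uint32_t
ex_new_iss(const void *laddr, const void *faddr, size_t addrsz,
    uint16_t lport, uint16_t fport, const uint8_t secret[16],
    uint32_t iss_offset)
{
	uint32_t h = 2166136261u;

	h = ex_mix(h, laddr, addrsz);
	h = ex_mix(h, &lport, sizeof(lport));
	h = ex_mix(h, faddr, addrsz);
	h = ex_mix(h, &fport, sizeof(fport));
	h = ex_mix(h, secret, 16);

	/* Add the advancing offset, as tcp_iss_seq is added above. */
	return h + iss_offset;
}
#endif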
   2068 
   2069 #if defined(IPSEC)
    2070 /* Compute the ESP/AH header size for TCP, including the outer IP header. */
   2071 size_t
   2072 ipsec4_hdrsiz_tcp(struct tcpcb *tp)
   2073 {
   2074 	struct inpcb *inp;
   2075 	size_t hdrsiz;
   2076 
   2077 	/* XXX mapped addr case (tp->t_inpcb) */
   2078 	if (!tp || !tp->t_template || !(inp = tp->t_inpcb))
   2079 		return 0;
   2080 	switch (tp->t_family) {
   2081 	case AF_INET:
   2082 		/* XXX: should use correct direction. */
   2083 		hdrsiz = ipsec_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, inp);
   2084 		break;
   2085 	default:
   2086 		hdrsiz = 0;
   2087 		break;
   2088 	}
   2089 
   2090 	return hdrsiz;
   2091 }
   2092 
   2093 #ifdef INET6
   2094 size_t
   2095 ipsec6_hdrsiz_tcp(struct tcpcb *tp)
   2096 {
   2097 	struct inpcb *inp;
   2098 	size_t hdrsiz;
   2099 
   2100 	if (!tp || !tp->t_template || !(inp = tp->t_inpcb))
   2101 		return 0;
   2102 	switch (tp->t_family) {
   2103 	case AF_INET6:
   2104 		/* XXX: should use correct direction. */
   2105 		hdrsiz = ipsec_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, inp);
   2106 		break;
   2107 	case AF_INET:
   2108 		/* mapped address case - tricky */
   2109 	default:
   2110 		hdrsiz = 0;
   2111 		break;
   2112 	}
   2113 
   2114 	return hdrsiz;
   2115 }
   2116 #endif
   2117 #endif /*IPSEC*/
   2118 
   2119 /*
   2120  * Determine the length of the TCP options for this connection.
   2121  *
   2122  * XXX:  What do we do for SACK, when we add that?  Just reserve
   2123  *       all of the space?  Otherwise we can't exactly be incrementing
   2124  *       cwnd by an amount that varies depending on the amount we last
   2125  *       had to SACK!
   2126  */
   2127 
   2128 u_int
   2129 tcp_optlen(struct tcpcb *tp)
   2130 {
   2131 	u_int optlen;
   2132 
   2133 	optlen = 0;
   2134 	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
   2135 	    (TF_REQ_TSTMP | TF_RCVD_TSTMP))
   2136 		optlen += TCPOLEN_TSTAMP_APPA;
   2137 
   2138 #ifdef TCP_SIGNATURE
   2139 	if (tp->t_flags & TF_SIGNATURE)
   2140 		optlen += TCPOLEN_SIGLEN;
   2141 #endif
   2142 
   2143 	return optlen;
   2144 }
   2145 
   2146 u_int
   2147 tcp_hdrsz(struct tcpcb *tp)
   2148 {
   2149 	u_int hlen;
   2150 
   2151 	switch (tp->t_family) {
   2152 #ifdef INET6
   2153 	case AF_INET6:
   2154 		hlen = sizeof(struct ip6_hdr);
   2155 		break;
   2156 #endif
   2157 	case AF_INET:
   2158 		hlen = sizeof(struct ip);
   2159 		break;
   2160 	default:
   2161 		hlen = 0;
   2162 		break;
   2163 	}
   2164 	hlen += sizeof(struct tcphdr);
   2165 
   2166 	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
   2167 	    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
   2168 		hlen += TCPOLEN_TSTAMP_APPA;
   2169 #ifdef TCP_SIGNATURE
   2170 	if (tp->t_flags & TF_SIGNATURE)
   2171 		hlen += TCPOLEN_SIGLEN;
   2172 #endif
   2173 	return hlen;
   2174 }
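
/*
 * For a quick check of the two sizing helpers above: the timestamp option
 * occupies 12 bytes on the wire (the 10-byte option padded with two NOPs,
 * which is what TCPOLEN_TSTAMP_APPA accounts for), so a typical established
 * IPv4 connection with timestamps carries 20 + 20 + 12 = 52 bytes of
 * headers per segment and an IPv6 one 40 + 20 + 12 = 72.  A worked check
 * of that arithmetic; the signature option is left out since it depends
 * on TCP_SIGNATURE.
 */
#if 0	/* Illustrative sketch only; not part of the build. */
#include <assert.h>

#define EX_IP4_HDR	20	/* sizeof(struct ip), no options */
#define EX_IP6_HDR	40	/* sizeof(struct ip6_hdr) */
#define EX_TCP_HDR	20	/* sizeof(struct tcphdr), no options */
#define EX_TS_OPT	12	/* timestamp option + two NOPs */

int
main(void)
{
	assert(EX_IP4_HDR + EX_TCP_HDR + EX_TS_OPT == 52);
	assert(EX_IP6_HDR + EX_TCP_HDR + EX_TS_OPT == 72);
	return 0;
}
#endif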
   2175 
   2176 void
   2177 tcp_statinc(u_int stat)
   2178 {
   2179 
   2180 	KASSERT(stat < TCP_NSTATS);
   2181 	TCP_STATINC(stat);
   2182 }
   2183 
   2184 void
   2185 tcp_statadd(u_int stat, uint64_t val)
   2186 {
   2187 
   2188 	KASSERT(stat < TCP_NSTATS);
   2189 	TCP_STATADD(stat, val);
   2190 }
   2191