/*	$NetBSD: tcp_subr.c,v 1.250 2013/06/05 19:01:26 christos Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1997, 1998, 2000, 2001, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
 * Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tcp_subr.c,v 1.250 2013/06/05 19:01:26 christos Exp $");

#include "opt_inet.h"
#include "opt_ipsec.h"
#include "opt_tcp_compat_42.h"
#include "opt_inet_csum.h"
#include "opt_mbuftrace.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/md5.h>
#include <sys/cprng.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#ifndef INET
#include <netinet/in.h>
#endif
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6protosw.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#endif

#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_vtw.h>
#include <netinet/tcp_private.h>
#include <netinet/tcp_congctl.h>
#include <netinet/tcpip.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#endif	/* IPSEC */


struct inpcbtable tcbtable;	/* head of queue of active tcpcb's */
u_int32_t tcp_now;		/* slow ticks, for RFC 1323 timestamps */

percpu_t *tcpstat_percpu;

/* patchable/settable parameters for tcp */
int	tcp_mssdflt = TCP_MSS;
int	tcp_minmss = TCP_MINMSS;
int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
int	tcp_do_rfc1323 = 1;	/* window scaling / timestamps (obsolete) */
int	tcp_do_rfc1948 = 0;	/* ISS by cryptographic hash */
int	tcp_do_sack = 1;	/* selective acknowledgement */
int	tcp_do_win_scale = 1;	/* RFC1323 window scaling */
int	tcp_do_timestamps = 1;	/* RFC1323 timestamps */
int	tcp_ack_on_push = 0;	/* set to enable immediate ACK-on-PUSH */
int	tcp_do_ecn = 0;		/* Explicit Congestion Notification */
#ifndef TCP_INIT_WIN
#define	TCP_INIT_WIN	4	/* initial slow start window */
#endif
#ifndef TCP_INIT_WIN_LOCAL
#define	TCP_INIT_WIN_LOCAL 4	/* initial slow start window for local nets */
#endif
/*
 * Up to 5 we scale linearly, to reach 3 * 1460; then (iw) * 1460.
 * This is to simulate current behavior for iw == 4
 */
int tcp_init_win_max[] = {
	 1 * 1460,
	 1 * 1460,
	 2 * 1460,
	 2 * 1460,
	 3 * 1460,
	 5 * 1460,
	 6 * 1460,
	 7 * 1460,
	 8 * 1460,
	 9 * 1460,
	10 * 1460
};
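/*
 * For example, with the default tcp_init_win of 4 the table above caps the
 * initial window at 3 * 1460 = 4380 bytes, while a setting of 10 allows the
 * full 10 * 1460 = 14600 bytes.
 */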
int	tcp_init_win = TCP_INIT_WIN;
int	tcp_init_win_local = TCP_INIT_WIN_LOCAL;
int	tcp_mss_ifmtu = 0;
#ifdef TCP_COMPAT_42
int	tcp_compat_42 = 1;
#else
int	tcp_compat_42 = 0;
#endif
int	tcp_rst_ppslim = 100;		/* 100pps */
int	tcp_ackdrop_ppslim = 100;	/* 100pps */
int	tcp_do_loopback_cksum = 0;
int	tcp_do_abc = 1;			/* RFC3465 Appropriate byte counting. */
int	tcp_abc_aggressive = 1;		/* 1: L=2*SMSS  0: L=1*SMSS */
int	tcp_sack_tp_maxholes = 32;
int	tcp_sack_globalmaxholes = 1024;
int	tcp_sack_globalholes = 0;
int	tcp_ecn_maxretries = 1;
int	tcp_msl_enable = 1;		/* enable TIME_WAIT truncation */
int	tcp_msl_loop = PR_SLOWHZ;	/* MSL for loopback */
int	tcp_msl_local = 5 * PR_SLOWHZ;	/* MSL for 'local' */
int	tcp_msl_remote = TCPTV_MSL;	/* MSL otherwise */
int	tcp_msl_remote_threshold = TCPTV_SRTTDFLT;	/* RTT threshold */
int	tcp_rttlocal = 0;		/* Use RTT to decide who's 'local' */

int	tcp4_vtw_enable = 0;		/* 1 to enable */
int	tcp6_vtw_enable = 0;		/* 1 to enable */
int	tcp_vtw_was_enabled = 0;
int	tcp_vtw_entries = 1 << 16;	/* 64K vestigial TIME_WAIT entries */

/* tcb hash */
#ifndef TCBHASHSIZE
#define	TCBHASHSIZE	128
#endif
int	tcbhashsize = TCBHASHSIZE;

/* syn hash parameters */
#define	TCP_SYN_HASH_SIZE	293
#define	TCP_SYN_BUCKET_SIZE	35
int	tcp_syn_cache_size = TCP_SYN_HASH_SIZE;
int	tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
int	tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
struct	syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
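/*
 * With the defaults above the SYN cache is limited to
 * 293 * 35 = 10255 entries in total and 3 * 35 = 105 entries per bucket.
 */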

int	tcp_freeq(struct tcpcb *);

#ifdef INET
void	tcp_mtudisc_callback(struct in_addr);
#endif
#ifdef INET6
void	tcp6_mtudisc_callback(struct in6_addr *);
#endif

#ifdef INET6
void	tcp6_mtudisc(struct in6pcb *, int);
#endif

static struct pool tcpcb_pool;

static int tcp_drainwanted;

#ifdef TCP_CSUM_COUNTERS
#include <sys/device.h>

#if defined(INET)
struct evcnt tcp_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "hwcsum bad");
struct evcnt tcp_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "hwcsum ok");
struct evcnt tcp_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "hwcsum data");
struct evcnt tcp_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "swcsum");

EVCNT_ATTACH_STATIC(tcp_hwcsum_bad);
EVCNT_ATTACH_STATIC(tcp_hwcsum_ok);
EVCNT_ATTACH_STATIC(tcp_hwcsum_data);
EVCNT_ATTACH_STATIC(tcp_swcsum);
#endif /* defined(INET) */

#if defined(INET6)
struct evcnt tcp6_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp6", "hwcsum bad");
struct evcnt tcp6_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp6", "hwcsum ok");
struct evcnt tcp6_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp6", "hwcsum data");
struct evcnt tcp6_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp6", "swcsum");

EVCNT_ATTACH_STATIC(tcp6_hwcsum_bad);
EVCNT_ATTACH_STATIC(tcp6_hwcsum_ok);
EVCNT_ATTACH_STATIC(tcp6_hwcsum_data);
EVCNT_ATTACH_STATIC(tcp6_swcsum);
#endif /* defined(INET6) */
#endif /* TCP_CSUM_COUNTERS */


#ifdef TCP_OUTPUT_COUNTERS
#include <sys/device.h>

struct evcnt tcp_output_bigheader = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output big header");
struct evcnt tcp_output_predict_hit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output predict hit");
struct evcnt tcp_output_predict_miss = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output predict miss");
struct evcnt tcp_output_copysmall = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output copy small");
struct evcnt tcp_output_copybig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output copy big");
struct evcnt tcp_output_refbig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp", "output reference big");

EVCNT_ATTACH_STATIC(tcp_output_bigheader);
EVCNT_ATTACH_STATIC(tcp_output_predict_hit);
EVCNT_ATTACH_STATIC(tcp_output_predict_miss);
EVCNT_ATTACH_STATIC(tcp_output_copysmall);
EVCNT_ATTACH_STATIC(tcp_output_copybig);
EVCNT_ATTACH_STATIC(tcp_output_refbig);

#endif /* TCP_OUTPUT_COUNTERS */

#ifdef TCP_REASS_COUNTERS
#include <sys/device.h>

struct evcnt tcp_reass_ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "tcp_reass", "calls");
struct evcnt tcp_reass_empty = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "insert into empty queue");
struct evcnt tcp_reass_iteration[8] = {
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", ">7 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "1 iteration"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "2 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "3 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "4 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "5 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "6 iterations"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "7 iterations"),
};
struct evcnt tcp_reass_prependfirst = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "prepend to first");
struct evcnt tcp_reass_prepend = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "prepend");
struct evcnt tcp_reass_insert = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "insert");
struct evcnt tcp_reass_inserttail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "insert at tail");
struct evcnt tcp_reass_append = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "append");
struct evcnt tcp_reass_appendtail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "append to tail fragment");
struct evcnt tcp_reass_overlaptail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "overlap at end");
struct evcnt tcp_reass_overlapfront = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "overlap at start");
struct evcnt tcp_reass_segdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "duplicate segment");
struct evcnt tcp_reass_fragdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    &tcp_reass_, "tcp_reass", "duplicate fragment");

EVCNT_ATTACH_STATIC(tcp_reass_);
EVCNT_ATTACH_STATIC(tcp_reass_empty);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 0);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 1);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 2);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 3);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 4);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 5);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 6);
EVCNT_ATTACH_STATIC2(tcp_reass_iteration, 7);
EVCNT_ATTACH_STATIC(tcp_reass_prependfirst);
EVCNT_ATTACH_STATIC(tcp_reass_prepend);
EVCNT_ATTACH_STATIC(tcp_reass_insert);
EVCNT_ATTACH_STATIC(tcp_reass_inserttail);
EVCNT_ATTACH_STATIC(tcp_reass_append);
EVCNT_ATTACH_STATIC(tcp_reass_appendtail);
EVCNT_ATTACH_STATIC(tcp_reass_overlaptail);
EVCNT_ATTACH_STATIC(tcp_reass_overlapfront);
EVCNT_ATTACH_STATIC(tcp_reass_segdup);
EVCNT_ATTACH_STATIC(tcp_reass_fragdup);

#endif /* TCP_REASS_COUNTERS */

#ifdef MBUFTRACE
struct mowner tcp_mowner = MOWNER_INIT("tcp", "");
struct mowner tcp_rx_mowner = MOWNER_INIT("tcp", "rx");
struct mowner tcp_tx_mowner = MOWNER_INIT("tcp", "tx");
struct mowner tcp_sock_mowner = MOWNER_INIT("tcp", "sock");
struct mowner tcp_sock_rx_mowner = MOWNER_INIT("tcp", "sock rx");
struct mowner tcp_sock_tx_mowner = MOWNER_INIT("tcp", "sock tx");
#endif

/*
 * Tcp initialization
 */
void
tcp_init(void)
{
	int hlen;

	in_pcbinit(&tcbtable, tcbhashsize, tcbhashsize);
	pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
	    NULL, IPL_SOFTNET);

	hlen = sizeof(struct ip) + sizeof(struct tcphdr);
#ifdef INET6
	if (sizeof(struct ip) < sizeof(struct ip6_hdr))
		hlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
#endif
	if (max_protohdr < hlen)
		max_protohdr = hlen;
	if (max_linkhdr + hlen > MHLEN)
		panic("tcp_init");

#ifdef INET
	icmp_mtudisc_callback_register(tcp_mtudisc_callback);
#endif
#ifdef INET6
	icmp6_mtudisc_callback_register(tcp6_mtudisc_callback);
#endif

	tcp_usrreq_init();

	/* Initialize timer state. */
	tcp_timer_init();

	/* Initialize the compressed state engine. */
	syn_cache_init();

	/* Initialize the congestion control algorithms. */
	tcp_congctl_init();

	/* Initialize the TCPCB template. */
	tcp_tcpcb_template();

	/* Initialize reassembly queue */
	tcpipqent_init();

	/* SACK */
	tcp_sack_init();

	MOWNER_ATTACH(&tcp_tx_mowner);
	MOWNER_ATTACH(&tcp_rx_mowner);
	MOWNER_ATTACH(&tcp_reass_mowner);
	MOWNER_ATTACH(&tcp_sock_mowner);
	MOWNER_ATTACH(&tcp_sock_tx_mowner);
	MOWNER_ATTACH(&tcp_sock_rx_mowner);
	MOWNER_ATTACH(&tcp_mowner);

	tcpstat_percpu = percpu_alloc(sizeof(uint64_t) * TCP_NSTATS);

	vtw_earlyinit();
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Call after host entry created, allocates an mbuf and fills
 * in a skeletal tcp/ip header, minimizing the amount of work
 * necessary when the connection is used.
 */
struct mbuf *
tcp_template(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
	struct in6pcb *in6p = tp->t_in6pcb;
#endif
	struct tcphdr *n;
	struct mbuf *m;
	int hlen;

	switch (tp->t_family) {
	case AF_INET:
		hlen = sizeof(struct ip);
		if (inp)
			break;
#ifdef INET6
		if (in6p) {
			/* mapped addr case */
			if (IN6_IS_ADDR_V4MAPPED(&in6p->in6p_laddr)
			 && IN6_IS_ADDR_V4MAPPED(&in6p->in6p_faddr))
				break;
		}
#endif
		return NULL;	/*EINVAL*/
#ifdef INET6
	case AF_INET6:
		hlen = sizeof(struct ip6_hdr);
		if (in6p) {
			/* more sanity checks? */
			break;
		}
		return NULL;	/*EINVAL*/
#endif
	default:
		hlen = 0;	/*pacify gcc*/
		return NULL;	/*EAFNOSUPPORT*/
	}
#ifdef DIAGNOSTIC
	if (hlen + sizeof(struct tcphdr) > MCLBYTES)
		panic("mclbytes too small for t_template");
#endif
	m = tp->t_template;
	if (m && m->m_len == hlen + sizeof(struct tcphdr))
		;
	else {
		if (m)
			m_freem(m);
		m = tp->t_template = NULL;
		MGETHDR(m, M_DONTWAIT, MT_HEADER);
		if (m && hlen + sizeof(struct tcphdr) > MHLEN) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m = NULL;
			}
		}
		if (m == NULL)
			return NULL;
		MCLAIM(m, &tcp_mowner);
		m->m_pkthdr.len = m->m_len = hlen + sizeof(struct tcphdr);
	}

	memset(mtod(m, void *), 0, m->m_len);

	n = (struct tcphdr *)(mtod(m, char *) + hlen);

	switch (tp->t_family) {
	case AF_INET:
	    {
		struct ipovly *ipov;
		mtod(m, struct ip *)->ip_v = 4;
		mtod(m, struct ip *)->ip_hl = hlen >> 2;
		ipov = mtod(m, struct ipovly *);
		ipov->ih_pr = IPPROTO_TCP;
		ipov->ih_len = htons(sizeof(struct tcphdr));
		if (inp) {
			ipov->ih_src = inp->inp_laddr;
			ipov->ih_dst = inp->inp_faddr;
		}
#ifdef INET6
		else if (in6p) {
			/* mapped addr case */
			bcopy(&in6p->in6p_laddr.s6_addr32[3], &ipov->ih_src,
			    sizeof(ipov->ih_src));
			bcopy(&in6p->in6p_faddr.s6_addr32[3], &ipov->ih_dst,
			    sizeof(ipov->ih_dst));
		}
#endif
		/*
		 * Compute the pseudo-header portion of the checksum
		 * now.  We incrementally add in the TCP option and
		 * payload lengths later, and then compute the TCP
		 * checksum right before the packet is sent off onto
		 * the wire.
		 */
		n->th_sum = in_cksum_phdr(ipov->ih_src.s_addr,
		    ipov->ih_dst.s_addr,
		    htons(sizeof(struct tcphdr) + IPPROTO_TCP));
		break;
	    }
#ifdef INET6
	case AF_INET6:
	    {
		struct ip6_hdr *ip6;
		mtod(m, struct ip *)->ip_v = 6;
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_src = in6p->in6p_laddr;
		ip6->ip6_dst = in6p->in6p_faddr;
		ip6->ip6_flow = in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK;
		if (ip6_auto_flowlabel) {
			ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
			ip6->ip6_flow |=
			    (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
		}
		ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
		ip6->ip6_vfc |= IPV6_VERSION;

		/*
		 * Compute the pseudo-header portion of the checksum
		 * now.  We incrementally add in the TCP option and
		 * payload lengths later, and then compute the TCP
		 * checksum right before the packet is sent off onto
		 * the wire.
		 */
		n->th_sum = in6_cksum_phdr(&in6p->in6p_laddr,
		    &in6p->in6p_faddr, htonl(sizeof(struct tcphdr)),
		    htonl(IPPROTO_TCP));
		break;
	    }
#endif
	}
	if (inp) {
		n->th_sport = inp->inp_lport;
		n->th_dport = inp->inp_fport;
	}
#ifdef INET6
	else if (in6p) {
		n->th_sport = in6p->in6p_lport;
		n->th_dport = in6p->in6p_fport;
	}
#endif
	n->th_seq = 0;
	n->th_ack = 0;
	n->th_x2 = 0;
	n->th_off = 5;
	n->th_flags = 0;
	n->th_win = 0;
	n->th_urp = 0;
	return (m);
}
/*
 * Send a single message to the TCP at the address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy of the
 * supplied header template and send directly to the addressed host.
 * This is used to force keep-alive messages out using the TCP
 * template for a connection tp->t_template.  If flags are given
 * then we send a message back to the TCP which originated the
 * segment described by th0, and discard the mbuf containing it and
 * any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 */
int
tcp_respond(struct tcpcb *tp, struct mbuf *template, struct mbuf *m,
    struct tcphdr *th0, tcp_seq ack, tcp_seq seq, int flags)
{
#ifdef INET6
	struct rtentry *rt;
#endif
	struct route *ro;
	int error, tlen, win = 0;
	int hlen;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	int family;	/* family on packet, not inpcb/in6pcb! */
	struct tcphdr *th;
	struct socket *so;

	if (tp != NULL && (flags & TH_RST) == 0) {
#ifdef DIAGNOSTIC
		if (tp->t_inpcb && tp->t_in6pcb)
			panic("tcp_respond: both t_inpcb and t_in6pcb are set");
#endif
#ifdef INET
		if (tp->t_inpcb)
			win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
#endif
#ifdef INET6
		if (tp->t_in6pcb)
			win = sbspace(&tp->t_in6pcb->in6p_socket->so_rcv);
#endif
	}

	th = NULL;	/* Quell uninitialized warning */
	ip = NULL;
#ifdef INET6
	ip6 = NULL;
#endif
	if (m == 0) {
		if (!template)
			return EINVAL;

		/* get family information from template */
		switch (mtod(template, struct ip *)->ip_v) {
		case 4:
			family = AF_INET;
			hlen = sizeof(struct ip);
			break;
#ifdef INET6
		case 6:
			family = AF_INET6;
			hlen = sizeof(struct ip6_hdr);
			break;
#endif
		default:
			return EAFNOSUPPORT;
		}

		MGETHDR(m, M_DONTWAIT, MT_HEADER);
		if (m) {
			MCLAIM(m, &tcp_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_free(m);
				m = NULL;
			}
		}
		if (m == NULL)
			return (ENOBUFS);

		if (tcp_compat_42)
			tlen = 1;
		else
			tlen = 0;

		m->m_data += max_linkhdr;
		bcopy(mtod(template, void *), mtod(m, void *),
		    template->m_len);
		switch (family) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			th = (struct tcphdr *)(ip + 1);
			break;
#ifdef INET6
		case AF_INET6:
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)(ip6 + 1);
			break;
#endif
#if 0
		default:
			/* no one will reach here */
			m_freem(m);
			return EAFNOSUPPORT;
#endif
		}
		flags = TH_ACK;
	} else {

		if ((m->m_flags & M_PKTHDR) == 0) {
#if 0
			printf("non PKTHDR to tcp_respond\n");
#endif
			m_freem(m);
			return EINVAL;
		}
#ifdef DIAGNOSTIC
		if (!th0)
			panic("th0 == NULL in tcp_respond");
#endif

		/* get family information from m */
		switch (mtod(m, struct ip *)->ip_v) {
		case 4:
			family = AF_INET;
			hlen = sizeof(struct ip);
			ip = mtod(m, struct ip *);
			break;
#ifdef INET6
		case 6:
			family = AF_INET6;
			hlen = sizeof(struct ip6_hdr);
			ip6 = mtod(m, struct ip6_hdr *);
			break;
#endif
		default:
			m_freem(m);
			return EAFNOSUPPORT;
		}
		/* clear h/w csum flags inherited from rx packet */
		m->m_pkthdr.csum_flags = 0;

		if ((flags & TH_SYN) == 0 || sizeof(*th0) > (th0->th_off << 2))
			tlen = sizeof(*th0);
		else
			tlen = th0->th_off << 2;

		if (m->m_len > hlen + tlen && (m->m_flags & M_EXT) == 0 &&
		    mtod(m, char *) + hlen == (char *)th0) {
			m->m_len = hlen + tlen;
			m_freem(m->m_next);
			m->m_next = NULL;
		} else {
			struct mbuf *n;

#ifdef DIAGNOSTIC
			if (max_linkhdr + hlen + tlen > MCLBYTES) {
				m_freem(m);
				return EMSGSIZE;
			}
#endif
			MGETHDR(n, M_DONTWAIT, MT_HEADER);
			if (n && max_linkhdr + hlen + tlen > MHLEN) {
				MCLGET(n, M_DONTWAIT);
				if ((n->m_flags & M_EXT) == 0) {
					m_freem(n);
					n = NULL;
				}
			}
			if (!n) {
				m_freem(m);
				return ENOBUFS;
			}

			MCLAIM(n, &tcp_tx_mowner);
			n->m_data += max_linkhdr;
			n->m_len = hlen + tlen;
			m_copyback(n, 0, hlen, mtod(m, void *));
			m_copyback(n, hlen, tlen, (void *)th0);

			m_freem(m);
			m = n;
			n = NULL;
		}

#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
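		/*
		 * Reflect the segment back to its originator: swap the
		 * source and destination addresses and ports taken from
		 * the received header.
		 */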
		switch (family) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			th = (struct tcphdr *)(ip + 1);
			ip->ip_p = IPPROTO_TCP;
			xchg(ip->ip_dst, ip->ip_src, struct in_addr);
			ip->ip_p = IPPROTO_TCP;
			break;
#ifdef INET6
		case AF_INET6:
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)(ip6 + 1);
			ip6->ip6_nxt = IPPROTO_TCP;
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			ip6->ip6_nxt = IPPROTO_TCP;
			break;
#endif
#if 0
		default:
			/* no one will reach here */
			m_freem(m);
			return EAFNOSUPPORT;
#endif
		}
		xchg(th->th_dport, th->th_sport, u_int16_t);
#undef xchg
		tlen = 0;	/*be friendly with the following code*/
	}
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_x2 = 0;
	if ((flags & TH_SYN) == 0) {
		if (tp)
			win >>= tp->rcv_scale;
		if (win > TCP_MAXWIN)
			win = TCP_MAXWIN;
		th->th_win = htons((u_int16_t)win);
		th->th_off = sizeof (struct tcphdr) >> 2;
		tlen += sizeof(*th);
	} else
		tlen += th->th_off << 2;
	m->m_len = hlen + tlen;
	m->m_pkthdr.len = hlen + tlen;
	m->m_pkthdr.rcvif = NULL;
	th->th_flags = flags;
	th->th_urp = 0;

	switch (family) {
#ifdef INET
	case AF_INET:
	    {
		struct ipovly *ipov = (struct ipovly *)ip;
		memset(ipov->ih_x1, 0, sizeof ipov->ih_x1);
		ipov->ih_len = htons((u_int16_t)tlen);

		th->th_sum = 0;
		th->th_sum = in_cksum(m, hlen + tlen);
		ip->ip_len = htons(hlen + tlen);
		ip->ip_ttl = ip_defttl;
		break;
	    }
#endif
#ifdef INET6
	case AF_INET6:
	    {
		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    tlen);
		ip6->ip6_plen = htons(tlen);
		if (tp && tp->t_in6pcb) {
			struct ifnet *oifp;
			ro = &tp->t_in6pcb->in6p_route;
			oifp = (rt = rtcache_validate(ro)) != NULL ? rt->rt_ifp
			    : NULL;
			ip6->ip6_hlim = in6_selecthlim(tp->t_in6pcb, oifp);
		} else
			ip6->ip6_hlim = ip6_defhlim;
		ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK;
		if (ip6_auto_flowlabel) {
			ip6->ip6_flow |=
			    (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
		}
		break;
	    }
#endif
	}

	if (tp && tp->t_inpcb)
		so = tp->t_inpcb->inp_socket;
#ifdef INET6
	else if (tp && tp->t_in6pcb)
		so = tp->t_in6pcb->in6p_socket;
#endif
	else
		so = NULL;

	if (tp != NULL && tp->t_inpcb != NULL) {
		ro = &tp->t_inpcb->inp_route;
#ifdef DIAGNOSTIC
		if (family != AF_INET)
			panic("tcp_respond: address family mismatch");
		if (!in_hosteq(ip->ip_dst, tp->t_inpcb->inp_faddr)) {
			panic("tcp_respond: ip_dst %x != inp_faddr %x",
			    ntohl(ip->ip_dst.s_addr),
			    ntohl(tp->t_inpcb->inp_faddr.s_addr));
		}
#endif
	}
#ifdef INET6
	else if (tp != NULL && tp->t_in6pcb != NULL) {
		ro = (struct route *)&tp->t_in6pcb->in6p_route;
#ifdef DIAGNOSTIC
		if (family == AF_INET) {
			if (!IN6_IS_ADDR_V4MAPPED(&tp->t_in6pcb->in6p_faddr))
				panic("tcp_respond: not mapped addr");
			if (memcmp(&ip->ip_dst,
			    &tp->t_in6pcb->in6p_faddr.s6_addr32[3],
			    sizeof(ip->ip_dst)) != 0) {
				panic("tcp_respond: ip_dst != in6p_faddr");
			}
		} else if (family == AF_INET6) {
			if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
			    &tp->t_in6pcb->in6p_faddr))
				panic("tcp_respond: ip6_dst != in6p_faddr");
		} else
			panic("tcp_respond: address family mismatch");
#endif
	}
#endif
	else
		ro = NULL;

	switch (family) {
#ifdef INET
	case AF_INET:
		error = ip_output(m, NULL, ro,
		    (tp && tp->t_mtudisc ? IP_MTUDISC : 0), NULL, so);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = ip6_output(m, NULL, ro, 0, NULL, so, NULL);
		break;
#endif
	default:
		error = EAFNOSUPPORT;
		break;
	}

	return (error);
}

/*
 * Template TCPCB.  Rather than zeroing a new TCPCB and initializing
 * a bunch of members individually, we maintain this template for the
 * static and mostly-static components of the TCPCB, and copy it into
 * the new TCPCB instead.
 */
static struct tcpcb tcpcb_template = {
	.t_srtt = TCPTV_SRTTBASE,
	.t_rttmin = TCPTV_MIN,

	.snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT,
	.snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT,
	.snd_numholes = 0,

	.t_partialacks = -1,
	.t_bytes_acked = 0,
};

/*
 * Updates the TCPCB template whenever a parameter that would affect
 * the template is changed.
 */
void
tcp_tcpcb_template(void)
{
	struct tcpcb *tp = &tcpcb_template;
	int flags;

	tp->t_peermss = tcp_mssdflt;
	tp->t_ourmss = tcp_mssdflt;
	tp->t_segsz = tcp_mssdflt;

	flags = 0;
	if (tcp_do_rfc1323 && tcp_do_win_scale)
		flags |= TF_REQ_SCALE;
	if (tcp_do_rfc1323 && tcp_do_timestamps)
		flags |= TF_REQ_TSTMP;
	tp->t_flags = flags;

	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 2 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
	TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
	    TCPTV_MIN, TCPTV_REXMTMAX);

	/* Keep Alive */
	tp->t_keepinit = tcp_keepinit;
	tp->t_keepidle = tcp_keepidle;
	tp->t_keepintvl = tcp_keepintvl;
	tp->t_keepcnt = tcp_keepcnt;
	tp->t_maxidle = tp->t_keepcnt * tp->t_keepintvl;
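	/*
	 * With the classic BSD keepalive defaults (8 probes sent 75 seconds
	 * apart) t_maxidle amounts to 10 minutes of unanswered probing
	 * before an idle connection is dropped.
	 */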

	/* MSL */
	tp->t_msl = TCPTV_MSL;
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.
 */
/* family selects inpcb, or in6pcb */
struct tcpcb *
tcp_newtcpcb(int family, void *aux)
{
#ifdef INET6
	struct rtentry *rt;
#endif
	struct tcpcb *tp;
	int i;

	/* XXX Consider using a pool_cache for speed. */
	tp = pool_get(&tcpcb_pool, PR_NOWAIT);	/* splsoftnet via tcp_usrreq */
	if (tp == NULL)
		return (NULL);
	memcpy(tp, &tcpcb_template, sizeof(*tp));
	TAILQ_INIT(&tp->segq);
	TAILQ_INIT(&tp->timeq);
	tp->t_family = family;		/* may be overridden later on */
	TAILQ_INIT(&tp->snd_holes);
	LIST_INIT(&tp->t_sc);		/* XXX can template this */

	/* Don't sweat this loop; hopefully the compiler will unroll it. */
	for (i = 0; i < TCPT_NTIMERS; i++) {
		callout_init(&tp->t_timer[i], CALLOUT_MPSAFE);
		TCP_TIMER_INIT(tp, i);
	}
	callout_init(&tp->t_delack_ch, CALLOUT_MPSAFE);

	switch (family) {
	case AF_INET:
	    {
		struct inpcb *inp = (struct inpcb *)aux;

		inp->inp_ip.ip_ttl = ip_defttl;
		inp->inp_ppcb = (void *)tp;

		tp->t_inpcb = inp;
		tp->t_mtudisc = ip_mtudisc;
		break;
	    }
#ifdef INET6
	case AF_INET6:
	    {
		struct in6pcb *in6p = (struct in6pcb *)aux;

		in6p->in6p_ip6.ip6_hlim = in6_selecthlim(in6p,
		    (rt = rtcache_validate(&in6p->in6p_route)) != NULL
			? rt->rt_ifp
			: NULL);
		in6p->in6p_ppcb = (void *)tp;

		tp->t_in6pcb = in6p;
		/* for IPv6, always try to run path MTU discovery */
		tp->t_mtudisc = 1;
		break;
	    }
#endif /* INET6 */
	default:
		for (i = 0; i < TCPT_NTIMERS; i++)
			callout_destroy(&tp->t_timer[i]);
		callout_destroy(&tp->t_delack_ch);
		pool_put(&tcpcb_pool, tp);	/* splsoftnet via tcp_usrreq */
		return (NULL);
	}

	/*
	 * Initialize our timebase.  When we send timestamps, we take
	 * the delta from tcp_now -- this means each connection always
	 * gets a timebase of 1, which makes it, among other things,
	 * more difficult to determine how long a system has been up,
	 * and thus how many TCP sequence increments have occurred.
	 *
	 * We start with 1, because 0 doesn't work with linux, which
	 * considers timestamp 0 in a SYN packet as a bug and disables
	 * timestamps.
	 */
	tp->ts_timebase = tcp_now - 1;

	tcp_congctl_select(tp, tcp_congctl_global_name);

	return (tp);
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(struct tcpcb *tp, int errno)
{
	struct socket *so = NULL;

#ifdef DIAGNOSTIC
	if (tp->t_inpcb && tp->t_in6pcb)
		panic("tcp_drop: both t_inpcb and t_in6pcb are set");
#endif
#ifdef INET
	if (tp->t_inpcb)
		so = tp->t_inpcb->inp_socket;
#endif
#ifdef INET6
	if (tp->t_in6pcb)
		so = tp->t_in6pcb->in6p_socket;
#endif
	if (!so)
		return NULL;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		TCP_STATINC(TCP_STAT_DROPS);
	} else
		TCP_STATINC(TCP_STAT_CONNDROPS);
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
	struct inpcb *inp;
#ifdef INET6
	struct in6pcb *in6p;
#endif
	struct socket *so;
#ifdef RTV_RTT
	struct rtentry *rt;
#endif
	struct route *ro;
	int j;

	inp = tp->t_inpcb;
#ifdef INET6
	in6p = tp->t_in6pcb;
#endif
	so = NULL;
	ro = NULL;
	if (inp) {
		so = inp->inp_socket;
		ro = &inp->inp_route;
	}
#ifdef INET6
	else if (in6p) {
		so = in6p->in6p_socket;
		ro = (struct route *)&in6p->in6p_route;
	}
#endif

#ifdef RTV_RTT
	/*
	 * If we sent enough data to get some meaningful characteristics,
	 * save them in the routing entry.  'Enough' is arbitrarily
	 * defined as the sendpipesize (default 4K) * 16.  This would
	 * give us 16 rtt samples assuming we only get one sample per
	 * window (the usual case on a long haul net).  16 samples is
	 * enough for the srtt filter to converge to within 5% of the correct
	 * value; fewer samples and we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
	    ro && (rt = rtcache_validate(ro)) != NULL &&
	    !in_nullhost(satocsin(rt_getkey(rt))->sin_addr)) {
		u_long i = 0;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTTVAR_SHIFT + 2));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
		}
		/*
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 */
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		    (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) ||
		    i < (rt->rt_rmx.rmx_sendpipe / 2)) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_segsz / 2) / tp->t_segsz;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_segsz + sizeof (struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
		}
	}
#endif /* RTV_RTT */
	/* free the reassembly queue, if any */
	TCP_REASS_LOCK(tp);
	(void) tcp_freeq(tp);
	TCP_REASS_UNLOCK(tp);

	/* free the SACK holes list. */
	tcp_free_sackholes(tp);
	tcp_congctl_release(tp);
	syn_cache_cleanup(tp);

	if (tp->t_template) {
		m_free(tp->t_template);
		tp->t_template = NULL;
	}

	/*
	 * Detaching the pcb will unlock the socket/tcpcb, and stopping
	 * the timers can also drop the lock.  We need to prevent access
	 * to the tcpcb as it's half torn down.  Flag the pcb as dead
	 * (prevents access by timers) and only then detach it.
	 */
	tp->t_flags |= TF_DEAD;
	if (inp) {
		inp->inp_ppcb = 0;
		soisdisconnected(so);
		in_pcbdetach(inp);
	}
#ifdef INET6
	else if (in6p) {
		in6p->in6p_ppcb = 0;
		soisdisconnected(so);
		in6_pcbdetach(in6p);
	}
#endif
	/*
	 * pcb is no longer visible elsewhere, so we can safely release
	 * the lock in callout_halt() if needed.
	 */
	TCP_STATINC(TCP_STAT_CLOSED);
	for (j = 0; j < TCPT_NTIMERS; j++) {
		callout_halt(&tp->t_timer[j], softnet_lock);
		callout_destroy(&tp->t_timer[j]);
	}
	callout_halt(&tp->t_delack_ch, softnet_lock);
	callout_destroy(&tp->t_delack_ch);
	pool_put(&tcpcb_pool, tp);

	return NULL;
}

int
tcp_freeq(struct tcpcb *tp)
{
	struct ipqent *qe;
	int rv = 0;
#ifdef TCPREASS_DEBUG
	int i = 0;
#endif

	TCP_REASS_LOCK_CHECK(tp);

	while ((qe = TAILQ_FIRST(&tp->segq)) != NULL) {
#ifdef TCPREASS_DEBUG
		printf("tcp_freeq[%p,%d]: %u:%u(%u) 0x%02x\n",
		    tp, i++, qe->ipqe_seq, qe->ipqe_seq + qe->ipqe_len,
		    qe->ipqe_len, qe->ipqe_flags & (TH_SYN|TH_FIN|TH_RST));
#endif
		TAILQ_REMOVE(&tp->segq, qe, ipqe_q);
		TAILQ_REMOVE(&tp->timeq, qe, ipqe_timeq);
		m_freem(qe->ipqe_m);
		tcpipqent_free(qe);
		rv = 1;
	}
	tp->t_segqlen = 0;
	KASSERT(TAILQ_EMPTY(&tp->timeq));
	return (rv);
}

void
tcp_fasttimo(void)
{
	if (tcp_drainwanted) {
		tcp_drain();
		tcp_drainwanted = 0;
	}
}

void
tcp_drainstub(void)
{
	tcp_drainwanted = 1;
}
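
/*
 * tcp_drainstub() above only latches the drain request; the actual work
 * is done by tcp_drain() below, invoked from tcp_fasttimo() in callout
 * context where softnet_lock and the kernel lock can be taken.
 */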

/*
 * Protocol drain routine.  Called when memory is in short supply.
 * Called from pr_fasttimo thus a callout context.
 */
void
tcp_drain(void)
{
	struct inpcb_hdr *inph;
	struct tcpcb *tp;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	/*
	 * Free the sequence queue of all TCP connections.
	 */
	CIRCLEQ_FOREACH(inph, &tcbtable.inpt_queue, inph_queue) {
		switch (inph->inph_af) {
		case AF_INET:
			tp = intotcpcb((struct inpcb *)inph);
			break;
#ifdef INET6
		case AF_INET6:
			tp = in6totcpcb((struct in6pcb *)inph);
			break;
#endif
		default:
			tp = NULL;
			break;
		}
		if (tp != NULL) {
			/*
			 * We may be called from a device's interrupt
			 * context.  If the tcpcb is already busy,
			 * just bail out now.
			 */
			if (tcp_reass_lock_try(tp) == 0)
				continue;
			if (tcp_freeq(tp))
				TCP_STATINC(TCP_STAT_CONNSDRAINED);
			TCP_REASS_UNLOCK(tp);
		}
	}

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 */
void
tcp_notify(struct inpcb *inp, int error)
{
	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
	struct socket *so = inp->inp_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	    error == EHOSTDOWN)) {
		return;
	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
	    tp->t_rxtshift > 3 && tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
	cv_broadcast(&so->so_cv);
	sorwakeup(so);
	sowwakeup(so);
}

#ifdef INET6
void
tcp6_notify(struct in6pcb *in6p, int error)
{
	struct tcpcb *tp = (struct tcpcb *)in6p->in6p_ppcb;
	struct socket *so = in6p->in6p_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	    error == EHOSTDOWN)) {
		return;
	} else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
	    tp->t_rxtshift > 3 && tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
	cv_broadcast(&so->so_cv);
	sorwakeup(so);
	sowwakeup(so);
}
#endif

#ifdef INET6
void *
tcp6_ctlinput(int cmd, const struct sockaddr *sa, void *d)
{
	struct tcphdr th;
	void (*notify)(struct in6pcb *, int) = tcp6_notify;
	int nmatch;
	struct ip6_hdr *ip6;
	const struct sockaddr_in6 *sa6_src = NULL;
	const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *)sa;
	struct mbuf *m;
	int off;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return NULL;
	if ((unsigned)cmd >= PRC_NCMDS)
		return NULL;
	else if (cmd == PRC_QUENCH) {
		/*
		 * Don't honor ICMP Source Quench messages meant for
		 * TCP connections.
		 */
		return NULL;
	} else if (PRC_IS_REDIRECT(cmd))
		notify = in6_rtchange, d = NULL;
	else if (cmd == PRC_MSGSIZE)
		; /* special code is present, see below */
	else if (cmd == PRC_HOSTDEAD)
		d = NULL;
	else if (inet6ctlerrmap[cmd] == 0)
		return NULL;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		sa6_src = &sa6_any;
		off = 0;
	}

	if (ip6) {
		/*
		 * XXX: We assume that when ip6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(th)) {
			if (cmd == PRC_MSGSIZE)
				icmp6_mtudisc_update((struct ip6ctlparam *)d, 0);
			return NULL;
		}

		memset(&th, 0, sizeof(th));
		m_copydata(m, off, sizeof(th), (void *)&th);

		if (cmd == PRC_MSGSIZE) {
			int valid = 0;

			/*
			 * Check to see if we have a valid TCP connection
			 * corresponding to the address in the ICMPv6 message
			 * payload.
			 */
			if (in6_pcblookup_connect(&tcbtable, &sa6->sin6_addr,
			    th.th_dport,
			    (const struct in6_addr *)&sa6_src->sin6_addr,
			    th.th_sport, 0, 0))
				valid++;

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 *   - recalculate the new MTU and create the
			 *     corresponding routing entry, or
			 *   - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);

			/*
			 * no need to call in6_pcbnotify, it should have been
			 * called via callback if necessary
			 */
			return NULL;
		}

		nmatch = in6_pcbnotify(&tcbtable, sa, th.th_dport,
		    (const struct sockaddr *)sa6_src, th.th_sport, cmd, NULL, notify);
		if (nmatch == 0 && syn_cache_count &&
		    (inet6ctlerrmap[cmd] == EHOSTUNREACH ||
		     inet6ctlerrmap[cmd] == ENETUNREACH ||
		     inet6ctlerrmap[cmd] == EHOSTDOWN))
			syn_cache_unreach((const struct sockaddr *)sa6_src,
			    sa, &th);
	} else {
		(void) in6_pcbnotify(&tcbtable, sa, 0,
		    (const struct sockaddr *)sa6_src, 0, cmd, NULL, notify);
	}

	return NULL;
}
#endif

#ifdef INET
/* assumes that ip header and tcp header are contiguous on mbuf */
void *
tcp_ctlinput(int cmd, const struct sockaddr *sa, void *v)
{
	struct ip *ip = v;
	struct tcphdr *th;
	struct icmp *icp;
	extern const int inetctlerrmap[];
	void (*notify)(struct inpcb *, int) = tcp_notify;
	int errno;
	int nmatch;
	struct tcpcb *tp;
	u_int mtu;
	tcp_seq seq;
	struct inpcb *inp;
#ifdef INET6
	struct in6pcb *in6p;
	struct in6_addr src6, dst6;
#endif

	if (sa->sa_family != AF_INET ||
	    sa->sa_len != sizeof(struct sockaddr_in))
		return NULL;
	if ((unsigned)cmd >= PRC_NCMDS)
		return NULL;
	errno = inetctlerrmap[cmd];
	if (cmd == PRC_QUENCH)
		/*
		 * Don't honor ICMP Source Quench messages meant for
		 * TCP connections.
		 */
		return NULL;
	else if (PRC_IS_REDIRECT(cmd))
		notify = in_rtchange, ip = 0;
	else if (cmd == PRC_MSGSIZE && ip && ip->ip_v == 4) {
		/*
		 * Check to see if we have a valid TCP connection
		 * corresponding to the address in the ICMP message
		 * payload.
		 *
		 * Boundary check is made in icmp_input(), with ICMP_ADVLENMIN.
		 */
		th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
#ifdef INET6
		memset(&src6, 0, sizeof(src6));
		memset(&dst6, 0, sizeof(dst6));
		src6.s6_addr16[5] = dst6.s6_addr16[5] = 0xffff;
		memcpy(&src6.s6_addr32[3], &ip->ip_src, sizeof(struct in_addr));
		memcpy(&dst6.s6_addr32[3], &ip->ip_dst, sizeof(struct in_addr));
#endif
		if ((inp = in_pcblookup_connect(&tcbtable, ip->ip_dst,
		    th->th_dport, ip->ip_src, th->th_sport, 0)) != NULL)
#ifdef INET6
			in6p = NULL;
#else
			;
#endif
#ifdef INET6
		else if ((in6p = in6_pcblookup_connect(&tcbtable, &dst6,
		    th->th_dport, &src6, th->th_sport, 0, 0)) != NULL)
			;
#endif
		else
			return NULL;

		/*
		 * Now that we've validated that we are actually communicating
		 * with the host indicated in the ICMP message, locate the
		 * ICMP header, recalculate the new MTU, and create the
		 * corresponding routing entry.
		 */
		icp = (struct icmp *)((char *)ip -
		    offsetof(struct icmp, icmp_ip));
		if (inp) {
			if ((tp = intotcpcb(inp)) == NULL)
				return NULL;
		}
#ifdef INET6
		else if (in6p) {
			if ((tp = in6totcpcb(in6p)) == NULL)
				return NULL;
		}
#endif
		else
			return NULL;
		seq = ntohl(th->th_seq);
		if (SEQ_LT(seq, tp->snd_una) || SEQ_GT(seq, tp->snd_max))
			return NULL;
		/*
		 * If the ICMP message advertises a Next-Hop MTU
		 * equal or larger than the maximum packet size we have
		 * ever sent, drop the message.
		 */
		mtu = (u_int)ntohs(icp->icmp_nextmtu);
		if (mtu >= tp->t_pmtud_mtu_sent)
			return NULL;
		if (mtu >= tcp_hdrsz(tp) + tp->t_pmtud_mss_acked) {
			/*
			 * Calculate new MTU, and create corresponding
			 * route (traditional PMTUD).
			 */
			tp->t_flags &= ~TF_PMTUD_PEND;
			icmp_mtudisc(icp, ip->ip_dst);
		} else {
			/*
			 * Record the information got in the ICMP
			 * message; act on it later.
			 * If we had already recorded an ICMP message,
			 * replace the old one only if the new message
			 * refers to an older TCP segment
			 */
			if (tp->t_flags & TF_PMTUD_PEND) {
				if (SEQ_LT(tp->t_pmtud_th_seq, seq))
					return NULL;
			} else
				tp->t_flags |= TF_PMTUD_PEND;
			tp->t_pmtud_th_seq = seq;
			tp->t_pmtud_nextmtu = icp->icmp_nextmtu;
			tp->t_pmtud_ip_len = icp->icmp_ip.ip_len;
			tp->t_pmtud_ip_hl = icp->icmp_ip.ip_hl;
		}
		return NULL;
	} else if (cmd == PRC_HOSTDEAD)
		ip = 0;
	else if (errno == 0)
		return NULL;
	if (ip && ip->ip_v == 4 && sa->sa_family == AF_INET) {
		th = (struct tcphdr *)((char *)ip + (ip->ip_hl << 2));
		nmatch = in_pcbnotify(&tcbtable, satocsin(sa)->sin_addr,
		    th->th_dport, ip->ip_src, th->th_sport, errno, notify);
		if (nmatch == 0 && syn_cache_count &&
		    (inetctlerrmap[cmd] == EHOSTUNREACH ||
		    inetctlerrmap[cmd] == ENETUNREACH ||
		    inetctlerrmap[cmd] == EHOSTDOWN)) {
			struct sockaddr_in sin;
			memset(&sin, 0, sizeof(sin));
			sin.sin_len = sizeof(sin);
			sin.sin_family = AF_INET;
			sin.sin_port = th->th_sport;
			sin.sin_addr = ip->ip_src;
			syn_cache_unreach((struct sockaddr *)&sin, sa, th);
		}

		/* XXX mapped address case */
	} else
		in_pcbnotifyall(&tcbtable, satocsin(sa)->sin_addr, errno,
		    notify);
	return NULL;
}

/*
 * When a source quench is received, we are being notified of congestion.
 * Close the congestion window down to the Loss Window (one segment).
 * We will gradually open it again as we proceed.
 */
void
tcp_quench(struct inpcb *inp, int errno)
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp) {
		tp->snd_cwnd = tp->t_segsz;
		tp->t_bytes_acked = 0;
	}
}
#endif

#ifdef INET6
void
tcp6_quench(struct in6pcb *in6p, int errno)
{
	struct tcpcb *tp = in6totcpcb(in6p);

	if (tp) {
		tp->snd_cwnd = tp->t_segsz;
		tp->t_bytes_acked = 0;
	}
}
#endif

#ifdef INET
/*
 * Path MTU Discovery handlers.
 */
void
tcp_mtudisc_callback(struct in_addr faddr)
{
#ifdef INET6
	struct in6_addr in6;
#endif

	in_pcbnotifyall(&tcbtable, faddr, EMSGSIZE, tcp_mtudisc);
#ifdef INET6
	memset(&in6, 0, sizeof(in6));
	in6.s6_addr16[5] = 0xffff;
	memcpy(&in6.s6_addr32[3], &faddr, sizeof(struct in_addr));
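	/*
	 * in6 now holds the IPv4-mapped form ::ffff:faddr, so connections
	 * using v4-mapped IPv6 sockets to the same peer are notified too.
	 */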
	tcp6_mtudisc_callback(&in6);
#endif
}

/*
 * On receipt of path MTU corrections, flush old route and replace it
 * with the new one.  Retransmit all unacknowledged packets, to ensure
 * that all packets will be received.
 */
void
tcp_mtudisc(struct inpcb *inp, int errno)
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt = in_pcbrtentry(inp);

	if (tp != 0) {
		if (rt != 0) {
			/*
			 * If this was not a host route, remove and realloc.
			 */
			if ((rt->rt_flags & RTF_HOST) == 0) {
				in_rtchange(inp, errno);
				if ((rt = in_pcbrtentry(inp)) == 0)
					return;
			}

			/*
			 * Slow start out of the error condition.  We
			 * use the MTU because we know it's smaller
			 * than the previously transmitted segment.
			 *
			 * Note: This is more conservative than the
			 * suggestion in draft-floyd-incr-init-win-03.
			 */
			if (rt->rt_rmx.rmx_mtu != 0)
				tp->snd_cwnd =
				    TCP_INITIAL_WINDOW(tcp_init_win,
				    rt->rt_rmx.rmx_mtu);
		}

		/*
		 * Resend unacknowledged packets.
		 */
		tp->snd_nxt = tp->sack_newdata = tp->snd_una;
		tcp_output(tp);
	}
}
#endif

#ifdef INET6
/*
 * Path MTU Discovery handlers.
 */
void
tcp6_mtudisc_callback(struct in6_addr *faddr)
{
	struct sockaddr_in6 sin6;

	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_addr = *faddr;
	(void) in6_pcbnotify(&tcbtable, (struct sockaddr *)&sin6, 0,
	    (const struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp6_mtudisc);
}

void
tcp6_mtudisc(struct in6pcb *in6p, int errno)
{
	struct tcpcb *tp = in6totcpcb(in6p);
	struct rtentry *rt = in6_pcbrtentry(in6p);

	if (tp != 0) {
		if (rt != 0) {
			/*
			 * If this was not a host route, remove and realloc.
			 */
			if ((rt->rt_flags & RTF_HOST) == 0) {
				in6_rtchange(in6p, errno);
				if ((rt = in6_pcbrtentry(in6p)) == 0)
					return;
			}

			/*
			 * Slow start out of the error condition.  We
			 * use the MTU because we know it's smaller
			 * than the previously transmitted segment.
			 *
			 * Note: This is more conservative than the
			 * suggestion in draft-floyd-incr-init-win-03.
			 */
			if (rt->rt_rmx.rmx_mtu != 0)
				tp->snd_cwnd =
				    TCP_INITIAL_WINDOW(tcp_init_win,
				    rt->rt_rmx.rmx_mtu);
		}

		/*
		 * Resend unacknowledged packets.
		 */
		tp->snd_nxt = tp->sack_newdata = tp->snd_una;
		tcp_output(tp);
	}
}
#endif /* INET6 */

/*
 * Compute the MSS to advertise to the peer.  Called only during
 * the 3-way handshake.  If we are the server (peer initiated
 * connection), we are called with a pointer to the interface
 * on which the SYN packet arrived.  If we are the client (we
 * initiated connection), we are called with a pointer to the
 * interface out which this connection should go.
 *
 * NOTE: Do not subtract IP option/extension header size nor IPsec
 * header size from MSS advertisement.  MSS option must hold the maximum
 * segment size we can accept, so it must always be:
 *	max(if mtu) - ip header - tcp header
 */
u_long
tcp_mss_to_advertise(const struct ifnet *ifp, int af)
{
	extern u_long in_maxmtu;
	u_long mss = 0;
	u_long hdrsiz;

	/*
	 * In order to avoid defeating path MTU discovery on the peer,
	 * we advertise the max MTU of all attached networks as our MSS,
	 * per RFC 1191, section 3.1.
	 *
	 * We provide the option to advertise just the MTU of
	 * the interface on which we hope this connection will
	 * be receiving.  If we are responding to a SYN, we
	 * will have a pretty good idea about this, but when
	 * initiating a connection there is a bit more doubt.
	 *
	 * We also need to ensure that loopback has a large enough
	 * MSS, as the loopback MTU is never included in in_maxmtu.
	 */

	if (ifp != NULL)
		switch (af) {
		case AF_INET:
			mss = ifp->if_mtu;
			break;
#ifdef INET6
		case AF_INET6:
			mss = IN6_LINKMTU(ifp);
			break;
#endif
		}

	if (tcp_mss_ifmtu == 0)
		switch (af) {
		case AF_INET:
			mss = max(in_maxmtu, mss);
			break;
#ifdef INET6
		case AF_INET6:
			mss = max(in6_maxmtu, mss);
			break;
#endif
		}

	switch (af) {
	case AF_INET:
		hdrsiz = sizeof(struct ip);
		break;
#ifdef INET6
	case AF_INET6:
		hdrsiz = sizeof(struct ip6_hdr);
		break;
#endif
	default:
		hdrsiz = 0;
		break;
	}
	hdrsiz += sizeof(struct tcphdr);
	if (mss > hdrsiz)
		mss -= hdrsiz;
1932
1933 mss = max(tcp_mssdflt, mss);
1934 return (mss);
1935 }
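/*
 * For illustration, with a standard 1500-byte Ethernet MTU and no larger
 * attached network, the value advertised above works out to
 *
 *	AF_INET:  1500 - sizeof(struct ip)      - sizeof(struct tcphdr)
 *		= 1500 - 20 - 20 = 1460
 *	AF_INET6: 1500 - sizeof(struct ip6_hdr) - sizeof(struct tcphdr)
 *		= 1500 - 40 - 20 = 1440
 *
 * and never drops below tcp_mssdflt.  As the NOTE above explains, IP
 * options, extension headers and IPsec overhead are deliberately left in;
 * the advertisement must describe the largest segment we can accept.
 */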
1936
1937 /*
1938 * Set connection variables based on the peer's advertised MSS.
1939 * We are passed the TCPCB for the actual connection. If we
1940 * are the server, we are called by the compressed state engine
1941 * when the 3-way handshake is complete. If we are the client,
1942 * we are called when we receive the SYN,ACK from the server.
1943 *
1944 * NOTE: Our advertised MSS value must be initialized in the TCPCB
1945 * before this routine is called!
1946 */
1947 void
1948 tcp_mss_from_peer(struct tcpcb *tp, int offer)
1949 {
1950 struct socket *so;
1951 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1952 struct rtentry *rt;
1953 #endif
1954 u_long bufsize;
1955 int mss;
1956
1957 #ifdef DIAGNOSTIC
1958 if (tp->t_inpcb && tp->t_in6pcb)
1959 panic("tcp_mss_from_peer: both t_inpcb and t_in6pcb are set");
1960 #endif
1961 so = NULL;
1962 rt = NULL;
1963 #ifdef INET
1964 if (tp->t_inpcb) {
1965 so = tp->t_inpcb->inp_socket;
1966 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1967 rt = in_pcbrtentry(tp->t_inpcb);
1968 #endif
1969 }
1970 #endif
1971 #ifdef INET6
1972 if (tp->t_in6pcb) {
1973 so = tp->t_in6pcb->in6p_socket;
1974 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1975 rt = in6_pcbrtentry(tp->t_in6pcb);
1976 #endif
1977 }
1978 #endif
1979
1980 /*
1981 * As per RFC1122, use the default MSS value, unless they
1982 * sent us an offer. Do not accept offers less than 256 bytes.
1983 */
1984 mss = tcp_mssdflt;
1985 if (offer)
1986 mss = offer;
1987 mss = max(mss, 256); /* sanity */
1988 tp->t_peermss = mss;
1989 mss -= tcp_optlen(tp);
1990 #ifdef INET
1991 if (tp->t_inpcb)
1992 mss -= ip_optlen(tp->t_inpcb);
1993 #endif
1994 #ifdef INET6
1995 if (tp->t_in6pcb)
1996 mss -= ip6_optlen(tp->t_in6pcb);
1997 #endif
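	/*
	 * For example, an offer of 1460 with RFC 1323 timestamps agreed on
	 * both sides leaves, at this point,
	 *
	 *	mss = 1460 - TCPOLEN_TSTAMP_APPA (12) = 1448
	 *
	 * (assuming no IP options or extension headers), while tp->t_peermss
	 * keeps the raw offer of 1460.
	 */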
1998
1999 /*
2000 * If there's a pipesize, change the socket buffer to that size.
2001 * Make the socket buffer an integral number of MSS units. If
2002 * the MSS is larger than the socket buffer, artificially decrease
2003 * the MSS.
2004 */
2005 #ifdef RTV_SPIPE
2006 if (rt != NULL && rt->rt_rmx.rmx_sendpipe != 0)
2007 bufsize = rt->rt_rmx.rmx_sendpipe;
2008 else
2009 #endif
2010 {
2011 KASSERT(so != NULL);
2012 bufsize = so->so_snd.sb_hiwat;
2013 }
2014 if (bufsize < mss)
2015 mss = bufsize;
2016 else {
2017 bufsize = roundup(bufsize, mss);
2018 if (bufsize > sb_max)
2019 bufsize = sb_max;
2020 (void) sbreserve(&so->so_snd, bufsize, so);
2021 }
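	/*
	 * For example, with mss = 1448 and a 32768-byte send buffer the
	 * buffer is rounded up to a whole number of segments,
	 *
	 *	roundup(32768, 1448) = 23 * 1448 = 33304
	 *
	 * capped at sb_max; conversely, a send buffer smaller than the MSS
	 * pulls t_segsz down to the buffer size.
	 */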
2022 tp->t_segsz = mss;
2023
2024 #ifdef RTV_SSTHRESH
2025 if (rt != NULL && rt->rt_rmx.rmx_ssthresh) {
2026 /*
2027 * There's some sort of gateway or interface buffer
2028 * limit on the path. Use this to set the slow
2029 * start threshold, but set the threshold to no less
2030 * than 2 * MSS.
2031 */
2032 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
2033 }
2034 #endif
2035 }
2036
2037 /*
2038 * Processing necessary when a TCP connection is established.
2039 */
2040 void
2041 tcp_established(struct tcpcb *tp)
2042 {
2043 struct socket *so;
2044 #ifdef RTV_RPIPE
2045 struct rtentry *rt;
2046 #endif
2047 u_long bufsize;
2048
2049 #ifdef DIAGNOSTIC
2050 if (tp->t_inpcb && tp->t_in6pcb)
2051 panic("tcp_established: both t_inpcb and t_in6pcb are set");
2052 #endif
2053 so = NULL;
2054 rt = NULL;
2055 #ifdef INET
2056 /* This is a while() to reduce the dreadful stairstepping below */
2057 while (tp->t_inpcb) {
2058 so = tp->t_inpcb->inp_socket;
2059 #if defined(RTV_RPIPE)
2060 rt = in_pcbrtentry(tp->t_inpcb);
2061 #endif
2062 if (__predict_true(tcp_msl_enable)) {
2063 if (tp->t_inpcb->inp_laddr.s_addr == INADDR_LOOPBACK) {
2064 tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
2065 break;
2066 }
2067
2068 if (__predict_false(tcp_rttlocal)) {
2069 /* This may be adjusted by tcp_input */
2070 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2071 break;
2072 }
2073 if (in_localaddr(tp->t_inpcb->inp_faddr)) {
2074 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2075 break;
2076 }
2077 }
2078 tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
2079 break;
2080 }
2081 #endif
2082 #ifdef INET6
2083 /* The !tp->t_inpcb lets the compiler know it can't be v4 *and* v6 */
2084 while (!tp->t_inpcb && tp->t_in6pcb) {
2085 so = tp->t_in6pcb->in6p_socket;
2086 #if defined(RTV_RPIPE)
2087 rt = in6_pcbrtentry(tp->t_in6pcb);
2088 #endif
2089 if (__predict_true(tcp_msl_enable)) {
2090 extern const struct in6_addr in6addr_loopback;
2091
2092 if (IN6_ARE_ADDR_EQUAL(&tp->t_in6pcb->in6p_laddr,
2093 &in6addr_loopback)) {
2094 tp->t_msl = tcp_msl_loop ? tcp_msl_loop : (TCPTV_MSL >> 2);
2095 break;
2096 }
2097
2098 if (__predict_false(tcp_rttlocal)) {
2099 /* This may be adjusted by tcp_input */
2100 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2101 break;
2102 }
2103 if (in6_localaddr(&tp->t_in6pcb->in6p_faddr)) {
2104 tp->t_msl = tcp_msl_local ? tcp_msl_local : (TCPTV_MSL >> 1);
2105 break;
2106 }
2107 }
2108 tp->t_msl = tcp_msl_remote ? tcp_msl_remote : TCPTV_MSL;
2109 break;
2110 }
2111 #endif
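	/*
	 * Summary of the MSL chosen above (each default overridable by the
	 * corresponding tcp_msl_* sysctl):
	 *
	 *	loopback peer			tcp_msl_loop   or TCPTV_MSL / 4
	 *	local peer / tcp_rttlocal	tcp_msl_local  or TCPTV_MSL / 2
	 *	everything else			tcp_msl_remote or TCPTV_MSL
	 *
	 * With tcp_msl_enable off, every connection gets the "remote" value.
	 * When tcp_rttlocal is set, tcp_input may adjust t_msl later, as
	 * noted above.
	 */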
2112
2113 tp->t_state = TCPS_ESTABLISHED;
2114 TCP_TIMER_ARM(tp, TCPT_KEEP, tp->t_keepidle);
2115
2116 #ifdef RTV_RPIPE
2117 if (rt != NULL && rt->rt_rmx.rmx_recvpipe != 0)
2118 bufsize = rt->rt_rmx.rmx_recvpipe;
2119 else
2120 #endif
2121 {
2122 KASSERT(so != NULL);
2123 bufsize = so->so_rcv.sb_hiwat;
2124 }
2125 if (bufsize > tp->t_ourmss) {
2126 bufsize = roundup(bufsize, tp->t_ourmss);
2127 if (bufsize > sb_max)
2128 bufsize = sb_max;
2129 (void) sbreserve(&so->so_rcv, bufsize, so);
2130 }
2131 }
2132
2133 /*
2134 * Check if there's an initial rtt or rttvar. Convert from the
2135 * route-table units to scaled multiples of the slow timeout timer.
2136 * Called only during the 3-way handshake.
2137 */
2138 void
2139 tcp_rmx_rtt(struct tcpcb *tp)
2140 {
2141 #ifdef RTV_RTT
2142 struct rtentry *rt = NULL;
2143 int rtt;
2144
2145 #ifdef DIAGNOSTIC
2146 if (tp->t_inpcb && tp->t_in6pcb)
2147 panic("tcp_rmx_rtt: both t_inpcb and t_in6pcb are set");
2148 #endif
2149 #ifdef INET
2150 if (tp->t_inpcb)
2151 rt = in_pcbrtentry(tp->t_inpcb);
2152 #endif
2153 #ifdef INET6
2154 if (tp->t_in6pcb)
2155 rt = in6_pcbrtentry(tp->t_in6pcb);
2156 #endif
2157 if (rt == NULL)
2158 return;
2159
2160 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
2161 /*
2162 		 * XXX The lock bit for RTT indicates that the value
2163 * is also a minimum value; this is subject to time.
2164 */
2165 if (rt->rt_rmx.rmx_locks & RTV_RTT)
2166 TCPT_RANGESET(tp->t_rttmin,
2167 rtt / (RTM_RTTUNIT / PR_SLOWHZ),
2168 TCPTV_MIN, TCPTV_REXMTMAX);
2169 tp->t_srtt = rtt /
2170 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
2171 if (rt->rt_rmx.rmx_rttvar) {
2172 tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
2173 ((RTM_RTTUNIT / PR_SLOWHZ) >>
2174 (TCP_RTTVAR_SHIFT + 2));
2175 } else {
2176 /* Default variation is +- 1 rtt */
2177 tp->t_rttvar =
2178 tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
2179 }
2180 TCPT_RANGESET(tp->t_rxtcur,
2181 ((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
2182 tp->t_rttmin, TCPTV_REXMTMAX);
2183 }
2184 #endif
2185 }
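/*
 * Units, for reference: rmx_rtt and rmx_rttvar are stored as RTM_RTTUNIT
 * fractions of a second (conventionally RTM_RTTUNIT is 1000000, i.e.
 * microseconds), while t_srtt and t_rttvar are fixed-point counts of
 * PR_SLOWHZ ticks, hence the conversions above:
 *
 *	t_srtt   = rmx_rtt    / ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2))
 *	t_rttvar = rmx_rttvar / ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTTVAR_SHIFT + 2))
 *
 * with the variance defaulting to +/- one RTT when the route supplies none.
 */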
2186
2187 tcp_seq tcp_iss_seq = 0; /* tcp initial seq # */
2188 u_int8_t tcp_iss_secret[16]; /* 128 bits; should be plenty */
2189
2190 /*
2191 * Get a new sequence value given a tcp control block
2192 */
2193 tcp_seq
2194 tcp_new_iss(struct tcpcb *tp, tcp_seq addin)
2195 {
2196
2197 #ifdef INET
2198 if (tp->t_inpcb != NULL) {
2199 return (tcp_new_iss1(&tp->t_inpcb->inp_laddr,
2200 &tp->t_inpcb->inp_faddr, tp->t_inpcb->inp_lport,
2201 tp->t_inpcb->inp_fport, sizeof(tp->t_inpcb->inp_laddr),
2202 addin));
2203 }
2204 #endif
2205 #ifdef INET6
2206 if (tp->t_in6pcb != NULL) {
2207 return (tcp_new_iss1(&tp->t_in6pcb->in6p_laddr,
2208 &tp->t_in6pcb->in6p_faddr, tp->t_in6pcb->in6p_lport,
2209 tp->t_in6pcb->in6p_fport, sizeof(tp->t_in6pcb->in6p_laddr),
2210 addin));
2211 }
2212 #endif
2213 /* Not possible. */
2214 panic("tcp_new_iss");
2215 }
2216
2217 /*
2218 * This routine actually generates a new TCP initial sequence number.
2219 */
2220 tcp_seq
2221 tcp_new_iss1(void *laddr, void *faddr, u_int16_t lport, u_int16_t fport,
2222 size_t addrsz, tcp_seq addin)
2223 {
2224 tcp_seq tcp_iss;
2225
2226 static bool tcp_iss_gotten_secret;
2227
2228 /*
2229 * If we haven't been here before, initialize our cryptographic
2230 * hash secret.
2231 */
2232 if (tcp_iss_gotten_secret == false) {
2233 cprng_strong(kern_cprng,
2234 tcp_iss_secret, sizeof(tcp_iss_secret), FASYNC);
2235 tcp_iss_gotten_secret = true;
2236 }
2237
2238 if (tcp_do_rfc1948) {
2239 MD5_CTX ctx;
2240 u_int8_t hash[16]; /* XXX MD5 knowledge */
2241
2242 /*
2243 * Compute the base value of the ISS. It is a hash
2244 * of (saddr, sport, daddr, dport, secret).
2245 */
2246 MD5Init(&ctx);
2247
2248 MD5Update(&ctx, (u_char *) laddr, addrsz);
2249 MD5Update(&ctx, (u_char *) &lport, sizeof(lport));
2250
2251 MD5Update(&ctx, (u_char *) faddr, addrsz);
2252 MD5Update(&ctx, (u_char *) &fport, sizeof(fport));
2253
2254 MD5Update(&ctx, tcp_iss_secret, sizeof(tcp_iss_secret));
2255
2256 MD5Final(hash, &ctx);
2257
2258 memcpy(&tcp_iss, hash, sizeof(tcp_iss));
2259
2260 /*
2261 * Now increment our "timer", and add it in to
2262 * the computed value.
2263 *
2264 * XXX Use `addin'?
2265 * XXX TCP_ISSINCR too large to use?
2266 */
2267 tcp_iss_seq += TCP_ISSINCR;
2268 #ifdef TCPISS_DEBUG
2269 printf("ISS hash 0x%08x, ", tcp_iss);
2270 #endif
2271 tcp_iss += tcp_iss_seq + addin;
2272 #ifdef TCPISS_DEBUG
2273 printf("new ISS 0x%08x\n", tcp_iss);
2274 #endif
2275 } else {
2276 /*
2277 * Randomize.
2278 */
2279 tcp_iss = cprng_fast32();
2280
2281 /*
2282 * If we were asked to add some amount to a known value,
2283 * we will take a random value obtained above, mask off
2284 * the upper bits, and add in the known value. We also
2285 * add in a constant to ensure that we are at least a
2286 * certain distance from the original value.
2287 *
2288 * This is used when an old connection is in timed wait
2289 * and we have a new one coming in, for instance.
2290 */
2291 if (addin != 0) {
2292 #ifdef TCPISS_DEBUG
2293 printf("Random %08x, ", tcp_iss);
2294 #endif
2295 tcp_iss &= TCP_ISS_RANDOM_MASK;
2296 tcp_iss += addin + TCP_ISSINCR;
2297 #ifdef TCPISS_DEBUG
2298 printf("Old ISS %08x, ISS %08x\n", addin, tcp_iss);
2299 #endif
2300 } else {
2301 tcp_iss &= TCP_ISS_RANDOM_MASK;
2302 tcp_iss += tcp_iss_seq;
2303 tcp_iss_seq += TCP_ISSINCR;
2304 #ifdef TCPISS_DEBUG
2305 printf("ISS %08x\n", tcp_iss);
2306 #endif
2307 }
2308 }
2309
2310 if (tcp_compat_42) {
2311 /*
2312 * Limit it to the positive range for really old TCP
2313 * implementations.
2314 		 * Just AND off the top bit instead of checking if it
2315 		 * is set first - saves a branch 50% of the time.
2316 */
2317 tcp_iss &= 0x7fffffff; /* XXX */
2318 }
2319
2320 return (tcp_iss);
2321 }
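/*
 * Put differently: with tcp_do_rfc1948 the ISS follows the RFC 1948 scheme
 *
 *	ISS = MD5(laddr, lport, faddr, fport, secret) + M
 *
 * where M is the steadily advancing tcp_iss_seq (plus `addin' when an old
 * connection's 4-tuple is being reused), so initial sequence numbers are
 * unpredictable to third parties yet still monotonic for a given address
 * and port pair.  Without RFC 1948, the ISS is simply a random value,
 * offset from `addin' by at least TCP_ISSINCR when one is supplied.
 */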
2322
2323 #if defined(IPSEC)
2324 /* compute ESP/AH header size for TCP, including outer IP header. */
2325 size_t
2326 ipsec4_hdrsiz_tcp(struct tcpcb *tp)
2327 {
2328 struct inpcb *inp;
2329 size_t hdrsiz;
2330
2331 /* XXX mapped addr case (tp->t_in6pcb) */
2332 if (!tp || !tp->t_template || !(inp = tp->t_inpcb))
2333 return 0;
2334 switch (tp->t_family) {
2335 case AF_INET:
2336 		/* XXX: should use correct direction. */
2337 hdrsiz = ipsec4_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, inp);
2338 break;
2339 default:
2340 hdrsiz = 0;
2341 break;
2342 }
2343
2344 return hdrsiz;
2345 }
2346
2347 #ifdef INET6
2348 size_t
2349 ipsec6_hdrsiz_tcp(struct tcpcb *tp)
2350 {
2351 struct in6pcb *in6p;
2352 size_t hdrsiz;
2353
2354 if (!tp || !tp->t_template || !(in6p = tp->t_in6pcb))
2355 return 0;
2356 switch (tp->t_family) {
2357 case AF_INET6:
2358 		/* XXX: should use correct direction. */
2359 hdrsiz = ipsec6_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, in6p);
2360 break;
2361 case AF_INET:
2362 /* mapped address case - tricky */
2363 default:
2364 hdrsiz = 0;
2365 break;
2366 }
2367
2368 return hdrsiz;
2369 }
2370 #endif
2371 #endif /*IPSEC*/
2372
2373 /*
2374 * Determine the length of the TCP options for this connection.
2375 *
2376 * XXX: What do we do for SACK, when we add that? Just reserve
2377 * all of the space? Otherwise we can't exactly be incrementing
2378 * cwnd by an amount that varies depending on the amount we last
2379 * had to SACK!
2380 */
2381
2382 u_int
2383 tcp_optlen(struct tcpcb *tp)
2384 {
2385 u_int optlen;
2386
2387 optlen = 0;
2388 if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
2389 (TF_REQ_TSTMP | TF_RCVD_TSTMP))
2390 optlen += TCPOLEN_TSTAMP_APPA;
2391
2392 #ifdef TCP_SIGNATURE
2393 if (tp->t_flags & TF_SIGNATURE)
2394 optlen += TCPOLEN_SIGNATURE + 2;
2395 #endif /* TCP_SIGNATURE */
2396
2397 return optlen;
2398 }
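/*
 * For example, a connection that negotiated RFC 1323 timestamps carries
 * TCPOLEN_TSTAMP_APPA (12) bytes of options in every segment; enabling
 * TCP-MD5 (TF_SIGNATURE) adds another TCPOLEN_SIGNATURE + 2 = 20 bytes,
 * for 32 bytes of options and a 52-byte TCP header in total.
 */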
2399
2400 u_int
2401 tcp_hdrsz(struct tcpcb *tp)
2402 {
2403 u_int hlen;
2404
2405 switch (tp->t_family) {
2406 #ifdef INET6
2407 case AF_INET6:
2408 hlen = sizeof(struct ip6_hdr);
2409 break;
2410 #endif
2411 case AF_INET:
2412 hlen = sizeof(struct ip);
2413 break;
2414 default:
2415 hlen = 0;
2416 break;
2417 }
2418 hlen += sizeof(struct tcphdr);
2419
2420 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2421 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
2422 hlen += TCPOLEN_TSTAMP_APPA;
2423 #ifdef TCP_SIGNATURE
2424 if (tp->t_flags & TF_SIGNATURE)
2425 hlen += TCPOLEN_SIGLEN;
2426 #endif
2427 return hlen;
2428 }
2429
2430 void
2431 tcp_statinc(u_int stat)
2432 {
2433
2434 KASSERT(stat < TCP_NSTATS);
2435 TCP_STATINC(stat);
2436 }
2437
2438 void
2439 tcp_statadd(u_int stat, uint64_t val)
2440 {
2441
2442 KASSERT(stat < TCP_NSTATS);
2443 TCP_STATADD(stat, val);
2444 }
2445