1 /*	$NetBSD: tcp_subr.c,v 1.127 2002/05/12 20:33:50 matt Exp $	*/
2
3 /*
4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*-
33 * Copyright (c) 1997, 1998, 2000, 2001 The NetBSD Foundation, Inc.
34 * All rights reserved.
35 *
36 * This code is derived from software contributed to The NetBSD Foundation
37 * by Jason R. Thorpe and Kevin M. Lahey of the Numerical Aerospace Simulation
38 * Facility, NASA Ames Research Center.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the NetBSD
51 * Foundation, Inc. and its contributors.
52 * 4. Neither the name of The NetBSD Foundation nor the names of its
53 * contributors may be used to endorse or promote products derived
54 * from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
57 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
58 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
59 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
60 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
61 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
62 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
63 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
64 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
65 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66 * POSSIBILITY OF SUCH DAMAGE.
67 */
68
69 /*
70 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
71 * The Regents of the University of California. All rights reserved.
72 *
73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions
75 * are met:
76 * 1. Redistributions of source code must retain the above copyright
77 * notice, this list of conditions and the following disclaimer.
78 * 2. Redistributions in binary form must reproduce the above copyright
79 * notice, this list of conditions and the following disclaimer in the
80 * documentation and/or other materials provided with the distribution.
81 * 3. All advertising materials mentioning features or use of this software
82 * must display the following acknowledgement:
83 * This product includes software developed by the University of
84 * California, Berkeley and its contributors.
85 * 4. Neither the name of the University nor the names of its contributors
86 * may be used to endorse or promote products derived from this software
87 * without specific prior written permission.
88 *
89 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
90 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
91 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
92 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
93 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
94 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
95 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
96 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
97 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
98 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
99 * SUCH DAMAGE.
100 *
101 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95
102 */
103
104 #include <sys/cdefs.h>
105 __KERNEL_RCSID(0, "$NetBSD: tcp_subr.c,v 1.127 2002/05/12 20:33:50 matt Exp $");
106
107 #include "opt_inet.h"
108 #include "opt_ipsec.h"
109 #include "opt_tcp_compat_42.h"
110 #include "opt_inet_csum.h"
111 #include "rnd.h"
112
113 #include <sys/param.h>
114 #include <sys/proc.h>
115 #include <sys/systm.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/socket.h>
119 #include <sys/socketvar.h>
120 #include <sys/protosw.h>
121 #include <sys/errno.h>
122 #include <sys/kernel.h>
123 #include <sys/pool.h>
124 #if NRND > 0
125 #include <sys/md5.h>
126 #include <sys/rnd.h>
127 #endif
128
129 #include <net/route.h>
130 #include <net/if.h>
131
132 #include <netinet/in.h>
133 #include <netinet/in_systm.h>
134 #include <netinet/ip.h>
135 #include <netinet/in_pcb.h>
136 #include <netinet/ip_var.h>
137 #include <netinet/ip_icmp.h>
138
139 #ifdef INET6
140 #ifndef INET
141 #include <netinet/in.h>
142 #endif
143 #include <netinet/ip6.h>
144 #include <netinet6/in6_pcb.h>
145 #include <netinet6/ip6_var.h>
146 #include <netinet6/in6_var.h>
147 #include <netinet6/ip6protosw.h>
148 #include <netinet/icmp6.h>
149 #endif
150
151 #include <netinet/tcp.h>
152 #include <netinet/tcp_fsm.h>
153 #include <netinet/tcp_seq.h>
154 #include <netinet/tcp_timer.h>
155 #include <netinet/tcp_var.h>
156 #include <netinet/tcpip.h>
157
158 #ifdef IPSEC
159 #include <netinet6/ipsec.h>
160 #endif /*IPSEC*/
161
162 #ifdef INET6
163 struct in6pcb tcb6;
164 #endif
165
166 struct inpcbtable tcbtable; /* head of queue of active tcpcb's */
167 struct tcpstat tcpstat; /* tcp statistics */
168 u_int32_t tcp_now; /* for RFC 1323 timestamps */
169
170 /* patchable/settable parameters for tcp */
171 int tcp_mssdflt = TCP_MSS;
172 int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
173 int tcp_do_rfc1323 = 1; /* window scaling / timestamps (obsolete) */
174 #if NRND > 0
175 int tcp_do_rfc1948 = 0; /* ISS by cryptographic hash */
176 #endif
177 int tcp_do_sack = 1; /* selective acknowledgement */
178 int tcp_do_win_scale = 1; /* RFC1323 window scaling */
179 int tcp_do_timestamps = 1; /* RFC1323 timestamps */
180 int tcp_do_newreno = 0; /* Use the New Reno algorithms */
181 int tcp_ack_on_push = 0; /* set to enable immediate ACK-on-PUSH */
182 int tcp_init_win = 1;
183 int tcp_mss_ifmtu = 0;
184 #ifdef TCP_COMPAT_42
185 int tcp_compat_42 = 1;
186 #else
187 int tcp_compat_42 = 0;
188 #endif
189 int tcp_rst_ppslim = 100; /* 100pps */
190
191 /* tcb hash */
192 #ifndef TCBHASHSIZE
193 #define TCBHASHSIZE 128
194 #endif
195 int tcbhashsize = TCBHASHSIZE;
196
197 /* syn hash parameters */
198 #define TCP_SYN_HASH_SIZE 293
199 #define TCP_SYN_BUCKET_SIZE 35
200 int tcp_syn_cache_size = TCP_SYN_HASH_SIZE;
201 int tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
202 int tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
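/*
 * With the defaults above this works out to 293 hash buckets, an
 * overall cache limit of 293 * 35 = 10255 entries, and a per-bucket
 * limit of 3 * 35 = 105 entries.
 */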
203 struct syn_cache_head tcp_syn_cache[TCP_SYN_HASH_SIZE];
204
205 int tcp_freeq __P((struct tcpcb *));
206
207 #ifdef INET
208 void tcp_mtudisc_callback __P((struct in_addr));
209 #endif
210 #ifdef INET6
211 void tcp6_mtudisc_callback __P((struct in6_addr *));
212 #endif
213
214 void tcp_mtudisc __P((struct inpcb *, int));
215 #ifdef INET6
216 void tcp6_mtudisc __P((struct in6pcb *, int));
217 #endif
218
219 struct pool tcpcb_pool;
220
221 #ifdef TCP_CSUM_COUNTERS
222 #include <sys/device.h>
223
224 struct evcnt tcp_hwcsum_bad = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
225 NULL, "tcp", "hwcsum bad");
226 struct evcnt tcp_hwcsum_ok = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
227 NULL, "tcp", "hwcsum ok");
228 struct evcnt tcp_hwcsum_data = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
229 NULL, "tcp", "hwcsum data");
230 struct evcnt tcp_swcsum = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
231 NULL, "tcp", "swcsum");
232 #endif /* TCP_CSUM_COUNTERS */
233
234 #ifdef TCP_OUTPUT_COUNTERS
235 #include <sys/device.h>
236
237 struct evcnt tcp_output_bigheader = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
238 NULL, "tcp", "output big header");
239 struct evcnt tcp_output_copysmall = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
240 NULL, "tcp", "output copy small");
241 struct evcnt tcp_output_copybig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
242 NULL, "tcp", "output copy big");
243 struct evcnt tcp_output_refbig = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
244 NULL, "tcp", "output reference big");
245 #endif /* TCP_OUTPUT_COUNTERS */
246
247 #ifdef TCP_REASS_COUNTERS
248 #include <sys/device.h>
249
250 struct evcnt tcp_reass_ = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
251 NULL, "tcp_reass", "calls");
252 struct evcnt tcp_reass_empty = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
253 &tcp_reass_, "tcp_reass", "insert into empty queue");
254 struct evcnt tcp_reass_iteration[8] = {
255 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", ">7 iterations"),
256 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "1 iteration"),
257 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "2 iterations"),
258 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "3 iterations"),
259 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "4 iterations"),
260 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "5 iterations"),
261 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "6 iterations"),
262 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &tcp_reass_, "tcp_reass", "7 iterations"),
263 };
264 struct evcnt tcp_reass_prependfirst = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
265 &tcp_reass_, "tcp_reass", "prepend to first");
266 struct evcnt tcp_reass_prepend = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
267 &tcp_reass_, "tcp_reass", "prepend");
268 struct evcnt tcp_reass_insert = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
269 &tcp_reass_, "tcp_reass", "insert");
270 struct evcnt tcp_reass_inserttail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
271 &tcp_reass_, "tcp_reass", "insert at tail");
272 struct evcnt tcp_reass_append = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
273 &tcp_reass_, "tcp_reass", "append");
274 struct evcnt tcp_reass_appendtail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
275 &tcp_reass_, "tcp_reass", "append to tail fragment");
276 struct evcnt tcp_reass_overlaptail = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
277 &tcp_reass_, "tcp_reass", "overlap at end");
278 struct evcnt tcp_reass_overlapfront = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
279 &tcp_reass_, "tcp_reass", "overlap at start");
280 struct evcnt tcp_reass_segdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
281 &tcp_reass_, "tcp_reass", "duplicate segment");
282 struct evcnt tcp_reass_fragdup = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
283 &tcp_reass_, "tcp_reass", "duplicate fragment");
284
285 #endif /* TCP_REASS_COUNTERS */
286
287 /*
288 * Tcp initialization
289 */
290 void
291 tcp_init()
292 {
293 int hlen;
294
295 pool_init(&tcpcb_pool, sizeof(struct tcpcb), 0, 0, 0, "tcpcbpl",
296 NULL);
297 in_pcbinit(&tcbtable, tcbhashsize, tcbhashsize);
298 #ifdef INET6
299 tcb6.in6p_next = tcb6.in6p_prev = &tcb6;
300 #endif
301
302 hlen = sizeof(struct ip) + sizeof(struct tcphdr);
303 #ifdef INET6
304 if (sizeof(struct ip) < sizeof(struct ip6_hdr))
305 hlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
306 #endif
307 if (max_protohdr < hlen)
308 max_protohdr = hlen;
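	/*
	 * A full TCP/IP header preceded by the largest link-level
	 * header must fit within a single packet header mbuf (MHLEN);
	 * the header-construction code depends on this, so there is
	 * no point continuing if that assumption fails.
	 */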
309 if (max_linkhdr + hlen > MHLEN)
310 panic("tcp_init");
311
312 #ifdef INET
313 icmp_mtudisc_callback_register(tcp_mtudisc_callback);
314 #endif
315 #ifdef INET6
316 icmp6_mtudisc_callback_register(tcp6_mtudisc_callback);
317 #endif
318
319 /* Initialize timer state. */
320 tcp_timer_init();
321
322 /* Initialize the compressed state engine. */
323 syn_cache_init();
324
325 #ifdef TCP_CSUM_COUNTERS
326 evcnt_attach_static(&tcp_hwcsum_bad);
327 evcnt_attach_static(&tcp_hwcsum_ok);
328 evcnt_attach_static(&tcp_hwcsum_data);
329 evcnt_attach_static(&tcp_swcsum);
330 #endif /* TCP_CSUM_COUNTERS */
331
332 #ifdef TCP_OUTPUT_COUNTERS
333 evcnt_attach_static(&tcp_output_bigheader);
334 evcnt_attach_static(&tcp_output_copysmall);
335 evcnt_attach_static(&tcp_output_copybig);
336 evcnt_attach_static(&tcp_output_refbig);
337 #endif /* TCP_OUTPUT_COUNTERS */
338
339 #ifdef TCP_REASS_COUNTERS
340 evcnt_attach_static(&tcp_reass_);
341 evcnt_attach_static(&tcp_reass_empty);
342 evcnt_attach_static(&tcp_reass_iteration[0]);
343 evcnt_attach_static(&tcp_reass_iteration[1]);
344 evcnt_attach_static(&tcp_reass_iteration[2]);
345 evcnt_attach_static(&tcp_reass_iteration[3]);
346 evcnt_attach_static(&tcp_reass_iteration[4]);
347 evcnt_attach_static(&tcp_reass_iteration[5]);
348 evcnt_attach_static(&tcp_reass_iteration[6]);
349 evcnt_attach_static(&tcp_reass_iteration[7]);
350 evcnt_attach_static(&tcp_reass_prependfirst);
351 evcnt_attach_static(&tcp_reass_prepend);
352 evcnt_attach_static(&tcp_reass_insert);
353 evcnt_attach_static(&tcp_reass_inserttail);
354 evcnt_attach_static(&tcp_reass_append);
355 evcnt_attach_static(&tcp_reass_appendtail);
356 evcnt_attach_static(&tcp_reass_overlaptail);
357 evcnt_attach_static(&tcp_reass_overlapfront);
358 evcnt_attach_static(&tcp_reass_segdup);
359 evcnt_attach_static(&tcp_reass_fragdup);
360 #endif /* TCP_REASS_COUNTERS */
361 }
362
363 /*
364 * Create template to be used to send tcp packets on a connection.
365 * Call after host entry created, allocates an mbuf and fills
366 * in a skeletal tcp/ip header, minimizing the amount of work
367 * necessary when the connection is used.
368 */
369 struct mbuf *
370 tcp_template(tp)
371 struct tcpcb *tp;
372 {
373 struct inpcb *inp = tp->t_inpcb;
374 #ifdef INET6
375 struct in6pcb *in6p = tp->t_in6pcb;
376 #endif
377 struct tcphdr *n;
378 struct mbuf *m;
379 int hlen;
380
381 switch (tp->t_family) {
382 case AF_INET:
383 hlen = sizeof(struct ip);
384 if (inp)
385 break;
386 #ifdef INET6
387 if (in6p) {
388 /* mapped addr case */
389 if (IN6_IS_ADDR_V4MAPPED(&in6p->in6p_laddr)
390 && IN6_IS_ADDR_V4MAPPED(&in6p->in6p_faddr))
391 break;
392 }
393 #endif
394 return NULL; /*EINVAL*/
395 #ifdef INET6
396 case AF_INET6:
397 hlen = sizeof(struct ip6_hdr);
398 if (in6p) {
399 			/* more sanity checks? */
400 break;
401 }
402 return NULL; /*EINVAL*/
403 #endif
404 default:
405 hlen = 0; /*pacify gcc*/
406 return NULL; /*EAFNOSUPPORT*/
407 }
408 #ifdef DIAGNOSTIC
409 if (hlen + sizeof(struct tcphdr) > MCLBYTES)
410 panic("mclbytes too small for t_template");
411 #endif
412 m = tp->t_template;
413 if (m && m->m_len == hlen + sizeof(struct tcphdr))
414 ;
415 else {
416 if (m)
417 m_freem(m);
418 m = tp->t_template = NULL;
419 MGETHDR(m, M_DONTWAIT, MT_HEADER);
420 if (m && hlen + sizeof(struct tcphdr) > MHLEN) {
421 MCLGET(m, M_DONTWAIT);
422 if ((m->m_flags & M_EXT) == 0) {
423 m_free(m);
424 m = NULL;
425 }
426 }
427 if (m == NULL)
428 return NULL;
429 m->m_pkthdr.len = m->m_len = hlen + sizeof(struct tcphdr);
430 }
431
432 bzero(mtod(m, caddr_t), m->m_len);
433
434 n = (struct tcphdr *)(mtod(m, caddr_t) + hlen);
435
436 switch (tp->t_family) {
437 case AF_INET:
438 {
439 struct ipovly *ipov;
440 mtod(m, struct ip *)->ip_v = 4;
441 ipov = mtod(m, struct ipovly *);
442 ipov->ih_pr = IPPROTO_TCP;
443 ipov->ih_len = htons(sizeof(struct tcphdr));
444 if (inp) {
445 ipov->ih_src = inp->inp_laddr;
446 ipov->ih_dst = inp->inp_faddr;
447 }
448 #ifdef INET6
449 else if (in6p) {
450 /* mapped addr case */
451 bcopy(&in6p->in6p_laddr.s6_addr32[3], &ipov->ih_src,
452 sizeof(ipov->ih_src));
453 bcopy(&in6p->in6p_faddr.s6_addr32[3], &ipov->ih_dst,
454 sizeof(ipov->ih_dst));
455 }
456 #endif
457 /*
458 * Compute the pseudo-header portion of the checksum
459 * now. We incrementally add in the TCP option and
460 * payload lengths later, and then compute the TCP
461 * checksum right before the packet is sent off onto
462 * the wire.
463 */
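		/*
		 * The pseudo-header's 16-bit protocol and length words
		 * are simply summed by the one's-complement checksum,
		 * which is why they can be folded into the single
		 * length+protocol argument below.
		 */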
464 n->th_sum = in_cksum_phdr(ipov->ih_src.s_addr,
465 ipov->ih_dst.s_addr,
466 htons(sizeof(struct tcphdr) + IPPROTO_TCP));
467 break;
468 }
469 #ifdef INET6
470 case AF_INET6:
471 {
472 struct ip6_hdr *ip6;
473 mtod(m, struct ip *)->ip_v = 6;
474 ip6 = mtod(m, struct ip6_hdr *);
475 ip6->ip6_nxt = IPPROTO_TCP;
476 ip6->ip6_plen = htons(sizeof(struct tcphdr));
477 ip6->ip6_src = in6p->in6p_laddr;
478 ip6->ip6_dst = in6p->in6p_faddr;
479 ip6->ip6_flow = in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK;
480 if (ip6_auto_flowlabel) {
481 ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
482 ip6->ip6_flow |=
483 (htonl(ip6_flow_seq++) & IPV6_FLOWLABEL_MASK);
484 }
485 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
486 ip6->ip6_vfc |= IPV6_VERSION;
487
488 /*
489 * Compute the pseudo-header portion of the checksum
490 * now. We incrementally add in the TCP option and
491 * payload lengths later, and then compute the TCP
492 * checksum right before the packet is sent off onto
493 * the wire.
494 */
495 n->th_sum = in6_cksum_phdr(&in6p->in6p_laddr,
496 &in6p->in6p_faddr, htonl(sizeof(struct tcphdr)),
497 htonl(IPPROTO_TCP));
498 break;
499 }
500 #endif
501 }
502 if (inp) {
503 n->th_sport = inp->inp_lport;
504 n->th_dport = inp->inp_fport;
505 }
506 #ifdef INET6
507 else if (in6p) {
508 n->th_sport = in6p->in6p_lport;
509 n->th_dport = in6p->in6p_fport;
510 }
511 #endif
512 n->th_seq = 0;
513 n->th_ack = 0;
514 n->th_x2 = 0;
515 n->th_off = 5;
516 n->th_flags = 0;
517 n->th_win = 0;
518 n->th_urp = 0;
519 return (m);
520 }
521
522 /*
523 * Send a single message to the TCP at address specified by
524 * the given TCP/IP header. If m == 0, then we make a copy
525 * of the tcpiphdr at ti and send directly to the addressed host.
526 * This is used to force keep alive messages out using the TCP
527 * template for a connection tp->t_template. If flags are given
528 * then we send a message back to the TCP which originated the
529 * segment ti, and discard the mbuf containing it and any other
530 * attached mbufs.
531 *
532 * In any case the ack and sequence number of the transmitted
533 * segment are as specified by the parameters.
534 */
535 int
536 tcp_respond(tp, template, m, th0, ack, seq, flags)
537 struct tcpcb *tp;
538 struct mbuf *template;
539 struct mbuf *m;
540 struct tcphdr *th0;
541 tcp_seq ack, seq;
542 int flags;
543 {
544 struct route *ro;
545 int error, tlen, win = 0;
546 int hlen;
547 struct ip *ip;
548 #ifdef INET6
549 struct ip6_hdr *ip6;
550 #endif
551 int family; /* family on packet, not inpcb/in6pcb! */
552 struct tcphdr *th;
553
554 if (tp != NULL && (flags & TH_RST) == 0) {
555 #ifdef DIAGNOSTIC
556 if (tp->t_inpcb && tp->t_in6pcb)
557 panic("tcp_respond: both t_inpcb and t_in6pcb are set");
558 #endif
559 #ifdef INET
560 if (tp->t_inpcb)
561 win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
562 #endif
563 #ifdef INET6
564 if (tp->t_in6pcb)
565 win = sbspace(&tp->t_in6pcb->in6p_socket->so_rcv);
566 #endif
567 }
568
569 ip = NULL;
570 #ifdef INET6
571 ip6 = NULL;
572 #endif
573 if (m == 0) {
574 if (!template)
575 return EINVAL;
576
577 /* get family information from template */
578 switch (mtod(template, struct ip *)->ip_v) {
579 case 4:
580 family = AF_INET;
581 hlen = sizeof(struct ip);
582 break;
583 #ifdef INET6
584 case 6:
585 family = AF_INET6;
586 hlen = sizeof(struct ip6_hdr);
587 break;
588 #endif
589 default:
590 return EAFNOSUPPORT;
591 }
592
593 MGETHDR(m, M_DONTWAIT, MT_HEADER);
594 if (m) {
595 MCLGET(m, M_DONTWAIT);
596 if ((m->m_flags & M_EXT) == 0) {
597 m_free(m);
598 m = NULL;
599 }
600 }
601 if (m == NULL)
602 return (ENOBUFS);
603
604 if (tcp_compat_42)
605 tlen = 1;
606 else
607 tlen = 0;
608
609 m->m_data += max_linkhdr;
610 bcopy(mtod(template, caddr_t), mtod(m, caddr_t),
611 template->m_len);
612 switch (family) {
613 case AF_INET:
614 ip = mtod(m, struct ip *);
615 th = (struct tcphdr *)(ip + 1);
616 break;
617 #ifdef INET6
618 case AF_INET6:
619 ip6 = mtod(m, struct ip6_hdr *);
620 th = (struct tcphdr *)(ip6 + 1);
621 break;
622 #endif
623 #if 0
624 default:
625 			/* no one will visit here */
626 m_freem(m);
627 return EAFNOSUPPORT;
628 #endif
629 }
630 flags = TH_ACK;
631 } else {
632
633 if ((m->m_flags & M_PKTHDR) == 0) {
634 #if 0
635 printf("non PKTHDR to tcp_respond\n");
636 #endif
637 m_freem(m);
638 return EINVAL;
639 }
640 #ifdef DIAGNOSTIC
641 if (!th0)
642 panic("th0 == NULL in tcp_respond");
643 #endif
644
645 /* get family information from m */
646 switch (mtod(m, struct ip *)->ip_v) {
647 case 4:
648 family = AF_INET;
649 hlen = sizeof(struct ip);
650 ip = mtod(m, struct ip *);
651 break;
652 #ifdef INET6
653 case 6:
654 family = AF_INET6;
655 hlen = sizeof(struct ip6_hdr);
656 ip6 = mtod(m, struct ip6_hdr *);
657 break;
658 #endif
659 default:
660 m_freem(m);
661 return EAFNOSUPPORT;
662 }
663 if ((flags & TH_SYN) == 0 || sizeof(*th0) > (th0->th_off << 2))
664 tlen = sizeof(*th0);
665 else
666 tlen = th0->th_off << 2;
667
668 if (m->m_len > hlen + tlen && (m->m_flags & M_EXT) == 0 &&
669 mtod(m, caddr_t) + hlen == (caddr_t)th0) {
670 m->m_len = hlen + tlen;
671 m_freem(m->m_next);
672 m->m_next = NULL;
673 } else {
674 struct mbuf *n;
675
676 #ifdef DIAGNOSTIC
677 if (max_linkhdr + hlen + tlen > MCLBYTES) {
678 m_freem(m);
679 return EMSGSIZE;
680 }
681 #endif
682 MGETHDR(n, M_DONTWAIT, MT_HEADER);
683 if (n && max_linkhdr + hlen + tlen > MHLEN) {
684 MCLGET(n, M_DONTWAIT);
685 if ((n->m_flags & M_EXT) == 0) {
686 m_freem(n);
687 n = NULL;
688 }
689 }
690 if (!n) {
691 m_freem(m);
692 return ENOBUFS;
693 }
694
695 n->m_data += max_linkhdr;
696 n->m_len = hlen + tlen;
697 m_copyback(n, 0, hlen, mtod(m, caddr_t));
698 m_copyback(n, hlen, tlen, (caddr_t)th0);
699
700 m_freem(m);
701 m = n;
702 n = NULL;
703 }
704
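		/*
		 * Turn the received segment around: swap the IP
		 * addresses and TCP ports so the reply goes back to
		 * whoever sent the original segment.
		 */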
705 #define xchg(a,b,type) { type t; t=a; a=b; b=t; }
706 switch (family) {
707 case AF_INET:
708 ip = mtod(m, struct ip *);
709 th = (struct tcphdr *)(ip + 1);
710 ip->ip_p = IPPROTO_TCP;
711 xchg(ip->ip_dst, ip->ip_src, struct in_addr);
712 ip->ip_p = IPPROTO_TCP;
713 break;
714 #ifdef INET6
715 case AF_INET6:
716 ip6 = mtod(m, struct ip6_hdr *);
717 th = (struct tcphdr *)(ip6 + 1);
718 ip6->ip6_nxt = IPPROTO_TCP;
719 xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
720 ip6->ip6_nxt = IPPROTO_TCP;
721 break;
722 #endif
723 #if 0
724 default:
725 			/* no one will visit here */
726 m_freem(m);
727 return EAFNOSUPPORT;
728 #endif
729 }
730 xchg(th->th_dport, th->th_sport, u_int16_t);
731 #undef xchg
732 		tlen = 0;	/* recomputed by the common code below */
733 }
734 th->th_seq = htonl(seq);
735 th->th_ack = htonl(ack);
736 th->th_x2 = 0;
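	/*
	 * For non-SYN replies, advertise the receive window scaled
	 * down by the connection's window-scale factor and clamped to
	 * the 16 bits available in the header (TCP_MAXWIN).
	 */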
737 if ((flags & TH_SYN) == 0) {
738 if (tp)
739 win >>= tp->rcv_scale;
740 if (win > TCP_MAXWIN)
741 win = TCP_MAXWIN;
742 th->th_win = htons((u_int16_t)win);
743 th->th_off = sizeof (struct tcphdr) >> 2;
744 tlen += sizeof(*th);
745 } else
746 tlen += th->th_off << 2;
747 m->m_len = hlen + tlen;
748 m->m_pkthdr.len = hlen + tlen;
749 m->m_pkthdr.rcvif = (struct ifnet *) 0;
750 th->th_flags = flags;
751 th->th_urp = 0;
752
753 switch (family) {
754 #ifdef INET
755 case AF_INET:
756 {
757 struct ipovly *ipov = (struct ipovly *)ip;
758 bzero(ipov->ih_x1, sizeof ipov->ih_x1);
759 ipov->ih_len = htons((u_int16_t)tlen);
760
761 th->th_sum = 0;
762 th->th_sum = in_cksum(m, hlen + tlen);
763 ip->ip_len = hlen + tlen; /*will be flipped on output*/
764 ip->ip_ttl = ip_defttl;
765 break;
766 }
767 #endif
768 #ifdef INET6
769 case AF_INET6:
770 {
771 th->th_sum = 0;
772 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
773 tlen);
774 ip6->ip6_plen = ntohs(tlen);
775 if (tp && tp->t_in6pcb) {
776 struct ifnet *oifp;
777 ro = (struct route *)&tp->t_in6pcb->in6p_route;
778 oifp = ro->ro_rt ? ro->ro_rt->rt_ifp : NULL;
779 ip6->ip6_hlim = in6_selecthlim(tp->t_in6pcb, oifp);
780 } else
781 ip6->ip6_hlim = ip6_defhlim;
782 ip6->ip6_flow &= ~IPV6_FLOWINFO_MASK;
783 if (ip6_auto_flowlabel) {
784 ip6->ip6_flow |=
785 (htonl(ip6_flow_seq++) & IPV6_FLOWLABEL_MASK);
786 }
787 break;
788 }
789 #endif
790 }
791
792 #ifdef IPSEC
793 (void)ipsec_setsocket(m, NULL);
794 #endif /*IPSEC*/
795
796 if (tp != NULL && tp->t_inpcb != NULL) {
797 ro = &tp->t_inpcb->inp_route;
798 #ifdef IPSEC
799 if (ipsec_setsocket(m, tp->t_inpcb->inp_socket) != 0) {
800 m_freem(m);
801 return ENOBUFS;
802 }
803 #endif
804 #ifdef DIAGNOSTIC
805 if (family != AF_INET)
806 panic("tcp_respond: address family mismatch");
807 if (!in_hosteq(ip->ip_dst, tp->t_inpcb->inp_faddr)) {
808 panic("tcp_respond: ip_dst %x != inp_faddr %x",
809 ntohl(ip->ip_dst.s_addr),
810 ntohl(tp->t_inpcb->inp_faddr.s_addr));
811 }
812 #endif
813 }
814 #ifdef INET6
815 else if (tp != NULL && tp->t_in6pcb != NULL) {
816 ro = (struct route *)&tp->t_in6pcb->in6p_route;
817 #ifdef IPSEC
818 if (ipsec_setsocket(m, tp->t_in6pcb->in6p_socket) != 0) {
819 m_freem(m);
820 return ENOBUFS;
821 }
822 #endif
823 #ifdef DIAGNOSTIC
824 if (family == AF_INET) {
825 if (!IN6_IS_ADDR_V4MAPPED(&tp->t_in6pcb->in6p_faddr))
826 panic("tcp_respond: not mapped addr");
827 if (bcmp(&ip->ip_dst,
828 &tp->t_in6pcb->in6p_faddr.s6_addr32[3],
829 sizeof(ip->ip_dst)) != 0) {
830 panic("tcp_respond: ip_dst != in6p_faddr");
831 }
832 } else if (family == AF_INET6) {
833 if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &tp->t_in6pcb->in6p_faddr))
834 panic("tcp_respond: ip6_dst != in6p_faddr");
835 } else
836 panic("tcp_respond: address family mismatch");
837 #endif
838 }
839 #endif
840 else
841 ro = NULL;
842
843 switch (family) {
844 #ifdef INET
845 case AF_INET:
846 error = ip_output(m, NULL, ro,
847 (ip_mtudisc ? IP_MTUDISC : 0),
848 NULL);
849 break;
850 #endif
851 #ifdef INET6
852 case AF_INET6:
853 error = ip6_output(m, NULL, (struct route_in6 *)ro, 0, NULL,
854 NULL);
855 break;
856 #endif
857 default:
858 error = EAFNOSUPPORT;
859 break;
860 }
861
862 return (error);
863 }
864
865 /*
866 * Create a new TCP control block, making an
867 * empty reassembly queue and hooking it to the argument
868 * protocol control block.
869 */
870 struct tcpcb *
871 tcp_newtcpcb(family, aux)
872 int family; /* selects inpcb, or in6pcb */
873 void *aux;
874 {
875 struct tcpcb *tp;
876 int i;
877
878 switch (family) {
879 case PF_INET:
880 break;
881 #ifdef INET6
882 case PF_INET6:
883 break;
884 #endif
885 default:
886 return NULL;
887 }
888
889 tp = pool_get(&tcpcb_pool, PR_NOWAIT);
890 if (tp == NULL)
891 return (NULL);
892 bzero((caddr_t)tp, sizeof(struct tcpcb));
893 TAILQ_INIT(&tp->segq);
894 TAILQ_INIT(&tp->timeq);
895 tp->t_family = family; /* may be overridden later on */
896 tp->t_peermss = tcp_mssdflt;
897 tp->t_ourmss = tcp_mssdflt;
898 tp->t_segsz = tcp_mssdflt;
899 LIST_INIT(&tp->t_sc);
900
901 callout_init(&tp->t_delack_ch);
902 for (i = 0; i < TCPT_NTIMERS; i++)
903 TCP_TIMER_INIT(tp, i);
904
905 tp->t_flags = 0;
906 if (tcp_do_rfc1323 && tcp_do_win_scale)
907 tp->t_flags |= TF_REQ_SCALE;
908 if (tcp_do_rfc1323 && tcp_do_timestamps)
909 tp->t_flags |= TF_REQ_TSTMP;
910 if (tcp_do_sack == 2)
911 tp->t_flags |= TF_WILL_SACK;
912 else if (tcp_do_sack == 1)
913 tp->t_flags |= TF_WILL_SACK|TF_IGNR_RXSACK;
914 tp->t_flags |= TF_CANT_TXSACK;
915 switch (family) {
916 case PF_INET:
917 tp->t_inpcb = (struct inpcb *)aux;
918 break;
919 #ifdef INET6
920 case PF_INET6:
921 tp->t_in6pcb = (struct in6pcb *)aux;
922 break;
923 #endif
924 }
925 /*
926 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
927 * rtt estimate. Set rttvar so that srtt + 2 * rttvar gives
928 * reasonable initial retransmit time.
929 */
930 tp->t_srtt = TCPTV_SRTTBASE;
931 tp->t_rttvar = tcp_rttdflt * PR_SLOWHZ << (TCP_RTTVAR_SHIFT + 2 - 1);
932 tp->t_rttmin = TCPTV_MIN;
933 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
934 TCPTV_MIN, TCPTV_REXMTMAX);
935 tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
936 tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
937 if (family == AF_INET) {
938 struct inpcb *inp = (struct inpcb *)aux;
939 inp->inp_ip.ip_ttl = ip_defttl;
940 inp->inp_ppcb = (caddr_t)tp;
941 }
942 #ifdef INET6
943 else if (family == AF_INET6) {
944 struct in6pcb *in6p = (struct in6pcb *)aux;
945 in6p->in6p_ip6.ip6_hlim = in6_selecthlim(in6p,
946 in6p->in6p_route.ro_rt ? in6p->in6p_route.ro_rt->rt_ifp
947 : NULL);
948 in6p->in6p_ppcb = (caddr_t)tp;
949 }
950 #endif
951
952 /*
953 * Initialize our timebase. When we send timestamps, we take
954 * the delta from tcp_now -- this means each connection always
955 * gets a timebase of 0, which makes it, among other things,
956 * more difficult to determine how long a system has been up,
957 * and thus how many TCP sequence increments have occurred.
958 */
959 tp->ts_timebase = tcp_now;
960
961 return (tp);
962 }
963
964 /*
965 * Drop a TCP connection, reporting
966 * the specified error. If connection is synchronized,
967 * then send a RST to peer.
968 */
969 struct tcpcb *
970 tcp_drop(tp, errno)
971 struct tcpcb *tp;
972 int errno;
973 {
974 struct socket *so = NULL;
975
976 #ifdef DIAGNOSTIC
977 if (tp->t_inpcb && tp->t_in6pcb)
978 panic("tcp_drop: both t_inpcb and t_in6pcb are set");
979 #endif
980 #ifdef INET
981 if (tp->t_inpcb)
982 so = tp->t_inpcb->inp_socket;
983 #endif
984 #ifdef INET6
985 if (tp->t_in6pcb)
986 so = tp->t_in6pcb->in6p_socket;
987 #endif
988 if (!so)
989 return NULL;
990
991 if (TCPS_HAVERCVDSYN(tp->t_state)) {
992 tp->t_state = TCPS_CLOSED;
993 (void) tcp_output(tp);
994 tcpstat.tcps_drops++;
995 } else
996 tcpstat.tcps_conndrops++;
997 if (errno == ETIMEDOUT && tp->t_softerror)
998 errno = tp->t_softerror;
999 so->so_error = errno;
1000 return (tcp_close(tp));
1001 }
1002
1003 /*
1004 * Close a TCP control block:
1005 * discard all space held by the tcp
1006 * discard internet protocol block
1007 * wake up any sleepers
1008 */
1009 struct tcpcb *
1010 tcp_close(tp)
1011 struct tcpcb *tp;
1012 {
1013 struct inpcb *inp;
1014 #ifdef INET6
1015 struct in6pcb *in6p;
1016 #endif
1017 struct socket *so;
1018 #ifdef RTV_RTT
1019 struct rtentry *rt;
1020 #endif
1021 struct route *ro;
1022
1023 inp = tp->t_inpcb;
1024 #ifdef INET6
1025 in6p = tp->t_in6pcb;
1026 #endif
1027 so = NULL;
1028 ro = NULL;
1029 if (inp) {
1030 so = inp->inp_socket;
1031 ro = &inp->inp_route;
1032 }
1033 #ifdef INET6
1034 else if (in6p) {
1035 so = in6p->in6p_socket;
1036 ro = (struct route *)&in6p->in6p_route;
1037 }
1038 #endif
1039
1040 #ifdef RTV_RTT
1041 /*
1042 * If we sent enough data to get some meaningful characteristics,
1043 * save them in the routing entry. 'Enough' is arbitrarily
1044 * defined as the sendpipesize (default 4K) * 16. This would
1045 * give us 16 rtt samples assuming we only get one sample per
1046 * window (the usual case on a long haul net). 16 samples is
1047 * enough for the srtt filter to converge to within 5% of the correct
1048 * value; fewer samples and we could save a very bogus rtt.
1049 *
1050 * Don't update the default route's characteristics and don't
1051 * update anything that the user "locked".
1052 */
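	/*
	 * Note that t_srtt/t_rttvar are kept in scaled PR_SLOWHZ
	 * ticks while the route metrics are in RTM_RTTUNIT units;
	 * the shifts below convert between the two representations
	 * before the values are merged.
	 */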
1053 if (SEQ_LT(tp->iss + so->so_snd.sb_hiwat * 16, tp->snd_max) &&
1054 ro && (rt = ro->ro_rt) &&
1055 !in_nullhost(satosin(rt_key(rt))->sin_addr)) {
1056 u_long i = 0;
1057
1058 if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
1059 i = tp->t_srtt *
1060 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
1061 if (rt->rt_rmx.rmx_rtt && i)
1062 /*
1063 * filter this update to half the old & half
1064 * the new values, converting scale.
1065 * See route.h and tcp_var.h for a
1066 * description of the scaling constants.
1067 */
1068 rt->rt_rmx.rmx_rtt =
1069 (rt->rt_rmx.rmx_rtt + i) / 2;
1070 else
1071 rt->rt_rmx.rmx_rtt = i;
1072 }
1073 if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
1074 i = tp->t_rttvar *
1075 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTTVAR_SHIFT + 2));
1076 if (rt->rt_rmx.rmx_rttvar && i)
1077 rt->rt_rmx.rmx_rttvar =
1078 (rt->rt_rmx.rmx_rttvar + i) / 2;
1079 else
1080 rt->rt_rmx.rmx_rttvar = i;
1081 }
1082 /*
1083 * update the pipelimit (ssthresh) if it has been updated
1084 	 * already or if a pipesize was specified & the threshold
1085 * got below half the pipesize. I.e., wait for bad news
1086 * before we start updating, then update on both good
1087 * and bad news.
1088 */
1089 if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
1090 (i = tp->snd_ssthresh) && rt->rt_rmx.rmx_ssthresh) ||
1091 i < (rt->rt_rmx.rmx_sendpipe / 2)) {
1092 /*
1093 * convert the limit from user data bytes to
1094 * packets then to packet data bytes.
1095 */
1096 i = (i + tp->t_segsz / 2) / tp->t_segsz;
1097 if (i < 2)
1098 i = 2;
1099 i *= (u_long)(tp->t_segsz + sizeof (struct tcpiphdr));
1100 if (rt->rt_rmx.rmx_ssthresh)
1101 rt->rt_rmx.rmx_ssthresh =
1102 (rt->rt_rmx.rmx_ssthresh + i) / 2;
1103 else
1104 rt->rt_rmx.rmx_ssthresh = i;
1105 }
1106 }
1107 #endif /* RTV_RTT */
1108 /* free the reassembly queue, if any */
1109 TCP_REASS_LOCK(tp);
1110 (void) tcp_freeq(tp);
1111 TCP_REASS_UNLOCK(tp);
1112
1113 tcp_canceltimers(tp);
1114 TCP_CLEAR_DELACK(tp);
1115 syn_cache_cleanup(tp);
1116
1117 if (tp->t_template) {
1118 m_free(tp->t_template);
1119 tp->t_template = NULL;
1120 }
1121 pool_put(&tcpcb_pool, tp);
1122 if (inp) {
1123 inp->inp_ppcb = 0;
1124 soisdisconnected(so);
1125 in_pcbdetach(inp);
1126 }
1127 #ifdef INET6
1128 else if (in6p) {
1129 in6p->in6p_ppcb = 0;
1130 soisdisconnected(so);
1131 in6_pcbdetach(in6p);
1132 }
1133 #endif
1134 tcpstat.tcps_closed++;
1135 return ((struct tcpcb *)0);
1136 }
1137
1138 int
1139 tcp_freeq(tp)
1140 struct tcpcb *tp;
1141 {
1142 struct ipqent *qe;
1143 int rv = 0;
1144 #ifdef TCPREASS_DEBUG
1145 int i = 0;
1146 #endif
1147
1148 TCP_REASS_LOCK_CHECK(tp);
1149
1150 while ((qe = TAILQ_FIRST(&tp->segq)) != NULL) {
1151 #ifdef TCPREASS_DEBUG
1152 printf("tcp_freeq[%p,%d]: %u:%u(%u) 0x%02x\n",
1153 tp, i++, qe->ipqe_seq, qe->ipqe_seq + qe->ipqe_len,
1154 qe->ipqe_len, qe->ipqe_flags & (TH_SYN|TH_FIN|TH_RST));
1155 #endif
1156 TAILQ_REMOVE(&tp->segq, qe, ipqe_q);
1157 TAILQ_REMOVE(&tp->timeq, qe, ipqe_timeq);
1158 m_freem(qe->ipqe_m);
1159 pool_put(&ipqent_pool, qe);
1160 rv = 1;
1161 }
1162 return (rv);
1163 }
1164
1165 /*
1166 * Protocol drain routine. Called when memory is in short supply.
1167 */
1168 void
1169 tcp_drain()
1170 {
1171 struct inpcb *inp;
1172 struct tcpcb *tp;
1173
1174 /*
1175 * Free the sequence queue of all TCP connections.
1176 */
1177 inp = CIRCLEQ_FIRST(&tcbtable.inpt_queue);
1178 if (inp) /* XXX */
1179 CIRCLEQ_FOREACH(inp, &tcbtable.inpt_queue, inp_queue) {
1180 if ((tp = intotcpcb(inp)) != NULL) {
1181 /*
1182 * We may be called from a device's interrupt
1183 * context. If the tcpcb is already busy,
1184 * just bail out now.
1185 */
1186 if (tcp_reass_lock_try(tp) == 0)
1187 continue;
1188 if (tcp_freeq(tp))
1189 tcpstat.tcps_connsdrained++;
1190 TCP_REASS_UNLOCK(tp);
1191 }
1192 }
1193 }
1194
1195 #ifdef INET6
1196 void
1197 tcp6_drain()
1198 {
1199 struct in6pcb *in6p;
1200 struct tcpcb *tp;
1201 struct in6pcb *head = &tcb6;
1202
1203 /*
1204 * Free the sequence queue of all TCP connections.
1205 */
1206 for (in6p = head->in6p_next; in6p != head; in6p = in6p->in6p_next) {
1207 if ((tp = in6totcpcb(in6p)) != NULL) {
1208 /*
1209 * We may be called from a device's interrupt
1210 * context. If the tcpcb is already busy,
1211 * just bail out now.
1212 */
1213 if (tcp_reass_lock_try(tp) == 0)
1214 continue;
1215 if (tcp_freeq(tp))
1216 tcpstat.tcps_connsdrained++;
1217 TCP_REASS_UNLOCK(tp);
1218 }
1219 }
1220 }
1221 #endif
1222
1223 /*
1224 * Notify a tcp user of an asynchronous error;
1225 * store error as soft error, but wake up user
1226 * (for now, won't do anything until can select for soft error).
1227 */
1228 void
1229 tcp_notify(inp, error)
1230 struct inpcb *inp;
1231 int error;
1232 {
1233 struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
1234 struct socket *so = inp->inp_socket;
1235
1236 /*
1237 * Ignore some errors if we are hooked up.
1238 * If connection hasn't completed, has retransmitted several times,
1239 * and receives a second error, give up now. This is better
1240 * than waiting a long time to establish a connection that
1241 * can never complete.
1242 */
1243 if (tp->t_state == TCPS_ESTABLISHED &&
1244 (error == EHOSTUNREACH || error == ENETUNREACH ||
1245 error == EHOSTDOWN)) {
1246 return;
1247 } else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
1248 tp->t_rxtshift > 3 && tp->t_softerror)
1249 so->so_error = error;
1250 else
1251 tp->t_softerror = error;
1252 wakeup((caddr_t) &so->so_timeo);
1253 sorwakeup(so);
1254 sowwakeup(so);
1255 }
1256
1257 #ifdef INET6
1258 void
1259 tcp6_notify(in6p, error)
1260 struct in6pcb *in6p;
1261 int error;
1262 {
1263 struct tcpcb *tp = (struct tcpcb *)in6p->in6p_ppcb;
1264 struct socket *so = in6p->in6p_socket;
1265
1266 /*
1267 * Ignore some errors if we are hooked up.
1268 * If connection hasn't completed, has retransmitted several times,
1269 * and receives a second error, give up now. This is better
1270 * than waiting a long time to establish a connection that
1271 * can never complete.
1272 */
1273 if (tp->t_state == TCPS_ESTABLISHED &&
1274 (error == EHOSTUNREACH || error == ENETUNREACH ||
1275 error == EHOSTDOWN)) {
1276 return;
1277 } else if (TCPS_HAVEESTABLISHED(tp->t_state) == 0 &&
1278 tp->t_rxtshift > 3 && tp->t_softerror)
1279 so->so_error = error;
1280 else
1281 tp->t_softerror = error;
1282 wakeup((caddr_t) &so->so_timeo);
1283 sorwakeup(so);
1284 sowwakeup(so);
1285 }
1286 #endif
1287
1288 #ifdef INET6
1289 void
1290 tcp6_ctlinput(cmd, sa, d)
1291 int cmd;
1292 struct sockaddr *sa;
1293 void *d;
1294 {
1295 struct tcphdr th;
1296 void (*notify) __P((struct in6pcb *, int)) = tcp6_notify;
1297 int nmatch;
1298 struct ip6_hdr *ip6;
1299 const struct sockaddr_in6 *sa6_src = NULL;
1300 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa;
1301 struct mbuf *m;
1302 int off;
1303
1304 if (sa->sa_family != AF_INET6 ||
1305 sa->sa_len != sizeof(struct sockaddr_in6))
1306 return;
1307 if ((unsigned)cmd >= PRC_NCMDS)
1308 return;
1309 else if (cmd == PRC_QUENCH) {
1310 /* XXX there's no PRC_QUENCH in IPv6 */
1311 notify = tcp6_quench;
1312 } else if (PRC_IS_REDIRECT(cmd))
1313 notify = in6_rtchange, d = NULL;
1314 else if (cmd == PRC_MSGSIZE)
1315 ; /* special code is present, see below */
1316 else if (cmd == PRC_HOSTDEAD)
1317 d = NULL;
1318 else if (inet6ctlerrmap[cmd] == 0)
1319 return;
1320
1321 /* if the parameter is from icmp6, decode it. */
1322 if (d != NULL) {
1323 struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d;
1324 m = ip6cp->ip6c_m;
1325 ip6 = ip6cp->ip6c_ip6;
1326 off = ip6cp->ip6c_off;
1327 sa6_src = ip6cp->ip6c_src;
1328 } else {
1329 m = NULL;
1330 ip6 = NULL;
1331 sa6_src = &sa6_any;
1332 }
1333
1334 if (ip6) {
1335 /*
1336 		 * XXX: We assume that when ip6 is non-NULL,
1337 * M and OFF are valid.
1338 */
1339
1340 /* check if we can safely examine src and dst ports */
1341 if (m->m_pkthdr.len < off + sizeof(th)) {
1342 if (cmd == PRC_MSGSIZE)
1343 icmp6_mtudisc_update((struct ip6ctlparam *)d, 0);
1344 return;
1345 }
1346
1347 bzero(&th, sizeof(th));
1348 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1349
1350 if (cmd == PRC_MSGSIZE) {
1351 int valid = 0;
1352
1353 /*
1354 * Check to see if we have a valid TCP connection
1355 * corresponding to the address in the ICMPv6 message
1356 * payload.
1357 */
1358 if (in6_pcblookup_connect(&tcb6, &sa6->sin6_addr,
1359 th.th_dport, (struct in6_addr *)&sa6_src->sin6_addr,
1360 th.th_sport, 0))
1361 valid++;
1362
1363 /*
1364 * Depending on the value of "valid" and routing table
1365 * size (mtudisc_{hi,lo}wat), we will:
1366 			 *   - recalculate the new MTU and create the
1367 * corresponding routing entry, or
1368 * - ignore the MTU change notification.
1369 */
1370 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1371
1372 /*
1373 * no need to call in6_pcbnotify, it should have been
1374 * called via callback if necessary
1375 */
1376 return;
1377 }
1378
1379 nmatch = in6_pcbnotify(&tcb6, sa, th.th_dport,
1380 (struct sockaddr *)sa6_src, th.th_sport, cmd, NULL, notify);
1381 if (nmatch == 0 && syn_cache_count &&
1382 (inet6ctlerrmap[cmd] == EHOSTUNREACH ||
1383 inet6ctlerrmap[cmd] == ENETUNREACH ||
1384 inet6ctlerrmap[cmd] == EHOSTDOWN))
1385 syn_cache_unreach((struct sockaddr *)sa6_src,
1386 sa, &th);
1387 } else {
1388 (void) in6_pcbnotify(&tcb6, sa, 0, (struct sockaddr *)sa6_src,
1389 0, cmd, NULL, notify);
1390 }
1391 }
1392 #endif
1393
1394 #ifdef INET
1395 /* assumes that ip header and tcp header are contiguous on mbuf */
1396 void *
1397 tcp_ctlinput(cmd, sa, v)
1398 int cmd;
1399 struct sockaddr *sa;
1400 void *v;
1401 {
1402 struct ip *ip = v;
1403 struct tcphdr *th;
1404 struct icmp *icp;
1405 extern const int inetctlerrmap[];
1406 void (*notify) __P((struct inpcb *, int)) = tcp_notify;
1407 int errno;
1408 int nmatch;
1409
1410 if (sa->sa_family != AF_INET ||
1411 sa->sa_len != sizeof(struct sockaddr_in))
1412 return NULL;
1413 if ((unsigned)cmd >= PRC_NCMDS)
1414 return NULL;
1415 errno = inetctlerrmap[cmd];
1416 if (cmd == PRC_QUENCH)
1417 notify = tcp_quench;
1418 else if (PRC_IS_REDIRECT(cmd))
1419 notify = in_rtchange, ip = 0;
1420 else if (cmd == PRC_MSGSIZE && ip_mtudisc && ip && ip->ip_v == 4) {
1421 /*
1422 * Check to see if we have a valid TCP connection
1423 * corresponding to the address in the ICMP message
1424 * payload.
1425 *
1426 * Boundary check is made in icmp_input(), with ICMP_ADVLENMIN.
1427 */
1428 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1429 if (in_pcblookup_connect(&tcbtable,
1430 ip->ip_dst, th->th_dport,
1431 ip->ip_src, th->th_sport) == NULL)
1432 return NULL;
1433
1434 /*
1435 * Now that we've validated that we are actually communicating
1436 * with the host indicated in the ICMP message, locate the
1437 * ICMP header, recalculate the new MTU, and create the
1438 * corresponding routing entry.
1439 */
1440 icp = (struct icmp *)((caddr_t)ip -
1441 offsetof(struct icmp, icmp_ip));
1442 icmp_mtudisc(icp, ip->ip_dst);
1443
1444 return NULL;
1445 } else if (cmd == PRC_HOSTDEAD)
1446 ip = 0;
1447 else if (errno == 0)
1448 return NULL;
1449 if (ip && ip->ip_v == 4 && sa->sa_family == AF_INET) {
1450 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1451 nmatch = in_pcbnotify(&tcbtable, satosin(sa)->sin_addr,
1452 th->th_dport, ip->ip_src, th->th_sport, errno, notify);
1453 if (nmatch == 0 && syn_cache_count &&
1454 (inetctlerrmap[cmd] == EHOSTUNREACH ||
1455 inetctlerrmap[cmd] == ENETUNREACH ||
1456 inetctlerrmap[cmd] == EHOSTDOWN)) {
1457 struct sockaddr_in sin;
1458 bzero(&sin, sizeof(sin));
1459 sin.sin_len = sizeof(sin);
1460 sin.sin_family = AF_INET;
1461 sin.sin_port = th->th_sport;
1462 sin.sin_addr = ip->ip_src;
1463 syn_cache_unreach((struct sockaddr *)&sin, sa, th);
1464 }
1465
1466 /* XXX mapped address case */
1467 } else
1468 in_pcbnotifyall(&tcbtable, satosin(sa)->sin_addr, errno,
1469 notify);
1470 return NULL;
1471 }
1472
1473 /*
1474  * When a source quench is received, we are being notified of congestion.
1475 * Close the congestion window down to the Loss Window (one segment).
1476 * We will gradually open it again as we proceed.
1477 */
1478 void
1479 tcp_quench(inp, errno)
1480 struct inpcb *inp;
1481 int errno;
1482 {
1483 struct tcpcb *tp = intotcpcb(inp);
1484
1485 if (tp)
1486 tp->snd_cwnd = tp->t_segsz;
1487 }
1488 #endif
1489
1490 #ifdef INET6
1491 void
1492 tcp6_quench(in6p, errno)
1493 struct in6pcb *in6p;
1494 int errno;
1495 {
1496 struct tcpcb *tp = in6totcpcb(in6p);
1497
1498 if (tp)
1499 tp->snd_cwnd = tp->t_segsz;
1500 }
1501 #endif
1502
1503 #ifdef INET
1504 /*
1505 * Path MTU Discovery handlers.
1506 */
1507 void
1508 tcp_mtudisc_callback(faddr)
1509 struct in_addr faddr;
1510 {
1511
1512 in_pcbnotifyall(&tcbtable, faddr, EMSGSIZE, tcp_mtudisc);
1513 }
1514
1515 /*
1516 * On receipt of path MTU corrections, flush old route and replace it
1517 * with the new one. Retransmit all unacknowledged packets, to ensure
1518 * that all packets will be received.
1519 */
1520 void
1521 tcp_mtudisc(inp, errno)
1522 struct inpcb *inp;
1523 int errno;
1524 {
1525 struct tcpcb *tp = intotcpcb(inp);
1526 struct rtentry *rt = in_pcbrtentry(inp);
1527
1528 if (tp != 0) {
1529 if (rt != 0) {
1530 /*
1531 * If this was not a host route, remove and realloc.
1532 */
1533 if ((rt->rt_flags & RTF_HOST) == 0) {
1534 in_rtchange(inp, errno);
1535 if ((rt = in_pcbrtentry(inp)) == 0)
1536 return;
1537 }
1538
1539 /*
1540 * Slow start out of the error condition. We
1541 * use the MTU because we know it's smaller
1542 * than the previously transmitted segment.
1543 *
1544 * Note: This is more conservative than the
1545 * suggestion in draft-floyd-incr-init-win-03.
1546 */
1547 if (rt->rt_rmx.rmx_mtu != 0)
1548 tp->snd_cwnd =
1549 TCP_INITIAL_WINDOW(tcp_init_win,
1550 rt->rt_rmx.rmx_mtu);
1551 }
1552
1553 /*
1554 * Resend unacknowledged packets.
1555 */
1556 tp->snd_nxt = tp->snd_una;
1557 tcp_output(tp);
1558 }
1559 }
1560 #endif
1561
1562 #ifdef INET6
1563 /*
1564 * Path MTU Discovery handlers.
1565 */
1566 void
1567 tcp6_mtudisc_callback(faddr)
1568 struct in6_addr *faddr;
1569 {
1570 struct sockaddr_in6 sin6;
1571
1572 bzero(&sin6, sizeof(sin6));
1573 sin6.sin6_family = AF_INET6;
1574 sin6.sin6_len = sizeof(struct sockaddr_in6);
1575 sin6.sin6_addr = *faddr;
1576 (void) in6_pcbnotify(&tcb6, (struct sockaddr *)&sin6, 0,
1577 (struct sockaddr *)&sa6_any, 0, PRC_MSGSIZE, NULL, tcp6_mtudisc);
1578 }
1579
1580 void
1581 tcp6_mtudisc(in6p, errno)
1582 struct in6pcb *in6p;
1583 int errno;
1584 {
1585 struct tcpcb *tp = in6totcpcb(in6p);
1586 struct rtentry *rt = in6_pcbrtentry(in6p);
1587
1588 if (tp != 0) {
1589 if (rt != 0) {
1590 /*
1591 * If this was not a host route, remove and realloc.
1592 */
1593 if ((rt->rt_flags & RTF_HOST) == 0) {
1594 in6_rtchange(in6p, errno);
1595 if ((rt = in6_pcbrtentry(in6p)) == 0)
1596 return;
1597 }
1598
1599 /*
1600 * Slow start out of the error condition. We
1601 * use the MTU because we know it's smaller
1602 * than the previously transmitted segment.
1603 *
1604 * Note: This is more conservative than the
1605 * suggestion in draft-floyd-incr-init-win-03.
1606 */
1607 if (rt->rt_rmx.rmx_mtu != 0)
1608 tp->snd_cwnd =
1609 TCP_INITIAL_WINDOW(tcp_init_win,
1610 rt->rt_rmx.rmx_mtu);
1611 }
1612
1613 /*
1614 * Resend unacknowledged packets.
1615 */
1616 tp->snd_nxt = tp->snd_una;
1617 tcp_output(tp);
1618 }
1619 }
1620 #endif /* INET6 */
1621
1622 /*
1623 * Compute the MSS to advertise to the peer. Called only during
1624 * the 3-way handshake. If we are the server (peer initiated
1625 * connection), we are called with a pointer to the interface
1626 * on which the SYN packet arrived. If we are the client (we
1627 * initiated connection), we are called with a pointer to the
1628 * interface out which this connection should go.
1629 *
1630 * NOTE: Do not subtract IP option/extension header size nor IPsec
1631 * header size from MSS advertisement. MSS option must hold the maximum
1632 * segment size we can accept, so it must always be:
1633 * max(if mtu) - ip header - tcp header
1634 */
1635 u_long
1636 tcp_mss_to_advertise(ifp, af)
1637 const struct ifnet *ifp;
1638 int af;
1639 {
1640 extern u_long in_maxmtu;
1641 u_long mss = 0;
1642 u_long hdrsiz;
1643
1644 /*
1645 * In order to avoid defeating path MTU discovery on the peer,
1646 * we advertise the max MTU of all attached networks as our MSS,
1647 * per RFC 1191, section 3.1.
1648 *
1649 * We provide the option to advertise just the MTU of
1650 * the interface on which we hope this connection will
1651 * be receiving. If we are responding to a SYN, we
1652 * will have a pretty good idea about this, but when
1653 * initiating a connection there is a bit more doubt.
1654 *
1655 * We also need to ensure that loopback has a large enough
1656 * MSS, as the loopback MTU is never included in in_maxmtu.
1657 */
1658
1659 if (ifp != NULL)
1660 mss = ifp->if_mtu;
1661
1662 if (tcp_mss_ifmtu == 0)
1663 switch (af) {
1664 case AF_INET:
1665 mss = max(in_maxmtu, mss);
1666 break;
1667 #ifdef INET6
1668 case AF_INET6:
1669 mss = max(in6_maxmtu, mss);
1670 break;
1671 #endif
1672 }
1673
1674 switch (af) {
1675 case AF_INET:
1676 hdrsiz = sizeof(struct ip);
1677 break;
1678 #ifdef INET6
1679 case AF_INET6:
1680 hdrsiz = sizeof(struct ip6_hdr);
1681 break;
1682 #endif
1683 default:
1684 hdrsiz = 0;
1685 break;
1686 }
1687 hdrsiz += sizeof(struct tcphdr);
1688 if (mss > hdrsiz)
1689 mss -= hdrsiz;
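	/*
	 * E.g. for an IPv4 connection limited by an Ethernet-sized
	 * MTU of 1500 this yields 1500 - 20 - 20 = 1460 bytes.
	 */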
1690
1691 mss = max(tcp_mssdflt, mss);
1692 return (mss);
1693 }
1694
1695 /*
1696 * Set connection variables based on the peer's advertised MSS.
1697 * We are passed the TCPCB for the actual connection. If we
1698 * are the server, we are called by the compressed state engine
1699 * when the 3-way handshake is complete. If we are the client,
1700 * we are called when we receive the SYN,ACK from the server.
1701 *
1702 * NOTE: Our advertised MSS value must be initialized in the TCPCB
1703 * before this routine is called!
1704 */
1705 void
1706 tcp_mss_from_peer(tp, offer)
1707 struct tcpcb *tp;
1708 int offer;
1709 {
1710 struct socket *so;
1711 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1712 struct rtentry *rt;
1713 #endif
1714 u_long bufsize;
1715 int mss;
1716
1717 #ifdef DIAGNOSTIC
1718 if (tp->t_inpcb && tp->t_in6pcb)
1719 panic("tcp_mss_from_peer: both t_inpcb and t_in6pcb are set");
1720 #endif
1721 so = NULL;
1722 rt = NULL;
1723 #ifdef INET
1724 if (tp->t_inpcb) {
1725 so = tp->t_inpcb->inp_socket;
1726 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1727 rt = in_pcbrtentry(tp->t_inpcb);
1728 #endif
1729 }
1730 #endif
1731 #ifdef INET6
1732 if (tp->t_in6pcb) {
1733 so = tp->t_in6pcb->in6p_socket;
1734 #if defined(RTV_SPIPE) || defined(RTV_SSTHRESH)
1735 rt = in6_pcbrtentry(tp->t_in6pcb);
1736 #endif
1737 }
1738 #endif
1739
1740 /*
1741 * As per RFC1122, use the default MSS value, unless they
1742 * sent us an offer. Do not accept offers less than 32 bytes.
1743 */
1744 mss = tcp_mssdflt;
1745 if (offer)
1746 mss = offer;
1747 mss = max(mss, 32); /* sanity */
1748 tp->t_peermss = mss;
1749 mss -= tcp_optlen(tp);
1750 #ifdef INET
1751 if (tp->t_inpcb)
1752 mss -= ip_optlen(tp->t_inpcb);
1753 #endif
1754 #ifdef INET6
1755 if (tp->t_in6pcb)
1756 mss -= ip6_optlen(tp->t_in6pcb);
1757 #endif
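	/*
	 * t_peermss above records the raw offer; the value computed
	 * here is the usable payload per segment once TCP and IP
	 * option overhead has been subtracted, and it becomes t_segsz
	 * below (possibly further reduced to fit the send buffer).
	 */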
1758
1759 /*
1760 * If there's a pipesize, change the socket buffer to that size.
1761 * Make the socket buffer an integral number of MSS units. If
1762 * the MSS is larger than the socket buffer, artificially decrease
1763 * the MSS.
1764 */
1765 #ifdef RTV_SPIPE
1766 if (rt != NULL && rt->rt_rmx.rmx_sendpipe != 0)
1767 bufsize = rt->rt_rmx.rmx_sendpipe;
1768 else
1769 #endif
1770 bufsize = so->so_snd.sb_hiwat;
1771 if (bufsize < mss)
1772 mss = bufsize;
1773 else {
1774 bufsize = roundup(bufsize, mss);
1775 if (bufsize > sb_max)
1776 bufsize = sb_max;
1777 (void) sbreserve(&so->so_snd, bufsize);
1778 }
1779 tp->t_segsz = mss;
1780
1781 #ifdef RTV_SSTHRESH
1782 if (rt != NULL && rt->rt_rmx.rmx_ssthresh) {
1783 /*
1784 * There's some sort of gateway or interface buffer
1785 * limit on the path. Use this to set the slow
1786 * start threshold, but set the threshold to no less
1787 * than 2 * MSS.
1788 */
1789 tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
1790 }
1791 #endif
1792 }
1793
1794 /*
1795 * Processing necessary when a TCP connection is established.
1796 */
1797 void
1798 tcp_established(tp)
1799 struct tcpcb *tp;
1800 {
1801 struct socket *so;
1802 #ifdef RTV_RPIPE
1803 struct rtentry *rt;
1804 #endif
1805 u_long bufsize;
1806
1807 #ifdef DIAGNOSTIC
1808 if (tp->t_inpcb && tp->t_in6pcb)
1809 panic("tcp_established: both t_inpcb and t_in6pcb are set");
1810 #endif
1811 so = NULL;
1812 rt = NULL;
1813 #ifdef INET
1814 if (tp->t_inpcb) {
1815 so = tp->t_inpcb->inp_socket;
1816 #if defined(RTV_RPIPE)
1817 rt = in_pcbrtentry(tp->t_inpcb);
1818 #endif
1819 }
1820 #endif
1821 #ifdef INET6
1822 if (tp->t_in6pcb) {
1823 so = tp->t_in6pcb->in6p_socket;
1824 #if defined(RTV_RPIPE)
1825 rt = in6_pcbrtentry(tp->t_in6pcb);
1826 #endif
1827 }
1828 #endif
1829
1830 tp->t_state = TCPS_ESTABLISHED;
1831 TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepidle);
1832
1833 #ifdef RTV_RPIPE
1834 if (rt != NULL && rt->rt_rmx.rmx_recvpipe != 0)
1835 bufsize = rt->rt_rmx.rmx_recvpipe;
1836 else
1837 #endif
1838 bufsize = so->so_rcv.sb_hiwat;
1839 if (bufsize > tp->t_ourmss) {
1840 bufsize = roundup(bufsize, tp->t_ourmss);
1841 if (bufsize > sb_max)
1842 bufsize = sb_max;
1843 (void) sbreserve(&so->so_rcv, bufsize);
1844 }
1845 }
1846
1847 /*
1848 * Check if there's an initial rtt or rttvar. Convert from the
1849 * route-table units to scaled multiples of the slow timeout timer.
1850 * Called only during the 3-way handshake.
1851 */
1852 void
1853 tcp_rmx_rtt(tp)
1854 struct tcpcb *tp;
1855 {
1856 #ifdef RTV_RTT
1857 struct rtentry *rt = NULL;
1858 int rtt;
1859
1860 #ifdef DIAGNOSTIC
1861 if (tp->t_inpcb && tp->t_in6pcb)
1862 panic("tcp_rmx_rtt: both t_inpcb and t_in6pcb are set");
1863 #endif
1864 #ifdef INET
1865 if (tp->t_inpcb)
1866 rt = in_pcbrtentry(tp->t_inpcb);
1867 #endif
1868 #ifdef INET6
1869 if (tp->t_in6pcb)
1870 rt = in6_pcbrtentry(tp->t_in6pcb);
1871 #endif
1872 if (rt == NULL)
1873 return;
1874
1875 if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) {
1876 /*
1877 		 * XXX The lock bit for RTT indicates that the value
1878 * is also a minimum value; this is subject to time.
1879 */
1880 if (rt->rt_rmx.rmx_locks & RTV_RTT)
1881 TCPT_RANGESET(tp->t_rttmin,
1882 rtt / (RTM_RTTUNIT / PR_SLOWHZ),
1883 TCPTV_MIN, TCPTV_REXMTMAX);
1884 tp->t_srtt = rtt /
1885 ((RTM_RTTUNIT / PR_SLOWHZ) >> (TCP_RTT_SHIFT + 2));
1886 if (rt->rt_rmx.rmx_rttvar) {
1887 tp->t_rttvar = rt->rt_rmx.rmx_rttvar /
1888 ((RTM_RTTUNIT / PR_SLOWHZ) >>
1889 (TCP_RTTVAR_SHIFT + 2));
1890 } else {
1891 /* Default variation is +- 1 rtt */
1892 tp->t_rttvar =
1893 tp->t_srtt >> (TCP_RTT_SHIFT - TCP_RTTVAR_SHIFT);
1894 }
1895 TCPT_RANGESET(tp->t_rxtcur,
1896 ((tp->t_srtt >> 2) + tp->t_rttvar) >> (1 + 2),
1897 tp->t_rttmin, TCPTV_REXMTMAX);
1898 }
1899 #endif
1900 }
1901
1902 tcp_seq tcp_iss_seq = 0; /* tcp initial seq # */
1903 #if NRND > 0
1904 u_int8_t tcp_iss_secret[16]; /* 128 bits; should be plenty */
1905 #endif
1906
1907 /*
1908 * Get a new sequence value given a tcp control block
1909 */
1910 tcp_seq
1911 tcp_new_iss(struct tcpcb *tp, tcp_seq addin)
1912 {
1913
1914 #ifdef INET
1915 if (tp->t_inpcb != NULL) {
1916 return (tcp_new_iss1(&tp->t_inpcb->inp_laddr,
1917 &tp->t_inpcb->inp_faddr, tp->t_inpcb->inp_lport,
1918 tp->t_inpcb->inp_fport, sizeof(tp->t_inpcb->inp_laddr),
1919 addin));
1920 }
1921 #endif
1922 #ifdef INET6
1923 if (tp->t_in6pcb != NULL) {
1924 return (tcp_new_iss1(&tp->t_in6pcb->in6p_laddr,
1925 &tp->t_in6pcb->in6p_faddr, tp->t_in6pcb->in6p_lport,
1926 tp->t_in6pcb->in6p_fport, sizeof(tp->t_in6pcb->in6p_laddr),
1927 addin));
1928 }
1929 #endif
1930 /* Not possible. */
1931 panic("tcp_new_iss");
1932 }
1933
1934 /*
1935 * This routine actually generates a new TCP initial sequence number.
1936 */
1937 tcp_seq
1938 tcp_new_iss1(void *laddr, void *faddr, u_int16_t lport, u_int16_t fport,
1939 size_t addrsz, tcp_seq addin)
1940 {
1941 tcp_seq tcp_iss;
1942
1943 #if NRND > 0
1944 static int beenhere;
1945
1946 /*
1947 * If we haven't been here before, initialize our cryptographic
1948 * hash secret.
1949 */
1950 if (beenhere == 0) {
1951 rnd_extract_data(tcp_iss_secret, sizeof(tcp_iss_secret),
1952 RND_EXTRACT_ANY);
1953 beenhere = 1;
1954 }
1955
1956 if (tcp_do_rfc1948) {
1957 MD5_CTX ctx;
1958 u_int8_t hash[16]; /* XXX MD5 knowledge */
1959
1960 /*
1961 * Compute the base value of the ISS. It is a hash
1962 * of (saddr, sport, daddr, dport, secret).
1963 */
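		/*
		 * This is the RFC 1948 scheme: the secret keeps the
		 * base value unpredictable to off-path attackers,
		 * while the tcp_iss_seq increment added below keeps
		 * successive ISSs for the same 4-tuple increasing.
		 */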
1964 MD5Init(&ctx);
1965
1966 MD5Update(&ctx, (u_char *) laddr, addrsz);
1967 MD5Update(&ctx, (u_char *) &lport, sizeof(lport));
1968
1969 MD5Update(&ctx, (u_char *) faddr, addrsz);
1970 MD5Update(&ctx, (u_char *) &fport, sizeof(fport));
1971
1972 MD5Update(&ctx, tcp_iss_secret, sizeof(tcp_iss_secret));
1973
1974 MD5Final(hash, &ctx);
1975
1976 memcpy(&tcp_iss, hash, sizeof(tcp_iss));
1977
1978 /*
1979 * Now increment our "timer", and add it in to
1980 * the computed value.
1981 *
1982 * XXX Use `addin'?
1983 * XXX TCP_ISSINCR too large to use?
1984 */
1985 tcp_iss_seq += TCP_ISSINCR;
1986 #ifdef TCPISS_DEBUG
1987 printf("ISS hash 0x%08x, ", tcp_iss);
1988 #endif
1989 tcp_iss += tcp_iss_seq + addin;
1990 #ifdef TCPISS_DEBUG
1991 printf("new ISS 0x%08x\n", tcp_iss);
1992 #endif
1993 } else
1994 #endif /* NRND > 0 */
1995 {
1996 /*
1997 * Randomize.
1998 */
1999 #if NRND > 0
2000 rnd_extract_data(&tcp_iss, sizeof(tcp_iss), RND_EXTRACT_ANY);
2001 #else
2002 tcp_iss = random();
2003 #endif
2004
2005 /*
2006 * If we were asked to add some amount to a known value,
2007 * we will take a random value obtained above, mask off
2008 * the upper bits, and add in the known value. We also
2009 * add in a constant to ensure that we are at least a
2010 * certain distance from the original value.
2011 *
2012 * This is used when an old connection is in timed wait
2013 * and we have a new one coming in, for instance.
2014 */
2015 if (addin != 0) {
2016 #ifdef TCPISS_DEBUG
2017 printf("Random %08x, ", tcp_iss);
2018 #endif
2019 tcp_iss &= TCP_ISS_RANDOM_MASK;
2020 tcp_iss += addin + TCP_ISSINCR;
2021 #ifdef TCPISS_DEBUG
2022 printf("Old ISS %08x, ISS %08x\n", addin, tcp_iss);
2023 #endif
2024 } else {
2025 tcp_iss &= TCP_ISS_RANDOM_MASK;
2026 tcp_iss += tcp_iss_seq;
2027 tcp_iss_seq += TCP_ISSINCR;
2028 #ifdef TCPISS_DEBUG
2029 printf("ISS %08x\n", tcp_iss);
2030 #endif
2031 }
2032 }
2033
2034 if (tcp_compat_42) {
2035 /*
2036 * Limit it to the positive range for really old TCP
2037 * implementations.
2038 */
2039 if (tcp_iss >= 0x80000000)
2040 tcp_iss &= 0x7fffffff; /* XXX */
2041 }
2042
2043 return (tcp_iss);
2044 }
2045
2046 #ifdef IPSEC
2047 /* compute ESP/AH header size for TCP, including outer IP header. */
2048 size_t
2049 ipsec4_hdrsiz_tcp(tp)
2050 struct tcpcb *tp;
2051 {
2052 struct inpcb *inp;
2053 size_t hdrsiz;
2054
2055 /* XXX mapped addr case (tp->t_in6pcb) */
2056 if (!tp || !tp->t_template || !(inp = tp->t_inpcb))
2057 return 0;
2058 switch (tp->t_family) {
2059 case AF_INET:
2060 		/* XXX: should use correct direction. */
2061 hdrsiz = ipsec4_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, inp);
2062 break;
2063 default:
2064 hdrsiz = 0;
2065 break;
2066 }
2067
2068 return hdrsiz;
2069 }
2070
2071 #ifdef INET6
2072 size_t
2073 ipsec6_hdrsiz_tcp(tp)
2074 struct tcpcb *tp;
2075 {
2076 struct in6pcb *in6p;
2077 size_t hdrsiz;
2078
2079 if (!tp || !tp->t_template || !(in6p = tp->t_in6pcb))
2080 return 0;
2081 switch (tp->t_family) {
2082 case AF_INET6:
2083 		/* XXX: should use correct direction. */
2084 hdrsiz = ipsec6_hdrsiz(tp->t_template, IPSEC_DIR_OUTBOUND, in6p);
2085 break;
2086 case AF_INET:
2087 /* mapped address case - tricky */
2088 default:
2089 hdrsiz = 0;
2090 break;
2091 }
2092
2093 return hdrsiz;
2094 }
2095 #endif
2096 #endif /*IPSEC*/
2097
2098 /*
2099 * Determine the length of the TCP options for this connection.
2100 *
2101 * XXX: What do we do for SACK, when we add that? Just reserve
2102 * all of the space? Otherwise we can't exactly be incrementing
2103 * cwnd by an amount that varies depending on the amount we last
2104 * had to SACK!
2105 */
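/*
 * At present the only option accounted for here is the RFC 1323
 * timestamp, sent as NOP,NOP,TIMESTAMP for a total of
 * TCPOLEN_TSTAMP_APPA (12) bytes once both sides have agreed to
 * use timestamps.
 */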
2106
2107 u_int
2108 tcp_optlen(tp)
2109 struct tcpcb *tp;
2110 {
2111 if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
2112 (TF_REQ_TSTMP | TF_RCVD_TSTMP))
2113 return TCPOLEN_TSTAMP_APPA;
2114 else
2115 return 0;
2116 }
2117