/*	$KAME: sctputil.c,v 1.39 2005/06/16 20:54:06 jinmei Exp $	*/
2 /* $NetBSD: sctputil.c,v 1.12.16.2 2020/04/08 14:08:58 martin Exp $ */
3
4 /*
5 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Cisco Systems, Inc.
19 * 4. Neither the name of the project nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: sctputil.c,v 1.12.16.2 2020/04/08 14:08:58 martin Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_ipsec.h"
42 #include "opt_sctp.h"
43 #endif /* _KERNEL_OPT */
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/domain.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/kernel.h>
56 #include <sys/sysctl.h>
57 #include <sys/cprng.h>
58
59 #include <sys/callout.h>
60
61 #include <net/route.h>
62
63 #ifdef INET6
64 #include <sys/domain.h>
65 #endif
66
67 #include <machine/limits.h>
68
69 #include <net/if.h>
70 #include <net/if_types.h>
71 #include <net/route.h>
72
73 #include <netinet/in.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/ip.h>
76 #include <netinet/in_pcb.h>
77 #include <netinet/in_var.h>
78 #include <netinet/ip_var.h>
79
80 #ifdef INET6
81 #include <netinet/ip6.h>
82 #include <netinet6/ip6_var.h>
83 #include <netinet6/scope6_var.h>
84 #include <netinet6/in6_pcb.h>
85
86 #endif /* INET6 */
87
88 #include <netinet/sctp_pcb.h>
89
90 #ifdef IPSEC
91 #include <netipsec/ipsec.h>
92 #include <netipsec/key.h>
93 #endif /* IPSEC */
94
95 #include <netinet/sctputil.h>
96 #include <netinet/sctp_var.h>
97 #ifdef INET6
98 #include <netinet6/sctp6_var.h>
99 #endif
100 #include <netinet/sctp_header.h>
101 #include <netinet/sctp_output.h>
102 #include <netinet/sctp_hashdriver.h>
103 #include <netinet/sctp_uio.h>
104 #include <netinet/sctp_timer.h>
105 #include <netinet/sctp_crc32.h>
106 #include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
107 #define NUMBER_OF_MTU_SIZES 18
108
109 #ifdef SCTP_DEBUG
110 extern u_int32_t sctp_debug_on;
111 #endif
112
113 #ifdef SCTP_STAT_LOGGING
114 int sctp_cwnd_log_at=0;
115 int sctp_cwnd_log_rolled=0;
116 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
117
118 void sctp_clr_stat_log(void)
119 {
120 sctp_cwnd_log_at=0;
121 sctp_cwnd_log_rolled=0;
122 }
123
124 void
125 sctp_log_strm_del_alt(u_int32_t tsn, u_int16_t sseq, int from)
126 {
127
128 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
129 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
130 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
131 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
132 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
133 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
134 sctp_cwnd_log_at++;
135 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
136 sctp_cwnd_log_at = 0;
137 sctp_cwnd_log_rolled = 1;
138 }
139
140 }
141
142 void
143 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
144 {
145
146 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
147 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAP;
148 sctp_clog[sctp_cwnd_log_at].x.map.base = map;
149 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
150 sctp_clog[sctp_cwnd_log_at].x.map.high = high;
151 sctp_cwnd_log_at++;
152 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
153 sctp_cwnd_log_at = 0;
154 sctp_cwnd_log_rolled = 1;
155 }
156 }
157
158 void
159 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
160 int from)
161 {
162
163 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
164 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_FR;
165 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
166 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
167 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
168 sctp_cwnd_log_at++;
169 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
170 sctp_cwnd_log_at = 0;
171 sctp_cwnd_log_rolled = 1;
172 }
173 }
174
175 void
176 sctp_log_strm_del(struct sctp_tmit_chunk *chk, struct sctp_tmit_chunk *poschk,
177 int from)
178 {
179
180 if (chk == NULL) {
181 printf("Gak log of NULL?\n");
182 return;
183 }
184 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
185 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
186 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = chk->rec.data.TSN_seq;
187 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = chk->rec.data.stream_seq;
188 if (poschk != NULL) {
189 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn =
190 poschk->rec.data.TSN_seq;
191 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq =
192 poschk->rec.data.stream_seq;
193 } else {
194 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
195 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
196 }
197 sctp_cwnd_log_at++;
198 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
199 sctp_cwnd_log_at = 0;
200 sctp_cwnd_log_rolled = 1;
201 }
202 }
203
204 void
205 sctp_log_cwnd(struct sctp_nets *net, int augment, uint8_t from)
206 {
207
208 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
209 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_CWND;
210 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
211 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
212 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
213 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
214 sctp_cwnd_log_at++;
215 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
216 sctp_cwnd_log_at = 0;
217 sctp_cwnd_log_rolled = 1;
218 }
219 }
220
221 void
222 sctp_log_maxburst(struct sctp_nets *net, int error, int burst, uint8_t from)
223 {
224 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
225 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAXBURST;
226 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
227 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
228 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
229 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
230 sctp_cwnd_log_at++;
231 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
232 sctp_cwnd_log_at = 0;
233 sctp_cwnd_log_rolled = 1;
234 }
235 }
236
237 void
238 sctp_log_rwnd(uint8_t from, u_int32_t peers_rwnd , u_int32_t snd_size, u_int32_t overhead)
239 {
240 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
241 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
242 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
243 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
244 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
245 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
246 sctp_cwnd_log_at++;
247 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
248 sctp_cwnd_log_at = 0;
249 sctp_cwnd_log_rolled = 1;
250 }
251 }
252
253 void
254 sctp_log_rwnd_set(uint8_t from, u_int32_t peers_rwnd , u_int32_t flight_size, u_int32_t overhead, u_int32_t a_rwndval)
255 {
256 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
257 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
258 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
259 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
260 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
261 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
262 sctp_cwnd_log_at++;
263 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
264 sctp_cwnd_log_at = 0;
265 sctp_cwnd_log_rolled = 1;
266 }
267 }
268
269 void
270 sctp_log_mbcnt(uint8_t from, u_int32_t total_oq , u_int32_t book, u_int32_t total_mbcnt_q, u_int32_t mbcnt)
271 {
272 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
273 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MBCNT;
274 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
275 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
276 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
277 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
278 sctp_cwnd_log_at++;
279 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
280 sctp_cwnd_log_at = 0;
281 sctp_cwnd_log_rolled = 1;
282 }
283 }
284
285 void
286 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc)
287 {
288
289 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
290 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_BLOCK;
291 sctp_clog[sctp_cwnd_log_at].x.blk.maxmb = (u_int16_t)(so->so_snd.sb_mbmax/1024);
292 sctp_clog[sctp_cwnd_log_at].x.blk.onmb = asoc->total_output_mbuf_queue_size;
293 sctp_clog[sctp_cwnd_log_at].x.blk.maxsb = (u_int16_t)(so->so_snd.sb_hiwat/1024);
294 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
295 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (u_int16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
296 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (u_int16_t)asoc->stream_queue_cnt;
297 sctp_cwnd_log_at++;
298 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
299 sctp_cwnd_log_at = 0;
300 sctp_cwnd_log_rolled = 1;
301 }
302 }
303
304 int
305 sctp_fill_stat_log(struct mbuf *m)
306 {
307 struct sctp_cwnd_log_req *req;
308 int size_limit, num, i, at, cnt_out=0;
309
310 if (m == NULL)
311 return (EINVAL);
312
313 size_limit = (m->m_len - sizeof(struct sctp_cwnd_log_req));
314 if (size_limit < sizeof(struct sctp_cwnd_log)) {
315 return (EINVAL);
316 }
317 req = mtod(m, struct sctp_cwnd_log_req *);
318 num = size_limit/sizeof(struct sctp_cwnd_log);
319 if (sctp_cwnd_log_rolled) {
320 req->num_in_log = SCTP_STAT_LOG_SIZE;
321 } else {
322 req->num_in_log = sctp_cwnd_log_at;
323 /* if the log has not rolled, we don't
324 * let you have old data.
325 */
326 if (req->end_at > sctp_cwnd_log_at) {
327 req->end_at = sctp_cwnd_log_at;
328 }
329 }
330 if ((num < SCTP_STAT_LOG_SIZE) &&
331 ((sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
332 /* we can't return all of it */
333 if (((req->start_at == 0) && (req->end_at == 0)) ||
334 (req->start_at >= SCTP_STAT_LOG_SIZE) ||
335 (req->end_at >= SCTP_STAT_LOG_SIZE)) {
336 /* No user request or user is wacked. */
337 req->num_ret = num;
338 req->end_at = sctp_cwnd_log_at - 1;
339 if ((sctp_cwnd_log_at - num) < 0) {
340 int cc;
341 cc = num - sctp_cwnd_log_at;
342 req->start_at = SCTP_STAT_LOG_SIZE - cc;
343 } else {
344 req->start_at = sctp_cwnd_log_at - num;
345 }
346 } else {
347 /* a user request */
348 int cc;
349 if (req->start_at > req->end_at) {
350 cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
351 (req->end_at + 1);
352 } else {
353
354 cc = req->end_at - req->start_at;
355 }
356 if (cc < num) {
357 num = cc;
358 }
359 req->num_ret = num;
360 }
361 } else {
362 /* We can return all of it */
363 req->start_at = 0;
364 req->end_at = sctp_cwnd_log_at - 1;
365 req->num_ret = sctp_cwnd_log_at;
366 }
367 for (i = 0, at = req->start_at; i < req->num_ret; i++) {
368 req->log[i] = sctp_clog[at];
369 cnt_out++;
370 at++;
371 if (at >= SCTP_STAT_LOG_SIZE)
372 at = 0;
373 }
374 m->m_len = (cnt_out * sizeof(struct sctp_cwnd_log_req)) + sizeof(struct sctp_cwnd_log_req);
375 return (0);
376 }
377
378 #endif
379
380 #ifdef SCTP_AUDITING_ENABLED
381 u_int8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
382 static int sctp_audit_indx = 0;
383
384 static
385 void sctp_print_audit_report(void)
386 {
387 int i;
388 int cnt;
389 cnt = 0;
390 for (i=sctp_audit_indx;i<SCTP_AUDIT_SIZE;i++) {
391 if ((sctp_audit_data[i][0] == 0xe0) &&
392 (sctp_audit_data[i][1] == 0x01)) {
393 cnt = 0;
394 printf("\n");
395 } else if (sctp_audit_data[i][0] == 0xf0) {
396 cnt = 0;
397 printf("\n");
398 } else if ((sctp_audit_data[i][0] == 0xc0) &&
399 (sctp_audit_data[i][1] == 0x01)) {
400 printf("\n");
401 cnt = 0;
402 }
403 printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
404 (uint32_t)sctp_audit_data[i][1]);
405 cnt++;
406 if ((cnt % 14) == 0)
407 printf("\n");
408 }
409 for (i=0;i<sctp_audit_indx;i++) {
410 if ((sctp_audit_data[i][0] == 0xe0) &&
411 (sctp_audit_data[i][1] == 0x01)) {
412 cnt = 0;
413 printf("\n");
414 } else if (sctp_audit_data[i][0] == 0xf0) {
415 cnt = 0;
416 printf("\n");
417 } else if ((sctp_audit_data[i][0] == 0xc0) &&
418 (sctp_audit_data[i][1] == 0x01)) {
419 printf("\n");
420 cnt = 0;
421 }
422 printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
423 (uint32_t)sctp_audit_data[i][1]);
424 cnt++;
425 if ((cnt % 14) == 0)
426 printf("\n");
427 }
428 printf("\n");
429 }
430
431 void sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
432 struct sctp_nets *net)
433 {
434 int resend_cnt, tot_out, rep, tot_book_cnt;
435 struct sctp_nets *lnet;
436 struct sctp_tmit_chunk *chk;
437
438 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
439 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
440 sctp_audit_indx++;
441 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
442 sctp_audit_indx = 0;
443 }
444 if (inp == NULL) {
445 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
446 sctp_audit_data[sctp_audit_indx][1] = 0x01;
447 sctp_audit_indx++;
448 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
449 sctp_audit_indx = 0;
450 }
451 return;
452 }
453 if (stcb == NULL) {
454 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
455 sctp_audit_data[sctp_audit_indx][1] = 0x02;
456 sctp_audit_indx++;
457 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
458 sctp_audit_indx = 0;
459 }
460 return;
461 }
462 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
463 sctp_audit_data[sctp_audit_indx][1] =
464 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
465 sctp_audit_indx++;
466 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
467 sctp_audit_indx = 0;
468 }
469 rep = 0;
470 tot_book_cnt = 0;
471 resend_cnt = tot_out = 0;
472 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
473 if (chk->sent == SCTP_DATAGRAM_RESEND) {
474 resend_cnt++;
475 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
476 tot_out += chk->book_size;
477 tot_book_cnt++;
478 }
479 }
480 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
481 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
482 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
483 sctp_audit_indx++;
484 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
485 sctp_audit_indx = 0;
486 }
487 printf("resend_cnt:%d asoc-tot:%d\n",
488 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
489 rep = 1;
490 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
491 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
492 sctp_audit_data[sctp_audit_indx][1] =
493 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
494 sctp_audit_indx++;
495 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
496 sctp_audit_indx = 0;
497 }
498 }
499 if (tot_out != stcb->asoc.total_flight) {
500 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
501 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
502 sctp_audit_indx++;
503 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
504 sctp_audit_indx = 0;
505 }
506 rep = 1;
507 printf("tot_flt:%d asoc_tot:%d\n", tot_out,
508 (int)stcb->asoc.total_flight);
509 stcb->asoc.total_flight = tot_out;
510 }
511 if (tot_book_cnt != stcb->asoc.total_flight_count) {
512 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
513 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
514 sctp_audit_indx++;
515 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
516 sctp_audit_indx = 0;
517 }
518 rep = 1;
519 printf("tot_flt_book:%d\n", tot_book);
520
521 stcb->asoc.total_flight_count = tot_book_cnt;
522 }
523 tot_out = 0;
524 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
525 tot_out += lnet->flight_size;
526 }
527 if (tot_out != stcb->asoc.total_flight) {
528 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
529 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
530 sctp_audit_indx++;
531 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
532 sctp_audit_indx = 0;
533 }
534 rep = 1;
535 printf("real flight:%d net total was %d\n",
536 stcb->asoc.total_flight, tot_out);
537 /* now corrective action */
538 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
539 tot_out = 0;
540 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
541 if ((chk->whoTo == lnet) &&
542 (chk->sent < SCTP_DATAGRAM_RESEND)) {
543 tot_out += chk->book_size;
544 }
545 }
546 if (lnet->flight_size != tot_out) {
547 printf("net:%x flight was %d corrected to %d\n",
548 (uint32_t)lnet, lnet->flight_size, tot_out);
549 lnet->flight_size = tot_out;
550 }
551
552 }
553 }
554
555 if (rep) {
556 sctp_print_audit_report();
557 }
558 }
559
560 void
561 sctp_audit_log(u_int8_t ev, u_int8_t fd)
562 {
563 sctp_audit_data[sctp_audit_indx][0] = ev;
564 sctp_audit_data[sctp_audit_indx][1] = fd;
565 sctp_audit_indx++;
566 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
567 sctp_audit_indx = 0;
568 }
569 }
570
571 #endif
572
/*
 * a list of sizes based on typical mtu's, used only if next hop
 * size not returned.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table MTU strictly below totsz: the next best fit
 * after a datagram of size totsz failed to be sent.  Falls back to the
 * smallest entry when totsz is at/below the smallest size, and (as the
 * code has always done) when totsz exceeds every table entry.
 */
int
find_next_best_mtu(int totsz)
{
	int i;
	/*
	 * IMPROVEMENT: derive the bound from the table itself instead of
	 * the separately maintained NUMBER_OF_MTU_SIZES macro, which can
	 * silently drift out of sync with the initializer above.
	 */
	const int cnt = (int)(sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]));

	for (i = 0; i < cnt; i++) {
		if (totsz < sctp_mtu_sizes[i])
			return (sctp_mtu_sizes[i > 0 ? i - 1 : 0]);
	}
	return (sctp_mtu_sizes[0]);
}
617
618 uint32_t
619 sctp_select_initial_TSN(struct sctp_pcb *m)
620 {
621 return cprng_strong32();
622 }
623
624 u_int32_t sctp_select_a_tag(struct sctp_inpcb *m)
625 {
626 u_long x, not_done;
627 struct timeval now;
628
629 SCTP_GETTIME_TIMEVAL(&now);
630 not_done = 1;
631 while (not_done) {
632 x = sctp_select_initial_TSN(&m->sctp_ep);
633 if (x == 0) {
634 /* we never use 0 */
635 continue;
636 }
637 if (sctp_is_vtag_good(m, x, &now)) {
638 not_done = 0;
639 }
640 }
641 return (x);
642 }
643
644
645 int
646 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
647 int for_a_init, uint32_t override_tag )
648 {
649 /*
650 * Anything set to zero is taken care of by the allocation
651 * routine's bzero
652 */
653
654 /*
655 * Up front select what scoping to apply on addresses I tell my peer
656 * Not sure what to do with these right now, we will need to come up
657 * with a way to set them. We may need to pass them through from the
658 * caller in the sctp_aloc_assoc() function.
659 */
660 int i;
661 /* init all variables to a known value.*/
662 asoc->state = SCTP_STATE_INUSE;
663 asoc->max_burst = m->sctp_ep.max_burst;
664 asoc->heart_beat_delay = m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
665 asoc->cookie_life = m->sctp_ep.def_cookie_life;
666
667 if (override_tag) {
668 asoc->my_vtag = override_tag;
669 } else {
670 asoc->my_vtag = sctp_select_a_tag(m);
671 }
672 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
673 sctp_select_initial_TSN(&m->sctp_ep);
674 asoc->t3timeout_highest_marked = asoc->asconf_seq_out;
675 /* we are opptimisitic here */
676 asoc->peer_supports_asconf = 1;
677 asoc->peer_supports_asconf_setprim = 1;
678 asoc->peer_supports_pktdrop = 1;
679
680 asoc->sent_queue_retran_cnt = 0;
681 /* This will need to be adjusted */
682 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
683 asoc->last_acked_seq = asoc->init_seq_number - 1;
684 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
685 asoc->asconf_seq_in = asoc->last_acked_seq;
686
687 /* here we are different, we hold the next one we expect */
688 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
689
690 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
691 asoc->initial_rto = m->sctp_ep.initial_rto;
692
693 asoc->max_init_times = m->sctp_ep.max_init_times;
694 asoc->max_send_times = m->sctp_ep.max_send_times;
695 asoc->def_net_failure = m->sctp_ep.def_net_failure;
696
697 /* ECN Nonce initialization */
698 asoc->ecn_nonce_allowed = 0;
699 asoc->receiver_nonce_sum = 1;
700 asoc->nonce_sum_expect_base = 1;
701 asoc->nonce_sum_check = 1;
702 asoc->nonce_resync_tsn = 0;
703 asoc->nonce_wait_for_ecne = 0;
704 asoc->nonce_wait_tsn = 0;
705
706 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
707 struct in6pcb *inp6;
708
709
710 /* Its a V6 socket */
711 inp6 = (struct in6pcb *)m;
712 asoc->ipv6_addr_legal = 1;
713 /* Now look at the binding flag to see if V4 will be legal */
714 if (
715 #if defined(__OpenBSD__)
716 (0) /* we always do dual bind */
717 #elif defined (__NetBSD__)
718 (inp6->in6p_flags & IN6P_IPV6_V6ONLY)
719 #else
720 (inp6->inp_flags & IN6P_IPV6_V6ONLY)
721 #endif
722 == 0) {
723 asoc->ipv4_addr_legal = 1;
724 } else {
725 /* V4 addresses are NOT legal on the association */
726 asoc->ipv4_addr_legal = 0;
727 }
728 } else {
729 /* Its a V4 socket, no - V6 */
730 asoc->ipv4_addr_legal = 1;
731 asoc->ipv6_addr_legal = 0;
732 }
733
734
735 asoc->my_rwnd = uimax(m->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND);
736 asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat;
737
738 asoc->smallest_mtu = m->sctp_frag_point;
739 asoc->minrto = m->sctp_ep.sctp_minrto;
740 asoc->maxrto = m->sctp_ep.sctp_maxrto;
741
742 LIST_INIT(&asoc->sctp_local_addr_list);
743 TAILQ_INIT(&asoc->nets);
744 TAILQ_INIT(&asoc->pending_reply_queue);
745 asoc->last_asconf_ack_sent = NULL;
746 /* Setup to fill the hb random cache at first HB */
747 asoc->hb_random_idx = 4;
748
749 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
750
751 /*
752 * Now the stream parameters, here we allocate space for all
753 * streams that we request by default.
754 */
755 asoc->streamoutcnt = asoc->pre_open_streams =
756 m->sctp_ep.pre_open_stream_count;
757 asoc->strmout = malloc(asoc->streamoutcnt *
758 sizeof(struct sctp_stream_out), M_PCB, M_NOWAIT);
759 if (asoc->strmout == NULL) {
760 /* big trouble no memory */
761 return (ENOMEM);
762 }
763 for (i = 0; i < asoc->streamoutcnt; i++) {
764 /*
765 * inbound side must be set to 0xffff,
766 * also NOTE when we get the INIT-ACK back (for INIT sender)
767 * we MUST reduce the count (streamoutcnt) but first check
768 * if we sent to any of the upper streams that were dropped
769 * (if some were). Those that were dropped must be notified
770 * to the upper layer as failed to send.
771 */
772 asoc->strmout[i].next_sequence_sent = 0x0;
773 TAILQ_INIT(&asoc->strmout[i].outqueue);
774 asoc->strmout[i].stream_no = i;
775 asoc->strmout[i].next_spoke.tqe_next = 0;
776 asoc->strmout[i].next_spoke.tqe_prev = 0;
777 }
778 /* Now the mapping array */
779 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
780 asoc->mapping_array = malloc(asoc->mapping_array_size,
781 M_PCB, M_NOWAIT);
782 if (asoc->mapping_array == NULL) {
783 free(asoc->strmout, M_PCB);
784 return (ENOMEM);
785 }
786 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
787 /* Now the init of the other outqueues */
788 TAILQ_INIT(&asoc->out_wheel);
789 TAILQ_INIT(&asoc->control_send_queue);
790 TAILQ_INIT(&asoc->send_queue);
791 TAILQ_INIT(&asoc->sent_queue);
792 TAILQ_INIT(&asoc->reasmqueue);
793 TAILQ_INIT(&asoc->delivery_queue);
794 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
795
796 TAILQ_INIT(&asoc->asconf_queue);
797 return (0);
798 }
799
800 int
801 sctp_expand_mapping_array(struct sctp_association *asoc)
802 {
803 /* mapping array needs to grow */
804 u_int8_t *new_array;
805 uint16_t new_size, old_size;
806
807 old_size = asoc->mapping_array_size;
808 new_size = old_size + SCTP_MAPPING_ARRAY_INCR;
809 new_array = malloc(new_size, M_PCB, M_NOWAIT);
810 if (new_array == NULL) {
811 /* can't get more, forget it */
812 printf("No memory for expansion of SCTP mapping array %d\n",
813 new_size);
814 return (-1);
815 }
816 memcpy(new_array, asoc->mapping_array, old_size);
817 memset(new_array + old_size, 0, SCTP_MAPPING_ARRAY_INCR);
818 free(asoc->mapping_array, M_PCB);
819 asoc->mapping_array = new_array;
820 asoc->mapping_array_size = new_size;
821 return (0);
822 }
823
824 static void
825 sctp_timeout_handler(void *t)
826 {
827 struct sctp_inpcb *inp;
828 struct sctp_tcb *stcb;
829 struct sctp_nets *net;
830 struct sctp_timer *tmr;
831 int did_output;
832
833 mutex_enter(softnet_lock);
834 tmr = (struct sctp_timer *)t;
835 inp = (struct sctp_inpcb *)tmr->ep;
836 stcb = (struct sctp_tcb *)tmr->tcb;
837 net = (struct sctp_nets *)tmr->net;
838 did_output = 1;
839
840 #ifdef SCTP_AUDITING_ENABLED
841 sctp_audit_log(0xF0, (u_int8_t)tmr->type);
842 sctp_auditing(3, inp, stcb, net);
843 #endif
844 sctp_pegs[SCTP_TIMERS_EXP]++;
845
846 if (inp == NULL) {
847 return;
848 }
849
850 SCTP_INP_WLOCK(inp);
851 if (inp->sctp_socket == 0) {
852 mutex_exit(softnet_lock);
853 SCTP_INP_WUNLOCK(inp);
854 return;
855 }
856 if (stcb) {
857 if (stcb->asoc.state == 0) {
858 mutex_exit(softnet_lock);
859 SCTP_INP_WUNLOCK(inp);
860 return;
861 }
862 }
863 #ifdef SCTP_DEBUG
864 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
865 printf("Timer type %d goes off\n", tmr->type);
866 }
867 #endif /* SCTP_DEBUG */
868 #ifndef __NetBSD__
869 if (!callout_active(&tmr->timer)) {
870 SCTP_INP_WUNLOCK(inp);
871 return;
872 }
873 #endif
874 if (stcb) {
875 SCTP_TCB_LOCK(stcb);
876 }
877 SCTP_INP_INCR_REF(inp);
878 SCTP_INP_WUNLOCK(inp);
879
880 switch (tmr->type) {
881 case SCTP_TIMER_TYPE_ITERATOR:
882 {
883 struct sctp_iterator *it;
884 it = (struct sctp_iterator *)inp;
885 sctp_iterator_timer(it);
886 }
887 break;
888 /* call the handler for the appropriate timer type */
889 case SCTP_TIMER_TYPE_SEND:
890 sctp_pegs[SCTP_TMIT_TIMER]++;
891 stcb->asoc.num_send_timers_up--;
892 if (stcb->asoc.num_send_timers_up < 0) {
893 stcb->asoc.num_send_timers_up = 0;
894 }
895 if (sctp_t3rxt_timer(inp, stcb, net)) {
896 /* no need to unlock on tcb its gone */
897
898 goto out_decr;
899 }
900 #ifdef SCTP_AUDITING_ENABLED
901 sctp_auditing(4, inp, stcb, net);
902 #endif
903 sctp_chunk_output(inp, stcb, 1);
904 if ((stcb->asoc.num_send_timers_up == 0) &&
905 (stcb->asoc.sent_queue_cnt > 0)
906 ) {
907 struct sctp_tmit_chunk *chk;
908 /*
909 * safeguard. If there on some on the sent queue
910 * somewhere but no timers running something is
911 * wrong... so we start a timer on the first chunk
912 * on the send queue on whatever net it is sent to.
913 */
914 sctp_pegs[SCTP_T3_SAFEGRD]++;
915 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
916 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
917 chk->whoTo);
918 }
919 break;
920 case SCTP_TIMER_TYPE_INIT:
921 if (sctp_t1init_timer(inp, stcb, net)) {
922 /* no need to unlock on tcb its gone */
923 goto out_decr;
924 }
925 /* We do output but not here */
926 did_output = 0;
927 break;
928 case SCTP_TIMER_TYPE_RECV:
929 sctp_pegs[SCTP_RECV_TIMER]++;
930 sctp_send_sack(stcb);
931 #ifdef SCTP_AUDITING_ENABLED
932 sctp_auditing(4, inp, stcb, net);
933 #endif
934 sctp_chunk_output(inp, stcb, 4);
935 break;
936 case SCTP_TIMER_TYPE_SHUTDOWN:
937 if (sctp_shutdown_timer(inp, stcb, net) ) {
938 /* no need to unlock on tcb its gone */
939 goto out_decr;
940 }
941 #ifdef SCTP_AUDITING_ENABLED
942 sctp_auditing(4, inp, stcb, net);
943 #endif
944 sctp_chunk_output(inp, stcb, 5);
945 break;
946 case SCTP_TIMER_TYPE_HEARTBEAT:
947 if (sctp_heartbeat_timer(inp, stcb, net)) {
948 /* no need to unlock on tcb its gone */
949 goto out_decr;
950 }
951 #ifdef SCTP_AUDITING_ENABLED
952 sctp_auditing(4, inp, stcb, net);
953 #endif
954 sctp_chunk_output(inp, stcb, 6);
955 break;
956 case SCTP_TIMER_TYPE_COOKIE:
957 if (sctp_cookie_timer(inp, stcb, net)) {
958 /* no need to unlock on tcb its gone */
959 goto out_decr;
960 }
961 #ifdef SCTP_AUDITING_ENABLED
962 sctp_auditing(4, inp, stcb, net);
963 #endif
964 sctp_chunk_output(inp, stcb, 1);
965 break;
966 case SCTP_TIMER_TYPE_NEWCOOKIE:
967 {
968 struct timeval tv;
969 int i, secret;
970 SCTP_GETTIME_TIMEVAL(&tv);
971 SCTP_INP_WLOCK(inp);
972 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
973 inp->sctp_ep.last_secret_number =
974 inp->sctp_ep.current_secret_number;
975 inp->sctp_ep.current_secret_number++;
976 if (inp->sctp_ep.current_secret_number >=
977 SCTP_HOW_MANY_SECRETS) {
978 inp->sctp_ep.current_secret_number = 0;
979 }
980 secret = (int)inp->sctp_ep.current_secret_number;
981 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
982 inp->sctp_ep.secret_key[secret][i] =
983 sctp_select_initial_TSN(&inp->sctp_ep);
984 }
985 SCTP_INP_WUNLOCK(inp);
986 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
987 }
988 did_output = 0;
989 break;
990 case SCTP_TIMER_TYPE_PATHMTURAISE:
991 sctp_pathmtu_timer(inp, stcb, net);
992 did_output = 0;
993 break;
994 case SCTP_TIMER_TYPE_SHUTDOWNACK:
995 if (sctp_shutdownack_timer(inp, stcb, net)) {
996 /* no need to unlock on tcb its gone */
997 goto out_decr;
998 }
999 #ifdef SCTP_AUDITING_ENABLED
1000 sctp_auditing(4, inp, stcb, net);
1001 #endif
1002 sctp_chunk_output(inp, stcb, 7);
1003 break;
1004 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1005 sctp_abort_an_association(inp, stcb,
1006 SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
1007 /* no need to unlock on tcb its gone */
1008 goto out_decr;
1009 break;
1010
1011 case SCTP_TIMER_TYPE_STRRESET:
1012 if (sctp_strreset_timer(inp, stcb, net)) {
1013 /* no need to unlock on tcb its gone */
1014 goto out_decr;
1015 }
1016 sctp_chunk_output(inp, stcb, 9);
1017 break;
1018
1019 case SCTP_TIMER_TYPE_ASCONF:
1020 if (sctp_asconf_timer(inp, stcb, net)) {
1021 /* no need to unlock on tcb its gone */
1022 goto out_decr;
1023 }
1024 #ifdef SCTP_AUDITING_ENABLED
1025 sctp_auditing(4, inp, stcb, net);
1026 #endif
1027 sctp_chunk_output(inp, stcb, 8);
1028 break;
1029
1030 case SCTP_TIMER_TYPE_AUTOCLOSE:
1031 sctp_autoclose_timer(inp, stcb, net);
1032 sctp_chunk_output(inp, stcb, 10);
1033 did_output = 0;
1034 break;
1035 case SCTP_TIMER_TYPE_INPKILL:
1036 /* special case, take away our
1037 * increment since WE are the killer
1038 */
1039 SCTP_INP_WLOCK(inp);
1040 SCTP_INP_DECR_REF(inp);
1041 SCTP_INP_WUNLOCK(inp);
1042 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
1043 sctp_inpcb_free(inp, 1);
1044 goto out_no_decr;
1045 break;
1046 default:
1047 #ifdef SCTP_DEBUG
1048 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1049 printf("sctp_timeout_handler:unknown timer %d\n",
1050 tmr->type);
1051 }
1052 #endif /* SCTP_DEBUG */
1053 break;
1054 };
1055 #ifdef SCTP_AUDITING_ENABLED
1056 sctp_audit_log(0xF1, (u_int8_t)tmr->type);
1057 sctp_auditing(5, inp, stcb, net);
1058 #endif
1059 if (did_output) {
1060 /*
1061 * Now we need to clean up the control chunk chain if an
1062 * ECNE is on it. It must be marked as UNSENT again so next
1063 * call will continue to send it until such time that we get
1064 * a CWR, to remove it. It is, however, less likely that we
1065 * will find a ecn echo on the chain though.
1066 */
1067 sctp_fix_ecn_echo(&stcb->asoc);
1068 }
1069 if (stcb) {
1070 SCTP_TCB_UNLOCK(stcb);
1071 }
1072 out_decr:
1073 SCTP_INP_WLOCK(inp);
1074 SCTP_INP_DECR_REF(inp);
1075 SCTP_INP_WUNLOCK(inp);
1076
1077 out_no_decr:
1078
1079 mutex_exit(softnet_lock);
1080 }
1081
/*
 * Arm the timer of type t_type.
 *
 * Selects which sctp_timer structure the type lives on (endpoint,
 * association, or destination) and computes the expiry in ticks.
 * Most retransmission-class timers run for the destination's smoothed
 * RTO (net->RTO) and fall back to the association's initial_rto while
 * net->RTO is still 0 (no RTT measured yet).
 *
 * Returns 0 on success, EALREADY when the timer is already pending
 * (the running timer is left unchanged), and EFAULT when a required
 * argument is NULL, the type is unknown, or no timer/timeout could be
 * derived.
 */
int
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	if (inp == NULL)
		return (EFAULT);

	to_ticks = 0;

	tmr = NULL;
	switch (t_type) {
	case SCTP_TIMER_TYPE_ITERATOR:
	{
		/* the iterator is passed in disguised as the inp argument */
		struct sctp_iterator *it;
		it = (struct sctp_iterator *)inp;
		tmr = &it->tmr;
		to_ticks = SCTP_ITERATOR_TICKS;
	}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
	{
		int rto_val;
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			/* no RTT measured yet; use the initial RTO */
			rto_val = stcb->asoc.initial_rto;
		} else {
			rto_val = net->RTO;
		}
		to_ticks = MSEC_TO_TICKS(rto_val);
	}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default
		 * usually about 1 minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV];
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}

		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO.
		 * Even though we use a different timer. We also add the
		 * HB timer PLUS a random jitter.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		{
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf=0;
			struct sctp_nets *lnet;

			/* count destinations still awaiting confirmation */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
					cnt_of_unconf++;
				}
			}
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("HB timer to start unconfirmed:%d hb_delay:%d\n",
				    cnt_of_unconf, stcb->asoc.heart_beat_delay);
			}
#endif
			if (stcb->asoc.hb_random_idx > 3) {
				/* random-byte cache exhausted: draw 4 fresh bytes */
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				this_random = stcb->asoc.hb_random_values[0];
				stcb->asoc.hb_random_idx = 0;
				stcb->asoc.hb_ect_randombit = 0;
			} else {
				/* consume the next cached random byte */
				this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
				stcb->asoc.hb_random_idx++;
				stcb->asoc.hb_ect_randombit = 0;
			}
			/*
			 * this_random will be 0 - 256 ms
			 * RTO is in ms.
			 */
			if ((stcb->asoc.heart_beat_delay == 0) &&
			    (cnt_of_unconf == 0)) {
				/* no HB on this inp after confirmations */
				return (0);
			}
			if (net) {
				int delay;
				delay = stcb->asoc.heart_beat_delay;
				/*
				 * drop the extra HB delay while any
				 * reachable, in-scope address is still
				 * unconfirmed
				 */
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks *= hz;
			to_ticks /= 1000;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("Timer to expire in %d ticks\n", to_ticks);
			}
#endif
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since
		 * one RTT was complete. If a retran happened then we will
		 * be using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here,
		 * usually about 60 minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the
		 * signature_change timer since that has
		 * stopped and we are in the GONE state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = (SCTP_INP_KILL_TIMEOUT * hz) / 1000;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU,
		 * usually about 10 minutes.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		if (net == NULL) {
			return (EFAULT);
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer,
		 * usually about 3 minutes.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the inp
		 * but its value is from the RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the inp
		 * but its value is from the RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return (EFAULT);
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/* Really an error since stcb is NOT set to autoclose */
			return (0);
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("sctp_timer_start:Unknown timer type %d\n",
			    t_type);
		}
#endif /* SCTP_DEBUG */
		return (EFAULT);
		break;
	};
	if ((to_ticks <= 0) || (tmr == NULL)) {
		/* a case above failed to set both; internal error */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
			    t_type, to_ticks, tmr);
		}
#endif /* SCTP_DEBUG */
		return (EFAULT);
	}
	if (callout_pending(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running.
		 * if it is we leave the current one up unchanged
		 */
		return (EALREADY);
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		/* count of running SEND (retransmission) timers */
		stcb->asoc.num_send_timers_up++;
	}
	/* record the context the expiry handler will need */
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return (0);
}
1393
1394 int
1395 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1396 struct sctp_nets *net)
1397 {
1398 struct sctp_timer *tmr;
1399
1400 if (inp == NULL)
1401 return (EFAULT);
1402
1403 tmr = NULL;
1404 switch (t_type) {
1405 case SCTP_TIMER_TYPE_ITERATOR:
1406 {
1407 struct sctp_iterator *it;
1408 it = (struct sctp_iterator *)inp;
1409 tmr = &it->tmr;
1410 }
1411 break;
1412 case SCTP_TIMER_TYPE_SEND:
1413 if ((stcb == NULL) || (net == NULL)) {
1414 return (EFAULT);
1415 }
1416 tmr = &net->rxt_timer;
1417 break;
1418 case SCTP_TIMER_TYPE_INIT:
1419 if ((stcb == NULL) || (net == NULL)) {
1420 return (EFAULT);
1421 }
1422 tmr = &net->rxt_timer;
1423 break;
1424 case SCTP_TIMER_TYPE_RECV:
1425 if (stcb == NULL) {
1426 return (EFAULT);
1427 }
1428 tmr = &stcb->asoc.dack_timer;
1429 break;
1430 case SCTP_TIMER_TYPE_SHUTDOWN:
1431 if ((stcb == NULL) || (net == NULL)) {
1432 return (EFAULT);
1433 }
1434 tmr = &net->rxt_timer;
1435 break;
1436 case SCTP_TIMER_TYPE_HEARTBEAT:
1437 if (stcb == NULL) {
1438 return (EFAULT);
1439 }
1440 tmr = &stcb->asoc.hb_timer;
1441 break;
1442 case SCTP_TIMER_TYPE_COOKIE:
1443 if ((stcb == NULL) || (net == NULL)) {
1444 return (EFAULT);
1445 }
1446 tmr = &net->rxt_timer;
1447 break;
1448 case SCTP_TIMER_TYPE_NEWCOOKIE:
1449 /* nothing needed but the endpoint here */
1450 tmr = &inp->sctp_ep.signature_change;
1451 /* We re-use the newcookie timer for
1452 * the INP kill timer. We must assure
1453 * that we do not kill it by accident.
1454 */
1455 break;
1456 case SCTP_TIMER_TYPE_INPKILL:
1457 /*
1458 * The inp is setup to die. We re-use the
1459 * signature_chage timer since that has
1460 * stopped and we are in the GONE state.
1461 */
1462 tmr = &inp->sctp_ep.signature_change;
1463 break;
1464 case SCTP_TIMER_TYPE_PATHMTURAISE:
1465 if (stcb == NULL) {
1466 return (EFAULT);
1467 }
1468 if (net == NULL) {
1469 return (EFAULT);
1470 }
1471 tmr = &net->pmtu_timer;
1472 break;
1473 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1474 if ((stcb == NULL) || (net == NULL)) {
1475 return (EFAULT);
1476 }
1477 tmr = &net->rxt_timer;
1478 break;
1479 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1480 if (stcb == NULL) {
1481 return (EFAULT);
1482 }
1483 tmr = &stcb->asoc.shut_guard_timer;
1484 break;
1485 case SCTP_TIMER_TYPE_STRRESET:
1486 if (stcb == NULL) {
1487 return (EFAULT);
1488 }
1489 tmr = &stcb->asoc.strreset_timer;
1490 break;
1491 case SCTP_TIMER_TYPE_ASCONF:
1492 if (stcb == NULL) {
1493 return (EFAULT);
1494 }
1495 tmr = &stcb->asoc.asconf_timer;
1496 break;
1497 case SCTP_TIMER_TYPE_AUTOCLOSE:
1498 if (stcb == NULL) {
1499 return (EFAULT);
1500 }
1501 tmr = &stcb->asoc.autoclose_timer;
1502 break;
1503 default:
1504 #ifdef SCTP_DEBUG
1505 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1506 printf("sctp_timer_stop:Unknown timer type %d\n",
1507 t_type);
1508 }
1509 #endif /* SCTP_DEBUG */
1510 break;
1511 };
1512 if (tmr == NULL)
1513 return (EFAULT);
1514
1515 if ((tmr->type != t_type) && tmr->type) {
1516 /*
1517 * Ok we have a timer that is under joint use. Cookie timer
1518 * per chance with the SEND timer. We therefore are NOT
1519 * running the timer that the caller wants stopped. So just
1520 * return.
1521 */
1522 return (0);
1523 }
1524 if (t_type == SCTP_TIMER_TYPE_SEND) {
1525 stcb->asoc.num_send_timers_up--;
1526 if (stcb->asoc.num_send_timers_up < 0) {
1527 stcb->asoc.num_send_timers_up = 0;
1528 }
1529 }
1530 callout_stop(&tmr->timer);
1531 return (0);
1532 }
1533
#ifdef SCTP_USE_ADLER32
/*
 * Fold `len` bytes at `buf` into a running Adler-32 checksum and
 * return the updated value.  The low 16 bits of `adler` carry the s1
 * byte sum, the high 16 bits the s2 running sum of s1 values.
 */
static uint32_t
update_adler32(uint32_t adler, uint8_t *buf, int32_t len)
{
	u_int32_t lo = adler & 0xffff;		/* s1 */
	u_int32_t hi = (adler >> 16) & 0xffff;	/* s2 */
	int32_t i;

	for (i = 0; i < len; i++) {
		/*
		 * s1 = (s1 + byte) % BASE, done as a conditional
		 * subtract: a single byte (max 255) can push s1 at
		 * most one BASE past the modulus, so one subtraction
		 * suffices and the divide is avoided.
		 */
		lo += buf[i];
		if (lo >= SCTP_ADLER32_BASE) {
			lo -= SCTP_ADLER32_BASE;
		}
		/*
		 * s2 = (s2 + s1) % BASE, same trick: worst case is
		 * (BASE-1)+(BASE-1) < 2*BASE, again one subtraction.
		 */
		hi += lo;
		if (hi >= SCTP_ADLER32_BASE) {
			hi -= SCTP_ADLER32_BASE;
		}
	}
	/* Return the adler32 of the bytes buf[0..len-1] */
	return ((hi << 16) + lo);
}

#endif
1573
1574
1575 u_int32_t
1576 sctp_calculate_len(struct mbuf *m)
1577 {
1578 u_int32_t tlen=0;
1579 struct mbuf *at;
1580 at = m;
1581 while (at) {
1582 tlen += at->m_len;
1583 at = at->m_next;
1584 }
1585 return (tlen);
1586 }
1587
1588 #if defined(SCTP_WITH_NO_CSUM)
1589
1590 uint32_t
1591 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1592 {
1593 /*
1594 * given a mbuf chain with a packetheader offset by 'offset'
1595 * pointing at a sctphdr (with csum set to 0) go through
1596 * the chain of m_next's and calculate the SCTP checksum.
1597 * This is currently Adler32 but will change to CRC32x
1598 * soon. Also has a side bonus calculate the total length
1599 * of the mbuf chain.
1600 * Note: if offset is greater than the total mbuf length,
1601 * checksum=1, pktlen=0 is returned (ie. no real error code)
1602 */
1603 if (pktlen == NULL)
1604 return (0);
1605 *pktlen = sctp_calculate_len(m);
1606 return (0);
1607 }
1608
1609 #elif defined(SCTP_USE_INCHKSUM)
1610
1611 #include <machine/in_cksum.h>
1612
1613 uint32_t
1614 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1615 {
1616 /*
1617 * given a mbuf chain with a packetheader offset by 'offset'
1618 * pointing at a sctphdr (with csum set to 0) go through
1619 * the chain of m_next's and calculate the SCTP checksum.
1620 * This is currently Adler32 but will change to CRC32x
1621 * soon. Also has a side bonus calculate the total length
1622 * of the mbuf chain.
1623 * Note: if offset is greater than the total mbuf length,
1624 * checksum=1, pktlen=0 is returned (ie. no real error code)
1625 */
1626 int32_t tlen=0;
1627 struct mbuf *at;
1628 uint32_t the_sum, retsum;
1629
1630 at = m;
1631 while (at) {
1632 tlen += at->m_len;
1633 at = at->m_next;
1634 }
1635 the_sum = (uint32_t)(in_cksum_skip(m, tlen, offset));
1636 if (pktlen != NULL)
1637 *pktlen = (tlen-offset);
1638 retsum = htons(the_sum);
1639 return (the_sum);
1640 }
1641
1642 #else
1643
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through
	 * the chain of m_next's and calculate the SCTP checksum.
	 * This is currently Adler32 but will change to CRC32x
	 * soon. Also has a side bonus calculate the total length
	 * of the mbuf chain.
	 * Note: if offset is greater than the total mbuf length,
	 * checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen=0;
#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;		/* Adler-32 initial value */
#else
	uint32_t base = 0xffffffff;	/* CRC-32c initial value */
#endif /* SCTP_USE_ADLER32 */
	struct mbuf *at;
	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t)at->m_len)) {
		offset -= at->m_len;	/* update remaining offset left */
		at = at->m_next;
	}

	/* fold each mbuf's payload into the running checksum */
	while (at != NULL) {
#ifdef SCTP_USE_ADLER32
		base = update_adler32(base, at->m_data + offset,
		    at->m_len - offset);
#else
		base = update_crc32(base, at->m_data + offset,
		    at->m_len - offset);
#endif /* SCTP_USE_ADLER32 */
		tlen += at->m_len - offset;
		/* we only offset once into the first mbuf */
		if (offset) {
			offset = 0;
		}
		at = at->m_next;
	}
	/* report the number of bytes the checksum covered, if asked */
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
1698
1699
1700 #endif
1701
1702 void
1703 sctp_mtu_size_reset(struct sctp_inpcb *inp,
1704 struct sctp_association *asoc, u_long mtu)
1705 {
1706 /*
1707 * Reset the P-MTU size on this association, this involves changing
1708 * the asoc MTU, going through ANY chunk+overhead larger than mtu
1709 * to allow the DF flag to be cleared.
1710 */
1711 struct sctp_tmit_chunk *chk;
1712 struct sctp_stream_out *strm;
1713 unsigned int eff_mtu, ovh;
1714 asoc->smallest_mtu = mtu;
1715 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1716 ovh = SCTP_MIN_OVERHEAD;
1717 } else {
1718 ovh = SCTP_MIN_V4_OVERHEAD;
1719 }
1720 eff_mtu = mtu - ovh;
1721 /* Now mark any chunks that need to let IP fragment */
1722 TAILQ_FOREACH(strm, &asoc->out_wheel, next_spoke) {
1723 TAILQ_FOREACH(chk, &strm->outqueue, sctp_next) {
1724 if (chk->send_size > eff_mtu) {
1725 chk->flags &= SCTP_DONT_FRAGMENT;
1726 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1727 }
1728 }
1729 }
1730 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
1731 if (chk->send_size > eff_mtu) {
1732 chk->flags &= SCTP_DONT_FRAGMENT;
1733 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1734 }
1735 }
1736 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
1737 if (chk->send_size > eff_mtu) {
1738 chk->flags &= SCTP_DONT_FRAGMENT;
1739 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1740 }
1741 }
1742 }
1743
1744
1745 /*
1746 * given an association and starting time of the current RTT period
1747 * return RTO in number of usecs
1748 * net should point to the current network
1749 */
u_int32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old)
{
	/*
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of usecs.
	 *
	 * NOTE(review): despite the "usecs" wording, every quantity
	 * below is computed in milliseconds (all the /1000 and *1000
	 * conversions) — confirm against callers.
	 */
	int calc_time = 0;		/* measured RTT sample, in ms */
	unsigned int new_rto = 0;
	int first_measure = 0;
	struct timeval now;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		/* at least one whole second elapsed */
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec)/1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec/1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec)/1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			/* sub-second RTT */
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec)/1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			return (((net->lastsa >> 2) + net->lastsv) >> 1);
		} else {
			/* impossible .. garbage in nothing out */
			return (((net->lastsa >> 2) + net->lastsv) >> 1);
		}
	} else {
		/* Clock wrapped? */
		return (((net->lastsa >> 2) + net->lastsv) >> 1);
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/* (dead floating-point formulation kept for reference) */
#if 0
	/* if (net->lastsv || net->lastsa) {*/
	/* per Section 5.3.1 C3 in SCTP */
	/* net->lastsv = (int) *//* RTTVAR */
	/*	(((double)(1.0 - 0.25) * (double)net->lastsv) +
	    (double)(0.25 * (double)abs(net->lastsa - calc_time)));
	    net->lastsa = (int) */ /* SRTT */
	/*(((double)(1.0 - 0.125) * (double)net->lastsa) +
	    (double)(0.125 * (double)calc_time));
	    } else {
	 *//* the first RTT calculation, per C2 Section 5.3.1 */
	/* net->lastsa = calc_time; *//* SRTT */
	/* net->lastsv = calc_time / 2; *//* RTTVAR */
	/* }*/
	/* if RTTVAR goes to 0 you set to clock grainularity */
	/* if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = net->lastsa + 4 * net->lastsv;
	*/
#endif
	/* this is Van Jacobson's integer version */
	if (net->RTO) {
		/*
		 * Subsequent measurement: fold the new sample into
		 * the scaled smoothed-RTT (lastsa) and mean-deviation
		 * (lastsv) accumulators with shifts, as in VJ's
		 * integer TCP code.
		 */
		calc_time -= (net->lastsa >> 3);
		net->lastsa += calc_time;
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> 2);
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* never let the variance collapse to zero */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurement */
		net->lastsa = calc_time;
		net->lastsv = calc_time >> 1;
		first_measure = 1;
	}
	new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		/* RTT large enough to flag a satellite-class path */
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* once it drops back down, lock the flag off for good */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTT Smoothed */
	return ((u_int32_t)new_rto);
}
1859
1860
1861 /*
1862 * return a pointer to a contiguous piece of data from the given
1863 * mbuf chain starting at 'off' for 'len' bytes. If the desired
1864 * piece spans more than one mbuf, a copy is made at 'ptr'.
1865 * caller must ensure that the buffer size is >= 'len'
1866 * returns NULL if there there isn't 'len' bytes in the chain.
1867 */
1868 void *
1869 sctp_m_getptr(struct mbuf *m, int off, int len, u_int8_t *in_ptr)
1870 {
1871 uint32_t count;
1872 uint8_t *ptr;
1873 ptr = in_ptr;
1874 if ((off < 0) || (len <= 0))
1875 return (NULL);
1876
1877 /* find the desired start location */
1878 while ((m != NULL) && (off > 0)) {
1879 if (off < m->m_len)
1880 break;
1881 off -= m->m_len;
1882 m = m->m_next;
1883 }
1884 if (m == NULL)
1885 return (NULL);
1886
1887 /* is the current mbuf large enough (eg. contiguous)? */
1888 if ((m->m_len - off) >= len) {
1889 return ((void *)(mtod(m, vaddr_t) + off));
1890 } else {
1891 /* else, it spans more than one mbuf, so save a temp copy... */
1892 while ((m != NULL) && (len > 0)) {
1893 count = uimin(m->m_len - off, len);
1894 memcpy(ptr, (void *)(mtod(m, vaddr_t) + off), count);
1895 len -= count;
1896 ptr += count;
1897 off = 0;
1898 m = m->m_next;
1899 }
1900 if ((m == NULL) && (len > 0))
1901 return (NULL);
1902 else
1903 return ((void *)in_ptr);
1904 }
1905 }
1906
1907
1908 struct sctp_paramhdr *
1909 sctp_get_next_param(struct mbuf *m,
1910 int offset,
1911 struct sctp_paramhdr *pull,
1912 int pull_limit)
1913 {
1914 /* This just provides a typed signature to Peter's Pull routine */
1915 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
1916 (u_int8_t *)pull));
1917 }
1918
1919
1920 int
1921 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
1922 {
1923 /*
1924 * add padlen bytes of 0 filled padding to the end of the mbuf.
1925 * If padlen is > 3 this routine will fail.
1926 */
1927 u_int8_t *dp;
1928 int i;
1929 if (padlen > 3) {
1930 return (ENOBUFS);
1931 }
1932 if (M_TRAILINGSPACE(m)) {
1933 /*
1934 * The easy way.
1935 * We hope the majority of the time we hit here :)
1936 */
1937 dp = (u_int8_t *)(mtod(m, vaddr_t) + m->m_len);
1938 m->m_len += padlen;
1939 } else {
1940 /* Hard way we must grow the mbuf */
1941 struct mbuf *tmp;
1942 MGET(tmp, M_DONTWAIT, MT_DATA);
1943 if (tmp == NULL) {
1944 /* Out of space GAK! we are in big trouble. */
1945 return (ENOSPC);
1946 }
1947 /* setup and insert in middle */
1948 tmp->m_next = m->m_next;
1949 tmp->m_len = padlen;
1950 m->m_next = tmp;
1951 dp = mtod(tmp, u_int8_t *);
1952 }
1953 /* zero out the pad */
1954 for (i= 0; i < padlen; i++) {
1955 *dp = 0;
1956 dp++;
1957 }
1958 return (0);
1959 }
1960
1961 int
1962 sctp_pad_lastmbuf(struct mbuf *m, int padval)
1963 {
1964 /* find the last mbuf in chain and pad it */
1965 struct mbuf *m_at;
1966 m_at = m;
1967 while (m_at) {
1968 if (m_at->m_next == NULL) {
1969 return (sctp_add_pad_tombuf(m_at, padval));
1970 }
1971 m_at = m_at->m_next;
1972 }
1973 return (EFAULT);
1974 }
1975
1976 static void
1977 sctp_notify_assoc_change(u_int32_t event, struct sctp_tcb *stcb,
1978 u_int32_t error)
1979 {
1980 struct mbuf *m_notify;
1981 struct sctp_assoc_change *sac;
1982 const struct sockaddr *to;
1983 struct sockaddr_in6 sin6, lsa6;
1984
1985 #ifdef SCTP_DEBUG
1986 printf("notify: %d\n", event);
1987 #endif
1988 /*
1989 * First if we are going down dump everything we
1990 * can to the socket rcv queue.
1991 */
1992 if ((event == SCTP_SHUTDOWN_COMP) || (event == SCTP_COMM_LOST)) {
1993 sctp_deliver_data(stcb, &stcb->asoc, NULL, 0);
1994 }
1995
1996 /*
1997 * For TCP model AND UDP connected sockets we will send
1998 * an error up when an ABORT comes in.
1999 */
2000 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2001 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2002 (event == SCTP_COMM_LOST)) {
2003 stcb->sctp_socket->so_error = ECONNRESET;
2004 /* Wake ANY sleepers */
2005 sowwakeup(stcb->sctp_socket);
2006 sorwakeup(stcb->sctp_socket);
2007 }
2008 #if 0
2009 if ((event == SCTP_COMM_UP) &&
2010 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
2011 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
2012 soisconnected(stcb->sctp_socket);
2013 }
2014 #endif
2015 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2016 /* event not enabled */
2017 return;
2018 }
2019 MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2020 if (m_notify == NULL)
2021 /* no space left */
2022 return;
2023 m_notify->m_len = 0;
2024
2025 sac = mtod(m_notify, struct sctp_assoc_change *);
2026 sac->sac_type = SCTP_ASSOC_CHANGE;
2027 sac->sac_flags = 0;
2028 sac->sac_length = sizeof(struct sctp_assoc_change);
2029 sac->sac_state = event;
2030 sac->sac_error = error;
2031 /* XXX verify these stream counts */
2032 sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2033 sac->sac_inbound_streams = stcb->asoc.streamincnt;
2034 sac->sac_assoc_id = sctp_get_associd(stcb);
2035
2036 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2037 m_notify->m_pkthdr.len = sizeof(struct sctp_assoc_change);
2038 m_reset_rcvif(m_notify);
2039 m_notify->m_len = sizeof(struct sctp_assoc_change);
2040 m_notify->m_next = NULL;
2041
2042 /* append to socket */
2043 to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2044 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2045 to->sa_family == AF_INET) {
2046 const struct sockaddr_in *sin;
2047
2048 sin = (const struct sockaddr_in *)to;
2049 in6_sin_2_v4mapsin6(sin, &sin6);
2050 to = (struct sockaddr *)&sin6;
2051 }
2052 /* check and strip embedded scope junk */
2053 to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2054 &lsa6);
2055 /*
2056 * We need to always notify comm changes.
2057 * if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2058 * sctp_m_freem(m_notify);
2059 * return;
2060 * }
2061 */
2062 SCTP_TCB_UNLOCK(stcb);
2063 SCTP_INP_WLOCK(stcb->sctp_ep);
2064 SCTP_TCB_LOCK(stcb);
2065 if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
2066 to, m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2067 /* not enough room */
2068 sctp_m_freem(m_notify);
2069 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2070 return;
2071 }
2072 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2073 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2074 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2075 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2076 }
2077 } else {
2078 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2079 }
2080 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2081 /* Wake up any sleeper */
2082 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2083 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
2084 }
2085
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification (new state/error for
 * the peer address 'sa') on the association's socket receive buffer,
 * provided the application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 * Unlike association-change events, this one is silently dropped when
 * the receive buffer lacks space.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    const struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	const struct sockaddr *to;
	struct sockaddr_in6 sin6, lsa6;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		return;
	m_notify->m_len = 0;

	/* grab a cluster for the record; bail if we cannot get one */
	MCLGET(m_notify, M_DONTWAIT);
	if ((m_notify->m_flags & M_EXT) != M_EXT) {
		sctp_m_freem(m_notify);
		return;
	}

	/* build the sctp_paddr_change record */
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* copy in the address that changed (v4 or v6) */
	if (sa->sa_family == AF_INET) {
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
	} else {
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_paddr_change);
	m_reset_rcvif(m_notify);
	m_notify->m_len = sizeof(struct sctp_paddr_change);
	m_notify->m_next = NULL;

	/* the record is addressed from the primary destination */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		const struct sockaddr_in *sin;

		sin = (const struct sockaddr_in *)to;
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);

	/* drop the event if the receive buffer lacks space */
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket; re-take the locks in inp-then-tcb order */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	/* wake any reader waiting for the notification */
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2168
2169
/*
 * Build and queue an SCTP_SEND_FAILED notification for the chunk "chk"
 * that could not be delivered.  "error" selects whether the data counts
 * as never sent (SCTP_NOTIFY_DATAGRAM_UNSENT) or sent but unacked.
 * The chunk's data mbuf chain is stolen (chk->data is cleared) and
 * handed to the application chained behind the notification header.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, u_int32_t error,
    struct sctp_tmit_chunk *chk)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;
	int length;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
		/* event not enabled */
		return;

	/* total record length: header plus the failed user data */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	/* Fill in the sctp_send_failed record at the front of the mbuf. */
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* Chain the (possibly NULL) user data behind the header. */
	m_notify->m_next = chk->data;
	if (m_notify->m_next == NULL)
		m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	else {
		struct mbuf *m;
		m_notify->m_flags |= M_NOTIFICATION;
		/* M_EOR goes on the last mbuf of the record */
		m = m_notify;
		while (m->m_next != NULL)
			m = m->m_next;
		m->m_flags |= M_EOR;
	}
	m_notify->m_pkthdr.len = length;
	m_reset_rcvif(m_notify);
	m_notify->m_len = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * From-address: the primary destination, converted to a
	 * v4-mapped v6 address when the socket wants mapped v4.
	 */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);

	/*
	 * NOTE(review): only m_len (the header) is compared against the
	 * socket-buffer space here, not the full m_pkthdr.len -- confirm
	 * this is intentional.
	 */
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}

	/* append to socket */
	/* Lock dance: drop TCB, take INP write lock, re-take TCB. */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* Unconnected UDP-model sockets are serviced via the socket queue. */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2263
2264 static void
2265 sctp_notify_adaption_layer(struct sctp_tcb *stcb,
2266 u_int32_t error)
2267 {
2268 struct mbuf *m_notify;
2269 struct sctp_adaption_event *sai;
2270 struct sockaddr_in6 sin6, lsa6;
2271 const struct sockaddr *to;
2272
2273 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT))
2274 /* event not enabled */
2275 return;
2276
2277 MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2278 if (m_notify == NULL)
2279 /* no space left */
2280 return;
2281 m_notify->m_len = 0;
2282 sai = mtod(m_notify, struct sctp_adaption_event *);
2283 sai->sai_type = SCTP_ADAPTION_INDICATION;
2284 sai->sai_flags = 0;
2285 sai->sai_length = sizeof(struct sctp_adaption_event);
2286 sai->sai_adaption_ind = error;
2287 sai->sai_assoc_id = sctp_get_associd(stcb);
2288
2289 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2290 m_notify->m_pkthdr.len = sizeof(struct sctp_adaption_event);
2291 m_reset_rcvif(m_notify);
2292 m_notify->m_len = sizeof(struct sctp_adaption_event);
2293 m_notify->m_next = NULL;
2294
2295 to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2296 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2297 (to->sa_family == AF_INET)) {
2298 const struct sockaddr_in *sin;
2299
2300 sin = satocsin(to);
2301 in6_sin_2_v4mapsin6(sin, &sin6);
2302 to = (struct sockaddr *)&sin6;
2303 }
2304 /* check and strip embedded scope junk */
2305 to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
2306 &lsa6);
2307 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2308 sctp_m_freem(m_notify);
2309 return;
2310 }
2311 /* append to socket */
2312 SCTP_TCB_UNLOCK(stcb);
2313 SCTP_INP_WLOCK(stcb->sctp_ep);
2314 SCTP_TCB_LOCK(stcb);
2315 if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2316 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2317 /* not enough room */
2318 sctp_m_freem(m_notify);
2319 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2320 return;
2321 }
2322 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2323 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2324 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2325 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2326 }
2327 } else {
2328 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2329 }
2330 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2331 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2332 }
2333
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification on the socket.
 * "error" carries the pdapi indication code passed to the user.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
    u_int32_t error)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	/* Build the pdapi event record in the fresh mbuf. */
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_pdapi_event);
	m_reset_rcvif(m_notify);
	m_notify->m_len = sizeof(struct sctp_pdapi_event);
	m_notify->m_next = NULL;

	/* From-address: primary destination, v4-mapped if required. */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    (to->sa_family == AF_INET)) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	/* Lock dance: drop TCB, take INP write lock, re-take TCB. */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* Unconnected UDP-model sockets are serviced via the socket queue. */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2403
/*
 * Handle the socket-visible side of a completed SHUTDOWN: for
 * TCP-model (and TCP-pool) sockets mark the socket unreadable and
 * unwritable, then, if the user subscribed, queue an
 * SCTP_SHUTDOWN_EVENT notification.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;

	/*
	 * For TCP model AND UDP connected sockets we will send
	 * an error up when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
		socantrcvmore(stcb->sctp_socket);
		socantsendmore(stcb->sctp_socket);
	}

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	/* Build the shutdown event record in the fresh mbuf. */
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_shutdown_event);
	m_reset_rcvif(m_notify);
	m_notify->m_len = sizeof(struct sctp_shutdown_event);
	m_notify->m_next = NULL;

	/* From-address: primary destination, v4-mapped if required. */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	/* Lock dance: drop TCB, take INP write lock, re-take TCB. */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* Unconnected UDP-model sockets are serviced via the socket queue. */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2482
2483 static void
2484 sctp_notify_stream_reset(struct sctp_tcb *stcb,
2485 int number_entries, uint16_t *list, int flag)
2486 {
2487 struct mbuf *m_notify;
2488 struct sctp_stream_reset_event *strreset;
2489 struct sockaddr_in6 sin6, lsa6;
2490 const struct sockaddr *to;
2491 int len;
2492
2493 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2494 /* event not enabled */
2495 return;
2496
2497 MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2498 if (m_notify == NULL)
2499 /* no space left */
2500 return;
2501 m_notify->m_len = 0;
2502 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
2503 if (len > M_TRAILINGSPACE(m_notify)) {
2504 MCLGET(m_notify, M_WAIT);
2505 }
2506 if (m_notify == NULL)
2507 /* no clusters */
2508 return;
2509
2510 if (len > M_TRAILINGSPACE(m_notify)) {
2511 /* never enough room */
2512 m_freem(m_notify);
2513 return;
2514 }
2515 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
2516 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
2517 if (number_entries == 0) {
2518 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
2519 } else {
2520 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
2521 }
2522 strreset->strreset_length = len;
2523 strreset->strreset_assoc_id = sctp_get_associd(stcb);
2524 if (number_entries) {
2525 int i;
2526 for (i=0; i<number_entries; i++) {
2527 strreset->strreset_list[i] = list[i];
2528 }
2529 }
2530 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2531 m_notify->m_pkthdr.len = len;
2532 m_reset_rcvif(m_notify);
2533 m_notify->m_len = len;
2534 m_notify->m_next = NULL;
2535 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2536 /* no space */
2537 sctp_m_freem(m_notify);
2538 return;
2539 }
2540 to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2541 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2542 to->sa_family == AF_INET) {
2543 const struct sockaddr_in *sin;
2544
2545 sin = satocsin(to);
2546 in6_sin_2_v4mapsin6(sin, &sin6);
2547 to = (struct sockaddr *)&sin6;
2548 }
2549 /* check and strip embedded scope junk */
2550 to = (const struct sockaddr *) sctp_recover_scope((const struct sockaddr_in6 *)to,
2551 &lsa6);
2552 /* append to socket */
2553 SCTP_TCB_UNLOCK(stcb);
2554 SCTP_INP_WLOCK(stcb->sctp_ep);
2555 SCTP_TCB_LOCK(stcb);
2556 if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2557 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2558 /* not enough room */
2559 sctp_m_freem(m_notify);
2560 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2561 return;
2562 }
2563 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2564 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2565 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2566 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2567 }
2568 } else {
2569 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2570 }
2571 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2572 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2573 }
2574
2575
/*
 * Central ULP notification dispatcher: translate an internal
 * SCTP_NOTIFY_* code into the matching socket-level event and queue it
 * on the association's socket.  "data" is notification-specific: a
 * struct sctp_nets for interface events, a struct sctp_tmit_chunk for
 * send failures, a sockaddr for ASCONF address events, or a uint16_t
 * stream list for stream resets.
 */
void
sctp_ulp_notify(u_int32_t notification, struct sctp_tcb *stcb,
    u_int32_t error, void *data)
{
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
		/* No notifications up when we are in a no socket state */
		return;
	}
	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
		/* Can't send up to a closed socket any notifications */
		return;
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error);
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
	{
		struct sctp_nets *net;
		net = (struct sctp_nets *)data;
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
		    rtcache_getdst(&net->ro), error);
		break;
	}
	case SCTP_NOTIFY_INTERFACE_UP:
	{
		struct sctp_nets *net;
		net = (struct sctp_nets *)data;
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
		    rtcache_getdst(&net->ro), error);
		break;
	}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
	{
		struct sctp_nets *net;
		net = (struct sctp_nets *)data;
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
		    rtcache_getdst(&net->ro), error);
		break;
	}
	case SCTP_NOTIFY_DG_FAIL:
		sctp_notify_send_failed(stcb, error,
		    (struct sctp_tmit_chunk *)data);
		break;
	case SCTP_NOTIFY_ADAPTION_INDICATION:
		/* Here the error is the adaption indication */
		sctp_notify_adaption_layer(stcb, error);
		break;
	/* sic: constant is spelled "DELVIERY" in the project headers */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		sctp_notify_partial_delivery_indication(stcb, error);
		break;
	case SCTP_NOTIFY_STRDATA_ERR:
		/* no event delivered for this code */
		break;
	case SCTP_NOTIFY_ASSOC_ABORTED:
		sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error);
		break;
	case SCTP_NOTIFY_PEER_OPENED_STREAM:
		/* no event delivered for this code */
		break;
	case SCTP_NOTIFY_STREAM_OPENED_OK:
		/* no event delivered for this code */
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error);
		break;
	case SCTP_NOTIFY_HB_RESP:
		/* no event delivered for this code */
		break;
	/* For stream resets, "error" is the entry count of the list. */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SUCCESS:
		/* no event delivered for this code */
		break;
	case SCTP_NOTIFY_ASCONF_FAILED:
		/* no event delivered for this code */
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	default:
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
			printf("NOTIFY: unknown notification %xh (%u)\n",
			    notification, notification);
		}
#endif /* SCTP_DEBUG */
		break;
	} /* end switch */
}
2679
2680 void
2681 sctp_report_all_outbound(struct sctp_tcb *stcb)
2682 {
2683 struct sctp_association *asoc;
2684 struct sctp_stream_out *outs;
2685 struct sctp_tmit_chunk *chk;
2686
2687 asoc = &stcb->asoc;
2688
2689 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2690 return;
2691 }
2692 /* now through all the gunk freeing chunks */
2693 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
2694 /* now clean up any chunks here */
2695 chk = TAILQ_FIRST(&outs->outqueue);
2696 while (chk) {
2697 stcb->asoc.stream_queue_cnt--;
2698 TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
2699 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2700 SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2701 if (chk->data) {
2702 sctp_m_freem(chk->data);
2703 chk->data = NULL;
2704 }
2705 if (chk->whoTo)
2706 sctp_free_remote_addr(chk->whoTo);
2707 chk->whoTo = NULL;
2708 chk->asoc = NULL;
2709 /* Free the chunk */
2710 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2711 sctppcbinfo.ipi_count_chunk--;
2712 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2713 panic("Chunk count is negative");
2714 }
2715 sctppcbinfo.ipi_gencnt_chunk++;
2716 chk = TAILQ_FIRST(&outs->outqueue);
2717 }
2718 }
2719 /* pending send queue SHOULD be empty */
2720 if (!TAILQ_EMPTY(&asoc->send_queue)) {
2721 chk = TAILQ_FIRST(&asoc->send_queue);
2722 while (chk) {
2723 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
2724 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2725 if (chk->data) {
2726 sctp_m_freem(chk->data);
2727 chk->data = NULL;
2728 }
2729 if (chk->whoTo)
2730 sctp_free_remote_addr(chk->whoTo);
2731 chk->whoTo = NULL;
2732 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2733 sctppcbinfo.ipi_count_chunk--;
2734 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2735 panic("Chunk count is negative");
2736 }
2737 sctppcbinfo.ipi_gencnt_chunk++;
2738 chk = TAILQ_FIRST(&asoc->send_queue);
2739 }
2740 }
2741 /* sent queue SHOULD be empty */
2742 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2743 chk = TAILQ_FIRST(&asoc->sent_queue);
2744 while (chk) {
2745 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
2746 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2747 SCTP_NOTIFY_DATAGRAM_SENT, chk);
2748 if (chk->data) {
2749 sctp_m_freem(chk->data);
2750 chk->data = NULL;
2751 }
2752 if (chk->whoTo)
2753 sctp_free_remote_addr(chk->whoTo);
2754 chk->whoTo = NULL;
2755 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2756 sctppcbinfo.ipi_count_chunk--;
2757 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2758 panic("Chunk count is negative");
2759 }
2760 sctppcbinfo.ipi_gencnt_chunk++;
2761 chk = TAILQ_FIRST(&asoc->sent_queue);
2762 }
2763 }
2764 }
2765
2766 void
2767 sctp_abort_notification(struct sctp_tcb *stcb, int error)
2768 {
2769
2770 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2771 return;
2772 }
2773 /* Tell them we lost the asoc */
2774 sctp_report_all_outbound(stcb);
2775 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
2776 }
2777
2778 void
2779 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2780 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
2781 {
2782 u_int32_t vtag;
2783
2784 vtag = 0;
2785 if (stcb != NULL) {
2786 /* We have a TCB to abort, send notification too */
2787 vtag = stcb->asoc.peer_vtag;
2788 sctp_abort_notification(stcb, 0);
2789 }
2790 sctp_send_abort(m, iphlen, sh, vtag, op_err);
2791 if (stcb != NULL) {
2792 /* Ok, now lets free it */
2793 sctp_free_assoc(inp, stcb);
2794 } else {
2795 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2796 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2797 sctp_inpcb_free(inp, 1);
2798 }
2799 }
2800 }
2801 }
2802
2803 void
2804 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2805 int error, struct mbuf *op_err)
2806 {
2807
2808 if (stcb == NULL) {
2809 /* Got to have a TCB */
2810 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2811 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2812 sctp_inpcb_free(inp, 1);
2813 }
2814 }
2815 return;
2816 }
2817 /* notify the ulp */
2818 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
2819 sctp_abort_notification(stcb, error);
2820 /* notify the peer */
2821 sctp_send_abort_tcb(stcb, op_err);
2822 /* now free the asoc */
2823 sctp_free_assoc(inp, stcb);
2824 }
2825
2826 void
2827 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
2828 struct sctp_inpcb *inp, struct mbuf *op_err)
2829 {
2830 struct sctp_chunkhdr *ch, chunk_buf;
2831 unsigned int chk_length;
2832
2833 /* Generate a TO address for future reference */
2834 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
2835 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2836 sctp_inpcb_free(inp, 1);
2837 }
2838 }
2839 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2840 sizeof(*ch), (u_int8_t *)&chunk_buf);
2841 while (ch != NULL) {
2842 chk_length = ntohs(ch->chunk_length);
2843 if (chk_length < sizeof(*ch)) {
2844 /* break to abort land */
2845 break;
2846 }
2847 switch (ch->chunk_type) {
2848 case SCTP_PACKET_DROPPED:
2849 /* we don't respond to pkt-dropped */
2850 return;
2851 case SCTP_ABORT_ASSOCIATION:
2852 /* we don't respond with an ABORT to an ABORT */
2853 return;
2854 case SCTP_SHUTDOWN_COMPLETE:
2855 /*
2856 * we ignore it since we are not waiting for it
2857 * and peer is gone
2858 */
2859 return;
2860 case SCTP_SHUTDOWN_ACK:
2861 sctp_send_shutdown_complete2(m, iphlen, sh);
2862 return;
2863 default:
2864 break;
2865 }
2866 offset += SCTP_SIZE32(chk_length);
2867 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2868 sizeof(*ch), (u_int8_t *)&chunk_buf);
2869 }
2870 sctp_send_abort(m, iphlen, sh, 0, op_err);
2871 }
2872
2873 /*
2874 * check the inbound datagram to make sure there is not an abort
2875 * inside it, if there is return 1, else return 0.
2876 */
2877 int
2878 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, int *vtagfill)
2879 {
2880 struct sctp_chunkhdr *ch;
2881 struct sctp_init_chunk *init_chk, chunk_buf;
2882 int offset;
2883 unsigned int chk_length;
2884
2885 offset = iphlen + sizeof(struct sctphdr);
2886 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
2887 (u_int8_t *)&chunk_buf);
2888 while (ch != NULL) {
2889 chk_length = ntohs(ch->chunk_length);
2890 if (chk_length < sizeof(*ch)) {
2891 /* packet is probably corrupt */
2892 break;
2893 }
2894 /* we seem to be ok, is it an abort? */
2895 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
2896 /* yep, tell them */
2897 return (1);
2898 }
2899 if (ch->chunk_type == SCTP_INITIATION) {
2900 /* need to update the Vtag */
2901 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
2902 offset, sizeof(*init_chk), (u_int8_t *)&chunk_buf);
2903 if (init_chk != NULL) {
2904 *vtagfill = ntohl(init_chk->init.initiate_tag);
2905 }
2906 }
2907 /* Nope, move to the next chunk */
2908 offset += SCTP_SIZE32(chk_length);
2909 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2910 sizeof(*ch), (u_int8_t *)&chunk_buf);
2911 }
2912 return (0);
2913 }
2914
2915 /*
2916 * currently (2/02), ifa_addr embeds scope_id's and don't
2917 * have sin6_scope_id set (i.e. it's 0)
2918 * so, create this function to compare link local scopes
2919 */
2920 uint32_t
2921 sctp_is_same_scope(const struct sockaddr_in6 *addr1, const struct sockaddr_in6 *addr2)
2922 {
2923 struct sockaddr_in6 a, b;
2924
2925 /* save copies */
2926 a = *addr1;
2927 b = *addr2;
2928
2929 if (a.sin6_scope_id == 0)
2930 if (sa6_recoverscope(&a)) {
2931 /* can't get scope, so can't match */
2932 return (0);
2933 }
2934 if (b.sin6_scope_id == 0)
2935 if (sa6_recoverscope(&b)) {
2936 /* can't get scope, so can't match */
2937 return (0);
2938 }
2939 if (a.sin6_scope_id != b.sin6_scope_id)
2940 return (0);
2941
2942 return (1);
2943 }
2944
2945 /*
2946 * returns a sockaddr_in6 with embedded scope recovered and removed
2947 */
2948 const struct sockaddr_in6 *
2949 sctp_recover_scope(const struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
2950 {
2951 const struct sockaddr_in6 *newaddr;
2952
2953 newaddr = addr;
2954 /* check and strip embedded scope junk */
2955 if (addr->sin6_family == AF_INET6) {
2956 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
2957 if (addr->sin6_scope_id == 0) {
2958 *store = *addr;
2959 if (sa6_recoverscope(store) == 0) {
2960 /* use the recovered scope */
2961 newaddr = store;
2962 }
2963 /* else, return the original "to" addr */
2964 }
2965 }
2966 }
2967 return (newaddr);
2968 }
2969
2970 /*
2971 * are the two addresses the same? currently a "scopeless" check
2972 * returns: 1 if same, 0 if not
2973 */
2974 int
2975 sctp_cmpaddr(const struct sockaddr *sa1, const struct sockaddr *sa2)
2976 {
2977
2978 /* must be valid */
2979 if (sa1 == NULL || sa2 == NULL)
2980 return (0);
2981
2982 /* must be the same family */
2983 if (sa1->sa_family != sa2->sa_family)
2984 return (0);
2985
2986 if (sa1->sa_family == AF_INET6) {
2987 /* IPv6 addresses */
2988 const struct sockaddr_in6 *sin6_1, *sin6_2;
2989
2990 sin6_1 = (const struct sockaddr_in6 *)sa1;
2991 sin6_2 = (const struct sockaddr_in6 *)sa2;
2992 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
2993 &sin6_2->sin6_addr));
2994 } else if (sa1->sa_family == AF_INET) {
2995 /* IPv4 addresses */
2996 const struct sockaddr_in *sin_1, *sin_2;
2997
2998 sin_1 = (const struct sockaddr_in *)sa1;
2999 sin_2 = (const struct sockaddr_in *)sa2;
3000 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3001 } else {
3002 /* we don't do these... */
3003 return (0);
3004 }
3005 }
3006
3007 void
3008 sctp_print_address(const struct sockaddr *sa)
3009 {
3010 char ip6buf[INET6_ADDRSTRLEN];
3011
3012 if (sa->sa_family == AF_INET6) {
3013 const struct sockaddr_in6 *sin6;
3014 sin6 = (const struct sockaddr_in6 *)sa;
3015 printf("IPv6 address: %s:%d scope:%u\n",
3016 IN6_PRINT(ip6buf, &sin6->sin6_addr), ntohs(sin6->sin6_port),
3017 sin6->sin6_scope_id);
3018 } else if (sa->sa_family == AF_INET) {
3019 const struct sockaddr_in *sin;
3020 sin = (const struct sockaddr_in *)sa;
3021 printf("IPv4 address: %s:%d\n", inet_ntoa(sin->sin_addr),
3022 ntohs(sin->sin_port));
3023 } else {
3024 printf("?\n");
3025 }
3026 }
3027
/*
 * Debug helper: print the source and destination endpoints of an SCTP
 * packet.  "iph" may point at either an IPv4 or an IPv6 header; the
 * version nibble decides which.  Ports come from the common SCTP
 * header "sh" (kept in network byte order here; sctp_print_address
 * does the ntohs).
 */
void
sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
{
	if (iph->ip_v == IPVERSION) {
		/* IPv4: build temporary sockaddr_ins and print them */
		struct sockaddr_in lsa, fsa;

		memset(&lsa, 0, sizeof(lsa));
		lsa.sin_len = sizeof(lsa);
		lsa.sin_family = AF_INET;
		lsa.sin_addr = iph->ip_src;
		lsa.sin_port = sh->src_port;
		memset(&fsa, 0, sizeof(fsa));
		fsa.sin_len = sizeof(fsa);
		fsa.sin_family = AF_INET;
		fsa.sin_addr = iph->ip_dst;
		fsa.sin_port = sh->dest_port;
		printf("src: ");
		sctp_print_address((struct sockaddr *)&lsa);
		printf("dest: ");
		sctp_print_address((struct sockaddr *)&fsa);
	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
		/* IPv6: reinterpret the header and do the same */
		struct ip6_hdr *ip6;
		struct sockaddr_in6 lsa6, fsa6;

		ip6 = (struct ip6_hdr *)iph;
		memset(&lsa6, 0, sizeof(lsa6));
		lsa6.sin6_len = sizeof(lsa6);
		lsa6.sin6_family = AF_INET6;
		lsa6.sin6_addr = ip6->ip6_src;
		lsa6.sin6_port = sh->src_port;
		memset(&fsa6, 0, sizeof(fsa6));
		fsa6.sin6_len = sizeof(fsa6);
		fsa6.sin6_family = AF_INET6;
		fsa6.sin6_addr = ip6->ip6_dst;
		fsa6.sin6_port = sh->dest_port;
		printf("src: ");
		sctp_print_address((struct sockaddr *)&lsa6);
		printf("dest: ");
		sctp_print_address((struct sockaddr *)&fsa6);
	}
	/* other version nibbles are silently ignored */
}
3069
#if defined(__FreeBSD__) || defined(__APPLE__)

/* cloned from uipc_socket.c */

/*
 * Link mbuf record "m0" onto the end of socket buffer "sb",
 * maintaining the sb_lastrecord cache (equivalent of the stock
 * SBLINKRECORD, duplicated here because the SCTP append path bypasses
 * sbappendaddr()).
 */
#define SCTP_SBLINKRECORD(sb, m0) do { \
	if ((sb)->sb_lastrecord != NULL) \
		(sb)->sb_lastrecord->m_nextpkt = (m0); \
	else \
		(sb)->sb_mb = (m0); \
	(sb)->sb_lastrecord = (m0); \
} while (/*CONSTCOND*/0)
#endif
3082
3083
3084 int
3085 sbappendaddr_nocheck(struct sockbuf *sb, const struct sockaddr *asa,
3086 struct mbuf *m0, struct mbuf *control,
3087 u_int32_t tag, struct sctp_inpcb *inp)
3088 {
3089 #ifdef __NetBSD__
3090 struct mbuf *m, *n;
3091
3092 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3093 panic("sbappendaddr_nocheck");
3094
3095 m0->m_pkthdr.csum_data = (int)tag;
3096
3097 for (n = control; n; n = n->m_next) {
3098 if (n->m_next == 0) /* keep pointer to last control buf */
3099 break;
3100 }
3101 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3102 ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3103 MGETHDR(m, M_DONTWAIT, MT_SONAME);
3104 if (m == 0)
3105 return (0);
3106
3107 m->m_len = asa->sa_len;
3108 memcpy(mtod(m, void *), (const void *)asa, asa->sa_len);
3109 } else {
3110 m = NULL;
3111 }
3112 if (n) {
3113 n->m_next = m0; /* concatenate data to control */
3114 }else {
3115 control = m0;
3116 }
3117 if (m)
3118 m->m_next = control;
3119 else
3120 m = control;
3121 m->m_pkthdr.csum_data = tag;
3122
3123 for (n = m; n; n = n->m_next)
3124 sballoc(sb, n);
3125 if ((n = sb->sb_mb) != NULL) {
3126 if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3127 inp->sb_last_mpkt = NULL;
3128 }
3129 if (inp->sb_last_mpkt)
3130 inp->sb_last_mpkt->m_nextpkt = m;
3131 else {
3132 while (n->m_nextpkt) {
3133 n = n->m_nextpkt;
3134 }
3135 n->m_nextpkt = m;
3136 }
3137 inp->sb_last_mpkt = m;
3138 } else {
3139 inp->sb_last_mpkt = sb->sb_mb = m;
3140 inp->sctp_vtag_first = tag;
3141 }
3142 return (1);
3143 #endif
3144 #if defined(__FreeBSD__) || defined(__APPLE__)
3145 struct mbuf *m, *n, *nlast;
3146 int cnt=0;
3147
3148 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3149 panic("sbappendaddr_nocheck");
3150
3151 for (n = control; n; n = n->m_next) {
3152 if (n->m_next == 0) /* get pointer to last control buf */
3153 break;
3154 }
3155 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3156 ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3157 if (asa->sa_len > MHLEN)
3158 return (0);
3159 try_again:
3160 MGETHDR(m, M_DONTWAIT, MT_SONAME);
3161 if (m == 0)
3162 return (0);
3163 m->m_len = 0;
3164 /* safety */
3165 if (m == m0) {
3166 printf("Duplicate mbuf allocated %p in and mget returned %p?\n",
3167 m0, m);
3168 if (cnt) {
3169 panic("more than once");
3170 }
3171 cnt++;
3172 goto try_again;
3173 }
3174 m->m_len = asa->sa_len;
3175 bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3176 }
3177 else {
3178 m = NULL;
3179 }
3180 if (n)
3181 n->m_next = m0; /* concatenate data to control */
3182 else
3183 control = m0;
3184 if (m)
3185 m->m_next = control;
3186 else
3187 m = control;
3188 m->m_pkthdr.csum_data = (int)tag;
3189
3190 for (n = m; n; n = n->m_next)
3191 sballoc(sb, n);
3192 nlast = n;
3193 if (sb->sb_mb == NULL) {
3194 inp->sctp_vtag_first = tag;
3195 }
3196
3197 #ifdef __FREEBSD__
3198 if (sb->sb_mb == NULL)
3199 inp->sctp_vtag_first = tag;
3200 SCTP_SBLINKRECORD(sb, m);
3201 sb->sb_mbtail = nlast;
3202 #else
3203 if ((n = sb->sb_mb) != NULL) {
3204 if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3205 inp->sb_last_mpkt = NULL;
3206 }
3207 if (inp->sb_last_mpkt)
3208 inp->sb_last_mpkt->m_nextpkt = m;
3209 else {
3210 while (n->m_nextpkt) {
3211 n = n->m_nextpkt;
3212 }
3213 n->m_nextpkt = m;
3214 }
3215 inp->sb_last_mpkt = m;
3216 } else {
3217 inp->sb_last_mpkt = sb->sb_mb = m;
3218 inp->sctp_vtag_first = tag;
3219 }
3220 #endif
3221 return (1);
3222 #endif
3223 #ifdef __OpenBSD__
3224 struct mbuf *m, *n;
3225
3226 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3227 panic("sbappendaddr_nocheck");
3228 m0->m_pkthdr.csum = (int)tag;
3229 for (n = control; n; n = n->m_next) {
3230 if (n->m_next == 0) /* keep pointer to last control buf */
3231 break;
3232 }
3233 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3234 ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3235 if (asa->sa_len > MHLEN)
3236 return (0);
3237 MGETHDR(m, M_DONTWAIT, MT_SONAME);
3238 if (m == 0)
3239 return (0);
3240 m->m_len = asa->sa_len;
3241 bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3242 } else {
3243 m = NULL;
3244 }
3245 if (n)
3246 n->m_next = m0; /* concatenate data to control */
3247 else
3248 control = m0;
3249
3250 m->m_pkthdr.csum = (int)tag;
3251 m->m_next = control;
3252 for (n = m; n; n = n->m_next)
3253 sballoc(sb, n);
3254 if ((n = sb->sb_mb) != NULL) {
3255 if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3256 inp->sb_last_mpkt = NULL;
3257 }
3258 if (inp->sb_last_mpkt)
3259 inp->sb_last_mpkt->m_nextpkt = m;
3260 else {
3261 while (n->m_nextpkt) {
3262 n = n->m_nextpkt;
3263 }
3264 n->m_nextpkt = m;
3265 }
3266 inp->sb_last_mpkt = m;
3267 } else {
3268 inp->sb_last_mpkt = sb->sb_mb = m;
3269 inp->sctp_vtag_first = tag;
3270 }
3271 return (1);
3272 #endif
3273 }
3274
3275 /*************HOLD THIS COMMENT FOR PATCH FILE OF
3276 *************ALTERNATE ROUTING CODE
3277 */
3278
3279 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
3280 *************ALTERNATE ROUTING CODE
3281 */
3282
3283 struct mbuf *
3284 sctp_generate_invmanparam(int err)
3285 {
3286 /* Return a MBUF with a invalid mandatory parameter */
3287 struct mbuf *m;
3288
3289 MGET(m, M_DONTWAIT, MT_DATA);
3290 if (m) {
3291 struct sctp_paramhdr *ph;
3292 m->m_len = sizeof(struct sctp_paramhdr);
3293 ph = mtod(m, struct sctp_paramhdr *);
3294 ph->param_length = htons(sizeof(struct sctp_paramhdr));
3295 ph->param_type = htons(err);
3296 }
3297 return (m);
3298 }
3299
3300 static int
3301 sctp_should_be_moved(struct mbuf *this, struct sctp_association *asoc)
3302 {
3303 struct mbuf *m;
3304 /*
3305 * given a mbuf chain, look through it finding
3306 * the M_PKTHDR and return 1 if it belongs to
3307 * the association given. We tell this by
3308 * a kludge where we stuff the my_vtag of the asoc
3309 * into the m->m_pkthdr.csum_data/csum field.
3310 */
3311 m = this;
3312 while (m) {
3313 if (m->m_flags & M_PKTHDR) {
3314 /* check it */
3315 #if defined(__OpenBSD__)
3316 if ((u_int32_t)m->m_pkthdr.csum == asoc->my_vtag)
3317 #else
3318 if ((u_int32_t)m->m_pkthdr.csum_data == asoc->my_vtag)
3319 #endif
3320 {
3321 /* Yep */
3322 return (1);
3323 }
3324 }
3325 m = m->m_next;
3326 }
3327 return (0);
3328 }
3329
3330 u_int32_t
3331 sctp_get_first_vtag_from_sb(struct socket *so)
3332 {
3333 struct mbuf *this, *at;
3334 u_int32_t retval;
3335
3336 retval = 0;
3337 if (so->so_rcv.sb_mb) {
3338 /* grubbing time */
3339 this = so->so_rcv.sb_mb;
3340 while (this) {
3341 at = this;
3342 /* get to the m_pkthdr */
3343 while (at) {
3344 if (at->m_flags & M_PKTHDR)
3345 break;
3346 else {
3347 at = at->m_next;
3348 }
3349 }
3350 /* now do we have a m_pkthdr */
3351 if (at && (at->m_flags & M_PKTHDR)) {
3352 /* check it */
3353 #if defined(__OpenBSD__)
3354 if ((u_int32_t)at->m_pkthdr.csum != 0)
3355 #else
3356 if ((u_int32_t)at->m_pkthdr.csum_data != 0)
3357 #endif
3358 {
3359 /* its the one */
3360 #if defined(__OpenBSD__)
3361 retval = (u_int32_t)at->m_pkthdr.csum;
3362 #else
3363 retval =
3364 (u_int32_t)at->m_pkthdr.csum_data;
3365 #endif
3366 break;
3367 }
3368 }
3369 this = this->m_nextpkt;
3370 }
3371
3372 }
3373 return (retval);
3374
3375 }
/*
 * Move every receive-buffer record belonging to association 'stcb' from
 * socket 'old' to socket 'new' (used when peeling an association off onto
 * its own socket).  Records are matched by the verification tag stashed in
 * their pkthdr csum/csum_data field (see sctp_should_be_moved); the space
 * accounting of both socket buffers is adjusted as chains change hands.
 */
void
sctp_grub_through_socket_buffer(struct sctp_inpcb *inp, struct socket *old,
    struct socket *new, struct sctp_tcb *stcb)
{
	struct mbuf **put, **take, *next, *this;
	struct sockbuf *old_sb, *new_sb;
	struct sctp_association *asoc;
	int moved_top = 0;

	asoc = &stcb->asoc;
	old_sb = &old->so_rcv;
	new_sb = &new->so_rcv;
	if (old_sb->sb_mb == NULL) {
		/* Nothing to move */
		return;
	}

	if (inp->sctp_vtag_first == asoc->my_vtag) {
		/* First one must be moved */
		struct mbuf *mm;
		for (mm = old_sb->sb_mb; mm; mm = mm->m_next) {
			/*
			 * Go down the chain and fix
			 * the space allocation of the
			 * two sockets.
			 */
			sbfree(old_sb, mm);
			sballoc(new_sb, mm);
		}
		/* unlink old's head record and make it new's head record */
		new_sb->sb_mb = old_sb->sb_mb;
		old_sb->sb_mb = new_sb->sb_mb->m_nextpkt;
		new_sb->sb_mb->m_nextpkt = NULL;
		put = &new_sb->sb_mb->m_nextpkt;
		moved_top = 1;
	} else {
		put = &new_sb->sb_mb;
	}

	/*
	 * Walk the remaining records: 'take' tracks the link through which
	 * a matching record is spliced out of the old list, 'put' the tail
	 * link of the new list where it is appended.
	 */
	take = &old_sb->sb_mb;
	next = old_sb->sb_mb;
	while (next) {
		this = next;
		/* position for next one */
		next = this->m_nextpkt;
		/* check the tag of this packet */
		if (sctp_should_be_moved(this, asoc)) {
			/* yes this needs to be moved */
			struct mbuf *mm;
			*take = this->m_nextpkt;
			this->m_nextpkt = NULL;
			*put = this;
			for (mm = this; mm; mm = mm->m_next) {
				/*
				 * Go down the chain and fix
				 * the space allocation of the
				 * two sockets.
				 */
				sbfree(old_sb, mm);
				sballoc(new_sb, mm);
			}
			put = &this->m_nextpkt;

		} else {
			/* no, advance our take point. */
			take = &this->m_nextpkt;
		}
	}
	if (moved_top) {
		/*
		 * Ok so now we must re-position vtag_first to
		 * match the new first one since we moved the
		 * mbuf at the top.
		 */
		inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(old);
	}
}
3452
3453 void
3454 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
3455 struct sctp_tmit_chunk *tp1)
3456 {
3457 if (tp1->data == NULL) {
3458 return;
3459 }
3460 #ifdef SCTP_MBCNT_LOGGING
3461 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
3462 asoc->total_output_queue_size,
3463 tp1->book_size,
3464 asoc->total_output_mbuf_queue_size,
3465 tp1->mbcnt);
3466 #endif
3467 if (asoc->total_output_queue_size >= tp1->book_size) {
3468 asoc->total_output_queue_size -= tp1->book_size;
3469 } else {
3470 asoc->total_output_queue_size = 0;
3471 }
3472
3473 /* Now free the mbuf */
3474 if (asoc->total_output_mbuf_queue_size >= tp1->mbcnt) {
3475 asoc->total_output_mbuf_queue_size -= tp1->mbcnt;
3476 } else {
3477 asoc->total_output_mbuf_queue_size = 0;
3478 }
3479 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3480 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3481 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
3482 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
3483 } else {
3484 stcb->sctp_socket->so_snd.sb_cc = 0;
3485
3486 }
3487 if (stcb->sctp_socket->so_snd.sb_mbcnt >= tp1->mbcnt) {
3488 stcb->sctp_socket->so_snd.sb_mbcnt -= tp1->mbcnt;
3489 } else {
3490 stcb->sctp_socket->so_snd.sb_mbcnt = 0;
3491 }
3492 }
3493 }
3494
/*
 * Abandon a PR-SCTP message starting at chunk tp1 on 'queue': mark each
 * fragment SCTP_FORWARD_TSN_SKIP, free its data (notifying the ULP of
 * the failure with 'reason'), and move send-queue chunks onto the sent
 * queue so a FORWARD-TSN can cover them.  If the last fragment was not
 * found while working the sent queue, recurse once into the send queue
 * to mark the remainder.  Returns the total booked size released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue)
{
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;	/* saw the message's final fragment? */

	do {
		ret_sz += tp1->book_size;
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
			sctp_free_bufspace(stcb, &stcb->asoc, tp1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
			/* space was freed; wake any blocked sender */
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
		if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed, we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/* Its a begin or middle piece, we must mark all of it */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered
		 * across the send and sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue);
		} else {
			printf("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
3557
3558 /*
3559 * checks to see if the given address, sa, is one that is currently
3560 * known by the kernel
3561 * note: can't distinguish the same address on multiple interfaces and
3562 * doesn't handle multiple addresses with different zone/scope id's
3563 * note: ifa_ifwithaddr() compares the entire sockaddr struct
3564 */
struct ifaddr *
sctp_find_ifa_by_addr(struct sockaddr *sa)
{
	struct ifnet *ifn;
	struct ifaddr *ifa;
	int s;	/* pserialize read-section token */

	/* go through all our known interfaces */
	s = pserialize_read_enter();
	IFNET_READER_FOREACH(ifn) {
		/* go through each interface addresses */
		IFADDR_READER_FOREACH(ifa, ifn) {
			/* correct family? */
			if (ifa->ifa_addr->sa_family != sa->sa_family)
				continue;

#ifdef INET6
			if (ifa->ifa_addr->sa_family == AF_INET6) {
				/* IPv6 address */
				struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
				sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
				if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
					/*
					 * Compare with the embedded scope id
					 * cleared so a link-local zone doesn't
					 * defeat the match; work on a copy so
					 * the interface address is untouched.
					 */
					memcpy(&sin6_tmp, sin1,
					    sizeof(struct sockaddr_in6));
					sin1 = &sin6_tmp;
					in6_clearscope(&sin1->sin6_addr);
				}
				sin2 = (struct sockaddr_in6 *)sa;
				/* compare addresses only, ports are ignored */
				if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
				    sizeof(struct in6_addr)) == 0) {
					/* found it */
					pserialize_read_exit(s);
					return (ifa);
				}
			} else
#endif
			if (ifa->ifa_addr->sa_family == AF_INET) {
				/* IPv4 address */
				struct sockaddr_in *sin1, *sin2;
				sin1 = (struct sockaddr_in *)ifa->ifa_addr;
				sin2 = (struct sockaddr_in *)sa;
				if (sin1->sin_addr.s_addr ==
				    sin2->sin_addr.s_addr) {
					/* found it */
					pserialize_read_exit(s);
					return (ifa);
				}
			}
			/* else, not AF_INET or AF_INET6, so skip */
		} /* end foreach ifa */
	} /* end foreach ifn */
	pserialize_read_exit(s);

	/* not found! */
	return (NULL);
}
3622
3623
3624 #ifdef __APPLE__
3625 /*
3626 * here we hack in a fix for Apple's m_copym for the case where the first mbuf
3627 * in the chain is a M_PKTHDR and the length is zero
3628 */
static void
sctp_pkthdr_fix(struct mbuf *m)
{
	/*
	 * Work around Apple's m_copym mishandling of a chain whose head is
	 * a zero-length M_PKTHDR mbuf: pull the first sizeof(long) bytes of
	 * the second mbuf forward into the header mbuf so it is non-empty.
	 */
	struct mbuf *m_nxt;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* not a PKTHDR */
		return;
	}

	if (m->m_len != 0) {
		/* not a zero length PKTHDR mbuf */
		return;
	}

	/* let's move in a word into the first mbuf... yes, ugly! */
	m_nxt = m->m_next;
	if (m_nxt == NULL) {
		/* umm... not a very useful mbuf chain... */
		return;
	}
	/*
	 * NOTE(review): strictly '>' (not '>='), so the second mbuf is never
	 * drained to zero length -- presumably deliberate, to avoid creating
	 * the empty-mbuf condition one link down; confirm before changing.
	 */
	if ((size_t)m_nxt->m_len > sizeof(long)) {
		/* move over a long */
		bcopy(mtod(m_nxt, void *), mtod(m, void *), sizeof(long));
		/* update mbuf data pointers and lengths */
		m->m_len += sizeof(long);
		m_nxt->m_data += sizeof(long);
		m_nxt->m_len -= sizeof(long);
	}
}
3659
/*
 * m_copym wrapper that first repairs a zero-length M_PKTHDR head mbuf
 * (see sctp_pkthdr_fix) before delegating to the system m_copym.
 */
inline struct mbuf *
sctp_m_copym(struct mbuf *m, int off, int len, int wait)
{
	struct mbuf *copy;

	sctp_pkthdr_fix(m);
	copy = m_copym(m, off, len, wait);
	return (copy);
}
3666 #endif /* __APPLE__ */
3667