/* $KAME: sctputil.c,v 1.39 2005/06/16 20:54:06 jinmei Exp $ */
2 /* $NetBSD: sctputil.c,v 1.14 2018/11/08 06:34:40 msaitoh Exp $ */
3
4 /*
5 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Cisco Systems, Inc.
19 * 4. Neither the name of the project nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: sctputil.c,v 1.14 2018/11/08 06:34:40 msaitoh Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_ipsec.h"
42 #include "opt_sctp.h"
43 #endif /* _KERNEL_OPT */
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/domain.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/kernel.h>
56 #include <sys/sysctl.h>
57
58 #include <sys/callout.h>
59
60 #include <net/route.h>
61
62 #ifdef INET6
63 #include <sys/domain.h>
64 #endif
65
66 #include <machine/limits.h>
67
68 #include <net/if.h>
69 #include <net/if_types.h>
70 #include <net/route.h>
71
72 #include <netinet/in.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/ip.h>
75 #include <netinet/in_pcb.h>
76 #include <netinet/in_var.h>
77 #include <netinet/ip_var.h>
78
79 #ifdef INET6
80 #include <netinet/ip6.h>
81 #include <netinet6/ip6_var.h>
82 #include <netinet6/scope6_var.h>
83 #include <netinet6/in6_pcb.h>
84
85 #endif /* INET6 */
86
87 #include <netinet/sctp_pcb.h>
88
89 #ifdef IPSEC
90 #include <netipsec/ipsec.h>
91 #include <netipsec/key.h>
92 #endif /* IPSEC */
93
94 #include <netinet/sctputil.h>
95 #include <netinet/sctp_var.h>
96 #ifdef INET6
97 #include <netinet6/sctp6_var.h>
98 #endif
99 #include <netinet/sctp_header.h>
100 #include <netinet/sctp_output.h>
101 #include <netinet/sctp_hashdriver.h>
102 #include <netinet/sctp_uio.h>
103 #include <netinet/sctp_timer.h>
104 #include <netinet/sctp_crc32.h>
105 #include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
106 #define NUMBER_OF_MTU_SIZES 18
107
108 #ifdef SCTP_DEBUG
109 extern u_int32_t sctp_debug_on;
110 #endif
111
112 #ifdef SCTP_STAT_LOGGING
113 int sctp_cwnd_log_at=0;
114 int sctp_cwnd_log_rolled=0;
115 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
116
117 void sctp_clr_stat_log(void)
118 {
119 sctp_cwnd_log_at=0;
120 sctp_cwnd_log_rolled=0;
121 }
122
123 void
124 sctp_log_strm_del_alt(u_int32_t tsn, u_int16_t sseq, int from)
125 {
126
127 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
128 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
129 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
130 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
131 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
132 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
133 sctp_cwnd_log_at++;
134 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
135 sctp_cwnd_log_at = 0;
136 sctp_cwnd_log_rolled = 1;
137 }
138
139 }
140
141 void
142 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
143 {
144
145 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
146 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAP;
147 sctp_clog[sctp_cwnd_log_at].x.map.base = map;
148 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
149 sctp_clog[sctp_cwnd_log_at].x.map.high = high;
150 sctp_cwnd_log_at++;
151 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
152 sctp_cwnd_log_at = 0;
153 sctp_cwnd_log_rolled = 1;
154 }
155 }
156
157 void
158 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
159 int from)
160 {
161
162 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
163 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_FR;
164 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
165 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
166 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
167 sctp_cwnd_log_at++;
168 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
169 sctp_cwnd_log_at = 0;
170 sctp_cwnd_log_rolled = 1;
171 }
172 }
173
174 void
175 sctp_log_strm_del(struct sctp_tmit_chunk *chk, struct sctp_tmit_chunk *poschk,
176 int from)
177 {
178
179 if (chk == NULL) {
180 printf("Gak log of NULL?\n");
181 return;
182 }
183 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
184 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
185 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = chk->rec.data.TSN_seq;
186 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = chk->rec.data.stream_seq;
187 if (poschk != NULL) {
188 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn =
189 poschk->rec.data.TSN_seq;
190 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq =
191 poschk->rec.data.stream_seq;
192 } else {
193 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
194 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
195 }
196 sctp_cwnd_log_at++;
197 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
198 sctp_cwnd_log_at = 0;
199 sctp_cwnd_log_rolled = 1;
200 }
201 }
202
203 void
204 sctp_log_cwnd(struct sctp_nets *net, int augment, uint8_t from)
205 {
206
207 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
208 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_CWND;
209 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
210 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
211 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
212 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
213 sctp_cwnd_log_at++;
214 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
215 sctp_cwnd_log_at = 0;
216 sctp_cwnd_log_rolled = 1;
217 }
218 }
219
220 void
221 sctp_log_maxburst(struct sctp_nets *net, int error, int burst, uint8_t from)
222 {
223 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
224 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAXBURST;
225 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
226 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
227 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
228 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
229 sctp_cwnd_log_at++;
230 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
231 sctp_cwnd_log_at = 0;
232 sctp_cwnd_log_rolled = 1;
233 }
234 }
235
236 void
237 sctp_log_rwnd(uint8_t from, u_int32_t peers_rwnd , u_int32_t snd_size, u_int32_t overhead)
238 {
239 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
240 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
241 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
242 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
243 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
244 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
245 sctp_cwnd_log_at++;
246 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
247 sctp_cwnd_log_at = 0;
248 sctp_cwnd_log_rolled = 1;
249 }
250 }
251
252 void
253 sctp_log_rwnd_set(uint8_t from, u_int32_t peers_rwnd , u_int32_t flight_size, u_int32_t overhead, u_int32_t a_rwndval)
254 {
255 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
256 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
257 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
258 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
259 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
260 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
261 sctp_cwnd_log_at++;
262 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
263 sctp_cwnd_log_at = 0;
264 sctp_cwnd_log_rolled = 1;
265 }
266 }
267
268 void
269 sctp_log_mbcnt(uint8_t from, u_int32_t total_oq , u_int32_t book, u_int32_t total_mbcnt_q, u_int32_t mbcnt)
270 {
271 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
272 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MBCNT;
273 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
274 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
275 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
276 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
277 sctp_cwnd_log_at++;
278 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
279 sctp_cwnd_log_at = 0;
280 sctp_cwnd_log_rolled = 1;
281 }
282 }
283
284 void
285 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc)
286 {
287
288 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
289 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_BLOCK;
290 sctp_clog[sctp_cwnd_log_at].x.blk.maxmb = (u_int16_t)(so->so_snd.sb_mbmax/1024);
291 sctp_clog[sctp_cwnd_log_at].x.blk.onmb = asoc->total_output_mbuf_queue_size;
292 sctp_clog[sctp_cwnd_log_at].x.blk.maxsb = (u_int16_t)(so->so_snd.sb_hiwat/1024);
293 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
294 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (u_int16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
295 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (u_int16_t)asoc->stream_queue_cnt;
296 sctp_cwnd_log_at++;
297 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
298 sctp_cwnd_log_at = 0;
299 sctp_cwnd_log_rolled = 1;
300 }
301 }
302
303 int
304 sctp_fill_stat_log(struct mbuf *m)
305 {
306 struct sctp_cwnd_log_req *req;
307 int size_limit, num, i, at, cnt_out=0;
308
309 if (m == NULL)
310 return (EINVAL);
311
312 size_limit = (m->m_len - sizeof(struct sctp_cwnd_log_req));
313 if (size_limit < sizeof(struct sctp_cwnd_log)) {
314 return (EINVAL);
315 }
316 req = mtod(m, struct sctp_cwnd_log_req *);
317 num = size_limit/sizeof(struct sctp_cwnd_log);
318 if (sctp_cwnd_log_rolled) {
319 req->num_in_log = SCTP_STAT_LOG_SIZE;
320 } else {
321 req->num_in_log = sctp_cwnd_log_at;
322 /* if the log has not rolled, we don't
323 * let you have old data.
324 */
325 if (req->end_at > sctp_cwnd_log_at) {
326 req->end_at = sctp_cwnd_log_at;
327 }
328 }
329 if ((num < SCTP_STAT_LOG_SIZE) &&
330 ((sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
331 /* we can't return all of it */
332 if (((req->start_at == 0) && (req->end_at == 0)) ||
333 (req->start_at >= SCTP_STAT_LOG_SIZE) ||
334 (req->end_at >= SCTP_STAT_LOG_SIZE)) {
335 /* No user request or user is wacked. */
336 req->num_ret = num;
337 req->end_at = sctp_cwnd_log_at - 1;
338 if ((sctp_cwnd_log_at - num) < 0) {
339 int cc;
340 cc = num - sctp_cwnd_log_at;
341 req->start_at = SCTP_STAT_LOG_SIZE - cc;
342 } else {
343 req->start_at = sctp_cwnd_log_at - num;
344 }
345 } else {
346 /* a user request */
347 int cc;
348 if (req->start_at > req->end_at) {
349 cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
350 (req->end_at + 1);
351 } else {
352
353 cc = req->end_at - req->start_at;
354 }
355 if (cc < num) {
356 num = cc;
357 }
358 req->num_ret = num;
359 }
360 } else {
361 /* We can return all of it */
362 req->start_at = 0;
363 req->end_at = sctp_cwnd_log_at - 1;
364 req->num_ret = sctp_cwnd_log_at;
365 }
366 for (i = 0, at = req->start_at; i < req->num_ret; i++) {
367 req->log[i] = sctp_clog[at];
368 cnt_out++;
369 at++;
370 if (at >= SCTP_STAT_LOG_SIZE)
371 at = 0;
372 }
373 m->m_len = (cnt_out * sizeof(struct sctp_cwnd_log_req)) + sizeof(struct sctp_cwnd_log_req);
374 return (0);
375 }
376
377 #endif
378
379 #ifdef SCTP_AUDITING_ENABLED
380 u_int8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
381 static int sctp_audit_indx = 0;
382
383 static
384 void sctp_print_audit_report(void)
385 {
386 int i;
387 int cnt;
388 cnt = 0;
389 for (i=sctp_audit_indx;i<SCTP_AUDIT_SIZE;i++) {
390 if ((sctp_audit_data[i][0] == 0xe0) &&
391 (sctp_audit_data[i][1] == 0x01)) {
392 cnt = 0;
393 printf("\n");
394 } else if (sctp_audit_data[i][0] == 0xf0) {
395 cnt = 0;
396 printf("\n");
397 } else if ((sctp_audit_data[i][0] == 0xc0) &&
398 (sctp_audit_data[i][1] == 0x01)) {
399 printf("\n");
400 cnt = 0;
401 }
402 printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
403 (uint32_t)sctp_audit_data[i][1]);
404 cnt++;
405 if ((cnt % 14) == 0)
406 printf("\n");
407 }
408 for (i=0;i<sctp_audit_indx;i++) {
409 if ((sctp_audit_data[i][0] == 0xe0) &&
410 (sctp_audit_data[i][1] == 0x01)) {
411 cnt = 0;
412 printf("\n");
413 } else if (sctp_audit_data[i][0] == 0xf0) {
414 cnt = 0;
415 printf("\n");
416 } else if ((sctp_audit_data[i][0] == 0xc0) &&
417 (sctp_audit_data[i][1] == 0x01)) {
418 printf("\n");
419 cnt = 0;
420 }
421 printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
422 (uint32_t)sctp_audit_data[i][1]);
423 cnt++;
424 if ((cnt % 14) == 0)
425 printf("\n");
426 }
427 printf("\n");
428 }
429
430 void sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
431 struct sctp_nets *net)
432 {
433 int resend_cnt, tot_out, rep, tot_book_cnt;
434 struct sctp_nets *lnet;
435 struct sctp_tmit_chunk *chk;
436
437 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
438 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
439 sctp_audit_indx++;
440 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
441 sctp_audit_indx = 0;
442 }
443 if (inp == NULL) {
444 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
445 sctp_audit_data[sctp_audit_indx][1] = 0x01;
446 sctp_audit_indx++;
447 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
448 sctp_audit_indx = 0;
449 }
450 return;
451 }
452 if (stcb == NULL) {
453 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
454 sctp_audit_data[sctp_audit_indx][1] = 0x02;
455 sctp_audit_indx++;
456 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
457 sctp_audit_indx = 0;
458 }
459 return;
460 }
461 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
462 sctp_audit_data[sctp_audit_indx][1] =
463 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
464 sctp_audit_indx++;
465 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
466 sctp_audit_indx = 0;
467 }
468 rep = 0;
469 tot_book_cnt = 0;
470 resend_cnt = tot_out = 0;
471 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
472 if (chk->sent == SCTP_DATAGRAM_RESEND) {
473 resend_cnt++;
474 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
475 tot_out += chk->book_size;
476 tot_book_cnt++;
477 }
478 }
479 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
480 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
481 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
482 sctp_audit_indx++;
483 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
484 sctp_audit_indx = 0;
485 }
486 printf("resend_cnt:%d asoc-tot:%d\n",
487 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
488 rep = 1;
489 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
490 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
491 sctp_audit_data[sctp_audit_indx][1] =
492 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
493 sctp_audit_indx++;
494 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
495 sctp_audit_indx = 0;
496 }
497 }
498 if (tot_out != stcb->asoc.total_flight) {
499 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
500 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
501 sctp_audit_indx++;
502 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
503 sctp_audit_indx = 0;
504 }
505 rep = 1;
506 printf("tot_flt:%d asoc_tot:%d\n", tot_out,
507 (int)stcb->asoc.total_flight);
508 stcb->asoc.total_flight = tot_out;
509 }
510 if (tot_book_cnt != stcb->asoc.total_flight_count) {
511 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
512 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
513 sctp_audit_indx++;
514 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
515 sctp_audit_indx = 0;
516 }
517 rep = 1;
518 printf("tot_flt_book:%d\n", tot_book);
519
520 stcb->asoc.total_flight_count = tot_book_cnt;
521 }
522 tot_out = 0;
523 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
524 tot_out += lnet->flight_size;
525 }
526 if (tot_out != stcb->asoc.total_flight) {
527 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
528 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
529 sctp_audit_indx++;
530 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
531 sctp_audit_indx = 0;
532 }
533 rep = 1;
534 printf("real flight:%d net total was %d\n",
535 stcb->asoc.total_flight, tot_out);
536 /* now corrective action */
537 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
538 tot_out = 0;
539 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
540 if ((chk->whoTo == lnet) &&
541 (chk->sent < SCTP_DATAGRAM_RESEND)) {
542 tot_out += chk->book_size;
543 }
544 }
545 if (lnet->flight_size != tot_out) {
546 printf("net:%x flight was %d corrected to %d\n",
547 (uint32_t)lnet, lnet->flight_size, tot_out);
548 lnet->flight_size = tot_out;
549 }
550
551 }
552 }
553
554 if (rep) {
555 sctp_print_audit_report();
556 }
557 }
558
559 void
560 sctp_audit_log(u_int8_t ev, u_int8_t fd)
561 {
562 sctp_audit_data[sctp_audit_indx][0] = ev;
563 sctp_audit_data[sctp_audit_indx][1] = fd;
564 sctp_audit_indx++;
565 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
566 sctp_audit_indx = 0;
567 }
568 }
569
570 #endif
571
572 /*
573 * a list of sizes based on typical mtu's, used only if next hop
574 * size not returned.
575 */
576 static int sctp_mtu_sizes[] = {
577 68,
578 296,
579 508,
580 512,
581 544,
582 576,
583 1006,
584 1492,
585 1500,
586 1536,
587 2002,
588 2048,
589 4352,
590 4464,
591 8166,
592 17914,
593 32000,
594 65535
595 };
596
597 int
598 find_next_best_mtu(int totsz)
599 {
600 int i, perfer;
601 /*
602 * if we are in here we must find the next best fit based on the
603 * size of the dg that failed to be sent.
604 */
605 perfer = 0;
606 for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
607 if (totsz < sctp_mtu_sizes[i]) {
608 perfer = i - 1;
609 if (perfer < 0)
610 perfer = 0;
611 break;
612 }
613 }
614 return (sctp_mtu_sizes[perfer]);
615 }
616
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Refill the endpoint's random store by hashing (MD5/SHA-1 via
	 * sctp_hash_digest) the endpoint's good random numbers together
	 * with a monotonically increasing counter.  The digest becomes
	 * the next batch of random bytes and store_at is reset so
	 * consumers start reading from the beginning.
	 *
	 * Note that we do no locking to protect this.  That is fine:
	 * if competing callers race here we just get more scrambled
	 * bytes in the random store, which is what we want.  There is
	 * a danger that two callers draw the same random numbers, but
	 * that is acceptable since the values are random anyway.
	 */
	m->store_at = 0;
	sctp_hash_digest((char *)m->random_numbers, sizeof(m->random_numbers),
	    (char *)&m->random_counter, sizeof(m->random_counter),
	    (char *)m->random_store);
	/* bump the counter so the next refill produces a new digest */
	m->random_counter++;
}
636
637 uint32_t
638 sctp_select_initial_TSN(struct sctp_pcb *m)
639 {
640 /*
641 * A true implementation should use random selection process to
642 * get the initial stream sequence number, using RFC1750 as a
643 * good guideline
644 */
645 u_long x, *xp;
646 uint8_t *p;
647
648 if (m->initial_sequence_debug != 0) {
649 u_int32_t ret;
650 ret = m->initial_sequence_debug;
651 m->initial_sequence_debug++;
652 return (ret);
653 }
654 if ((m->store_at+sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
655 /* Refill the random store */
656 sctp_fill_random_store(m);
657 }
658 p = &m->random_store[(int)m->store_at];
659 xp = (u_long *)p;
660 x = *xp;
661 m->store_at += sizeof(u_long);
662 return (x);
663 }
664
665 u_int32_t sctp_select_a_tag(struct sctp_inpcb *m)
666 {
667 u_long x, not_done;
668 struct timeval now;
669
670 SCTP_GETTIME_TIMEVAL(&now);
671 not_done = 1;
672 while (not_done) {
673 x = sctp_select_initial_TSN(&m->sctp_ep);
674 if (x == 0) {
675 /* we never use 0 */
676 continue;
677 }
678 if (sctp_is_vtag_good(m, x, &now)) {
679 not_done = 0;
680 }
681 }
682 return (x);
683 }
684
685
int
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
    int for_a_init, uint32_t override_tag )
{
	/*
	 * Initialize a freshly allocated association from the
	 * endpoint's defaults.  Anything left at zero is taken care of
	 * by the allocation routine's bzero.
	 *
	 * Returns 0 on success, or ENOMEM when the stream-out array or
	 * the TSN mapping array cannot be allocated.  On the second
	 * failure the stream array is freed again, so the caller only
	 * has to release the association itself.
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
	/* init all variables to a known value. */
	asoc->state = SCTP_STATE_INUSE;
	asoc->max_burst = m->sctp_ep.max_burst;
	asoc->heart_beat_delay = m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
	asoc->cookie_life = m->sctp_ep.def_cookie_life;

	/* use the caller-supplied vtag (e.g. cookie processing) or
	 * draw a fresh one */
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(m);
	}
	/* all outgoing sequence spaces start from one random TSN */
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&m->sctp_ep);
	asoc->t3timeout_highest_marked = asoc->asconf_seq_out;
	/* we are optimistic here: assume the peer supports these
	 * extensions until it tells us otherwise */
	asoc->peer_supports_asconf = 1;
	asoc->peer_supports_asconf_setprim = 1;
	asoc->peer_supports_pktdrop = 1;

	asoc->sent_queue_retran_cnt = 0;
	/* This will need to be adjusted */
	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = m->sctp_ep.initial_rto;

	asoc->max_init_times = m->sctp_ep.max_init_times;
	asoc->max_send_times = m->sctp_ep.max_send_times;
	asoc->def_net_failure = m->sctp_ep.def_net_failure;

	/* ECN Nonce initialization */
	asoc->ecn_nonce_allowed = 0;
	asoc->receiver_nonce_sum = 1;
	asoc->nonce_sum_expect_base = 1;
	asoc->nonce_sum_check = 1;
	asoc->nonce_resync_tsn = 0;
	asoc->nonce_wait_for_ecne = 0;
	asoc->nonce_wait_tsn = 0;

	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		struct in6pcb *inp6;


		/* Its a V6 socket */
		inp6 = (struct in6pcb *)m;
		asoc->ipv6_addr_legal = 1;
		/* Now look at the binding flag to see if V4 will be legal */
		if (
#if defined(__OpenBSD__)
		(0) /* we always do dual bind */
#elif defined (__NetBSD__)
		(inp6->in6p_flags & IN6P_IPV6_V6ONLY)
#else
		(inp6->inp_flags & IN6P_IPV6_V6ONLY)
#endif
		 == 0) {
			asoc->ipv4_addr_legal = 1;
		} else {
			/* V4 addresses are NOT legal on the association */
			asoc->ipv4_addr_legal = 0;
		}
	} else {
		/* Its a V4 socket, no - V6 */
		asoc->ipv4_addr_legal = 1;
		asoc->ipv6_addr_legal = 0;
	}


	/* never advertise a window smaller than SCTP_MINIMAL_RWND */
	asoc->my_rwnd = uimax(m->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat;

	asoc->smallest_mtu = m->sctp_frag_point;
	asoc->minrto = m->sctp_ep.sctp_minrto;
	asoc->maxrto = m->sctp_ep.sctp_maxrto;

	LIST_INIT(&asoc->sctp_local_addr_list);
	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	asoc->last_asconf_ack_sent = NULL;
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;

	/*
	 * Now the stream parameters, here we allocate space for all
	 * streams that we request by default.
	 */
	asoc->streamoutcnt = asoc->pre_open_streams =
	    m->sctp_ep.pre_open_stream_count;
	asoc->strmout = malloc(asoc->streamoutcnt *
	    sizeof(struct sctp_stream_out), M_PCB, M_NOWAIT);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff,
		 * also NOTE when we get the INIT-ACK back (for INIT sender)
		 * we MUST reduce the count (streamoutcnt) but first check
		 * if we sent to any of the upper streams that were dropped
		 * (if some were). Those that were dropped must be notified
		 * to the upper layer as failed to send.
		 */
		asoc->strmout[i].next_sequence_sent = 0x0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].stream_no = i;
		asoc->strmout[i].next_spoke.tqe_next = 0;
		asoc->strmout[i].next_spoke.tqe_prev = 0;
	}
	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	asoc->mapping_array = malloc(asoc->mapping_array_size,
	    M_PCB, M_NOWAIT);
	if (asoc->mapping_array == NULL) {
		/* unwind the stream allocation before failing */
		free(asoc->strmout, M_PCB);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->out_wheel);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->reasmqueue);
	TAILQ_INIT(&asoc->delivery_queue);
	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;

	TAILQ_INIT(&asoc->asconf_queue);
	return (0);
}
840
841 int
842 sctp_expand_mapping_array(struct sctp_association *asoc)
843 {
844 /* mapping array needs to grow */
845 u_int8_t *new_array;
846 uint16_t new_size, old_size;
847
848 old_size = asoc->mapping_array_size;
849 new_size = old_size + SCTP_MAPPING_ARRAY_INCR;
850 new_array = malloc(new_size, M_PCB, M_NOWAIT);
851 if (new_array == NULL) {
852 /* can't get more, forget it */
853 printf("No memory for expansion of SCTP mapping array %d\n",
854 new_size);
855 return (-1);
856 }
857 memcpy(new_array, asoc->mapping_array, old_size);
858 memset(new_array + old_size, 0, SCTP_MAPPING_ARRAY_INCR);
859 free(asoc->mapping_array, M_PCB);
860 asoc->mapping_array = new_array;
861 asoc->mapping_array_size = new_size;
862 return (0);
863 }
864
865 static void
866 sctp_timeout_handler(void *t)
867 {
868 struct sctp_inpcb *inp;
869 struct sctp_tcb *stcb;
870 struct sctp_nets *net;
871 struct sctp_timer *tmr;
872 int did_output;
873
874 mutex_enter(softnet_lock);
875 tmr = (struct sctp_timer *)t;
876 inp = (struct sctp_inpcb *)tmr->ep;
877 stcb = (struct sctp_tcb *)tmr->tcb;
878 net = (struct sctp_nets *)tmr->net;
879 did_output = 1;
880
881 #ifdef SCTP_AUDITING_ENABLED
882 sctp_audit_log(0xF0, (u_int8_t)tmr->type);
883 sctp_auditing(3, inp, stcb, net);
884 #endif
885 sctp_pegs[SCTP_TIMERS_EXP]++;
886
887 if (inp == NULL) {
888 return;
889 }
890
891 SCTP_INP_WLOCK(inp);
892 if (inp->sctp_socket == 0) {
893 mutex_exit(softnet_lock);
894 SCTP_INP_WUNLOCK(inp);
895 return;
896 }
897 if (stcb) {
898 if (stcb->asoc.state == 0) {
899 mutex_exit(softnet_lock);
900 SCTP_INP_WUNLOCK(inp);
901 return;
902 }
903 }
904 #ifdef SCTP_DEBUG
905 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
906 printf("Timer type %d goes off\n", tmr->type);
907 }
908 #endif /* SCTP_DEBUG */
909 #ifndef __NetBSD__
910 if (!callout_active(&tmr->timer)) {
911 SCTP_INP_WUNLOCK(inp);
912 return;
913 }
914 #endif
915 if (stcb) {
916 SCTP_TCB_LOCK(stcb);
917 }
918 SCTP_INP_INCR_REF(inp);
919 SCTP_INP_WUNLOCK(inp);
920
921 switch (tmr->type) {
922 case SCTP_TIMER_TYPE_ITERATOR:
923 {
924 struct sctp_iterator *it;
925 it = (struct sctp_iterator *)inp;
926 sctp_iterator_timer(it);
927 }
928 break;
929 /* call the handler for the appropriate timer type */
930 case SCTP_TIMER_TYPE_SEND:
931 sctp_pegs[SCTP_TMIT_TIMER]++;
932 stcb->asoc.num_send_timers_up--;
933 if (stcb->asoc.num_send_timers_up < 0) {
934 stcb->asoc.num_send_timers_up = 0;
935 }
936 if (sctp_t3rxt_timer(inp, stcb, net)) {
937 /* no need to unlock on tcb its gone */
938
939 goto out_decr;
940 }
941 #ifdef SCTP_AUDITING_ENABLED
942 sctp_auditing(4, inp, stcb, net);
943 #endif
944 sctp_chunk_output(inp, stcb, 1);
945 if ((stcb->asoc.num_send_timers_up == 0) &&
946 (stcb->asoc.sent_queue_cnt > 0)
947 ) {
948 struct sctp_tmit_chunk *chk;
949 /*
950 * safeguard. If there on some on the sent queue
951 * somewhere but no timers running something is
952 * wrong... so we start a timer on the first chunk
953 * on the send queue on whatever net it is sent to.
954 */
955 sctp_pegs[SCTP_T3_SAFEGRD]++;
956 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
957 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
958 chk->whoTo);
959 }
960 break;
961 case SCTP_TIMER_TYPE_INIT:
962 if (sctp_t1init_timer(inp, stcb, net)) {
963 /* no need to unlock on tcb its gone */
964 goto out_decr;
965 }
966 /* We do output but not here */
967 did_output = 0;
968 break;
969 case SCTP_TIMER_TYPE_RECV:
970 sctp_pegs[SCTP_RECV_TIMER]++;
971 sctp_send_sack(stcb);
972 #ifdef SCTP_AUDITING_ENABLED
973 sctp_auditing(4, inp, stcb, net);
974 #endif
975 sctp_chunk_output(inp, stcb, 4);
976 break;
977 case SCTP_TIMER_TYPE_SHUTDOWN:
978 if (sctp_shutdown_timer(inp, stcb, net) ) {
979 /* no need to unlock on tcb its gone */
980 goto out_decr;
981 }
982 #ifdef SCTP_AUDITING_ENABLED
983 sctp_auditing(4, inp, stcb, net);
984 #endif
985 sctp_chunk_output(inp, stcb, 5);
986 break;
987 case SCTP_TIMER_TYPE_HEARTBEAT:
988 if (sctp_heartbeat_timer(inp, stcb, net)) {
989 /* no need to unlock on tcb its gone */
990 goto out_decr;
991 }
992 #ifdef SCTP_AUDITING_ENABLED
993 sctp_auditing(4, inp, stcb, net);
994 #endif
995 sctp_chunk_output(inp, stcb, 6);
996 break;
997 case SCTP_TIMER_TYPE_COOKIE:
998 if (sctp_cookie_timer(inp, stcb, net)) {
999 /* no need to unlock on tcb its gone */
1000 goto out_decr;
1001 }
1002 #ifdef SCTP_AUDITING_ENABLED
1003 sctp_auditing(4, inp, stcb, net);
1004 #endif
1005 sctp_chunk_output(inp, stcb, 1);
1006 break;
1007 case SCTP_TIMER_TYPE_NEWCOOKIE:
1008 {
1009 struct timeval tv;
1010 int i, secret;
1011 SCTP_GETTIME_TIMEVAL(&tv);
1012 SCTP_INP_WLOCK(inp);
1013 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1014 inp->sctp_ep.last_secret_number =
1015 inp->sctp_ep.current_secret_number;
1016 inp->sctp_ep.current_secret_number++;
1017 if (inp->sctp_ep.current_secret_number >=
1018 SCTP_HOW_MANY_SECRETS) {
1019 inp->sctp_ep.current_secret_number = 0;
1020 }
1021 secret = (int)inp->sctp_ep.current_secret_number;
1022 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1023 inp->sctp_ep.secret_key[secret][i] =
1024 sctp_select_initial_TSN(&inp->sctp_ep);
1025 }
1026 SCTP_INP_WUNLOCK(inp);
1027 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1028 }
1029 did_output = 0;
1030 break;
1031 case SCTP_TIMER_TYPE_PATHMTURAISE:
1032 sctp_pathmtu_timer(inp, stcb, net);
1033 did_output = 0;
1034 break;
1035 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1036 if (sctp_shutdownack_timer(inp, stcb, net)) {
1037 /* no need to unlock on tcb its gone */
1038 goto out_decr;
1039 }
1040 #ifdef SCTP_AUDITING_ENABLED
1041 sctp_auditing(4, inp, stcb, net);
1042 #endif
1043 sctp_chunk_output(inp, stcb, 7);
1044 break;
1045 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1046 sctp_abort_an_association(inp, stcb,
1047 SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
1048 /* no need to unlock on tcb its gone */
1049 goto out_decr;
1050 break;
1051
1052 case SCTP_TIMER_TYPE_STRRESET:
1053 if (sctp_strreset_timer(inp, stcb, net)) {
1054 /* no need to unlock on tcb its gone */
1055 goto out_decr;
1056 }
1057 sctp_chunk_output(inp, stcb, 9);
1058 break;
1059
1060 case SCTP_TIMER_TYPE_ASCONF:
1061 if (sctp_asconf_timer(inp, stcb, net)) {
1062 /* no need to unlock on tcb its gone */
1063 goto out_decr;
1064 }
1065 #ifdef SCTP_AUDITING_ENABLED
1066 sctp_auditing(4, inp, stcb, net);
1067 #endif
1068 sctp_chunk_output(inp, stcb, 8);
1069 break;
1070
1071 case SCTP_TIMER_TYPE_AUTOCLOSE:
1072 sctp_autoclose_timer(inp, stcb, net);
1073 sctp_chunk_output(inp, stcb, 10);
1074 did_output = 0;
1075 break;
1076 case SCTP_TIMER_TYPE_INPKILL:
1077 /* special case, take away our
1078 * increment since WE are the killer
1079 */
1080 SCTP_INP_WLOCK(inp);
1081 SCTP_INP_DECR_REF(inp);
1082 SCTP_INP_WUNLOCK(inp);
1083 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
1084 sctp_inpcb_free(inp, 1);
1085 goto out_no_decr;
1086 break;
1087 default:
1088 #ifdef SCTP_DEBUG
1089 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1090 printf("sctp_timeout_handler:unknown timer %d\n",
1091 tmr->type);
1092 }
1093 #endif /* SCTP_DEBUG */
1094 break;
1095 };
1096 #ifdef SCTP_AUDITING_ENABLED
1097 sctp_audit_log(0xF1, (u_int8_t)tmr->type);
1098 sctp_auditing(5, inp, stcb, net);
1099 #endif
1100 if (did_output) {
1101 /*
1102 * Now we need to clean up the control chunk chain if an
1103 * ECNE is on it. It must be marked as UNSENT again so next
1104 * call will continue to send it until such time that we get
1105 * a CWR, to remove it. It is, however, less likely that we
1106 * will find a ecn echo on the chain though.
1107 */
1108 sctp_fix_ecn_echo(&stcb->asoc);
1109 }
1110 if (stcb) {
1111 SCTP_TCB_UNLOCK(stcb);
1112 }
1113 out_decr:
1114 SCTP_INP_WLOCK(inp);
1115 SCTP_INP_DECR_REF(inp);
1116 SCTP_INP_WUNLOCK(inp);
1117
1118 out_no_decr:
1119
1120 mutex_exit(softnet_lock);
1121 }
1122
/*
 * Arm the timer of the given type for an endpoint/association/destination.
 * Which of stcb/net must be non-NULL depends on the timer type.  The
 * expiry (to_ticks) comes from the destination RTO, from per-endpoint
 * tick defaults, or (HEARTBEAT) from RTO + HB delay + random jitter.
 *
 * Returns 0 on success, EALREADY if the callout is already pending (the
 * running timer is left unchanged), or EFAULT for a NULL required
 * argument, an unknown type, or an unusable tick count.
 */
int
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
	struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	if (inp == NULL)
		return (EFAULT);

	to_ticks = 0;

	tmr = NULL;
	switch (t_type) {
	case SCTP_TIMER_TYPE_ITERATOR:
	{
		/* for the iterator timer, "inp" really carries the iterator */
		struct sctp_iterator *it;
		it = (struct sctp_iterator *)inp;
		tmr = &it->tmr;
		to_ticks = SCTP_ITERATOR_TICKS;
	}
	break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;
			if ((stcb == NULL) || (net == NULL)) {
				return (EFAULT);
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* no RTT measured yet: fall back to the initial RTO */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default
		 * usually about 1 minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV];
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}

		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO.
		 * Even though we use a different timer. We also add the
		 * HB timer PLUS a random jitter.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		{
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf=0;
			struct sctp_nets *lnet;

			/* count addresses still awaiting confirmation */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
					cnt_of_unconf++;
				}
			}
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("HB timer to start unconfirmed:%d hb_delay:%d\n",
				    cnt_of_unconf, stcb->asoc.heart_beat_delay);
			}
#endif
			if (stcb->asoc.hb_random_idx > 3) {
				/*
				 * Jitter pool exhausted: refill it from the
				 * TSN PRNG.
				 * NOTE(review): copies
				 * sizeof(hb_random_values) bytes out of the
				 * 4-byte rndval; assumes the pool array is
				 * exactly 4 bytes -- confirm.
				 */
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				this_random = stcb->asoc.hb_random_values[0];
				stcb->asoc.hb_random_idx = 0;
				stcb->asoc.hb_ect_randombit = 0;
			} else {
				this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
				stcb->asoc.hb_random_idx++;
				stcb->asoc.hb_ect_randombit = 0;
			}
			/*
			 * this_random will be 0 - 256 ms
			 * RTO is in ms.
			 */
			if ((stcb->asoc.heart_beat_delay == 0) &&
			    (cnt_of_unconf == 0)) {
				/* no HB on this inp after confirmations */
				return (0);
			}
			if (net) {
				int delay;
				delay = stcb->asoc.heart_beat_delay;
				/*
				 * any reachable, in-scope address still
				 * unconfirmed drops the configured delay so
				 * confirmation HBs go out promptly
				 */
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks *= hz;
			to_ticks /= 1000;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("Timer to expire in %d ticks\n", to_ticks);
			}
#endif
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since
		 * one RTT was compelete. If a retran happened then we will
		 * be using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here
		 * ususually about 60 minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the
		 * signature_chage timer since that has
		 * stopped and we are in the GONE state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = (SCTP_INP_KILL_TIMEOUT * hz) / 1000;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU
		 * ususually about 10 minutes.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		if (net == NULL) {
			return (EFAULT);
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer
		 * usually about 3 minutes.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the inp
		 * but its value is from the RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the inp
		 * but its value is from the RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return (EFAULT);
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/* Really an error since stcb is NOT set to autoclose */
			return (0);
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("sctp_timer_start:Unknown timer type %d\n",
			    t_type);
		}
#endif /* SCTP_DEBUG */
		return (EFAULT);
		break;
	};
	/* every case must have produced a timer and a positive expiry */
	if ((to_ticks <= 0) || (tmr == NULL)) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
			    t_type, to_ticks, tmr);
		}
#endif /* SCTP_DEBUG */
		return (EFAULT);
	}
	if (callout_pending(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running.
		 * if it is we leave the current one up unchanged
		 */
		return (EALREADY);
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		/* track how many send (T3-rtx) timers are outstanding */
		stcb->asoc.num_send_timers_up++;
	}
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return (0);
}
1434
1435 int
1436 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1437 struct sctp_nets *net)
1438 {
1439 struct sctp_timer *tmr;
1440
1441 if (inp == NULL)
1442 return (EFAULT);
1443
1444 tmr = NULL;
1445 switch (t_type) {
1446 case SCTP_TIMER_TYPE_ITERATOR:
1447 {
1448 struct sctp_iterator *it;
1449 it = (struct sctp_iterator *)inp;
1450 tmr = &it->tmr;
1451 }
1452 break;
1453 case SCTP_TIMER_TYPE_SEND:
1454 if ((stcb == NULL) || (net == NULL)) {
1455 return (EFAULT);
1456 }
1457 tmr = &net->rxt_timer;
1458 break;
1459 case SCTP_TIMER_TYPE_INIT:
1460 if ((stcb == NULL) || (net == NULL)) {
1461 return (EFAULT);
1462 }
1463 tmr = &net->rxt_timer;
1464 break;
1465 case SCTP_TIMER_TYPE_RECV:
1466 if (stcb == NULL) {
1467 return (EFAULT);
1468 }
1469 tmr = &stcb->asoc.dack_timer;
1470 break;
1471 case SCTP_TIMER_TYPE_SHUTDOWN:
1472 if ((stcb == NULL) || (net == NULL)) {
1473 return (EFAULT);
1474 }
1475 tmr = &net->rxt_timer;
1476 break;
1477 case SCTP_TIMER_TYPE_HEARTBEAT:
1478 if (stcb == NULL) {
1479 return (EFAULT);
1480 }
1481 tmr = &stcb->asoc.hb_timer;
1482 break;
1483 case SCTP_TIMER_TYPE_COOKIE:
1484 if ((stcb == NULL) || (net == NULL)) {
1485 return (EFAULT);
1486 }
1487 tmr = &net->rxt_timer;
1488 break;
1489 case SCTP_TIMER_TYPE_NEWCOOKIE:
1490 /* nothing needed but the endpoint here */
1491 tmr = &inp->sctp_ep.signature_change;
1492 /* We re-use the newcookie timer for
1493 * the INP kill timer. We must assure
1494 * that we do not kill it by accident.
1495 */
1496 break;
1497 case SCTP_TIMER_TYPE_INPKILL:
1498 /*
1499 * The inp is setup to die. We re-use the
1500 * signature_chage timer since that has
1501 * stopped and we are in the GONE state.
1502 */
1503 tmr = &inp->sctp_ep.signature_change;
1504 break;
1505 case SCTP_TIMER_TYPE_PATHMTURAISE:
1506 if (stcb == NULL) {
1507 return (EFAULT);
1508 }
1509 if (net == NULL) {
1510 return (EFAULT);
1511 }
1512 tmr = &net->pmtu_timer;
1513 break;
1514 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1515 if ((stcb == NULL) || (net == NULL)) {
1516 return (EFAULT);
1517 }
1518 tmr = &net->rxt_timer;
1519 break;
1520 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1521 if (stcb == NULL) {
1522 return (EFAULT);
1523 }
1524 tmr = &stcb->asoc.shut_guard_timer;
1525 break;
1526 case SCTP_TIMER_TYPE_STRRESET:
1527 if (stcb == NULL) {
1528 return (EFAULT);
1529 }
1530 tmr = &stcb->asoc.strreset_timer;
1531 break;
1532 case SCTP_TIMER_TYPE_ASCONF:
1533 if (stcb == NULL) {
1534 return (EFAULT);
1535 }
1536 tmr = &stcb->asoc.asconf_timer;
1537 break;
1538 case SCTP_TIMER_TYPE_AUTOCLOSE:
1539 if (stcb == NULL) {
1540 return (EFAULT);
1541 }
1542 tmr = &stcb->asoc.autoclose_timer;
1543 break;
1544 default:
1545 #ifdef SCTP_DEBUG
1546 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1547 printf("sctp_timer_stop:Unknown timer type %d\n",
1548 t_type);
1549 }
1550 #endif /* SCTP_DEBUG */
1551 break;
1552 };
1553 if (tmr == NULL)
1554 return (EFAULT);
1555
1556 if ((tmr->type != t_type) && tmr->type) {
1557 /*
1558 * Ok we have a timer that is under joint use. Cookie timer
1559 * per chance with the SEND timer. We therefore are NOT
1560 * running the timer that the caller wants stopped. So just
1561 * return.
1562 */
1563 return (0);
1564 }
1565 if (t_type == SCTP_TIMER_TYPE_SEND) {
1566 stcb->asoc.num_send_timers_up--;
1567 if (stcb->asoc.num_send_timers_up < 0) {
1568 stcb->asoc.num_send_timers_up = 0;
1569 }
1570 }
1571 callout_stop(&tmr->timer);
1572 return (0);
1573 }
1574
1575 #ifdef SCTP_USE_ADLER32
1576 static uint32_t
1577 update_adler32(uint32_t adler, uint8_t *buf, int32_t len)
1578 {
1579 u_int32_t s1 = adler & 0xffff;
1580 u_int32_t s2 = (adler >> 16) & 0xffff;
1581 int n;
1582
1583 for (n = 0; n < len; n++, buf++) {
1584 /* s1 = (s1 + buf[n]) % BASE */
1585 /* first we add */
1586 s1 = (s1 + *buf);
1587 /*
1588 * now if we need to, we do a mod by subtracting. It seems
1589 * a bit faster since I really will only ever do one subtract
1590 * at the MOST, since buf[n] is a max of 255.
1591 */
1592 if (s1 >= SCTP_ADLER32_BASE) {
1593 s1 -= SCTP_ADLER32_BASE;
1594 }
1595 /* s2 = (s2 + s1) % BASE */
1596 /* first we add */
1597 s2 = (s2 + s1);
1598 /*
1599 * again, it is more efficent (it seems) to subtract since
1600 * the most s2 will ever be is (BASE-1 + BASE-1) in the worse
1601 * case. This would then be (2 * BASE) - 2, which will still
1602 * only do one subtract. On Intel this is much better to do
1603 * this way and avoid the divide. Have not -pg'd on sparc.
1604 */
1605 if (s2 >= SCTP_ADLER32_BASE) {
1606 s2 -= SCTP_ADLER32_BASE;
1607 }
1608 }
1609 /* Return the adler32 of the bytes buf[0..len-1] */
1610 return ((s2 << 16) + s1);
1611 }
1612
1613 #endif
1614
1615
1616 u_int32_t
1617 sctp_calculate_len(struct mbuf *m)
1618 {
1619 u_int32_t tlen=0;
1620 struct mbuf *at;
1621 at = m;
1622 while (at) {
1623 tlen += at->m_len;
1624 at = at->m_next;
1625 }
1626 return (tlen);
1627 }
1628
1629 #if defined(SCTP_WITH_NO_CSUM)
1630
1631 uint32_t
1632 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1633 {
1634 /*
1635 * given a mbuf chain with a packetheader offset by 'offset'
1636 * pointing at a sctphdr (with csum set to 0) go through
1637 * the chain of m_next's and calculate the SCTP checksum.
1638 * This is currently Adler32 but will change to CRC32x
1639 * soon. Also has a side bonus calculate the total length
1640 * of the mbuf chain.
1641 * Note: if offset is greater than the total mbuf length,
1642 * checksum=1, pktlen=0 is returned (ie. no real error code)
1643 */
1644 if (pktlen == NULL)
1645 return (0);
1646 *pktlen = sctp_calculate_len(m);
1647 return (0);
1648 }
1649
1650 #elif defined(SCTP_USE_INCHKSUM)
1651
1652 #include <machine/in_cksum.h>
1653
1654 uint32_t
1655 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1656 {
1657 /*
1658 * given a mbuf chain with a packetheader offset by 'offset'
1659 * pointing at a sctphdr (with csum set to 0) go through
1660 * the chain of m_next's and calculate the SCTP checksum.
1661 * This is currently Adler32 but will change to CRC32x
1662 * soon. Also has a side bonus calculate the total length
1663 * of the mbuf chain.
1664 * Note: if offset is greater than the total mbuf length,
1665 * checksum=1, pktlen=0 is returned (ie. no real error code)
1666 */
1667 int32_t tlen=0;
1668 struct mbuf *at;
1669 uint32_t the_sum, retsum;
1670
1671 at = m;
1672 while (at) {
1673 tlen += at->m_len;
1674 at = at->m_next;
1675 }
1676 the_sum = (uint32_t)(in_cksum_skip(m, tlen, offset));
1677 if (pktlen != NULL)
1678 *pktlen = (tlen-offset);
1679 retsum = htons(the_sum);
1680 return (the_sum);
1681 }
1682
1683 #else
1684
1685 uint32_t
1686 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1687 {
1688 /*
1689 * given a mbuf chain with a packetheader offset by 'offset'
1690 * pointing at a sctphdr (with csum set to 0) go through
1691 * the chain of m_next's and calculate the SCTP checksum.
1692 * This is currently Adler32 but will change to CRC32x
1693 * soon. Also has a side bonus calculate the total length
1694 * of the mbuf chain.
1695 * Note: if offset is greater than the total mbuf length,
1696 * checksum=1, pktlen=0 is returned (ie. no real error code)
1697 */
1698 int32_t tlen=0;
1699 #ifdef SCTP_USE_ADLER32
1700 uint32_t base = 1L;
1701 #else
1702 uint32_t base = 0xffffffff;
1703 #endif /* SCTP_USE_ADLER32 */
1704 struct mbuf *at;
1705 at = m;
1706 /* find the correct mbuf and offset into mbuf */
1707 while ((at != NULL) && (offset > (uint32_t)at->m_len)) {
1708 offset -= at->m_len; /* update remaining offset left */
1709 at = at->m_next;
1710 }
1711
1712 while (at != NULL) {
1713 #ifdef SCTP_USE_ADLER32
1714 base = update_adler32(base, at->m_data + offset,
1715 at->m_len - offset);
1716 #else
1717 base = update_crc32(base, at->m_data + offset,
1718 at->m_len - offset);
1719 #endif /* SCTP_USE_ADLER32 */
1720 tlen += at->m_len - offset;
1721 /* we only offset once into the first mbuf */
1722 if (offset) {
1723 offset = 0;
1724 }
1725 at = at->m_next;
1726 }
1727 if (pktlen != NULL) {
1728 *pktlen = tlen;
1729 }
1730 #ifdef SCTP_USE_ADLER32
1731 /* Adler32 */
1732 base = htonl(base);
1733 #else
1734 /* CRC-32c */
1735 base = sctp_csum_finalize(base);
1736 #endif
1737 return (base);
1738 }
1739
1740
1741 #endif
1742
1743 void
1744 sctp_mtu_size_reset(struct sctp_inpcb *inp,
1745 struct sctp_association *asoc, u_long mtu)
1746 {
1747 /*
1748 * Reset the P-MTU size on this association, this involves changing
1749 * the asoc MTU, going through ANY chunk+overhead larger than mtu
1750 * to allow the DF flag to be cleared.
1751 */
1752 struct sctp_tmit_chunk *chk;
1753 struct sctp_stream_out *strm;
1754 unsigned int eff_mtu, ovh;
1755 asoc->smallest_mtu = mtu;
1756 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1757 ovh = SCTP_MIN_OVERHEAD;
1758 } else {
1759 ovh = SCTP_MIN_V4_OVERHEAD;
1760 }
1761 eff_mtu = mtu - ovh;
1762 /* Now mark any chunks that need to let IP fragment */
1763 TAILQ_FOREACH(strm, &asoc->out_wheel, next_spoke) {
1764 TAILQ_FOREACH(chk, &strm->outqueue, sctp_next) {
1765 if (chk->send_size > eff_mtu) {
1766 chk->flags &= SCTP_DONT_FRAGMENT;
1767 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1768 }
1769 }
1770 }
1771 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
1772 if (chk->send_size > eff_mtu) {
1773 chk->flags &= SCTP_DONT_FRAGMENT;
1774 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1775 }
1776 }
1777 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
1778 if (chk->send_size > eff_mtu) {
1779 chk->flags &= SCTP_DONT_FRAGMENT;
1780 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1781 }
1782 }
1783 }
1784
1785
1786 /*
1787 * given an association and starting time of the current RTT period
1788 * return RTO in number of usecs
1789 * net should point to the current network
1790 */
u_int32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
		   struct sctp_association *asoc,
		   struct sctp_nets *net,
		   struct timeval *old)
{
	/*
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of usecs.
	 */
	int calc_time = 0;	/* measured RTT, in ms despite the name */
	unsigned int new_rto = 0;
	int first_measure = 0;
	struct timeval now;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		/* seconds advanced: ms from whole seconds ... */
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			/* ... plus the microsecond surplus */
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec)/1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec/1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec)/1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			/* sub-second RTT */
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec)/1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			return (((net->lastsa >> 2) + net->lastsv) >> 1);
		} else {
			/* impossible .. garbage in nothing out */
			return (((net->lastsa >> 2) + net->lastsv) >> 1);
		}
	} else {
		/* Clock wrapped? */
		return (((net->lastsa >> 2) + net->lastsv) >> 1);
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
#if 0
	/* if (net->lastsv || net->lastsa) {*/
	/* per Section 5.3.1 C3 in SCTP */
	/* net->lastsv = (int) *//* RTTVAR */
	/*	(((double)(1.0 - 0.25) * (double)net->lastsv) +
	(double)(0.25 * (double)abs(net->lastsa - calc_time)));
	net->lastsa = (int) */ /* SRTT */
	/*(((double)(1.0 - 0.125) * (double)net->lastsa) +
	(double)(0.125 * (double)calc_time));
	} else {
	*//* the first RTT calculation, per C2 Section 5.3.1 */
	/* net->lastsa = calc_time;	*//* SRTT */
	/* net->lastsv = calc_time / 2;	*//* RTTVAR */
	/* }*/
	/* if RTTVAR goes to 0 you set to clock grainularity */
	/* if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = net->lastsa + 4 * net->lastsv;
	*/
#endif
	/* this is Van Jacobson's integer version */
	if (net->RTO) {
		/*
		 * Scaled fixed-point update: presumably lastsa holds
		 * ~8*SRTT and lastsv ~4*RTTVAR, as in the classic VJ
		 * TCP code -- NOTE(review): scaling not stated here,
		 * confirm against where lastsa/lastsv are consumed.
		 */
		calc_time -= (net->lastsa >> 3);
		net->lastsa += calc_time;
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> 2);
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* never let the variance collapse to zero */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		/*
		 * NOTE(review): the first sample is stored unscaled,
		 * unlike the shifted update above -- historic KAME
		 * behavior, confirm before changing.
		 */
		net->lastsa = calc_time;
		net->lastsv = calc_time >> 1;
		first_measure = 1;
	}
	new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		/* large RTO: flag a satellite-like path once */
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTT Smoothed */
	return ((u_int32_t)new_rto);
}
1900
1901
1902 /*
1903 * return a pointer to a contiguous piece of data from the given
1904 * mbuf chain starting at 'off' for 'len' bytes. If the desired
1905 * piece spans more than one mbuf, a copy is made at 'ptr'.
1906 * caller must ensure that the buffer size is >= 'len'
1907 * returns NULL if there there isn't 'len' bytes in the chain.
1908 */
1909 void *
1910 sctp_m_getptr(struct mbuf *m, int off, int len, u_int8_t *in_ptr)
1911 {
1912 uint32_t count;
1913 uint8_t *ptr;
1914 ptr = in_ptr;
1915 if ((off < 0) || (len <= 0))
1916 return (NULL);
1917
1918 /* find the desired start location */
1919 while ((m != NULL) && (off > 0)) {
1920 if (off < m->m_len)
1921 break;
1922 off -= m->m_len;
1923 m = m->m_next;
1924 }
1925 if (m == NULL)
1926 return (NULL);
1927
1928 /* is the current mbuf large enough (eg. contiguous)? */
1929 if ((m->m_len - off) >= len) {
1930 return ((void *)(mtod(m, vaddr_t) + off));
1931 } else {
1932 /* else, it spans more than one mbuf, so save a temp copy... */
1933 while ((m != NULL) && (len > 0)) {
1934 count = uimin(m->m_len - off, len);
1935 memcpy(ptr, (void *)(mtod(m, vaddr_t) + off), count);
1936 len -= count;
1937 ptr += count;
1938 off = 0;
1939 m = m->m_next;
1940 }
1941 if ((m == NULL) && (len > 0))
1942 return (NULL);
1943 else
1944 return ((void *)in_ptr);
1945 }
1946 }
1947
1948
1949 struct sctp_paramhdr *
1950 sctp_get_next_param(struct mbuf *m,
1951 int offset,
1952 struct sctp_paramhdr *pull,
1953 int pull_limit)
1954 {
1955 /* This just provides a typed signature to Peter's Pull routine */
1956 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
1957 (u_int8_t *)pull));
1958 }
1959
1960
1961 int
1962 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
1963 {
1964 /*
1965 * add padlen bytes of 0 filled padding to the end of the mbuf.
1966 * If padlen is > 3 this routine will fail.
1967 */
1968 u_int8_t *dp;
1969 int i;
1970 if (padlen > 3) {
1971 return (ENOBUFS);
1972 }
1973 if (M_TRAILINGSPACE(m)) {
1974 /*
1975 * The easy way.
1976 * We hope the majority of the time we hit here :)
1977 */
1978 dp = (u_int8_t *)(mtod(m, vaddr_t) + m->m_len);
1979 m->m_len += padlen;
1980 } else {
1981 /* Hard way we must grow the mbuf */
1982 struct mbuf *tmp;
1983 MGET(tmp, M_DONTWAIT, MT_DATA);
1984 if (tmp == NULL) {
1985 /* Out of space GAK! we are in big trouble. */
1986 return (ENOSPC);
1987 }
1988 /* setup and insert in middle */
1989 tmp->m_next = m->m_next;
1990 tmp->m_len = padlen;
1991 m->m_next = tmp;
1992 dp = mtod(tmp, u_int8_t *);
1993 }
1994 /* zero out the pad */
1995 for (i= 0; i < padlen; i++) {
1996 *dp = 0;
1997 dp++;
1998 }
1999 return (0);
2000 }
2001
2002 int
2003 sctp_pad_lastmbuf(struct mbuf *m, int padval)
2004 {
2005 /* find the last mbuf in chain and pad it */
2006 struct mbuf *m_at;
2007 m_at = m;
2008 while (m_at) {
2009 if (m_at->m_next == NULL) {
2010 return (sctp_add_pad_tombuf(m_at, padval));
2011 }
2012 m_at = m_at->m_next;
2013 }
2014 return (EFAULT);
2015 }
2016
/*
 * Queue an SCTP_ASSOC_CHANGE notification (COMM_UP/COMM_LOST/
 * SHUTDOWN_COMP/...) on the association's socket receive buffer so the
 * application sees it as an ancillary message.  On teardown events any
 * pending data is flushed to the socket first; for TCP-model/connected
 * sockets a COMM_LOST additionally surfaces as ECONNRESET.
 * Called with the TCB lock held; temporarily drops it to take the INP
 * write lock in the correct order.
 */
static void
sctp_notify_assoc_change(u_int32_t event, struct sctp_tcb *stcb,
    u_int32_t error)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	const struct sockaddr *to;
	struct sockaddr_in6 sin6, lsa6;

#ifdef SCTP_DEBUG
	/*
	 * NOTE(review): unlike the rest of the file this debug printf
	 * is not gated on sctp_debug_on -- confirm that is intended.
	 */
	printf("notify: %d\n", event);
#endif
	/*
	 * First if we are going down dump everything we
	 * can to the socket rcv queue.
	 */
	if ((event == SCTP_SHUTDOWN_COMP) || (event == SCTP_COMM_LOST)) {
		sctp_deliver_data(stcb, &stcb->asoc, NULL, 0);
	}

	/*
	 * For TCP model AND UDP connected sockets we will send
	 * an error up when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (event == SCTP_COMM_LOST)) {
		stcb->sctp_socket->so_error = ECONNRESET;
		/* Wake ANY sleepers */
		sowwakeup(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
	}
#if 0
	if ((event == SCTP_COMM_UP) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		soisconnected(stcb->sctp_socket);
	}
#endif
	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	/* build the notification mbuf */
	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_assoc_change);
	m_reset_rcvif(m_notify);
	m_notify->m_len = sizeof(struct sctp_assoc_change);
	m_notify->m_next = NULL;

	/* append to socket */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		/* present the v4 peer address as a v4-mapped v6 one */
		const struct sockaddr_in *sin;

		sin = (const struct sockaddr_in *)to;
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);
	/*
	 * We need to always notify comm changes.
	 * if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
	 *	sctp_m_freem(m_notify);
	 *	return;
	 * }
	 */
	/* lock-order dance: INP write lock must be taken before TCB lock */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
	    to, m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* account the notification against the receive window */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	/* Wake up any sleeper */
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2126
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification (address 'sa' entered
 * 'state', e.g. reachable/unreachable) on the association's socket
 * receive buffer, if the application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 * Unlike assoc-change notifications this one is dropped when the
 * receive buffer lacks space.  Called with the TCB lock held;
 * temporarily drops it to take the INP write lock in order.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    const struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	const struct sockaddr *to;
	struct sockaddr_in6 sin6, lsa6;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		return;
	m_notify->m_len = 0;

	/* struct sctp_paddr_change needs cluster-sized storage */
	MCLGET(m_notify, M_DONTWAIT);
	if ((m_notify->m_flags & M_EXT) != M_EXT) {
		sctp_m_freem(m_notify);
		return;
	}

	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* copy the affected address, sized by family */
	if (sa->sa_family == AF_INET) {
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
	} else {
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_paddr_change);
	m_reset_rcvif(m_notify);
	m_notify->m_len = sizeof(struct sctp_paddr_change);
	m_notify->m_next = NULL;

	/* the "from" address is the association's primary destination */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		/* present the v4 address as a v4-mapped v6 one */
		const struct sockaddr_in *sin;

		sin = (const struct sockaddr_in *)to;
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);

	/* best-effort: drop the notification if the rcv buffer is full */
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	/* lock-order dance: INP write lock must be taken before TCB lock */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* account the notification against the receive window */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	   ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2209
2210
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk that could not be
 * delivered (or was never sent).  The chunk's user data is *stolen*
 * (chk->data is chained onto the notification and set to NULL) so the
 * user gets the failed payload back with the event.
 *
 * stcb   association the chunk belonged to
 * error  SCTP_NOTIFY_DATAGRAM_UNSENT or SCTP_NOTIFY_DATAGRAM_SENT
 * chk    the failed transmit chunk
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, u_int32_t error,
    struct sctp_tmit_chunk *chk)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;
	int length;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
		/* event not enabled */
		return;

	/* total event length = header + the original payload */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the (possibly NULL) user data behind the header */
	m_notify->m_next = chk->data;
	if (m_notify->m_next == NULL)
		m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	else {
		struct mbuf *m;
		m_notify->m_flags |= M_NOTIFICATION;
		m = m_notify;
		/* M_EOR belongs on the last mbuf of the record */
		while (m->m_next != NULL)
			m = m->m_next;
		m->m_flags |= M_EOR;
	}
	m_notify->m_pkthdr.len = length;
	m_reset_rcvif(m_notify);
	m_notify->m_len = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);

	/*
	 * NOTE(review): from here on, failure frees the whole chain --
	 * including the user data just stolen from chk.
	 */
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}

	/* append to socket (lock ordering: TCB out, INP wlock, TCB in) */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* rwnd control accounting, as in the other notify routines */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2304
/*
 * Queue an SCTP_ADAPTION_INDICATION notification on the socket.
 *
 * stcb   association the event belongs to
 * error  carries the peer's adaption-layer indication value,
 *        not an error code (see the caller in sctp_ulp_notify)
 */
static void
sctp_notify_adaption_layer(struct sctp_tcb *stcb,
    u_int32_t error)
{
	struct mbuf *m_notify;
	struct sctp_adaption_event *sai;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	/* event fits in a plain mbuf; build it in place */
	sai = mtod(m_notify, struct sctp_adaption_event *);
	sai->sai_type = SCTP_ADAPTION_INDICATION;
	sai->sai_flags = 0;
	sai->sai_length = sizeof(struct sctp_adaption_event);
	sai->sai_adaption_ind = error;
	sai->sai_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_adaption_event);
	m_reset_rcvif(m_notify);
	m_notify->m_len = sizeof(struct sctp_adaption_event);
	m_notify->m_next = NULL;

	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    (to->sa_family == AF_INET)) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		/* v6-only socket: present the v4 peer as a mapped address */
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket (lock ordering: TCB out, INP wlock, TCB in) */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* rwnd control accounting, as in the other notify routines */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2374
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification on the socket.
 *
 * stcb   association the event belongs to
 * error  partial-delivery indication value reported to the user
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
    u_int32_t error)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	/* event fits in a plain mbuf; build it in place */
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_pdapi_event);
	m_reset_rcvif(m_notify);
	m_notify->m_len = sizeof(struct sctp_pdapi_event);
	m_notify->m_next = NULL;

	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    (to->sa_family == AF_INET)) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		/* v6-only socket: present the v4 peer as a mapped address */
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket (lock ordering: TCB out, INP wlock, TCB in) */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* rwnd control accounting, as in the other notify routines */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2444
/*
 * Handle the user-visible side of a completed SHUTDOWN: mark
 * TCP-model (or TCP-pool) sockets unreadable/unwritable, and queue an
 * SCTP_SHUTDOWN_EVENT notification if the user enabled it.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;

	/*
	 * For TCP model AND UDP connected sockets we will send
	 * an error up when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
		socantrcvmore(stcb->sctp_socket);
		socantsendmore(stcb->sctp_socket);
	}

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	/* event fits in a plain mbuf; build it in place */
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_shutdown_event);
	m_reset_rcvif(m_notify);
	m_notify->m_len = sizeof(struct sctp_shutdown_event);
	m_notify->m_next = NULL;

	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		/* v6-only socket: present the v4 peer as a mapped address */
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket (lock ordering: TCB out, INP wlock, TCB in) */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* rwnd control accounting, as in the other notify routines */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2523
2524 static void
2525 sctp_notify_stream_reset(struct sctp_tcb *stcb,
2526 int number_entries, uint16_t *list, int flag)
2527 {
2528 struct mbuf *m_notify;
2529 struct sctp_stream_reset_event *strreset;
2530 struct sockaddr_in6 sin6, lsa6;
2531 const struct sockaddr *to;
2532 int len;
2533
2534 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2535 /* event not enabled */
2536 return;
2537
2538 MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2539 if (m_notify == NULL)
2540 /* no space left */
2541 return;
2542 m_notify->m_len = 0;
2543 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
2544 if (len > M_TRAILINGSPACE(m_notify)) {
2545 MCLGET(m_notify, M_WAIT);
2546 }
2547 if (m_notify == NULL)
2548 /* no clusters */
2549 return;
2550
2551 if (len > M_TRAILINGSPACE(m_notify)) {
2552 /* never enough room */
2553 m_freem(m_notify);
2554 return;
2555 }
2556 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
2557 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
2558 if (number_entries == 0) {
2559 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
2560 } else {
2561 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
2562 }
2563 strreset->strreset_length = len;
2564 strreset->strreset_assoc_id = sctp_get_associd(stcb);
2565 if (number_entries) {
2566 int i;
2567 for (i=0; i<number_entries; i++) {
2568 strreset->strreset_list[i] = list[i];
2569 }
2570 }
2571 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2572 m_notify->m_pkthdr.len = len;
2573 m_reset_rcvif(m_notify);
2574 m_notify->m_len = len;
2575 m_notify->m_next = NULL;
2576 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2577 /* no space */
2578 sctp_m_freem(m_notify);
2579 return;
2580 }
2581 to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2582 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2583 to->sa_family == AF_INET) {
2584 const struct sockaddr_in *sin;
2585
2586 sin = satocsin(to);
2587 in6_sin_2_v4mapsin6(sin, &sin6);
2588 to = (struct sockaddr *)&sin6;
2589 }
2590 /* check and strip embedded scope junk */
2591 to = (const struct sockaddr *) sctp_recover_scope((const struct sockaddr_in6 *)to,
2592 &lsa6);
2593 /* append to socket */
2594 SCTP_TCB_UNLOCK(stcb);
2595 SCTP_INP_WLOCK(stcb->sctp_ep);
2596 SCTP_TCB_LOCK(stcb);
2597 if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2598 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2599 /* not enough room */
2600 sctp_m_freem(m_notify);
2601 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2602 return;
2603 }
2604 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2605 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2606 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2607 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2608 }
2609 } else {
2610 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2611 }
2612 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2613 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2614 }
2615
2616
2617 void
2618 sctp_ulp_notify(u_int32_t notification, struct sctp_tcb *stcb,
2619 u_int32_t error, void *data)
2620 {
2621 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2622 /* No notifications up when we are in a no socket state */
2623 return;
2624 }
2625 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2626 /* Can't send up to a closed socket any notifications */
2627 return;
2628 }
2629 switch (notification) {
2630 case SCTP_NOTIFY_ASSOC_UP:
2631 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error);
2632 break;
2633 case SCTP_NOTIFY_ASSOC_DOWN:
2634 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error);
2635 break;
2636 case SCTP_NOTIFY_INTERFACE_DOWN:
2637 {
2638 struct sctp_nets *net;
2639 net = (struct sctp_nets *)data;
2640 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
2641 rtcache_getdst(&net->ro), error);
2642 break;
2643 }
2644 case SCTP_NOTIFY_INTERFACE_UP:
2645 {
2646 struct sctp_nets *net;
2647 net = (struct sctp_nets *)data;
2648 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
2649 rtcache_getdst(&net->ro), error);
2650 break;
2651 }
2652 case SCTP_NOTIFY_INTERFACE_CONFIRMED:
2653 {
2654 struct sctp_nets *net;
2655 net = (struct sctp_nets *)data;
2656 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
2657 rtcache_getdst(&net->ro), error);
2658 break;
2659 }
2660 case SCTP_NOTIFY_DG_FAIL:
2661 sctp_notify_send_failed(stcb, error,
2662 (struct sctp_tmit_chunk *)data);
2663 break;
2664 case SCTP_NOTIFY_ADAPTION_INDICATION:
2665 /* Here the error is the adaption indication */
2666 sctp_notify_adaption_layer(stcb, error);
2667 break;
2668 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
2669 sctp_notify_partial_delivery_indication(stcb, error);
2670 break;
2671 case SCTP_NOTIFY_STRDATA_ERR:
2672 break;
2673 case SCTP_NOTIFY_ASSOC_ABORTED:
2674 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error);
2675 break;
2676 case SCTP_NOTIFY_PEER_OPENED_STREAM:
2677 break;
2678 case SCTP_NOTIFY_STREAM_OPENED_OK:
2679 break;
2680 case SCTP_NOTIFY_ASSOC_RESTART:
2681 sctp_notify_assoc_change(SCTP_RESTART, stcb, error);
2682 break;
2683 case SCTP_NOTIFY_HB_RESP:
2684 break;
2685 case SCTP_NOTIFY_STR_RESET_SEND:
2686 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_OUTBOUND_STR);
2687 break;
2688 case SCTP_NOTIFY_STR_RESET_RECV:
2689 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_INBOUND_STR);
2690 break;
2691 case SCTP_NOTIFY_ASCONF_ADD_IP:
2692 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
2693 error);
2694 break;
2695 case SCTP_NOTIFY_ASCONF_DELETE_IP:
2696 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
2697 error);
2698 break;
2699 case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
2700 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
2701 error);
2702 break;
2703 case SCTP_NOTIFY_ASCONF_SUCCESS:
2704 break;
2705 case SCTP_NOTIFY_ASCONF_FAILED:
2706 break;
2707 case SCTP_NOTIFY_PEER_SHUTDOWN:
2708 sctp_notify_shutdown_event(stcb);
2709 break;
2710 default:
2711 #ifdef SCTP_DEBUG
2712 if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
2713 printf("NOTIFY: unknown notification %xh (%u)\n",
2714 notification, notification);
2715 }
2716 #endif /* SCTP_DEBUG */
2717 break;
2718 } /* end switch */
2719 }
2720
2721 void
2722 sctp_report_all_outbound(struct sctp_tcb *stcb)
2723 {
2724 struct sctp_association *asoc;
2725 struct sctp_stream_out *outs;
2726 struct sctp_tmit_chunk *chk;
2727
2728 asoc = &stcb->asoc;
2729
2730 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2731 return;
2732 }
2733 /* now through all the gunk freeing chunks */
2734 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
2735 /* now clean up any chunks here */
2736 chk = TAILQ_FIRST(&outs->outqueue);
2737 while (chk) {
2738 stcb->asoc.stream_queue_cnt--;
2739 TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
2740 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2741 SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2742 if (chk->data) {
2743 sctp_m_freem(chk->data);
2744 chk->data = NULL;
2745 }
2746 if (chk->whoTo)
2747 sctp_free_remote_addr(chk->whoTo);
2748 chk->whoTo = NULL;
2749 chk->asoc = NULL;
2750 /* Free the chunk */
2751 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2752 sctppcbinfo.ipi_count_chunk--;
2753 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2754 panic("Chunk count is negative");
2755 }
2756 sctppcbinfo.ipi_gencnt_chunk++;
2757 chk = TAILQ_FIRST(&outs->outqueue);
2758 }
2759 }
2760 /* pending send queue SHOULD be empty */
2761 if (!TAILQ_EMPTY(&asoc->send_queue)) {
2762 chk = TAILQ_FIRST(&asoc->send_queue);
2763 while (chk) {
2764 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
2765 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
2766 if (chk->data) {
2767 sctp_m_freem(chk->data);
2768 chk->data = NULL;
2769 }
2770 if (chk->whoTo)
2771 sctp_free_remote_addr(chk->whoTo);
2772 chk->whoTo = NULL;
2773 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2774 sctppcbinfo.ipi_count_chunk--;
2775 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2776 panic("Chunk count is negative");
2777 }
2778 sctppcbinfo.ipi_gencnt_chunk++;
2779 chk = TAILQ_FIRST(&asoc->send_queue);
2780 }
2781 }
2782 /* sent queue SHOULD be empty */
2783 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2784 chk = TAILQ_FIRST(&asoc->sent_queue);
2785 while (chk) {
2786 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
2787 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
2788 SCTP_NOTIFY_DATAGRAM_SENT, chk);
2789 if (chk->data) {
2790 sctp_m_freem(chk->data);
2791 chk->data = NULL;
2792 }
2793 if (chk->whoTo)
2794 sctp_free_remote_addr(chk->whoTo);
2795 chk->whoTo = NULL;
2796 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
2797 sctppcbinfo.ipi_count_chunk--;
2798 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
2799 panic("Chunk count is negative");
2800 }
2801 sctppcbinfo.ipi_gencnt_chunk++;
2802 chk = TAILQ_FIRST(&asoc->sent_queue);
2803 }
2804 }
2805 }
2806
2807 void
2808 sctp_abort_notification(struct sctp_tcb *stcb, int error)
2809 {
2810
2811 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2812 return;
2813 }
2814 /* Tell them we lost the asoc */
2815 sctp_report_all_outbound(stcb);
2816 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
2817 }
2818
2819 void
2820 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2821 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
2822 {
2823 u_int32_t vtag;
2824
2825 vtag = 0;
2826 if (stcb != NULL) {
2827 /* We have a TCB to abort, send notification too */
2828 vtag = stcb->asoc.peer_vtag;
2829 sctp_abort_notification(stcb, 0);
2830 }
2831 sctp_send_abort(m, iphlen, sh, vtag, op_err);
2832 if (stcb != NULL) {
2833 /* Ok, now lets free it */
2834 sctp_free_assoc(inp, stcb);
2835 } else {
2836 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2837 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2838 sctp_inpcb_free(inp, 1);
2839 }
2840 }
2841 }
2842 }
2843
2844 void
2845 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2846 int error, struct mbuf *op_err)
2847 {
2848
2849 if (stcb == NULL) {
2850 /* Got to have a TCB */
2851 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2852 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2853 sctp_inpcb_free(inp, 1);
2854 }
2855 }
2856 return;
2857 }
2858 /* notify the ulp */
2859 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
2860 sctp_abort_notification(stcb, error);
2861 /* notify the peer */
2862 sctp_send_abort_tcb(stcb, op_err);
2863 /* now free the asoc */
2864 sctp_free_assoc(inp, stcb);
2865 }
2866
2867 void
2868 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
2869 struct sctp_inpcb *inp, struct mbuf *op_err)
2870 {
2871 struct sctp_chunkhdr *ch, chunk_buf;
2872 unsigned int chk_length;
2873
2874 /* Generate a TO address for future reference */
2875 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
2876 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2877 sctp_inpcb_free(inp, 1);
2878 }
2879 }
2880 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2881 sizeof(*ch), (u_int8_t *)&chunk_buf);
2882 while (ch != NULL) {
2883 chk_length = ntohs(ch->chunk_length);
2884 if (chk_length < sizeof(*ch)) {
2885 /* break to abort land */
2886 break;
2887 }
2888 switch (ch->chunk_type) {
2889 case SCTP_PACKET_DROPPED:
2890 /* we don't respond to pkt-dropped */
2891 return;
2892 case SCTP_ABORT_ASSOCIATION:
2893 /* we don't respond with an ABORT to an ABORT */
2894 return;
2895 case SCTP_SHUTDOWN_COMPLETE:
2896 /*
2897 * we ignore it since we are not waiting for it
2898 * and peer is gone
2899 */
2900 return;
2901 case SCTP_SHUTDOWN_ACK:
2902 sctp_send_shutdown_complete2(m, iphlen, sh);
2903 return;
2904 default:
2905 break;
2906 }
2907 offset += SCTP_SIZE32(chk_length);
2908 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2909 sizeof(*ch), (u_int8_t *)&chunk_buf);
2910 }
2911 sctp_send_abort(m, iphlen, sh, 0, op_err);
2912 }
2913
2914 /*
2915 * check the inbound datagram to make sure there is not an abort
2916 * inside it, if there is return 1, else return 0.
2917 */
2918 int
2919 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, int *vtagfill)
2920 {
2921 struct sctp_chunkhdr *ch;
2922 struct sctp_init_chunk *init_chk, chunk_buf;
2923 int offset;
2924 unsigned int chk_length;
2925
2926 offset = iphlen + sizeof(struct sctphdr);
2927 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
2928 (u_int8_t *)&chunk_buf);
2929 while (ch != NULL) {
2930 chk_length = ntohs(ch->chunk_length);
2931 if (chk_length < sizeof(*ch)) {
2932 /* packet is probably corrupt */
2933 break;
2934 }
2935 /* we seem to be ok, is it an abort? */
2936 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
2937 /* yep, tell them */
2938 return (1);
2939 }
2940 if (ch->chunk_type == SCTP_INITIATION) {
2941 /* need to update the Vtag */
2942 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
2943 offset, sizeof(*init_chk), (u_int8_t *)&chunk_buf);
2944 if (init_chk != NULL) {
2945 *vtagfill = ntohl(init_chk->init.initiate_tag);
2946 }
2947 }
2948 /* Nope, move to the next chunk */
2949 offset += SCTP_SIZE32(chk_length);
2950 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2951 sizeof(*ch), (u_int8_t *)&chunk_buf);
2952 }
2953 return (0);
2954 }
2955
2956 /*
2957 * currently (2/02), ifa_addr embeds scope_id's and don't
2958 * have sin6_scope_id set (i.e. it's 0)
2959 * so, create this function to compare link local scopes
2960 */
2961 uint32_t
2962 sctp_is_same_scope(const struct sockaddr_in6 *addr1, const struct sockaddr_in6 *addr2)
2963 {
2964 struct sockaddr_in6 a, b;
2965
2966 /* save copies */
2967 a = *addr1;
2968 b = *addr2;
2969
2970 if (a.sin6_scope_id == 0)
2971 if (sa6_recoverscope(&a)) {
2972 /* can't get scope, so can't match */
2973 return (0);
2974 }
2975 if (b.sin6_scope_id == 0)
2976 if (sa6_recoverscope(&b)) {
2977 /* can't get scope, so can't match */
2978 return (0);
2979 }
2980 if (a.sin6_scope_id != b.sin6_scope_id)
2981 return (0);
2982
2983 return (1);
2984 }
2985
2986 /*
2987 * returns a sockaddr_in6 with embedded scope recovered and removed
2988 */
2989 const struct sockaddr_in6 *
2990 sctp_recover_scope(const struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
2991 {
2992 const struct sockaddr_in6 *newaddr;
2993
2994 newaddr = addr;
2995 /* check and strip embedded scope junk */
2996 if (addr->sin6_family == AF_INET6) {
2997 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
2998 if (addr->sin6_scope_id == 0) {
2999 *store = *addr;
3000 if (sa6_recoverscope(store) == 0) {
3001 /* use the recovered scope */
3002 newaddr = store;
3003 }
3004 /* else, return the original "to" addr */
3005 }
3006 }
3007 }
3008 return (newaddr);
3009 }
3010
3011 /*
3012 * are the two addresses the same? currently a "scopeless" check
3013 * returns: 1 if same, 0 if not
3014 */
3015 int
3016 sctp_cmpaddr(const struct sockaddr *sa1, const struct sockaddr *sa2)
3017 {
3018
3019 /* must be valid */
3020 if (sa1 == NULL || sa2 == NULL)
3021 return (0);
3022
3023 /* must be the same family */
3024 if (sa1->sa_family != sa2->sa_family)
3025 return (0);
3026
3027 if (sa1->sa_family == AF_INET6) {
3028 /* IPv6 addresses */
3029 const struct sockaddr_in6 *sin6_1, *sin6_2;
3030
3031 sin6_1 = (const struct sockaddr_in6 *)sa1;
3032 sin6_2 = (const struct sockaddr_in6 *)sa2;
3033 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
3034 &sin6_2->sin6_addr));
3035 } else if (sa1->sa_family == AF_INET) {
3036 /* IPv4 addresses */
3037 const struct sockaddr_in *sin_1, *sin_2;
3038
3039 sin_1 = (const struct sockaddr_in *)sa1;
3040 sin_2 = (const struct sockaddr_in *)sa2;
3041 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3042 } else {
3043 /* we don't do these... */
3044 return (0);
3045 }
3046 }
3047
3048 void
3049 sctp_print_address(const struct sockaddr *sa)
3050 {
3051 char ip6buf[INET6_ADDRSTRLEN];
3052
3053 if (sa->sa_family == AF_INET6) {
3054 const struct sockaddr_in6 *sin6;
3055 sin6 = (const struct sockaddr_in6 *)sa;
3056 printf("IPv6 address: %s:%d scope:%u\n",
3057 IN6_PRINT(ip6buf, &sin6->sin6_addr), ntohs(sin6->sin6_port),
3058 sin6->sin6_scope_id);
3059 } else if (sa->sa_family == AF_INET) {
3060 const struct sockaddr_in *sin;
3061 sin = (const struct sockaddr_in *)sa;
3062 printf("IPv4 address: %s:%d\n", inet_ntoa(sin->sin_addr),
3063 ntohs(sin->sin_port));
3064 } else {
3065 printf("?\n");
3066 }
3067 }
3068
3069 void
3070 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
3071 {
3072 if (iph->ip_v == IPVERSION) {
3073 struct sockaddr_in lsa, fsa;
3074
3075 memset(&lsa, 0, sizeof(lsa));
3076 lsa.sin_len = sizeof(lsa);
3077 lsa.sin_family = AF_INET;
3078 lsa.sin_addr = iph->ip_src;
3079 lsa.sin_port = sh->src_port;
3080 memset(&fsa, 0, sizeof(fsa));
3081 fsa.sin_len = sizeof(fsa);
3082 fsa.sin_family = AF_INET;
3083 fsa.sin_addr = iph->ip_dst;
3084 fsa.sin_port = sh->dest_port;
3085 printf("src: ");
3086 sctp_print_address((struct sockaddr *)&lsa);
3087 printf("dest: ");
3088 sctp_print_address((struct sockaddr *)&fsa);
3089 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3090 struct ip6_hdr *ip6;
3091 struct sockaddr_in6 lsa6, fsa6;
3092
3093 ip6 = (struct ip6_hdr *)iph;
3094 memset(&lsa6, 0, sizeof(lsa6));
3095 lsa6.sin6_len = sizeof(lsa6);
3096 lsa6.sin6_family = AF_INET6;
3097 lsa6.sin6_addr = ip6->ip6_src;
3098 lsa6.sin6_port = sh->src_port;
3099 memset(&fsa6, 0, sizeof(fsa6));
3100 fsa6.sin6_len = sizeof(fsa6);
3101 fsa6.sin6_family = AF_INET6;
3102 fsa6.sin6_addr = ip6->ip6_dst;
3103 fsa6.sin6_port = sh->dest_port;
3104 printf("src: ");
3105 sctp_print_address((struct sockaddr *)&lsa6);
3106 printf("dest: ");
3107 sctp_print_address((struct sockaddr *)&fsa6);
3108 }
3109 }
3110
#if defined(__FreeBSD__) || defined(__APPLE__)

/* cloned from uipc_socket.c */

/*
 * Link record m0 onto the end of socket buffer sb's record chain,
 * updating sb_lastrecord (and sb_mb when the buffer was empty).
 */
#define SCTP_SBLINKRECORD(sb, m0) do { \
	if ((sb)->sb_lastrecord != NULL) \
		(sb)->sb_lastrecord->m_nextpkt = (m0); \
	else \
		(sb)->sb_mb = (m0); \
	(sb)->sb_lastrecord = (m0); \
} while (/*CONSTCOND*/0)
#endif
3123
3124
3125 int
3126 sbappendaddr_nocheck(struct sockbuf *sb, const struct sockaddr *asa,
3127 struct mbuf *m0, struct mbuf *control,
3128 u_int32_t tag, struct sctp_inpcb *inp)
3129 {
3130 #ifdef __NetBSD__
3131 struct mbuf *m, *n;
3132
3133 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3134 panic("sbappendaddr_nocheck");
3135
3136 m0->m_pkthdr.csum_data = (int)tag;
3137
3138 for (n = control; n; n = n->m_next) {
3139 if (n->m_next == 0) /* keep pointer to last control buf */
3140 break;
3141 }
3142 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3143 ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3144 MGETHDR(m, M_DONTWAIT, MT_SONAME);
3145 if (m == 0)
3146 return (0);
3147
3148 m->m_len = asa->sa_len;
3149 memcpy(mtod(m, void *), (const void *)asa, asa->sa_len);
3150 } else {
3151 m = NULL;
3152 }
3153 if (n) {
3154 n->m_next = m0; /* concatenate data to control */
3155 }else {
3156 control = m0;
3157 }
3158 if (m)
3159 m->m_next = control;
3160 else
3161 m = control;
3162 m->m_pkthdr.csum_data = tag;
3163
3164 for (n = m; n; n = n->m_next)
3165 sballoc(sb, n);
3166 if ((n = sb->sb_mb) != NULL) {
3167 if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3168 inp->sb_last_mpkt = NULL;
3169 }
3170 if (inp->sb_last_mpkt)
3171 inp->sb_last_mpkt->m_nextpkt = m;
3172 else {
3173 while (n->m_nextpkt) {
3174 n = n->m_nextpkt;
3175 }
3176 n->m_nextpkt = m;
3177 }
3178 inp->sb_last_mpkt = m;
3179 } else {
3180 inp->sb_last_mpkt = sb->sb_mb = m;
3181 inp->sctp_vtag_first = tag;
3182 }
3183 return (1);
3184 #endif
3185 #if defined(__FreeBSD__) || defined(__APPLE__)
3186 struct mbuf *m, *n, *nlast;
3187 int cnt=0;
3188
3189 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3190 panic("sbappendaddr_nocheck");
3191
3192 for (n = control; n; n = n->m_next) {
3193 if (n->m_next == 0) /* get pointer to last control buf */
3194 break;
3195 }
3196 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3197 ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3198 if (asa->sa_len > MHLEN)
3199 return (0);
3200 try_again:
3201 MGETHDR(m, M_DONTWAIT, MT_SONAME);
3202 if (m == 0)
3203 return (0);
3204 m->m_len = 0;
3205 /* safety */
3206 if (m == m0) {
3207 printf("Duplicate mbuf allocated %p in and mget returned %p?\n",
3208 m0, m);
3209 if (cnt) {
3210 panic("more than once");
3211 }
3212 cnt++;
3213 goto try_again;
3214 }
3215 m->m_len = asa->sa_len;
3216 bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3217 }
3218 else {
3219 m = NULL;
3220 }
3221 if (n)
3222 n->m_next = m0; /* concatenate data to control */
3223 else
3224 control = m0;
3225 if (m)
3226 m->m_next = control;
3227 else
3228 m = control;
3229 m->m_pkthdr.csum_data = (int)tag;
3230
3231 for (n = m; n; n = n->m_next)
3232 sballoc(sb, n);
3233 nlast = n;
3234 if (sb->sb_mb == NULL) {
3235 inp->sctp_vtag_first = tag;
3236 }
3237
3238 #ifdef __FREEBSD__
3239 if (sb->sb_mb == NULL)
3240 inp->sctp_vtag_first = tag;
3241 SCTP_SBLINKRECORD(sb, m);
3242 sb->sb_mbtail = nlast;
3243 #else
3244 if ((n = sb->sb_mb) != NULL) {
3245 if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3246 inp->sb_last_mpkt = NULL;
3247 }
3248 if (inp->sb_last_mpkt)
3249 inp->sb_last_mpkt->m_nextpkt = m;
3250 else {
3251 while (n->m_nextpkt) {
3252 n = n->m_nextpkt;
3253 }
3254 n->m_nextpkt = m;
3255 }
3256 inp->sb_last_mpkt = m;
3257 } else {
3258 inp->sb_last_mpkt = sb->sb_mb = m;
3259 inp->sctp_vtag_first = tag;
3260 }
3261 #endif
3262 return (1);
3263 #endif
3264 #ifdef __OpenBSD__
3265 struct mbuf *m, *n;
3266
3267 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3268 panic("sbappendaddr_nocheck");
3269 m0->m_pkthdr.csum = (int)tag;
3270 for (n = control; n; n = n->m_next) {
3271 if (n->m_next == 0) /* keep pointer to last control buf */
3272 break;
3273 }
3274 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3275 ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3276 if (asa->sa_len > MHLEN)
3277 return (0);
3278 MGETHDR(m, M_DONTWAIT, MT_SONAME);
3279 if (m == 0)
3280 return (0);
3281 m->m_len = asa->sa_len;
3282 bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3283 } else {
3284 m = NULL;
3285 }
3286 if (n)
3287 n->m_next = m0; /* concatenate data to control */
3288 else
3289 control = m0;
3290
3291 m->m_pkthdr.csum = (int)tag;
3292 m->m_next = control;
3293 for (n = m; n; n = n->m_next)
3294 sballoc(sb, n);
3295 if ((n = sb->sb_mb) != NULL) {
3296 if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3297 inp->sb_last_mpkt = NULL;
3298 }
3299 if (inp->sb_last_mpkt)
3300 inp->sb_last_mpkt->m_nextpkt = m;
3301 else {
3302 while (n->m_nextpkt) {
3303 n = n->m_nextpkt;
3304 }
3305 n->m_nextpkt = m;
3306 }
3307 inp->sb_last_mpkt = m;
3308 } else {
3309 inp->sb_last_mpkt = sb->sb_mb = m;
3310 inp->sctp_vtag_first = tag;
3311 }
3312 return (1);
3313 #endif
3314 }
3315
3316 /*************HOLD THIS COMMENT FOR PATCH FILE OF
3317 *************ALTERNATE ROUTING CODE
3318 */
3319
3320 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
3321 *************ALTERNATE ROUTING CODE
3322 */
3323
3324 struct mbuf *
3325 sctp_generate_invmanparam(int err)
3326 {
3327 /* Return a MBUF with a invalid mandatory parameter */
3328 struct mbuf *m;
3329
3330 MGET(m, M_DONTWAIT, MT_DATA);
3331 if (m) {
3332 struct sctp_paramhdr *ph;
3333 m->m_len = sizeof(struct sctp_paramhdr);
3334 ph = mtod(m, struct sctp_paramhdr *);
3335 ph->param_length = htons(sizeof(struct sctp_paramhdr));
3336 ph->param_type = htons(err);
3337 }
3338 return (m);
3339 }
3340
3341 static int
3342 sctp_should_be_moved(struct mbuf *this, struct sctp_association *asoc)
3343 {
3344 struct mbuf *m;
3345 /*
3346 * given a mbuf chain, look through it finding
3347 * the M_PKTHDR and return 1 if it belongs to
3348 * the association given. We tell this by
3349 * a kludge where we stuff the my_vtag of the asoc
3350 * into the m->m_pkthdr.csum_data/csum field.
3351 */
3352 m = this;
3353 while (m) {
3354 if (m->m_flags & M_PKTHDR) {
3355 /* check it */
3356 #if defined(__OpenBSD__)
3357 if ((u_int32_t)m->m_pkthdr.csum == asoc->my_vtag)
3358 #else
3359 if ((u_int32_t)m->m_pkthdr.csum_data == asoc->my_vtag)
3360 #endif
3361 {
3362 /* Yep */
3363 return (1);
3364 }
3365 }
3366 m = m->m_next;
3367 }
3368 return (0);
3369 }
3370
3371 u_int32_t
3372 sctp_get_first_vtag_from_sb(struct socket *so)
3373 {
3374 struct mbuf *this, *at;
3375 u_int32_t retval;
3376
3377 retval = 0;
3378 if (so->so_rcv.sb_mb) {
3379 /* grubbing time */
3380 this = so->so_rcv.sb_mb;
3381 while (this) {
3382 at = this;
3383 /* get to the m_pkthdr */
3384 while (at) {
3385 if (at->m_flags & M_PKTHDR)
3386 break;
3387 else {
3388 at = at->m_next;
3389 }
3390 }
3391 /* now do we have a m_pkthdr */
3392 if (at && (at->m_flags & M_PKTHDR)) {
3393 /* check it */
3394 #if defined(__OpenBSD__)
3395 if ((u_int32_t)at->m_pkthdr.csum != 0)
3396 #else
3397 if ((u_int32_t)at->m_pkthdr.csum_data != 0)
3398 #endif
3399 {
3400 /* its the one */
3401 #if defined(__OpenBSD__)
3402 retval = (u_int32_t)at->m_pkthdr.csum;
3403 #else
3404 retval =
3405 (u_int32_t)at->m_pkthdr.csum_data;
3406 #endif
3407 break;
3408 }
3409 }
3410 this = this->m_nextpkt;
3411 }
3412
3413 }
3414 return (retval);
3415
3416 }
/*
 * Move every record in 'old' socket's receive buffer that belongs to
 * the association 'stcb' over to 'new' socket's receive buffer,
 * preserving relative order.  Records are matched via the vtag stashed
 * in their M_PKTHDR (see sctp_should_be_moved()); byte/mbuf accounting
 * is transferred between the two buffers as chains move.
 */
void
sctp_grub_through_socket_buffer(struct sctp_inpcb *inp, struct socket *old,
    struct socket *new, struct sctp_tcb *stcb)
{
	struct mbuf **put, **take, *next, *this;
	struct sockbuf *old_sb, *new_sb;
	struct sctp_association *asoc;
	int moved_top = 0;

	asoc = &stcb->asoc;
	old_sb = &old->so_rcv;
	new_sb = &new->so_rcv;
	if (old_sb->sb_mb == NULL) {
		/* Nothing to move */
		return;
	}

	if (inp->sctp_vtag_first == asoc->my_vtag) {
		/* First one must be moved */
		struct mbuf *mm;
		for (mm = old_sb->sb_mb; mm; mm = mm->m_next) {
			/*
			 * Go down the chain and fix
			 * the space allocation of the
			 * two sockets.
			 */
			sbfree(old_sb, mm);
			sballoc(new_sb, mm);
		}
		/* unlink the head record from old and make it new's head */
		new_sb->sb_mb = old_sb->sb_mb;
		old_sb->sb_mb = new_sb->sb_mb->m_nextpkt;
		new_sb->sb_mb->m_nextpkt = NULL;
		put = &new_sb->sb_mb->m_nextpkt;
		moved_top = 1;
	} else {
		put = &new_sb->sb_mb;
	}

	/* 'take' tracks the link in old to splice out of; 'put' the tail of new */
	take = &old_sb->sb_mb;
	next = old_sb->sb_mb;
	while (next) {
		this = next;
		/* position for next one */
		next = this->m_nextpkt;
		/* check the tag of this packet */
		if (sctp_should_be_moved(this, asoc)) {
			/* yes this needs to be moved */
			struct mbuf *mm;
			*take = this->m_nextpkt;	/* splice out of old */
			this->m_nextpkt = NULL;
			*put = this;			/* append to new */
			for (mm = this; mm; mm = mm->m_next) {
				/*
				 * Go down the chain and fix
				 * the space allocation of the
				 * two sockets.
				 */
				sbfree(old_sb, mm);
				sballoc(new_sb, mm);
			}
			put = &this->m_nextpkt;

		} else {
			/* no, advance our take point. */
			take = &this->m_nextpkt;
		}
	}
	if (moved_top) {
		/*
		 * Ok so now we must re-position vtag_first to
		 * match the new first one since we moved the
		 * mbuf at the top.
		 */
		inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(old);
	}
}
3493
3494 void
3495 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
3496 struct sctp_tmit_chunk *tp1)
3497 {
3498 if (tp1->data == NULL) {
3499 return;
3500 }
3501 #ifdef SCTP_MBCNT_LOGGING
3502 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
3503 asoc->total_output_queue_size,
3504 tp1->book_size,
3505 asoc->total_output_mbuf_queue_size,
3506 tp1->mbcnt);
3507 #endif
3508 if (asoc->total_output_queue_size >= tp1->book_size) {
3509 asoc->total_output_queue_size -= tp1->book_size;
3510 } else {
3511 asoc->total_output_queue_size = 0;
3512 }
3513
3514 /* Now free the mbuf */
3515 if (asoc->total_output_mbuf_queue_size >= tp1->mbcnt) {
3516 asoc->total_output_mbuf_queue_size -= tp1->mbcnt;
3517 } else {
3518 asoc->total_output_mbuf_queue_size = 0;
3519 }
3520 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3521 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3522 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
3523 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
3524 } else {
3525 stcb->sctp_socket->so_snd.sb_cc = 0;
3526
3527 }
3528 if (stcb->sctp_socket->so_snd.sb_mbcnt >= tp1->mbcnt) {
3529 stcb->sctp_socket->so_snd.sb_mbcnt -= tp1->mbcnt;
3530 } else {
3531 stcb->sctp_socket->so_snd.sb_mbcnt = 0;
3532 }
3533 }
3534 }
3535
/*
 * Abandon a (possibly fragmented) PR-SCTP message starting at chunk tp1
 * on 'queue' (send or sent queue): mark each fragment FORWARD_TSN_SKIP,
 * free its data, notify the ULP of the failure, and move send-queue
 * chunks onto the sent queue so a FORWARD-TSN can cover them.  If the
 * message's last fragment is not found on the sent queue, recurses once
 * into the send queue to finish the job.
 *
 * Returns the total book_size of all chunks released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue)
{
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;	/* saw the message's last fragment */

	do {
		ret_sz += tp1->book_size;
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
			/* release accounting, tell the ULP, free the data */
			sctp_free_bufspace(stcb, &stcb->asoc, tp1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
		if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/* Its a begin or middle piece, we must mark all of it */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered
		 * across the send and sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse throught the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue);
		} else {
			printf("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
3598
3599 /*
3600 * checks to see if the given address, sa, is one that is currently
3601 * known by the kernel
3602 * note: can't distinguish the same address on multiple interfaces and
3603 * doesn't handle multiple addresses with different zone/scope id's
3604 * note: ifa_ifwithaddr() compares the entire sockaddr struct
3605 */
3606 struct ifaddr *
3607 sctp_find_ifa_by_addr(struct sockaddr *sa)
3608 {
3609 struct ifnet *ifn;
3610 struct ifaddr *ifa;
3611 int s;
3612
3613 /* go through all our known interfaces */
3614 s = pserialize_read_enter();
3615 IFNET_READER_FOREACH(ifn) {
3616 /* go through each interface addresses */
3617 IFADDR_READER_FOREACH(ifa, ifn) {
3618 /* correct family? */
3619 if (ifa->ifa_addr->sa_family != sa->sa_family)
3620 continue;
3621
3622 #ifdef INET6
3623 if (ifa->ifa_addr->sa_family == AF_INET6) {
3624 /* IPv6 address */
3625 struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
3626 sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
3627 if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
3628 /* create a copy and clear scope */
3629 memcpy(&sin6_tmp, sin1,
3630 sizeof(struct sockaddr_in6));
3631 sin1 = &sin6_tmp;
3632 in6_clearscope(&sin1->sin6_addr);
3633 }
3634 sin2 = (struct sockaddr_in6 *)sa;
3635 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
3636 sizeof(struct in6_addr)) == 0) {
3637 /* found it */
3638 pserialize_read_exit(s);
3639 return (ifa);
3640 }
3641 } else
3642 #endif
3643 if (ifa->ifa_addr->sa_family == AF_INET) {
3644 /* IPv4 address */
3645 struct sockaddr_in *sin1, *sin2;
3646 sin1 = (struct sockaddr_in *)ifa->ifa_addr;
3647 sin2 = (struct sockaddr_in *)sa;
3648 if (sin1->sin_addr.s_addr ==
3649 sin2->sin_addr.s_addr) {
3650 /* found it */
3651 pserialize_read_exit(s);
3652 return (ifa);
3653 }
3654 }
3655 /* else, not AF_INET or AF_INET6, so skip */
3656 } /* end foreach ifa */
3657 } /* end foreach ifn */
3658 pserialize_read_exit(s);
3659
3660 /* not found! */
3661 return (NULL);
3662 }
3663
3664
3665 #ifdef __APPLE__
3666 /*
3667 * here we hack in a fix for Apple's m_copym for the case where the first mbuf
3668 * in the chain is a M_PKTHDR and the length is zero
3669 */
3670 static void
3671 sctp_pkthdr_fix(struct mbuf *m)
3672 {
3673 struct mbuf *m_nxt;
3674
3675 if ((m->m_flags & M_PKTHDR) == 0) {
3676 /* not a PKTHDR */
3677 return;
3678 }
3679
3680 if (m->m_len != 0) {
3681 /* not a zero length PKTHDR mbuf */
3682 return;
3683 }
3684
3685 /* let's move in a word into the first mbuf... yes, ugly! */
3686 m_nxt = m->m_next;
3687 if (m_nxt == NULL) {
3688 /* umm... not a very useful mbuf chain... */
3689 return;
3690 }
3691 if ((size_t)m_nxt->m_len > sizeof(long)) {
3692 /* move over a long */
3693 bcopy(mtod(m_nxt, void *), mtod(m, void *), sizeof(long));
3694 /* update mbuf data pointers and lengths */
3695 m->m_len += sizeof(long);
3696 m_nxt->m_data += sizeof(long);
3697 m_nxt->m_len -= sizeof(long);
3698 }
3699 }
3700
/*
 * Wrapper around m_copym() that first repairs a zero-length leading
 * M_PKTHDR mbuf via sctp_pkthdr_fix(); same contract as m_copym().
 */
inline struct mbuf *
sctp_m_copym(struct mbuf *m, int off, int len, int wait)
{
	sctp_pkthdr_fix(m);
	return (m_copym(m, off, len, wait));
}
3707 #endif /* __APPLE__ */
3708