sctputil.c revision 1.3 1 /* $KAME: sctputil.c,v 1.39 2005/06/16 20:54:06 jinmei Exp $ */
2 /* $NetBSD: sctputil.c,v 1.3 2016/03/06 19:46:05 christos Exp $ */
3
4 /*
5 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Cisco Systems, Inc.
19 * 4. Neither the name of the project nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: sctputil.c,v 1.3 2016/03/06 19:46:05 christos Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_inet.h"
41 #include "opt_ipsec.h"
42 #include "opt_sctp.h"
43 #endif /* _KERNEL_OPT */
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/domain.h>
50 #include <sys/protosw.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/kernel.h>
56 #include <sys/sysctl.h>
57
58 #include <sys/callout.h>
59
60 #include <net/radix.h>
61 #include <net/route.h>
62
63 #ifdef INET6
64 #include <sys/domain.h>
65 #endif
66
67 #include <machine/limits.h>
68
69 #include <net/if.h>
70 #include <net/if_types.h>
71 #include <net/route.h>
72
73 #include <netinet/in.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/ip.h>
76 #include <netinet/in_pcb.h>
77 #include <netinet/in_var.h>
78 #include <netinet/ip_var.h>
79
80 #ifdef INET6
81 #include <netinet/ip6.h>
82 #include <netinet6/ip6_var.h>
83 #include <netinet6/scope6_var.h>
84 #include <netinet6/in6_pcb.h>
85
86 #endif /* INET6 */
87
88 #include <netinet/sctp_pcb.h>
89
90 #ifdef IPSEC
91 #include <netinet6/ipsec.h>
92 #include <netkey/key.h>
93 #endif /* IPSEC */
94
95 #include <netinet/sctputil.h>
96 #include <netinet/sctp_var.h>
97 #ifdef INET6
98 #include <netinet6/sctp6_var.h>
99 #endif
100 #include <netinet/sctp_header.h>
101 #include <netinet/sctp_output.h>
102 #include <netinet/sctp_hashdriver.h>
103 #include <netinet/sctp_uio.h>
104 #include <netinet/sctp_timer.h>
105 #include <netinet/sctp_crc32.h>
106 #include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
107 #define NUMBER_OF_MTU_SIZES 18
108
109 #ifdef SCTP_DEBUG
110 extern u_int32_t sctp_debug_on;
111 #endif
112
113 #ifdef SCTP_STAT_LOGGING
114 int sctp_cwnd_log_at=0;
115 int sctp_cwnd_log_rolled=0;
116 struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
117
118 void sctp_clr_stat_log(void)
119 {
120 sctp_cwnd_log_at=0;
121 sctp_cwnd_log_rolled=0;
122 }
123
124 void
125 sctp_log_strm_del_alt(u_int32_t tsn, u_int16_t sseq, int from)
126 {
127
128 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
129 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
130 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
131 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
132 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
133 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
134 sctp_cwnd_log_at++;
135 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
136 sctp_cwnd_log_at = 0;
137 sctp_cwnd_log_rolled = 1;
138 }
139
140 }
141
142 void
143 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
144 {
145
146 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
147 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAP;
148 sctp_clog[sctp_cwnd_log_at].x.map.base = map;
149 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
150 sctp_clog[sctp_cwnd_log_at].x.map.high = high;
151 sctp_cwnd_log_at++;
152 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
153 sctp_cwnd_log_at = 0;
154 sctp_cwnd_log_rolled = 1;
155 }
156 }
157
158 void
159 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
160 int from)
161 {
162
163 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
164 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_FR;
165 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
166 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
167 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
168 sctp_cwnd_log_at++;
169 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
170 sctp_cwnd_log_at = 0;
171 sctp_cwnd_log_rolled = 1;
172 }
173 }
174
175 void
176 sctp_log_strm_del(struct sctp_tmit_chunk *chk, struct sctp_tmit_chunk *poschk,
177 int from)
178 {
179
180 if (chk == NULL) {
181 printf("Gak log of NULL?\n");
182 return;
183 }
184 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
185 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_STRM;
186 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = chk->rec.data.TSN_seq;
187 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = chk->rec.data.stream_seq;
188 if (poschk != NULL) {
189 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn =
190 poschk->rec.data.TSN_seq;
191 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq =
192 poschk->rec.data.stream_seq;
193 } else {
194 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
195 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
196 }
197 sctp_cwnd_log_at++;
198 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
199 sctp_cwnd_log_at = 0;
200 sctp_cwnd_log_rolled = 1;
201 }
202 }
203
204 void
205 sctp_log_cwnd(struct sctp_nets *net, int augment, uint8_t from)
206 {
207
208 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
209 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_CWND;
210 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
211 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
212 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
213 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
214 sctp_cwnd_log_at++;
215 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
216 sctp_cwnd_log_at = 0;
217 sctp_cwnd_log_rolled = 1;
218 }
219 }
220
221 void
222 sctp_log_maxburst(struct sctp_nets *net, int error, int burst, uint8_t from)
223 {
224 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
225 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MAXBURST;
226 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
227 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
228 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
229 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
230 sctp_cwnd_log_at++;
231 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
232 sctp_cwnd_log_at = 0;
233 sctp_cwnd_log_rolled = 1;
234 }
235 }
236
237 void
238 sctp_log_rwnd(uint8_t from, u_int32_t peers_rwnd , u_int32_t snd_size, u_int32_t overhead)
239 {
240 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
241 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
242 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
243 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
244 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
245 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
246 sctp_cwnd_log_at++;
247 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
248 sctp_cwnd_log_at = 0;
249 sctp_cwnd_log_rolled = 1;
250 }
251 }
252
253 void
254 sctp_log_rwnd_set(uint8_t from, u_int32_t peers_rwnd , u_int32_t flight_size, u_int32_t overhead, u_int32_t a_rwndval)
255 {
256 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
257 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_RWND;
258 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
259 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
260 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
261 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
262 sctp_cwnd_log_at++;
263 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
264 sctp_cwnd_log_at = 0;
265 sctp_cwnd_log_rolled = 1;
266 }
267 }
268
269 void
270 sctp_log_mbcnt(uint8_t from, u_int32_t total_oq , u_int32_t book, u_int32_t total_mbcnt_q, u_int32_t mbcnt)
271 {
272 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
273 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_MBCNT;
274 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
275 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
276 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
277 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
278 sctp_cwnd_log_at++;
279 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
280 sctp_cwnd_log_at = 0;
281 sctp_cwnd_log_rolled = 1;
282 }
283 }
284
285 void
286 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc)
287 {
288
289 sctp_clog[sctp_cwnd_log_at].from = (u_int8_t)from;
290 sctp_clog[sctp_cwnd_log_at].event_type = (u_int8_t)SCTP_LOG_EVENT_BLOCK;
291 sctp_clog[sctp_cwnd_log_at].x.blk.maxmb = (u_int16_t)(so->so_snd.sb_mbmax/1024);
292 sctp_clog[sctp_cwnd_log_at].x.blk.onmb = asoc->total_output_mbuf_queue_size;
293 sctp_clog[sctp_cwnd_log_at].x.blk.maxsb = (u_int16_t)(so->so_snd.sb_hiwat/1024);
294 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
295 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (u_int16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
296 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (u_int16_t)asoc->stream_queue_cnt;
297 sctp_cwnd_log_at++;
298 if (sctp_cwnd_log_at >= SCTP_STAT_LOG_SIZE) {
299 sctp_cwnd_log_at = 0;
300 sctp_cwnd_log_rolled = 1;
301 }
302 }
303
304 int
305 sctp_fill_stat_log(struct mbuf *m)
306 {
307 struct sctp_cwnd_log_req *req;
308 int size_limit, num, i, at, cnt_out=0;
309
310 if (m == NULL)
311 return (EINVAL);
312
313 size_limit = (m->m_len - sizeof(struct sctp_cwnd_log_req));
314 if (size_limit < sizeof(struct sctp_cwnd_log)) {
315 return (EINVAL);
316 }
317 req = mtod(m, struct sctp_cwnd_log_req *);
318 num = size_limit/sizeof(struct sctp_cwnd_log);
319 if (sctp_cwnd_log_rolled) {
320 req->num_in_log = SCTP_STAT_LOG_SIZE;
321 } else {
322 req->num_in_log = sctp_cwnd_log_at;
323 /* if the log has not rolled, we don't
324 * let you have old data.
325 */
326 if (req->end_at > sctp_cwnd_log_at) {
327 req->end_at = sctp_cwnd_log_at;
328 }
329 }
330 if ((num < SCTP_STAT_LOG_SIZE) &&
331 ((sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
332 /* we can't return all of it */
333 if (((req->start_at == 0) && (req->end_at == 0)) ||
334 (req->start_at >= SCTP_STAT_LOG_SIZE) ||
335 (req->end_at >= SCTP_STAT_LOG_SIZE)) {
336 /* No user request or user is wacked. */
337 req->num_ret = num;
338 req->end_at = sctp_cwnd_log_at - 1;
339 if ((sctp_cwnd_log_at - num) < 0) {
340 int cc;
341 cc = num - sctp_cwnd_log_at;
342 req->start_at = SCTP_STAT_LOG_SIZE - cc;
343 } else {
344 req->start_at = sctp_cwnd_log_at - num;
345 }
346 } else {
347 /* a user request */
348 int cc;
349 if (req->start_at > req->end_at) {
350 cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
351 (req->end_at + 1);
352 } else {
353
354 cc = req->end_at - req->start_at;
355 }
356 if (cc < num) {
357 num = cc;
358 }
359 req->num_ret = num;
360 }
361 } else {
362 /* We can return all of it */
363 req->start_at = 0;
364 req->end_at = sctp_cwnd_log_at - 1;
365 req->num_ret = sctp_cwnd_log_at;
366 }
367 for (i = 0, at = req->start_at; i < req->num_ret; i++) {
368 req->log[i] = sctp_clog[at];
369 cnt_out++;
370 at++;
371 if (at >= SCTP_STAT_LOG_SIZE)
372 at = 0;
373 }
374 m->m_len = (cnt_out * sizeof(struct sctp_cwnd_log_req)) + sizeof(struct sctp_cwnd_log_req);
375 return (0);
376 }
377
378 #endif
379
380 #ifdef SCTP_AUDITING_ENABLED
381 u_int8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
382 static int sctp_audit_indx = 0;
383
384 static
385 void sctp_print_audit_report(void)
386 {
387 int i;
388 int cnt;
389 cnt = 0;
390 for (i=sctp_audit_indx;i<SCTP_AUDIT_SIZE;i++) {
391 if ((sctp_audit_data[i][0] == 0xe0) &&
392 (sctp_audit_data[i][1] == 0x01)) {
393 cnt = 0;
394 printf("\n");
395 } else if (sctp_audit_data[i][0] == 0xf0) {
396 cnt = 0;
397 printf("\n");
398 } else if ((sctp_audit_data[i][0] == 0xc0) &&
399 (sctp_audit_data[i][1] == 0x01)) {
400 printf("\n");
401 cnt = 0;
402 }
403 printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
404 (uint32_t)sctp_audit_data[i][1]);
405 cnt++;
406 if ((cnt % 14) == 0)
407 printf("\n");
408 }
409 for (i=0;i<sctp_audit_indx;i++) {
410 if ((sctp_audit_data[i][0] == 0xe0) &&
411 (sctp_audit_data[i][1] == 0x01)) {
412 cnt = 0;
413 printf("\n");
414 } else if (sctp_audit_data[i][0] == 0xf0) {
415 cnt = 0;
416 printf("\n");
417 } else if ((sctp_audit_data[i][0] == 0xc0) &&
418 (sctp_audit_data[i][1] == 0x01)) {
419 printf("\n");
420 cnt = 0;
421 }
422 printf("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
423 (uint32_t)sctp_audit_data[i][1]);
424 cnt++;
425 if ((cnt % 14) == 0)
426 printf("\n");
427 }
428 printf("\n");
429 }
430
431 void sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
432 struct sctp_nets *net)
433 {
434 int resend_cnt, tot_out, rep, tot_book_cnt;
435 struct sctp_nets *lnet;
436 struct sctp_tmit_chunk *chk;
437
438 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
439 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
440 sctp_audit_indx++;
441 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
442 sctp_audit_indx = 0;
443 }
444 if (inp == NULL) {
445 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
446 sctp_audit_data[sctp_audit_indx][1] = 0x01;
447 sctp_audit_indx++;
448 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
449 sctp_audit_indx = 0;
450 }
451 return;
452 }
453 if (stcb == NULL) {
454 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
455 sctp_audit_data[sctp_audit_indx][1] = 0x02;
456 sctp_audit_indx++;
457 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
458 sctp_audit_indx = 0;
459 }
460 return;
461 }
462 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
463 sctp_audit_data[sctp_audit_indx][1] =
464 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
465 sctp_audit_indx++;
466 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
467 sctp_audit_indx = 0;
468 }
469 rep = 0;
470 tot_book_cnt = 0;
471 resend_cnt = tot_out = 0;
472 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
473 if (chk->sent == SCTP_DATAGRAM_RESEND) {
474 resend_cnt++;
475 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
476 tot_out += chk->book_size;
477 tot_book_cnt++;
478 }
479 }
480 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
481 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
482 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
483 sctp_audit_indx++;
484 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
485 sctp_audit_indx = 0;
486 }
487 printf("resend_cnt:%d asoc-tot:%d\n",
488 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
489 rep = 1;
490 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
491 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
492 sctp_audit_data[sctp_audit_indx][1] =
493 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
494 sctp_audit_indx++;
495 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
496 sctp_audit_indx = 0;
497 }
498 }
499 if (tot_out != stcb->asoc.total_flight) {
500 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
501 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
502 sctp_audit_indx++;
503 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
504 sctp_audit_indx = 0;
505 }
506 rep = 1;
507 printf("tot_flt:%d asoc_tot:%d\n", tot_out,
508 (int)stcb->asoc.total_flight);
509 stcb->asoc.total_flight = tot_out;
510 }
511 if (tot_book_cnt != stcb->asoc.total_flight_count) {
512 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
513 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
514 sctp_audit_indx++;
515 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
516 sctp_audit_indx = 0;
517 }
518 rep = 1;
519 printf("tot_flt_book:%d\n", tot_book);
520
521 stcb->asoc.total_flight_count = tot_book_cnt;
522 }
523 tot_out = 0;
524 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
525 tot_out += lnet->flight_size;
526 }
527 if (tot_out != stcb->asoc.total_flight) {
528 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
529 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
530 sctp_audit_indx++;
531 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
532 sctp_audit_indx = 0;
533 }
534 rep = 1;
535 printf("real flight:%d net total was %d\n",
536 stcb->asoc.total_flight, tot_out);
537 /* now corrective action */
538 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
539 tot_out = 0;
540 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
541 if ((chk->whoTo == lnet) &&
542 (chk->sent < SCTP_DATAGRAM_RESEND)) {
543 tot_out += chk->book_size;
544 }
545 }
546 if (lnet->flight_size != tot_out) {
547 printf("net:%x flight was %d corrected to %d\n",
548 (uint32_t)lnet, lnet->flight_size, tot_out);
549 lnet->flight_size = tot_out;
550 }
551
552 }
553 }
554
555 if (rep) {
556 sctp_print_audit_report();
557 }
558 }
559
560 void
561 sctp_audit_log(u_int8_t ev, u_int8_t fd)
562 {
563 sctp_audit_data[sctp_audit_indx][0] = ev;
564 sctp_audit_data[sctp_audit_indx][1] = fd;
565 sctp_audit_indx++;
566 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
567 sctp_audit_indx = 0;
568 }
569 }
570
571 #endif
572
/*
 * a list of sizes based on typical mtu's, used only if next hop
 * size not returned.  The table must stay sorted ascending and hold
 * exactly NUMBER_OF_MTU_SIZES entries; find_next_best_mtu() relies
 * on both properties.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
597
598 int
599 find_next_best_mtu(int totsz)
600 {
601 int i, perfer;
602 /*
603 * if we are in here we must find the next best fit based on the
604 * size of the dg that failed to be sent.
605 */
606 perfer = 0;
607 for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
608 if (totsz < sctp_mtu_sizes[i]) {
609 perfer = i - 1;
610 if (perfer < 0)
611 perfer = 0;
612 break;
613 }
614 }
615 return (sctp_mtu_sizes[perfer]);
616 }
617
618 void
619 sctp_fill_random_store(struct sctp_pcb *m)
620 {
621 /*
622 * Here we use the MD5/SHA-1 to hash with our good randomNumbers
623 * and our counter. The result becomes our good random numbers and
624 * we then setup to give these out. Note that we do no lockig
625 * to protect this. This is ok, since if competing folks call
626 * this we will get more gobbled gook in the random store whic
627 * is what we want. There is a danger that two guys will use
628 * the same random numbers, but thats ok too since that
629 * is random as well :->
630 */
631 m->store_at = 0;
632 sctp_hash_digest((char *)m->random_numbers, sizeof(m->random_numbers),
633 (char *)&m->random_counter, sizeof(m->random_counter),
634 (char *)m->random_store);
635 m->random_counter++;
636 }
637
638 uint32_t
639 sctp_select_initial_TSN(struct sctp_pcb *m)
640 {
641 /*
642 * A true implementation should use random selection process to
643 * get the initial stream sequence number, using RFC1750 as a
644 * good guideline
645 */
646 u_long x, *xp;
647 uint8_t *p;
648
649 if (m->initial_sequence_debug != 0) {
650 u_int32_t ret;
651 ret = m->initial_sequence_debug;
652 m->initial_sequence_debug++;
653 return (ret);
654 }
655 if ((m->store_at+sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
656 /* Refill the random store */
657 sctp_fill_random_store(m);
658 }
659 p = &m->random_store[(int)m->store_at];
660 xp = (u_long *)p;
661 x = *xp;
662 m->store_at += sizeof(u_long);
663 return (x);
664 }
665
666 u_int32_t sctp_select_a_tag(struct sctp_inpcb *m)
667 {
668 u_long x, not_done;
669 struct timeval now;
670
671 SCTP_GETTIME_TIMEVAL(&now);
672 not_done = 1;
673 while (not_done) {
674 x = sctp_select_initial_TSN(&m->sctp_ep);
675 if (x == 0) {
676 /* we never use 0 */
677 continue;
678 }
679 if (sctp_is_vtag_good(m, x, &now)) {
680 not_done = 0;
681 }
682 }
683 return (x);
684 }
685
686
int
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
    int for_a_init, uint32_t override_tag )
{
	/*
	 * Initialize a freshly-allocated association from the
	 * endpoint's defaults.  Returns 0 on success or ENOMEM when
	 * the stream-out array or mapping array cannot be allocated.
	 * Anything set to zero is taken care of by the allocation
	 * routine's bzero.
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;
	/* init all variables to a known value.*/
	asoc->state = SCTP_STATE_INUSE;
	asoc->max_burst = m->sctp_ep.max_burst;
	asoc->heart_beat_delay = m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
	asoc->cookie_life = m->sctp_ep.def_cookie_life;

	/* caller may force a specific vtag (e.g. cookie processing) */
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(m);
	}
	/* all four outgoing sequence spaces start from one random TSN */
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&m->sctp_ep);
	asoc->t3timeout_highest_marked = asoc->asconf_seq_out;
	/* we are optimistic here: assume the peer supports the extensions */
	asoc->peer_supports_asconf = 1;
	asoc->peer_supports_asconf_setprim = 1;
	asoc->peer_supports_pktdrop = 1;

	asoc->sent_queue_retran_cnt = 0;
	/* This will need to be adjusted */
	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = m->sctp_ep.initial_rto;

	asoc->max_init_times = m->sctp_ep.max_init_times;
	asoc->max_send_times = m->sctp_ep.max_send_times;
	asoc->def_net_failure = m->sctp_ep.def_net_failure;

	/* ECN Nonce initialization */
	asoc->ecn_nonce_allowed = 0;
	asoc->receiver_nonce_sum = 1;
	asoc->nonce_sum_expect_base = 1;
	asoc->nonce_sum_check = 1;
	asoc->nonce_resync_tsn = 0;
	asoc->nonce_wait_for_ecne = 0;
	asoc->nonce_wait_tsn = 0;

	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		struct in6pcb *inp6;


		/* Its a V6 socket */
		inp6 = (struct in6pcb *)m;
		asoc->ipv6_addr_legal = 1;
		/* Now look at the binding flag to see if V4 will be legal */
		if (
#if defined(__OpenBSD__)
		(0) /* we always do dual bind */
#elif defined (__NetBSD__)
		(inp6->in6p_flags & IN6P_IPV6_V6ONLY)
#else
		(inp6->inp_flags & IN6P_IPV6_V6ONLY)
#endif
		 == 0) {
			asoc->ipv4_addr_legal = 1;
		} else {
			/* V4 addresses are NOT legal on the association */
			asoc->ipv4_addr_legal = 0;
		}
	} else {
		/* Its a V4 socket, no - V6 */
		asoc->ipv4_addr_legal = 1;
		asoc->ipv6_addr_legal = 0;
	}


	/* advertise at least SCTP_MINIMAL_RWND even on tiny sockets */
	asoc->my_rwnd = max(m->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat;

	asoc->smallest_mtu = m->sctp_frag_point;
	asoc->minrto = m->sctp_ep.sctp_minrto;
	asoc->maxrto = m->sctp_ep.sctp_maxrto;

	LIST_INIT(&asoc->sctp_local_addr_list);
	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	asoc->last_asconf_ack_sent = NULL;
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;

	/*
	 * Now the stream parameters, here we allocate space for all
	 * streams that we request by default.
	 */
	asoc->streamoutcnt = asoc->pre_open_streams =
	    m->sctp_ep.pre_open_stream_count;
	asoc->strmout = malloc(asoc->streamoutcnt *
	    sizeof(struct sctp_stream_out), M_PCB, M_NOWAIT);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff,
		 * also NOTE when we get the INIT-ACK back (for INIT sender)
		 * we MUST reduce the count (streamoutcnt) but first check
		 * if we sent to any of the upper streams that were dropped
		 * (if some were). Those that were dropped must be notified
		 * to the upper layer as failed to send.
		 */
		asoc->strmout[i].next_sequence_sent = 0x0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].stream_no = i;
		asoc->strmout[i].next_spoke.tqe_next = 0;
		asoc->strmout[i].next_spoke.tqe_prev = 0;
	}
	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	asoc->mapping_array = malloc(asoc->mapping_array_size,
	    M_PCB, M_NOWAIT);
	if (asoc->mapping_array == NULL) {
		/* undo the stream allocation before failing */
		free(asoc->strmout, M_PCB);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->out_wheel);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->reasmqueue);
	TAILQ_INIT(&asoc->delivery_queue);
	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;

	TAILQ_INIT(&asoc->asconf_queue);
	return (0);
}
841
842 int
843 sctp_expand_mapping_array(struct sctp_association *asoc)
844 {
845 /* mapping array needs to grow */
846 u_int8_t *new_array;
847 uint16_t new_size, old_size;
848
849 old_size = asoc->mapping_array_size;
850 new_size = old_size + SCTP_MAPPING_ARRAY_INCR;
851 new_array = malloc(new_size, M_PCB, M_NOWAIT);
852 if (new_array == NULL) {
853 /* can't get more, forget it */
854 printf("No memory for expansion of SCTP mapping array %d\n",
855 new_size);
856 return (-1);
857 }
858 memcpy(new_array, asoc->mapping_array, old_size);
859 memset(new_array + old_size, 0, SCTP_MAPPING_ARRAY_INCR);
860 free(asoc->mapping_array, M_PCB);
861 asoc->mapping_array = new_array;
862 asoc->mapping_array_size = new_size;
863 return (0);
864 }
865
866 static void
867 sctp_timeout_handler(void *t)
868 {
869 struct sctp_inpcb *inp;
870 struct sctp_tcb *stcb;
871 struct sctp_nets *net;
872 struct sctp_timer *tmr;
873 int did_output;
874
875 mutex_enter(softnet_lock);
876 tmr = (struct sctp_timer *)t;
877 inp = (struct sctp_inpcb *)tmr->ep;
878 stcb = (struct sctp_tcb *)tmr->tcb;
879 net = (struct sctp_nets *)tmr->net;
880 did_output = 1;
881
882 #ifdef SCTP_AUDITING_ENABLED
883 sctp_audit_log(0xF0, (u_int8_t)tmr->type);
884 sctp_auditing(3, inp, stcb, net);
885 #endif
886 sctp_pegs[SCTP_TIMERS_EXP]++;
887
888 if (inp == NULL) {
889 return;
890 }
891
892 SCTP_INP_WLOCK(inp);
893 if (inp->sctp_socket == 0) {
894 mutex_exit(softnet_lock);
895 SCTP_INP_WUNLOCK(inp);
896 return;
897 }
898 if (stcb) {
899 if (stcb->asoc.state == 0) {
900 mutex_exit(softnet_lock);
901 SCTP_INP_WUNLOCK(inp);
902 return;
903 }
904 }
905 #ifdef SCTP_DEBUG
906 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
907 printf("Timer type %d goes off\n", tmr->type);
908 }
909 #endif /* SCTP_DEBUG */
910 #ifndef __NetBSD__
911 if (!callout_active(&tmr->timer)) {
912 SCTP_INP_WUNLOCK(inp);
913 return;
914 }
915 #endif
916 if (stcb) {
917 SCTP_TCB_LOCK(stcb);
918 }
919 SCTP_INP_INCR_REF(inp);
920 SCTP_INP_WUNLOCK(inp);
921
922 switch (tmr->type) {
923 case SCTP_TIMER_TYPE_ITERATOR:
924 {
925 struct sctp_iterator *it;
926 it = (struct sctp_iterator *)inp;
927 sctp_iterator_timer(it);
928 }
929 break;
930 /* call the handler for the appropriate timer type */
931 case SCTP_TIMER_TYPE_SEND:
932 sctp_pegs[SCTP_TMIT_TIMER]++;
933 stcb->asoc.num_send_timers_up--;
934 if (stcb->asoc.num_send_timers_up < 0) {
935 stcb->asoc.num_send_timers_up = 0;
936 }
937 if (sctp_t3rxt_timer(inp, stcb, net)) {
938 /* no need to unlock on tcb its gone */
939
940 goto out_decr;
941 }
942 #ifdef SCTP_AUDITING_ENABLED
943 sctp_auditing(4, inp, stcb, net);
944 #endif
945 sctp_chunk_output(inp, stcb, 1);
946 if ((stcb->asoc.num_send_timers_up == 0) &&
947 (stcb->asoc.sent_queue_cnt > 0)
948 ) {
949 struct sctp_tmit_chunk *chk;
950 /*
951 * safeguard. If there on some on the sent queue
952 * somewhere but no timers running something is
953 * wrong... so we start a timer on the first chunk
954 * on the send queue on whatever net it is sent to.
955 */
956 sctp_pegs[SCTP_T3_SAFEGRD]++;
957 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
958 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
959 chk->whoTo);
960 }
961 break;
962 case SCTP_TIMER_TYPE_INIT:
963 if (sctp_t1init_timer(inp, stcb, net)) {
964 /* no need to unlock on tcb its gone */
965 goto out_decr;
966 }
967 /* We do output but not here */
968 did_output = 0;
969 break;
970 case SCTP_TIMER_TYPE_RECV:
971 sctp_pegs[SCTP_RECV_TIMER]++;
972 sctp_send_sack(stcb);
973 #ifdef SCTP_AUDITING_ENABLED
974 sctp_auditing(4, inp, stcb, net);
975 #endif
976 sctp_chunk_output(inp, stcb, 4);
977 break;
978 case SCTP_TIMER_TYPE_SHUTDOWN:
979 if (sctp_shutdown_timer(inp, stcb, net) ) {
980 /* no need to unlock on tcb its gone */
981 goto out_decr;
982 }
983 #ifdef SCTP_AUDITING_ENABLED
984 sctp_auditing(4, inp, stcb, net);
985 #endif
986 sctp_chunk_output(inp, stcb, 5);
987 break;
988 case SCTP_TIMER_TYPE_HEARTBEAT:
989 if (sctp_heartbeat_timer(inp, stcb, net)) {
990 /* no need to unlock on tcb its gone */
991 goto out_decr;
992 }
993 #ifdef SCTP_AUDITING_ENABLED
994 sctp_auditing(4, inp, stcb, net);
995 #endif
996 sctp_chunk_output(inp, stcb, 6);
997 break;
998 case SCTP_TIMER_TYPE_COOKIE:
999 if (sctp_cookie_timer(inp, stcb, net)) {
1000 /* no need to unlock on tcb its gone */
1001 goto out_decr;
1002 }
1003 #ifdef SCTP_AUDITING_ENABLED
1004 sctp_auditing(4, inp, stcb, net);
1005 #endif
1006 sctp_chunk_output(inp, stcb, 1);
1007 break;
1008 case SCTP_TIMER_TYPE_NEWCOOKIE:
1009 {
1010 struct timeval tv;
1011 int i, secret;
1012 SCTP_GETTIME_TIMEVAL(&tv);
1013 SCTP_INP_WLOCK(inp);
1014 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1015 inp->sctp_ep.last_secret_number =
1016 inp->sctp_ep.current_secret_number;
1017 inp->sctp_ep.current_secret_number++;
1018 if (inp->sctp_ep.current_secret_number >=
1019 SCTP_HOW_MANY_SECRETS) {
1020 inp->sctp_ep.current_secret_number = 0;
1021 }
1022 secret = (int)inp->sctp_ep.current_secret_number;
1023 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1024 inp->sctp_ep.secret_key[secret][i] =
1025 sctp_select_initial_TSN(&inp->sctp_ep);
1026 }
1027 SCTP_INP_WUNLOCK(inp);
1028 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1029 }
1030 did_output = 0;
1031 break;
1032 case SCTP_TIMER_TYPE_PATHMTURAISE:
1033 sctp_pathmtu_timer(inp, stcb, net);
1034 did_output = 0;
1035 break;
1036 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1037 if (sctp_shutdownack_timer(inp, stcb, net)) {
1038 /* no need to unlock on tcb its gone */
1039 goto out_decr;
1040 }
1041 #ifdef SCTP_AUDITING_ENABLED
1042 sctp_auditing(4, inp, stcb, net);
1043 #endif
1044 sctp_chunk_output(inp, stcb, 7);
1045 break;
1046 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1047 sctp_abort_an_association(inp, stcb,
1048 SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
1049 /* no need to unlock on tcb its gone */
1050 goto out_decr;
1051 break;
1052
1053 case SCTP_TIMER_TYPE_STRRESET:
1054 if (sctp_strreset_timer(inp, stcb, net)) {
1055 /* no need to unlock on tcb its gone */
1056 goto out_decr;
1057 }
1058 sctp_chunk_output(inp, stcb, 9);
1059 break;
1060
1061 case SCTP_TIMER_TYPE_ASCONF:
1062 if (sctp_asconf_timer(inp, stcb, net)) {
1063 /* no need to unlock on tcb its gone */
1064 goto out_decr;
1065 }
1066 #ifdef SCTP_AUDITING_ENABLED
1067 sctp_auditing(4, inp, stcb, net);
1068 #endif
1069 sctp_chunk_output(inp, stcb, 8);
1070 break;
1071
1072 case SCTP_TIMER_TYPE_AUTOCLOSE:
1073 sctp_autoclose_timer(inp, stcb, net);
1074 sctp_chunk_output(inp, stcb, 10);
1075 did_output = 0;
1076 break;
1077 case SCTP_TIMER_TYPE_INPKILL:
1078 /* special case, take away our
1079 * increment since WE are the killer
1080 */
1081 SCTP_INP_WLOCK(inp);
1082 SCTP_INP_DECR_REF(inp);
1083 SCTP_INP_WUNLOCK(inp);
1084 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
1085 sctp_inpcb_free(inp, 1);
1086 goto out_no_decr;
1087 break;
1088 default:
1089 #ifdef SCTP_DEBUG
1090 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1091 printf("sctp_timeout_handler:unknown timer %d\n",
1092 tmr->type);
1093 }
1094 #endif /* SCTP_DEBUG */
1095 break;
1096 };
1097 #ifdef SCTP_AUDITING_ENABLED
1098 sctp_audit_log(0xF1, (u_int8_t)tmr->type);
1099 sctp_auditing(5, inp, stcb, net);
1100 #endif
1101 if (did_output) {
1102 /*
1103 * Now we need to clean up the control chunk chain if an
1104 * ECNE is on it. It must be marked as UNSENT again so next
1105 * call will continue to send it until such time that we get
1106 * a CWR, to remove it. It is, however, less likely that we
1107 * will find a ecn echo on the chain though.
1108 */
1109 sctp_fix_ecn_echo(&stcb->asoc);
1110 }
1111 if (stcb) {
1112 SCTP_TCB_UNLOCK(stcb);
1113 }
1114 out_decr:
1115 SCTP_INP_WLOCK(inp);
1116 SCTP_INP_DECR_REF(inp);
1117 SCTP_INP_WUNLOCK(inp);
1118
1119 out_no_decr:
1120
1121 mutex_exit(softnet_lock);
1122 }
1123
/*
 * Arm the SCTP timer of the given type for this endpoint (inp),
 * association (stcb) and/or destination (net).  Selects the correct
 * struct sctp_timer slot and computes the expiry in clock ticks for
 * t_type, then schedules sctp_timeout_handler() via callout_reset().
 *
 * Which of stcb/net must be non-NULL depends on the timer type; a
 * missing required argument yields EFAULT.  Returns 0 on success, or
 * EALREADY if that timer is already pending (the running timer is
 * deliberately left unchanged).
 */
int
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	if (inp == NULL)
		return (EFAULT);

	to_ticks = 0;

	tmr = NULL;
	switch (t_type) {
	case SCTP_TIMER_TYPE_ITERATOR:
	{
		/*
		 * For the iterator timer 'inp' is actually a
		 * struct sctp_iterator in disguise.
		 */
		struct sctp_iterator *it;
		it = (struct sctp_iterator *)inp;
		tmr = &it->tmr;
		to_ticks = SCTP_ITERATOR_TICKS;
	}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;
			if ((stcb == NULL) || (net == NULL)) {
				return (EFAULT);
			}
			tmr = &net->rxt_timer;
			/* RTO == 0 means no measurement yet; fall back
			 * to the association's initial RTO. */
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default
		 * usually about 1 minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV];
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}

		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO.
		 * Even though we use a different timer. We also add the
		 * HB timer PLUS a random jitter.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		{
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf=0;
			struct sctp_nets *lnet;

			/* count destinations still awaiting confirmation;
			 * they force heartbeats even when HB delay is 0 */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
					cnt_of_unconf++;
				}
			}
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("HB timer to start unconfirmed:%d hb_delay:%d\n",
				    cnt_of_unconf, stcb->asoc.heart_beat_delay);
			}
#endif
			/*
			 * Refill the pool of random jitter bytes when
			 * exhausted; one byte is consumed per HB start.
			 * Assumes hb_random_values holds 4 bytes (one
			 * uint32_t worth) — idx wraps via the > 3 test.
			 */
			if (stcb->asoc.hb_random_idx > 3) {
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				this_random = stcb->asoc.hb_random_values[0];
				stcb->asoc.hb_random_idx = 0;
				stcb->asoc.hb_ect_randombit = 0;
			} else {
				this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
				stcb->asoc.hb_random_idx++;
				stcb->asoc.hb_ect_randombit = 0;
			}
			/*
			 * this_random will be 0 - 256 ms
			 * RTO is in ms.
			 */
			if ((stcb->asoc.heart_beat_delay == 0) &&
			    (cnt_of_unconf == 0)) {
				/* no HB on this inp after confirmations */
				return (0);
			}
			if (net) {
				int delay;
				delay = stcb->asoc.heart_beat_delay;
				/* an unconfirmed-but-reachable address
				 * drops the extra HB delay so it gets
				 * confirmed promptly */
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks *= hz;
			to_ticks /= 1000;
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
				printf("Timer to expire in %d ticks\n", to_ticks);
			}
#endif
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since
		 * one RTT was complete. If a retran happened then we will
		 * be using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here,
		 * usually about 60 minutes.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the
		 * signature_change timer since that has
		 * stopped and we are in the GONE state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = (SCTP_INP_KILL_TIMEOUT * hz) / 1000;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU,
		 * usually about 10 minutes.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		if (net == NULL) {
			return (EFAULT);
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer,
		 * usually about 3 minutes.
		 */
		if (stcb == NULL) {
			return (EFAULT);
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb
		 * but its value is from the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb
		 * but its value is from the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return (EFAULT);
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return (EFAULT);
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/* Really an error since stcb is NOT set to autoclose */
			return (0);
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("sctp_timer_start:Unknown timer type %d\n",
			    t_type);
		}
#endif /* SCTP_DEBUG */
		return (EFAULT);
		break;
	};
	/* every case must have produced both a timer slot and a delay */
	if ((to_ticks <= 0) || (tmr == NULL)) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
			printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
			    t_type, to_ticks, tmr);
		}
#endif /* SCTP_DEBUG */
		return (EFAULT);
	}
	if (callout_pending(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running.
		 * if it is we leave the current one up unchanged
		 */
		return (EALREADY);
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* record context so the timeout handler knows what fired */
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	callout_reset(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return (0);
}
1435
1436 int
1437 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1438 struct sctp_nets *net)
1439 {
1440 struct sctp_timer *tmr;
1441
1442 if (inp == NULL)
1443 return (EFAULT);
1444
1445 tmr = NULL;
1446 switch (t_type) {
1447 case SCTP_TIMER_TYPE_ITERATOR:
1448 {
1449 struct sctp_iterator *it;
1450 it = (struct sctp_iterator *)inp;
1451 tmr = &it->tmr;
1452 }
1453 break;
1454 case SCTP_TIMER_TYPE_SEND:
1455 if ((stcb == NULL) || (net == NULL)) {
1456 return (EFAULT);
1457 }
1458 tmr = &net->rxt_timer;
1459 break;
1460 case SCTP_TIMER_TYPE_INIT:
1461 if ((stcb == NULL) || (net == NULL)) {
1462 return (EFAULT);
1463 }
1464 tmr = &net->rxt_timer;
1465 break;
1466 case SCTP_TIMER_TYPE_RECV:
1467 if (stcb == NULL) {
1468 return (EFAULT);
1469 }
1470 tmr = &stcb->asoc.dack_timer;
1471 break;
1472 case SCTP_TIMER_TYPE_SHUTDOWN:
1473 if ((stcb == NULL) || (net == NULL)) {
1474 return (EFAULT);
1475 }
1476 tmr = &net->rxt_timer;
1477 break;
1478 case SCTP_TIMER_TYPE_HEARTBEAT:
1479 if (stcb == NULL) {
1480 return (EFAULT);
1481 }
1482 tmr = &stcb->asoc.hb_timer;
1483 break;
1484 case SCTP_TIMER_TYPE_COOKIE:
1485 if ((stcb == NULL) || (net == NULL)) {
1486 return (EFAULT);
1487 }
1488 tmr = &net->rxt_timer;
1489 break;
1490 case SCTP_TIMER_TYPE_NEWCOOKIE:
1491 /* nothing needed but the endpoint here */
1492 tmr = &inp->sctp_ep.signature_change;
1493 /* We re-use the newcookie timer for
1494 * the INP kill timer. We must assure
1495 * that we do not kill it by accident.
1496 */
1497 break;
1498 case SCTP_TIMER_TYPE_INPKILL:
1499 /*
1500 * The inp is setup to die. We re-use the
1501 * signature_chage timer since that has
1502 * stopped and we are in the GONE state.
1503 */
1504 tmr = &inp->sctp_ep.signature_change;
1505 break;
1506 case SCTP_TIMER_TYPE_PATHMTURAISE:
1507 if (stcb == NULL) {
1508 return (EFAULT);
1509 }
1510 if (net == NULL) {
1511 return (EFAULT);
1512 }
1513 tmr = &net->pmtu_timer;
1514 break;
1515 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1516 if ((stcb == NULL) || (net == NULL)) {
1517 return (EFAULT);
1518 }
1519 tmr = &net->rxt_timer;
1520 break;
1521 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1522 if (stcb == NULL) {
1523 return (EFAULT);
1524 }
1525 tmr = &stcb->asoc.shut_guard_timer;
1526 break;
1527 case SCTP_TIMER_TYPE_STRRESET:
1528 if (stcb == NULL) {
1529 return (EFAULT);
1530 }
1531 tmr = &stcb->asoc.strreset_timer;
1532 break;
1533 case SCTP_TIMER_TYPE_ASCONF:
1534 if (stcb == NULL) {
1535 return (EFAULT);
1536 }
1537 tmr = &stcb->asoc.asconf_timer;
1538 break;
1539 case SCTP_TIMER_TYPE_AUTOCLOSE:
1540 if (stcb == NULL) {
1541 return (EFAULT);
1542 }
1543 tmr = &stcb->asoc.autoclose_timer;
1544 break;
1545 default:
1546 #ifdef SCTP_DEBUG
1547 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1548 printf("sctp_timer_stop:Unknown timer type %d\n",
1549 t_type);
1550 }
1551 #endif /* SCTP_DEBUG */
1552 break;
1553 };
1554 if (tmr == NULL)
1555 return (EFAULT);
1556
1557 if ((tmr->type != t_type) && tmr->type) {
1558 /*
1559 * Ok we have a timer that is under joint use. Cookie timer
1560 * per chance with the SEND timer. We therefore are NOT
1561 * running the timer that the caller wants stopped. So just
1562 * return.
1563 */
1564 return (0);
1565 }
1566 if (t_type == SCTP_TIMER_TYPE_SEND) {
1567 stcb->asoc.num_send_timers_up--;
1568 if (stcb->asoc.num_send_timers_up < 0) {
1569 stcb->asoc.num_send_timers_up = 0;
1570 }
1571 }
1572 callout_stop(&tmr->timer);
1573 return (0);
1574 }
1575
1576 #ifdef SCTP_USE_ADLER32
1577 static uint32_t
1578 update_adler32(uint32_t adler, uint8_t *buf, int32_t len)
1579 {
1580 u_int32_t s1 = adler & 0xffff;
1581 u_int32_t s2 = (adler >> 16) & 0xffff;
1582 int n;
1583
1584 for (n = 0; n < len; n++, buf++) {
1585 /* s1 = (s1 + buf[n]) % BASE */
1586 /* first we add */
1587 s1 = (s1 + *buf);
1588 /*
1589 * now if we need to, we do a mod by subtracting. It seems
1590 * a bit faster since I really will only ever do one subtract
1591 * at the MOST, since buf[n] is a max of 255.
1592 */
1593 if (s1 >= SCTP_ADLER32_BASE) {
1594 s1 -= SCTP_ADLER32_BASE;
1595 }
1596 /* s2 = (s2 + s1) % BASE */
1597 /* first we add */
1598 s2 = (s2 + s1);
1599 /*
1600 * again, it is more efficent (it seems) to subtract since
1601 * the most s2 will ever be is (BASE-1 + BASE-1) in the worse
1602 * case. This would then be (2 * BASE) - 2, which will still
1603 * only do one subtract. On Intel this is much better to do
1604 * this way and avoid the divide. Have not -pg'd on sparc.
1605 */
1606 if (s2 >= SCTP_ADLER32_BASE) {
1607 s2 -= SCTP_ADLER32_BASE;
1608 }
1609 }
1610 /* Return the adler32 of the bytes buf[0..len-1] */
1611 return ((s2 << 16) + s1);
1612 }
1613
1614 #endif
1615
1616
1617 u_int32_t
1618 sctp_calculate_len(struct mbuf *m)
1619 {
1620 u_int32_t tlen=0;
1621 struct mbuf *at;
1622 at = m;
1623 while (at) {
1624 tlen += at->m_len;
1625 at = at->m_next;
1626 }
1627 return (tlen);
1628 }
1629
1630 #if defined(SCTP_WITH_NO_CSUM)
1631
1632 uint32_t
1633 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1634 {
1635 /*
1636 * given a mbuf chain with a packetheader offset by 'offset'
1637 * pointing at a sctphdr (with csum set to 0) go through
1638 * the chain of m_next's and calculate the SCTP checksum.
1639 * This is currently Adler32 but will change to CRC32x
1640 * soon. Also has a side bonus calculate the total length
1641 * of the mbuf chain.
1642 * Note: if offset is greater than the total mbuf length,
1643 * checksum=1, pktlen=0 is returned (ie. no real error code)
1644 */
1645 if (pktlen == NULL)
1646 return (0);
1647 *pktlen = sctp_calculate_len(m);
1648 return (0);
1649 }
1650
1651 #elif defined(SCTP_USE_INCHKSUM)
1652
1653 #include <machine/in_cksum.h>
1654
1655 uint32_t
1656 sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
1657 {
1658 /*
1659 * given a mbuf chain with a packetheader offset by 'offset'
1660 * pointing at a sctphdr (with csum set to 0) go through
1661 * the chain of m_next's and calculate the SCTP checksum.
1662 * This is currently Adler32 but will change to CRC32x
1663 * soon. Also has a side bonus calculate the total length
1664 * of the mbuf chain.
1665 * Note: if offset is greater than the total mbuf length,
1666 * checksum=1, pktlen=0 is returned (ie. no real error code)
1667 */
1668 int32_t tlen=0;
1669 struct mbuf *at;
1670 uint32_t the_sum, retsum;
1671
1672 at = m;
1673 while (at) {
1674 tlen += at->m_len;
1675 at = at->m_next;
1676 }
1677 the_sum = (uint32_t)(in_cksum_skip(m, tlen, offset));
1678 if (pktlen != NULL)
1679 *pktlen = (tlen-offset);
1680 retsum = htons(the_sum);
1681 return (the_sum);
1682 }
1683
1684 #else
1685
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t *pktlen, uint32_t offset)
{
	/*
	 * Default variant: walk the mbuf chain starting 'offset' bytes
	 * in and fold every byte into either an Adler-32 or a CRC-32c
	 * accumulator, depending on SCTP_USE_ADLER32.  Also reports the
	 * number of checksummed bytes through *pktlen when non-NULL.
	 *
	 * If 'offset' exceeds the total chain length the skip loop runs
	 * off the end and the finalized initial accumulator is returned
	 * with *pktlen = 0 (no explicit error code).
	 */
	int32_t tlen=0;
#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;		/* Adler-32 seed */
#else
	uint32_t base = 0xffffffff;	/* CRC-32c seed (all ones) */
#endif /* SCTP_USE_ADLER32 */
	struct mbuf *at;
	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t)at->m_len)) {
		offset -= at->m_len;	/* update remaining offset left */
		at = at->m_next;
	}

	/* accumulate the checksum mbuf by mbuf */
	while (at != NULL) {
#ifdef SCTP_USE_ADLER32
		base = update_adler32(base, at->m_data + offset,
		    at->m_len - offset);
#else
		base = update_crc32(base, at->m_data + offset,
		    at->m_len - offset);
#endif /* SCTP_USE_ADLER32 */
		tlen += at->m_len - offset;
		/* we only offset once into the first mbuf */
		if (offset) {
			offset = 0;
		}
		at = at->m_next;
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
	/* finalize into wire format */
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
1740
1741
1742 #endif
1743
1744 void
1745 sctp_mtu_size_reset(struct sctp_inpcb *inp,
1746 struct sctp_association *asoc, u_long mtu)
1747 {
1748 /*
1749 * Reset the P-MTU size on this association, this involves changing
1750 * the asoc MTU, going through ANY chunk+overhead larger than mtu
1751 * to allow the DF flag to be cleared.
1752 */
1753 struct sctp_tmit_chunk *chk;
1754 struct sctp_stream_out *strm;
1755 unsigned int eff_mtu, ovh;
1756 asoc->smallest_mtu = mtu;
1757 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1758 ovh = SCTP_MIN_OVERHEAD;
1759 } else {
1760 ovh = SCTP_MIN_V4_OVERHEAD;
1761 }
1762 eff_mtu = mtu - ovh;
1763 /* Now mark any chunks that need to let IP fragment */
1764 TAILQ_FOREACH(strm, &asoc->out_wheel, next_spoke) {
1765 TAILQ_FOREACH(chk, &strm->outqueue, sctp_next) {
1766 if (chk->send_size > eff_mtu) {
1767 chk->flags &= SCTP_DONT_FRAGMENT;
1768 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1769 }
1770 }
1771 }
1772 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
1773 if (chk->send_size > eff_mtu) {
1774 chk->flags &= SCTP_DONT_FRAGMENT;
1775 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1776 }
1777 }
1778 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
1779 if (chk->send_size > eff_mtu) {
1780 chk->flags &= SCTP_DONT_FRAGMENT;
1781 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
1782 }
1783 }
1784 }
1785
1786
1787 /*
1788 * given an association and starting time of the current RTT period
1789 * return RTO in number of usecs
1790 * net should point to the current network
1791 */
/*
 * Compute a new RTO for 'net' from the RTT sample started at 'old'.
 * Takes one RTT measurement (now - *old, in ms), folds it into the
 * smoothed RTT (lastsa) and RTT variance (lastsv) using Van Jacobson's
 * integer algorithm, flags satellite-latency networks, and returns the
 * new RTO clamped to [asoc.minrto, asoc.maxrto].
 *
 * On garbage input (clock went backwards, zero elapsed time) the
 * current RTO estimate is returned unchanged.
 */
u_int32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old)
{
	/*
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of usecs.
	 */
	int calc_time = 0;		/* measured RTT in ms */
	unsigned int new_rto = 0;
	int first_measure = 0;
	struct timeval now;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value in whole milliseconds */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec)/1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec/1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec)/1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec)/1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			return (((net->lastsa >> 2) + net->lastsv) >> 1);
		} else {
			/* impossible .. garbage in nothing out */
			return (((net->lastsa >> 2) + net->lastsv) >> 1);
		}
	} else {
		/* Clock wrapped? */
		return (((net->lastsa >> 2) + net->lastsv) >> 1);
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
#if 0
	/* if (net->lastsv || net->lastsa) {*/
	/* per Section 5.3.1 C3 in SCTP */
	/* net->lastsv = (int) *//* RTTVAR */
	/*	(((double)(1.0 - 0.25) * (double)net->lastsv) +
	(double)(0.25 * (double)abs(net->lastsa - calc_time)));
	net->lastsa = (int) */	/* SRTT */
	/*(((double)(1.0 - 0.125) * (double)net->lastsa) +
	(double)(0.125 * (double)calc_time));
	} else {
	*//* the first RTT calculation, per C2 Section 5.3.1 */
	/* net->lastsa = calc_time;	*//* SRTT */
	/* net->lastsv = calc_time / 2;	*//* RTTVAR */
	/* }*/
	/* if RTTVAR goes to 0 you set to clock grainularity */
	/* if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = net->lastsa + 4 * net->lastsv;
	*/
#endif
	/*
	 * this is Van Jacobson's integer version: lastsa holds 8*SRTT
	 * and lastsv holds 4*RTTVAR, updated with shift arithmetic so
	 * no floating point is needed.
	 */
	if (net->RTO) {
		calc_time -= (net->lastsa >> 3);
		net->lastsa += calc_time;
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> 2);
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* variance must never reach 0; use clock granularity */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurement: SRTT = R, RTTVAR = R/2 (C2, 5.3.1) */
		net->lastsa = calc_time;
		net->lastsv = calc_time >> 1;
		first_measure = 1;
	}
	/* RTO = (SRTT + 4*RTTVAR) / 2 in the scaled representation */
	new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* once latency drops, lock out further satellite flagging */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTT Smoothed */
	return ((u_int32_t)new_rto);
}
1901
1902
1903 /*
1904 * return a pointer to a contiguous piece of data from the given
1905 * mbuf chain starting at 'off' for 'len' bytes. If the desired
1906 * piece spans more than one mbuf, a copy is made at 'ptr'.
1907 * caller must ensure that the buffer size is >= 'len'
 * returns NULL if there aren't 'len' bytes in the chain.
1909 */
1910 void *
1911 sctp_m_getptr(struct mbuf *m, int off, int len, u_int8_t *in_ptr)
1912 {
1913 uint32_t count;
1914 uint8_t *ptr;
1915 ptr = in_ptr;
1916 if ((off < 0) || (len <= 0))
1917 return (NULL);
1918
1919 /* find the desired start location */
1920 while ((m != NULL) && (off > 0)) {
1921 if (off < m->m_len)
1922 break;
1923 off -= m->m_len;
1924 m = m->m_next;
1925 }
1926 if (m == NULL)
1927 return (NULL);
1928
1929 /* is the current mbuf large enough (eg. contiguous)? */
1930 if ((m->m_len - off) >= len) {
1931 return ((void *)(mtod(m, vaddr_t) + off));
1932 } else {
1933 /* else, it spans more than one mbuf, so save a temp copy... */
1934 while ((m != NULL) && (len > 0)) {
1935 count = min(m->m_len - off, len);
1936 memcpy(ptr, (void *)(mtod(m, vaddr_t) + off), count);
1937 len -= count;
1938 ptr += count;
1939 off = 0;
1940 m = m->m_next;
1941 }
1942 if ((m == NULL) && (len > 0))
1943 return (NULL);
1944 else
1945 return ((void *)in_ptr);
1946 }
1947 }
1948
1949
1950 struct sctp_paramhdr *
1951 sctp_get_next_param(struct mbuf *m,
1952 int offset,
1953 struct sctp_paramhdr *pull,
1954 int pull_limit)
1955 {
1956 /* This just provides a typed signature to Peter's Pull routine */
1957 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
1958 (u_int8_t *)pull));
1959 }
1960
1961
1962 int
1963 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
1964 {
1965 /*
1966 * add padlen bytes of 0 filled padding to the end of the mbuf.
1967 * If padlen is > 3 this routine will fail.
1968 */
1969 u_int8_t *dp;
1970 int i;
1971 if (padlen > 3) {
1972 return (ENOBUFS);
1973 }
1974 if (M_TRAILINGSPACE(m)) {
1975 /*
1976 * The easy way.
1977 * We hope the majority of the time we hit here :)
1978 */
1979 dp = (u_int8_t *)(mtod(m, vaddr_t) + m->m_len);
1980 m->m_len += padlen;
1981 } else {
1982 /* Hard way we must grow the mbuf */
1983 struct mbuf *tmp;
1984 MGET(tmp, M_DONTWAIT, MT_DATA);
1985 if (tmp == NULL) {
1986 /* Out of space GAK! we are in big trouble. */
1987 return (ENOSPC);
1988 }
1989 /* setup and insert in middle */
1990 tmp->m_next = m->m_next;
1991 tmp->m_len = padlen;
1992 m->m_next = tmp;
1993 dp = mtod(tmp, u_int8_t *);
1994 }
1995 /* zero out the pad */
1996 for (i= 0; i < padlen; i++) {
1997 *dp = 0;
1998 dp++;
1999 }
2000 return (0);
2001 }
2002
2003 int
2004 sctp_pad_lastmbuf(struct mbuf *m, int padval)
2005 {
2006 /* find the last mbuf in chain and pad it */
2007 struct mbuf *m_at;
2008 m_at = m;
2009 while (m_at) {
2010 if (m_at->m_next == NULL) {
2011 return (sctp_add_pad_tombuf(m_at, padval));
2012 }
2013 m_at = m_at->m_next;
2014 }
2015 return (EFAULT);
2016 }
2017
/*
 * Queue an SCTP_ASSOC_CHANGE notification (new state 'event', cause
 * code 'error') onto the owning socket's receive buffer, if the
 * application enabled association-change events.  On shutdown/loss it
 * first flushes deliverable data, and for TCP-model sockets an ABORT
 * also raises ECONNRESET and wakes any sleepers.
 *
 * Called with the TCB lock held; it briefly drops and retakes it
 * around acquiring the INP write lock (see below).
 */
static void
sctp_notify_assoc_change(u_int32_t event, struct sctp_tcb *stcb,
    u_int32_t error)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	const struct sockaddr *to;
	struct sockaddr_in6 sin6, lsa6;

#ifdef SCTP_DEBUG
	/*
	 * NOTE(review): printed unconditionally under SCTP_DEBUG,
	 * without the sctp_debug_on gate used elsewhere in this file.
	 */
	printf("notify: %d\n", event);
#endif
	/*
	 * First if we are going down dump everything we
	 * can to the socket rcv queue.
	 */
	if ((event == SCTP_SHUTDOWN_COMP) || (event == SCTP_COMM_LOST)) {
		sctp_deliver_data(stcb, &stcb->asoc, NULL, 0);
	}

	/*
	 * For TCP model AND UDP connected sockets we will send
	 * an error up when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (event == SCTP_COMM_LOST)) {
		stcb->sctp_socket->so_error = ECONNRESET;
		/* Wake ANY sleepers */
		sowwakeup(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
	}
#if 0
	if ((event == SCTP_COMM_UP) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
		soisconnected(stcb->sctp_socket);
	}
#endif
	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	/* build the notification in a fresh mbuf */
	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_assoc_change);
	m_notify->m_pkthdr.rcvif = 0;
	m_notify->m_len = sizeof(struct sctp_assoc_change);
	m_notify->m_next = NULL;

	/* append to socket, addressed from the primary destination */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		const struct sockaddr_in *sin;

		sin = (const struct sockaddr_in *)to;
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);
	/*
	 * We need to always notify comm changes.
	 * if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
	 *	sctp_m_freem(m_notify);
	 *	return;
	 * }
	 */
	/*
	 * Lock-order dance: the INP write lock must be taken before the
	 * TCB lock, so drop the TCB lock, grab the INP lock, retake.
	 */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
	    to, m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* account for the notification mbuf against our rwnd */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	/* Wake up any sleeper */
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2127
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for peer address 'sa'
 * (new state 'state', cause code 'error') onto the owning socket's
 * receive buffer, if the application enabled peer-address events.
 *
 * Called with the TCB lock held; briefly drops/retakes it around the
 * INP write lock, as in sctp_notify_assoc_change().
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    const struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	const struct sockaddr *to;
	struct sockaddr_in6 sin6, lsa6;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		return;
	m_notify->m_len = 0;

	/* need a cluster: sctp_paddr_change embeds a sockaddr_storage */
	MCLGET(m_notify, M_DONTWAIT);
	if ((m_notify->m_flags & M_EXT) != M_EXT) {
		sctp_m_freem(m_notify);
		return;
	}

	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	if (sa->sa_family == AF_INET) {
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
	} else {
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_paddr_change);
	m_notify->m_pkthdr.rcvif = 0;
	m_notify->m_len = sizeof(struct sctp_paddr_change);
	m_notify->m_next = NULL;

	/* address the notification from the primary destination */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		const struct sockaddr_in *sin;

		sin = (const struct sockaddr_in *)to;
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);

	/* unlike assoc-change, this event may be dropped when full */
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket (lock-order dance: INP before TCB) */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* account for the notification mbuf against our rwnd */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2210
2211
/*
 * Build and deliver an SCTP_SEND_FAILED notification for chunk 'chk'
 * that could not be (fully) delivered.  'error' is either
 * SCTP_NOTIFY_DATAGRAM_UNSENT or SCTP_NOTIFY_DATAGRAM_SENT and selects
 * the SCTP_DATA_UNSENT/SCTP_DATA_SENT flag.  The chunk's data mbufs
 * are stolen (chk->data is cleared) and chained onto the notification
 * so the user gets the undelivered payload back.
 *
 * Locking: entered with the TCB lock held; dropped and re-taken around
 * the INP write lock for the socket-buffer append.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, u_int32_t error,
    struct sctp_tmit_chunk *chk)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;
	int length;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
		/* event not enabled */
		return;

	/* reported length covers the header plus the returned payload */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	/*
	 * NOTE(review): the header record is written into a plain
	 * header mbuf — assumes sizeof(struct sctp_send_failed) fits
	 * in MHLEN; confirm.
	 */
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the (possibly NULL) payload after the header record */
	m_notify->m_next = chk->data;
	if (m_notify->m_next == NULL)
		m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	else {
		struct mbuf *m;
		m_notify->m_flags |= M_NOTIFICATION;
		m = m_notify;
		/* M_EOR must sit on the last mbuf of the record */
		while (m->m_next != NULL)
			m = m->m_next;
		m->m_flags |= M_EOR;
	}
	m_notify->m_pkthdr.len = length;
	m_notify->m_pkthdr.rcvif = 0;
	m_notify->m_len = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);

	/*
	 * NOTE(review): this space check only accounts for the header
	 * mbuf (m_len), not the chained payload — confirm whether that
	 * is intentional best-effort behavior.
	 */
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}

	/* append to socket */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* UDP-model sockets: queue the TCB and charge the rwnd control */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2305
/*
 * Build and deliver an SCTP_ADAPTION_INDICATION notification.  Here
 * the 'error' argument carries the peer's adaption-layer indication
 * value, not an error code.  Dropped silently if the application did
 * not enable SCTP_PCB_FLAGS_ADAPTIONEVNT or resources are short.
 *
 * Locking: entered with the TCB lock held; dropped and re-taken around
 * the INP write lock for the socket-buffer append.
 */
static void
sctp_notify_adaption_layer(struct sctp_tcb *stcb,
    u_int32_t error)
{
	struct mbuf *m_notify;
	struct sctp_adaption_event *sai;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	/* record fits in a plain header mbuf — no cluster needed */
	sai = mtod(m_notify, struct sctp_adaption_event *);
	sai->sai_type = SCTP_ADAPTION_INDICATION;
	sai->sai_flags = 0;
	sai->sai_length = sizeof(struct sctp_adaption_event);
	sai->sai_adaption_ind = error;
	sai->sai_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_adaption_event);
	m_notify->m_pkthdr.rcvif = 0;
	m_notify->m_len = sizeof(struct sctp_adaption_event);
	m_notify->m_next = NULL;

	/* tag the record with the primary destination address */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    (to->sa_family == AF_INET)) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* UDP-model sockets: queue the TCB and charge the rwnd control */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2375
/*
 * Build and deliver an SCTP_PARTIAL_DELIVERY_EVENT notification;
 * 'error' carries the partial-delivery indication value.  Dropped
 * silently if the application did not enable SCTP_PCB_FLAGS_PDAPIEVNT
 * or resources are short.
 *
 * Locking: entered with the TCB lock held; dropped and re-taken around
 * the INP write lock for the socket-buffer append.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
    u_int32_t error)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	/* record fits in a plain header mbuf — no cluster needed */
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_pdapi_event);
	m_notify->m_pkthdr.rcvif = 0;
	m_notify->m_len = sizeof(struct sctp_pdapi_event);
	m_notify->m_next = NULL;

	/* tag the record with the primary destination address */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    (to->sa_family == AF_INET)) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* UDP-model sockets: queue the TCB and charge the rwnd control */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2445
/*
 * Handle a completed SHUTDOWN from the peer: for TCP-model (or
 * TCP-pool) sockets, mark the socket unreadable/unwritable and wake
 * sleepers, then — independently, and only if the application enabled
 * SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT — build and deliver an
 * SCTP_SHUTDOWN_EVENT notification.
 *
 * Locking: entered with the TCB lock held; dropped and re-taken around
 * the INP write lock for the socket-buffer append.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sockaddr_in6 sin6, lsa6;
	const struct sockaddr *to;

	/*
	 * For TCP model AND UDP connected sockets we will send
	 * an error up when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
		socantrcvmore(stcb->sctp_socket);
		socantsendmore(stcb->sctp_socket);
	}

	if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	m_notify->m_len = 0;
	/* record fits in a plain header mbuf — no cluster needed */
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	m_notify->m_flags |= M_EOR | M_NOTIFICATION;
	m_notify->m_pkthdr.len = sizeof(struct sctp_shutdown_event);
	m_notify->m_pkthdr.rcvif = 0;
	m_notify->m_len = sizeof(struct sctp_shutdown_event);
	m_notify->m_next = NULL;

	/* tag the record with the primary destination address */
	to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
	    to->sa_family == AF_INET) {
		const struct sockaddr_in *sin;

		sin = satocsin(to);
		in6_sin_2_v4mapsin6(sin, &sin6);
		to = (struct sockaddr *)&sin6;
	}
	/* check and strip embedded scope junk */
	to = (const struct sockaddr *)sctp_recover_scope((const struct sockaddr_in6 *)to,
	    &lsa6);
	if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	SCTP_TCB_UNLOCK(stcb);
	SCTP_INP_WLOCK(stcb->sctp_ep);
	SCTP_TCB_LOCK(stcb);
	if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
	    m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
		/* not enough room */
		sctp_m_freem(m_notify);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	/* UDP-model sockets: queue the TCB and charge the rwnd control */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
		if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
	} else {
		stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
	}
	SCTP_INP_WUNLOCK(stcb->sctp_ep);
	sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
2524
2525 static void
2526 sctp_notify_stream_reset(struct sctp_tcb *stcb,
2527 int number_entries, uint16_t *list, int flag)
2528 {
2529 struct mbuf *m_notify;
2530 struct sctp_stream_reset_event *strreset;
2531 struct sockaddr_in6 sin6, lsa6;
2532 const struct sockaddr *to;
2533 int len;
2534
2535 if (!(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2536 /* event not enabled */
2537 return;
2538
2539 MGETHDR(m_notify, M_DONTWAIT, MT_DATA);
2540 if (m_notify == NULL)
2541 /* no space left */
2542 return;
2543 m_notify->m_len = 0;
2544 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
2545 if (len > M_TRAILINGSPACE(m_notify)) {
2546 MCLGET(m_notify, M_WAIT);
2547 }
2548 if (m_notify == NULL)
2549 /* no clusters */
2550 return;
2551
2552 if (len > M_TRAILINGSPACE(m_notify)) {
2553 /* never enough room */
2554 m_freem(m_notify);
2555 return;
2556 }
2557 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
2558 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
2559 if (number_entries == 0) {
2560 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
2561 } else {
2562 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
2563 }
2564 strreset->strreset_length = len;
2565 strreset->strreset_assoc_id = sctp_get_associd(stcb);
2566 if (number_entries) {
2567 int i;
2568 for (i=0; i<number_entries; i++) {
2569 strreset->strreset_list[i] = list[i];
2570 }
2571 }
2572 m_notify->m_flags |= M_EOR | M_NOTIFICATION;
2573 m_notify->m_pkthdr.len = len;
2574 m_notify->m_pkthdr.rcvif = 0;
2575 m_notify->m_len = len;
2576 m_notify->m_next = NULL;
2577 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < m_notify->m_len) {
2578 /* no space */
2579 sctp_m_freem(m_notify);
2580 return;
2581 }
2582 to = rtcache_getdst(&stcb->asoc.primary_destination->ro);
2583 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2584 to->sa_family == AF_INET) {
2585 const struct sockaddr_in *sin;
2586
2587 sin = satocsin(to);
2588 in6_sin_2_v4mapsin6(sin, &sin6);
2589 to = (struct sockaddr *)&sin6;
2590 }
2591 /* check and strip embedded scope junk */
2592 to = (const struct sockaddr *) sctp_recover_scope((const struct sockaddr_in6 *)to,
2593 &lsa6);
2594 /* append to socket */
2595 SCTP_TCB_UNLOCK(stcb);
2596 SCTP_INP_WLOCK(stcb->sctp_ep);
2597 SCTP_TCB_LOCK(stcb);
2598 if (!sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to,
2599 m_notify, NULL, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2600 /* not enough room */
2601 sctp_m_freem(m_notify);
2602 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2603 return;
2604 }
2605 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
2606 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)){
2607 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2608 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2609 }
2610 } else {
2611 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2612 }
2613 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2614 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2615 }
2616
2617
2618 void
2619 sctp_ulp_notify(u_int32_t notification, struct sctp_tcb *stcb,
2620 u_int32_t error, void *data)
2621 {
2622 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2623 /* No notifications up when we are in a no socket state */
2624 return;
2625 }
2626 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2627 /* Can't send up to a closed socket any notifications */
2628 return;
2629 }
2630 switch (notification) {
2631 case SCTP_NOTIFY_ASSOC_UP:
2632 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error);
2633 break;
2634 case SCTP_NOTIFY_ASSOC_DOWN:
2635 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error);
2636 break;
2637 case SCTP_NOTIFY_INTERFACE_DOWN:
2638 {
2639 struct sctp_nets *net;
2640 net = (struct sctp_nets *)data;
2641 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
2642 rtcache_getdst(&net->ro), error);
2643 break;
2644 }
2645 case SCTP_NOTIFY_INTERFACE_UP:
2646 {
2647 struct sctp_nets *net;
2648 net = (struct sctp_nets *)data;
2649 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
2650 rtcache_getdst(&net->ro), error);
2651 break;
2652 }
2653 case SCTP_NOTIFY_INTERFACE_CONFIRMED:
2654 {
2655 struct sctp_nets *net;
2656 net = (struct sctp_nets *)data;
2657 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
2658 rtcache_getdst(&net->ro), error);
2659 break;
2660 }
2661 case SCTP_NOTIFY_DG_FAIL:
2662 sctp_notify_send_failed(stcb, error,
2663 (struct sctp_tmit_chunk *)data);
2664 break;
2665 case SCTP_NOTIFY_ADAPTION_INDICATION:
2666 /* Here the error is the adaption indication */
2667 sctp_notify_adaption_layer(stcb, error);
2668 break;
2669 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
2670 sctp_notify_partial_delivery_indication(stcb, error);
2671 break;
2672 case SCTP_NOTIFY_STRDATA_ERR:
2673 break;
2674 case SCTP_NOTIFY_ASSOC_ABORTED:
2675 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error);
2676 break;
2677 case SCTP_NOTIFY_PEER_OPENED_STREAM:
2678 break;
2679 case SCTP_NOTIFY_STREAM_OPENED_OK:
2680 break;
2681 case SCTP_NOTIFY_ASSOC_RESTART:
2682 sctp_notify_assoc_change(SCTP_RESTART, stcb, error);
2683 break;
2684 case SCTP_NOTIFY_HB_RESP:
2685 break;
2686 case SCTP_NOTIFY_STR_RESET_SEND:
2687 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_OUTBOUND_STR);
2688 break;
2689 case SCTP_NOTIFY_STR_RESET_RECV:
2690 sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STRRESET_INBOUND_STR);
2691 break;
2692 case SCTP_NOTIFY_ASCONF_ADD_IP:
2693 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
2694 error);
2695 break;
2696 case SCTP_NOTIFY_ASCONF_DELETE_IP:
2697 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
2698 error);
2699 break;
2700 case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
2701 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
2702 error);
2703 break;
2704 case SCTP_NOTIFY_ASCONF_SUCCESS:
2705 break;
2706 case SCTP_NOTIFY_ASCONF_FAILED:
2707 break;
2708 case SCTP_NOTIFY_PEER_SHUTDOWN:
2709 sctp_notify_shutdown_event(stcb);
2710 break;
2711 default:
2712 #ifdef SCTP_DEBUG
2713 if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
2714 printf("NOTIFY: unknown notification %xh (%u)\n",
2715 notification, notification);
2716 }
2717 #endif /* SCTP_DEBUG */
2718 break;
2719 } /* end switch */
2720 }
2721
/*
 * Return every queued outbound chunk to the user as an SCTP_NOTIFY_DG_FAIL
 * notification and free the chunk.  Three queues are drained: each
 * stream's outqueue on the out-wheel (reported as DATAGRAM_UNSENT),
 * the association send_queue (DATAGRAM_UNSENT) and the sent_queue
 * (DATAGRAM_SENT).  No-op once the socket is gone.
 *
 * Note: sctp_notify_send_failed steals chk->data into the
 * notification; the "if (chk->data)" frees below only fire when the
 * notification path did not consume the mbufs.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk;

	asoc = &stcb->asoc;

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
		return;
	}
	/* now through all the gunk freeing chunks */
	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
		/* now clean up any chunks here */
		chk = TAILQ_FIRST(&outs->outqueue);
		while (chk) {
			stcb->asoc.stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, chk, sctp_next);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			if (chk->whoTo)
				sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = NULL;
			chk->asoc = NULL;
			/* Free the chunk */
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			/* global chunk accounting must never underflow */
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
			chk = TAILQ_FIRST(&outs->outqueue);
		}
	}
	/* pending send queue SHOULD be empty */
	if (!TAILQ_EMPTY(&asoc->send_queue)) {
		chk = TAILQ_FIRST(&asoc->send_queue);
		while (chk) {
			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			if (chk->whoTo)
				sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = NULL;
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
			chk = TAILQ_FIRST(&asoc->send_queue);
		}
	}
	/* sent queue SHOULD be empty */
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		chk = TAILQ_FIRST(&asoc->sent_queue);
		while (chk) {
			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			if (chk->whoTo)
				sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = NULL;
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
			chk = TAILQ_FIRST(&asoc->sent_queue);
		}
	}
}
2807
2808 void
2809 sctp_abort_notification(struct sctp_tcb *stcb, int error)
2810 {
2811
2812 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2813 return;
2814 }
2815 /* Tell them we lost the asoc */
2816 sctp_report_all_outbound(stcb);
2817 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
2818 }
2819
2820 void
2821 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2822 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
2823 {
2824 u_int32_t vtag;
2825
2826 vtag = 0;
2827 if (stcb != NULL) {
2828 /* We have a TCB to abort, send notification too */
2829 vtag = stcb->asoc.peer_vtag;
2830 sctp_abort_notification(stcb, 0);
2831 }
2832 sctp_send_abort(m, iphlen, sh, vtag, op_err);
2833 if (stcb != NULL) {
2834 /* Ok, now lets free it */
2835 sctp_free_assoc(inp, stcb);
2836 } else {
2837 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2838 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2839 sctp_inpcb_free(inp, 1);
2840 }
2841 }
2842 }
2843 }
2844
2845 void
2846 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2847 int error, struct mbuf *op_err)
2848 {
2849
2850 if (stcb == NULL) {
2851 /* Got to have a TCB */
2852 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2853 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2854 sctp_inpcb_free(inp, 1);
2855 }
2856 }
2857 return;
2858 }
2859 /* notify the ulp */
2860 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
2861 sctp_abort_notification(stcb, error);
2862 /* notify the peer */
2863 sctp_send_abort_tcb(stcb, op_err);
2864 /* now free the asoc */
2865 sctp_free_assoc(inp, stcb);
2866 }
2867
2868 void
2869 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
2870 struct sctp_inpcb *inp, struct mbuf *op_err)
2871 {
2872 struct sctp_chunkhdr *ch, chunk_buf;
2873 unsigned int chk_length;
2874
2875 /* Generate a TO address for future reference */
2876 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
2877 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
2878 sctp_inpcb_free(inp, 1);
2879 }
2880 }
2881 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2882 sizeof(*ch), (u_int8_t *)&chunk_buf);
2883 while (ch != NULL) {
2884 chk_length = ntohs(ch->chunk_length);
2885 if (chk_length < sizeof(*ch)) {
2886 /* break to abort land */
2887 break;
2888 }
2889 switch (ch->chunk_type) {
2890 case SCTP_PACKET_DROPPED:
2891 /* we don't respond to pkt-dropped */
2892 return;
2893 case SCTP_ABORT_ASSOCIATION:
2894 /* we don't respond with an ABORT to an ABORT */
2895 return;
2896 case SCTP_SHUTDOWN_COMPLETE:
2897 /*
2898 * we ignore it since we are not waiting for it
2899 * and peer is gone
2900 */
2901 return;
2902 case SCTP_SHUTDOWN_ACK:
2903 sctp_send_shutdown_complete2(m, iphlen, sh);
2904 return;
2905 default:
2906 break;
2907 }
2908 offset += SCTP_SIZE32(chk_length);
2909 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2910 sizeof(*ch), (u_int8_t *)&chunk_buf);
2911 }
2912 sctp_send_abort(m, iphlen, sh, 0, op_err);
2913 }
2914
2915 /*
2916 * check the inbound datagram to make sure there is not an abort
2917 * inside it, if there is return 1, else return 0.
2918 */
2919 int
2920 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, int *vtagfill)
2921 {
2922 struct sctp_chunkhdr *ch;
2923 struct sctp_init_chunk *init_chk, chunk_buf;
2924 int offset;
2925 unsigned int chk_length;
2926
2927 offset = iphlen + sizeof(struct sctphdr);
2928 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
2929 (u_int8_t *)&chunk_buf);
2930 while (ch != NULL) {
2931 chk_length = ntohs(ch->chunk_length);
2932 if (chk_length < sizeof(*ch)) {
2933 /* packet is probably corrupt */
2934 break;
2935 }
2936 /* we seem to be ok, is it an abort? */
2937 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
2938 /* yep, tell them */
2939 return (1);
2940 }
2941 if (ch->chunk_type == SCTP_INITIATION) {
2942 /* need to update the Vtag */
2943 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
2944 offset, sizeof(*init_chk), (u_int8_t *)&chunk_buf);
2945 if (init_chk != NULL) {
2946 *vtagfill = ntohl(init_chk->init.initiate_tag);
2947 }
2948 }
2949 /* Nope, move to the next chunk */
2950 offset += SCTP_SIZE32(chk_length);
2951 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
2952 sizeof(*ch), (u_int8_t *)&chunk_buf);
2953 }
2954 return (0);
2955 }
2956
2957 /*
2958 * currently (2/02), ifa_addr embeds scope_id's and don't
2959 * have sin6_scope_id set (i.e. it's 0)
2960 * so, create this function to compare link local scopes
2961 */
2962 uint32_t
2963 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
2964 {
2965 struct sockaddr_in6 a, b;
2966
2967 /* save copies */
2968 a = *addr1;
2969 b = *addr2;
2970
2971 if (a.sin6_scope_id == 0)
2972 if (sa6_recoverscope(&a)) {
2973 /* can't get scope, so can't match */
2974 return (0);
2975 }
2976 if (b.sin6_scope_id == 0)
2977 if (sa6_recoverscope(&b)) {
2978 /* can't get scope, so can't match */
2979 return (0);
2980 }
2981 if (a.sin6_scope_id != b.sin6_scope_id)
2982 return (0);
2983
2984 return (1);
2985 }
2986
2987 /*
2988 * returns a sockaddr_in6 with embedded scope recovered and removed
2989 */
2990 const struct sockaddr_in6 *
2991 sctp_recover_scope(const struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
2992 {
2993 const struct sockaddr_in6 *newaddr;
2994
2995 newaddr = addr;
2996 /* check and strip embedded scope junk */
2997 if (addr->sin6_family == AF_INET6) {
2998 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
2999 if (addr->sin6_scope_id == 0) {
3000 *store = *addr;
3001 if (sa6_recoverscope(store) == 0) {
3002 /* use the recovered scope */
3003 newaddr = store;
3004 }
3005 /* else, return the original "to" addr */
3006 }
3007 }
3008 }
3009 return (newaddr);
3010 }
3011
3012 /*
3013 * are the two addresses the same? currently a "scopeless" check
3014 * returns: 1 if same, 0 if not
3015 */
3016 int
3017 sctp_cmpaddr(const struct sockaddr *sa1, const struct sockaddr *sa2)
3018 {
3019
3020 /* must be valid */
3021 if (sa1 == NULL || sa2 == NULL)
3022 return (0);
3023
3024 /* must be the same family */
3025 if (sa1->sa_family != sa2->sa_family)
3026 return (0);
3027
3028 if (sa1->sa_family == AF_INET6) {
3029 /* IPv6 addresses */
3030 const struct sockaddr_in6 *sin6_1, *sin6_2;
3031
3032 sin6_1 = (const struct sockaddr_in6 *)sa1;
3033 sin6_2 = (const struct sockaddr_in6 *)sa2;
3034 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
3035 &sin6_2->sin6_addr));
3036 } else if (sa1->sa_family == AF_INET) {
3037 /* IPv4 addresses */
3038 const struct sockaddr_in *sin_1, *sin_2;
3039
3040 sin_1 = (const struct sockaddr_in *)sa1;
3041 sin_2 = (const struct sockaddr_in *)sa2;
3042 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3043 } else {
3044 /* we don't do these... */
3045 return (0);
3046 }
3047 }
3048
3049 void
3050 sctp_print_address(const struct sockaddr *sa)
3051 {
3052
3053 if (sa->sa_family == AF_INET6) {
3054 const struct sockaddr_in6 *sin6;
3055 sin6 = (const struct sockaddr_in6 *)sa;
3056 printf("IPv6 address: %s:%d scope:%u\n",
3057 ip6_sprintf(&sin6->sin6_addr), ntohs(sin6->sin6_port),
3058 sin6->sin6_scope_id);
3059 } else if (sa->sa_family == AF_INET) {
3060 const struct sockaddr_in *sin;
3061 sin = (const struct sockaddr_in *)sa;
3062 printf("IPv4 address: %s:%d\n", inet_ntoa(sin->sin_addr),
3063 ntohs(sin->sin_port));
3064 } else {
3065 printf("?\n");
3066 }
3067 }
3068
3069 void
3070 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
3071 {
3072 if (iph->ip_v == IPVERSION) {
3073 struct sockaddr_in lsa, fsa;
3074
3075 memset(&lsa, 0, sizeof(lsa));
3076 lsa.sin_len = sizeof(lsa);
3077 lsa.sin_family = AF_INET;
3078 lsa.sin_addr = iph->ip_src;
3079 lsa.sin_port = sh->src_port;
3080 memset(&fsa, 0, sizeof(fsa));
3081 fsa.sin_len = sizeof(fsa);
3082 fsa.sin_family = AF_INET;
3083 fsa.sin_addr = iph->ip_dst;
3084 fsa.sin_port = sh->dest_port;
3085 printf("src: ");
3086 sctp_print_address((struct sockaddr *)&lsa);
3087 printf("dest: ");
3088 sctp_print_address((struct sockaddr *)&fsa);
3089 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3090 struct ip6_hdr *ip6;
3091 struct sockaddr_in6 lsa6, fsa6;
3092
3093 ip6 = (struct ip6_hdr *)iph;
3094 memset(&lsa6, 0, sizeof(lsa6));
3095 lsa6.sin6_len = sizeof(lsa6);
3096 lsa6.sin6_family = AF_INET6;
3097 lsa6.sin6_addr = ip6->ip6_src;
3098 lsa6.sin6_port = sh->src_port;
3099 memset(&fsa6, 0, sizeof(fsa6));
3100 fsa6.sin6_len = sizeof(fsa6);
3101 fsa6.sin6_family = AF_INET6;
3102 fsa6.sin6_addr = ip6->ip6_dst;
3103 fsa6.sin6_port = sh->dest_port;
3104 printf("src: ");
3105 sctp_print_address((struct sockaddr *)&lsa6);
3106 printf("dest: ");
3107 sctp_print_address((struct sockaddr *)&fsa6);
3108 }
3109 }
3110
#if defined(__FreeBSD__) || defined(__APPLE__)

/* cloned from uipc_socket.c */

/*
 * Link record m0 onto socket buffer sb's record chain: append after
 * sb_lastrecord (or start the chain at sb_mb when empty) and make m0
 * the new last record.
 */
#define SCTP_SBLINKRECORD(sb, m0) do { \
	if ((sb)->sb_lastrecord != NULL) \
		(sb)->sb_lastrecord->m_nextpkt = (m0); \
	else \
		(sb)->sb_mb = (m0); \
	(sb)->sb_lastrecord = (m0); \
} while (/*CONSTCOND*/0)
#endif
3123
3124
3125 int
3126 sbappendaddr_nocheck(struct sockbuf *sb, const struct sockaddr *asa,
3127 struct mbuf *m0, struct mbuf *control,
3128 u_int32_t tag, struct sctp_inpcb *inp)
3129 {
3130 #ifdef __NetBSD__
3131 struct mbuf *m, *n;
3132
3133 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3134 panic("sbappendaddr_nocheck");
3135
3136 m0->m_pkthdr.csum_data = (int)tag;
3137
3138 for (n = control; n; n = n->m_next) {
3139 if (n->m_next == 0) /* keep pointer to last control buf */
3140 break;
3141 }
3142 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3143 ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3144 MGETHDR(m, M_DONTWAIT, MT_SONAME);
3145 if (m == 0)
3146 return (0);
3147
3148 m->m_len = asa->sa_len;
3149 memcpy(mtod(m, void *), (const void *)asa, asa->sa_len);
3150 } else {
3151 m = NULL;
3152 }
3153 if (n) {
3154 n->m_next = m0; /* concatenate data to control */
3155 }else {
3156 control = m0;
3157 }
3158 if (m)
3159 m->m_next = control;
3160 else
3161 m = control;
3162 m->m_pkthdr.csum_data = tag;
3163
3164 for (n = m; n; n = n->m_next)
3165 sballoc(sb, n);
3166 if ((n = sb->sb_mb) != NULL) {
3167 if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3168 inp->sb_last_mpkt = NULL;
3169 }
3170 if (inp->sb_last_mpkt)
3171 inp->sb_last_mpkt->m_nextpkt = m;
3172 else {
3173 while (n->m_nextpkt) {
3174 n = n->m_nextpkt;
3175 }
3176 n->m_nextpkt = m;
3177 }
3178 inp->sb_last_mpkt = m;
3179 } else {
3180 inp->sb_last_mpkt = sb->sb_mb = m;
3181 inp->sctp_vtag_first = tag;
3182 }
3183 return (1);
3184 #endif
3185 #if defined(__FreeBSD__) || defined(__APPLE__)
3186 struct mbuf *m, *n, *nlast;
3187 int cnt=0;
3188
3189 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3190 panic("sbappendaddr_nocheck");
3191
3192 for (n = control; n; n = n->m_next) {
3193 if (n->m_next == 0) /* get pointer to last control buf */
3194 break;
3195 }
3196 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3197 ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3198 if (asa->sa_len > MHLEN)
3199 return (0);
3200 try_again:
3201 MGETHDR(m, M_DONTWAIT, MT_SONAME);
3202 if (m == 0)
3203 return (0);
3204 m->m_len = 0;
3205 /* safety */
3206 if (m == m0) {
3207 printf("Duplicate mbuf allocated %p in and mget returned %p?\n",
3208 m0, m);
3209 if (cnt) {
3210 panic("more than once");
3211 }
3212 cnt++;
3213 goto try_again;
3214 }
3215 m->m_len = asa->sa_len;
3216 bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3217 }
3218 else {
3219 m = NULL;
3220 }
3221 if (n)
3222 n->m_next = m0; /* concatenate data to control */
3223 else
3224 control = m0;
3225 if (m)
3226 m->m_next = control;
3227 else
3228 m = control;
3229 m->m_pkthdr.csum_data = (int)tag;
3230
3231 for (n = m; n; n = n->m_next)
3232 sballoc(sb, n);
3233 nlast = n;
3234 if (sb->sb_mb == NULL) {
3235 inp->sctp_vtag_first = tag;
3236 }
3237
3238 #ifdef __FREEBSD__
3239 if (sb->sb_mb == NULL)
3240 inp->sctp_vtag_first = tag;
3241 SCTP_SBLINKRECORD(sb, m);
3242 sb->sb_mbtail = nlast;
3243 #else
3244 if ((n = sb->sb_mb) != NULL) {
3245 if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3246 inp->sb_last_mpkt = NULL;
3247 }
3248 if (inp->sb_last_mpkt)
3249 inp->sb_last_mpkt->m_nextpkt = m;
3250 else {
3251 while (n->m_nextpkt) {
3252 n = n->m_nextpkt;
3253 }
3254 n->m_nextpkt = m;
3255 }
3256 inp->sb_last_mpkt = m;
3257 } else {
3258 inp->sb_last_mpkt = sb->sb_mb = m;
3259 inp->sctp_vtag_first = tag;
3260 }
3261 #endif
3262 return (1);
3263 #endif
3264 #ifdef __OpenBSD__
3265 struct mbuf *m, *n;
3266
3267 if (m0 && (m0->m_flags & M_PKTHDR) == 0)
3268 panic("sbappendaddr_nocheck");
3269 m0->m_pkthdr.csum = (int)tag;
3270 for (n = control; n; n = n->m_next) {
3271 if (n->m_next == 0) /* keep pointer to last control buf */
3272 break;
3273 }
3274 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) ||
3275 ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)== 0)) {
3276 if (asa->sa_len > MHLEN)
3277 return (0);
3278 MGETHDR(m, M_DONTWAIT, MT_SONAME);
3279 if (m == 0)
3280 return (0);
3281 m->m_len = asa->sa_len;
3282 bcopy((void *)asa, mtod(m, void *), asa->sa_len);
3283 } else {
3284 m = NULL;
3285 }
3286 if (n)
3287 n->m_next = m0; /* concatenate data to control */
3288 else
3289 control = m0;
3290
3291 m->m_pkthdr.csum = (int)tag;
3292 m->m_next = control;
3293 for (n = m; n; n = n->m_next)
3294 sballoc(sb, n);
3295 if ((n = sb->sb_mb) != NULL) {
3296 if ((n->m_nextpkt != inp->sb_last_mpkt) && (n->m_nextpkt == NULL)) {
3297 inp->sb_last_mpkt = NULL;
3298 }
3299 if (inp->sb_last_mpkt)
3300 inp->sb_last_mpkt->m_nextpkt = m;
3301 else {
3302 while (n->m_nextpkt) {
3303 n = n->m_nextpkt;
3304 }
3305 n->m_nextpkt = m;
3306 }
3307 inp->sb_last_mpkt = m;
3308 } else {
3309 inp->sb_last_mpkt = sb->sb_mb = m;
3310 inp->sctp_vtag_first = tag;
3311 }
3312 return (1);
3313 #endif
3314 }
3315
3316 /*************HOLD THIS COMMENT FOR PATCH FILE OF
3317 *************ALTERNATE ROUTING CODE
3318 */
3319
3320 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
3321 *************ALTERNATE ROUTING CODE
3322 */
3323
3324 struct mbuf *
3325 sctp_generate_invmanparam(int err)
3326 {
3327 /* Return a MBUF with a invalid mandatory parameter */
3328 struct mbuf *m;
3329
3330 MGET(m, M_DONTWAIT, MT_DATA);
3331 if (m) {
3332 struct sctp_paramhdr *ph;
3333 m->m_len = sizeof(struct sctp_paramhdr);
3334 ph = mtod(m, struct sctp_paramhdr *);
3335 ph->param_length = htons(sizeof(struct sctp_paramhdr));
3336 ph->param_type = htons(err);
3337 }
3338 return (m);
3339 }
3340
3341 static int
3342 sctp_should_be_moved(struct mbuf *this, struct sctp_association *asoc)
3343 {
3344 struct mbuf *m;
3345 /*
3346 * given a mbuf chain, look through it finding
3347 * the M_PKTHDR and return 1 if it belongs to
3348 * the association given. We tell this by
3349 * a kludge where we stuff the my_vtag of the asoc
3350 * into the m->m_pkthdr.csum_data/csum field.
3351 */
3352 m = this;
3353 while (m) {
3354 if (m->m_flags & M_PKTHDR) {
3355 /* check it */
3356 #if defined(__OpenBSD__)
3357 if ((u_int32_t)m->m_pkthdr.csum == asoc->my_vtag)
3358 #else
3359 if ((u_int32_t)m->m_pkthdr.csum_data == asoc->my_vtag)
3360 #endif
3361 {
3362 /* Yep */
3363 return (1);
3364 }
3365 }
3366 m = m->m_next;
3367 }
3368 return (0);
3369 }
3370
3371 u_int32_t
3372 sctp_get_first_vtag_from_sb(struct socket *so)
3373 {
3374 struct mbuf *this, *at;
3375 u_int32_t retval;
3376
3377 retval = 0;
3378 if (so->so_rcv.sb_mb) {
3379 /* grubbing time */
3380 this = so->so_rcv.sb_mb;
3381 while (this) {
3382 at = this;
3383 /* get to the m_pkthdr */
3384 while (at) {
3385 if (at->m_flags & M_PKTHDR)
3386 break;
3387 else {
3388 at = at->m_next;
3389 }
3390 }
3391 /* now do we have a m_pkthdr */
3392 if (at && (at->m_flags & M_PKTHDR)) {
3393 /* check it */
3394 #if defined(__OpenBSD__)
3395 if ((u_int32_t)at->m_pkthdr.csum != 0)
3396 #else
3397 if ((u_int32_t)at->m_pkthdr.csum_data != 0)
3398 #endif
3399 {
3400 /* its the one */
3401 #if defined(__OpenBSD__)
3402 retval = (u_int32_t)at->m_pkthdr.csum;
3403 #else
3404 retval =
3405 (u_int32_t)at->m_pkthdr.csum_data;
3406 #endif
3407 break;
3408 }
3409 }
3410 this = this->m_nextpkt;
3411 }
3412
3413 }
3414 return (retval);
3415
3416 }
/*
 * Move every receive-buffer record belonging to stcb's association
 * from socket "old" to socket "new".  Records are matched by the vtag
 * stashed in their packet headers (see sctp_should_be_moved());
 * relative record order is preserved on both sockets, and per-socket
 * space accounting (sbfree/sballoc) is fixed up mbuf by mbuf.
 */
void
sctp_grub_through_socket_buffer(struct sctp_inpcb *inp, struct socket *old,
    struct socket *new, struct sctp_tcb *stcb)
{
	/* put: where the next moved record is linked on the new socket;
	 * take: the link on the old socket through which the current
	 * record is unlinked. */
	struct mbuf **put, **take, *next, *this;
	struct sockbuf *old_sb, *new_sb;
	struct sctp_association *asoc;
	int moved_top = 0;

	asoc = &stcb->asoc;
	old_sb = &old->so_rcv;
	new_sb = &new->so_rcv;
	if (old_sb->sb_mb == NULL) {
		/* Nothing to move */
		return;
	}

	if (inp->sctp_vtag_first == asoc->my_vtag) {
		/* First one must be moved */
		struct mbuf *mm;
		for (mm = old_sb->sb_mb; mm; mm = mm->m_next) {
			/*
			 * Go down the chain and fix
			 * the space allocation of the
			 * two sockets.
			 */
			sbfree(old_sb, mm);
			sballoc(new_sb, mm);
		}
		/* unlink the head record from old; it becomes new's head */
		new_sb->sb_mb = old_sb->sb_mb;
		old_sb->sb_mb = new_sb->sb_mb->m_nextpkt;
		new_sb->sb_mb->m_nextpkt = NULL;
		put = &new_sb->sb_mb->m_nextpkt;
		moved_top = 1;
	} else {
		put = &new_sb->sb_mb;
	}

	take = &old_sb->sb_mb;
	next = old_sb->sb_mb;
	while (next) {
		this = next;
		/* position for next one */
		next = this->m_nextpkt;
		/* check the tag of this packet */
		if (sctp_should_be_moved(this, asoc)) {
			/* yes this needs to be moved */
			struct mbuf *mm;
			*take = this->m_nextpkt;
			this->m_nextpkt = NULL;
			*put = this;
			for (mm = this; mm; mm = mm->m_next) {
				/*
				 * Go down the chain and fix
				 * the space allocation of the
				 * two sockets.
				 */
				sbfree(old_sb, mm);
				sballoc(new_sb, mm);
			}
			put = &this->m_nextpkt;

		} else {
			/* no, advance our take point. */
			take = &this->m_nextpkt;
		}
	}
	if (moved_top) {
		/*
		 * Ok so now we must re-position vtag_first to
		 * match the new first one since we moved the
		 * mbuf at the top.
		 */
		inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(old);
	}
}
3493
3494 void
3495 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
3496 struct sctp_tmit_chunk *tp1)
3497 {
3498 if (tp1->data == NULL) {
3499 return;
3500 }
3501 #ifdef SCTP_MBCNT_LOGGING
3502 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
3503 asoc->total_output_queue_size,
3504 tp1->book_size,
3505 asoc->total_output_mbuf_queue_size,
3506 tp1->mbcnt);
3507 #endif
3508 if (asoc->total_output_queue_size >= tp1->book_size) {
3509 asoc->total_output_queue_size -= tp1->book_size;
3510 } else {
3511 asoc->total_output_queue_size = 0;
3512 }
3513
3514 /* Now free the mbuf */
3515 if (asoc->total_output_mbuf_queue_size >= tp1->mbcnt) {
3516 asoc->total_output_mbuf_queue_size -= tp1->mbcnt;
3517 } else {
3518 asoc->total_output_mbuf_queue_size = 0;
3519 }
3520 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3521 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3522 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
3523 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
3524 } else {
3525 stcb->sctp_socket->so_snd.sb_cc = 0;
3526
3527 }
3528 if (stcb->sctp_socket->so_snd.sb_mbcnt >= tp1->mbcnt) {
3529 stcb->sctp_socket->so_snd.sb_mbcnt -= tp1->mbcnt;
3530 } else {
3531 stcb->sctp_socket->so_snd.sb_mbcnt = 0;
3532 }
3533 }
3534 }
3535
/*
 * Abandon PR-SCTP chunk tp1 from the given queue and, if it is a begin
 * or middle fragment, every following fragment of the same message:
 * notify the ULP of the failure, release the chunk's buffer space, and
 * mark each chunk SCTP_FORWARD_TSN_SKIP so a FORWARD-TSN can cover it.
 * Chunks abandoned from the send queue are shifted onto the sent
 * queue.  If the message started on the sent queue but its end-of-
 * message fragment was not found there, recurse into the send queue.
 * Returns the total book size released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue)
{
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;	/* saw the end of the fragmented message */

	do {
		ret_sz += tp1->book_size;
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
			sctp_free_bufspace(stcb, &stcb->asoc, tp1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
			/* space was freed; wake any blocked sender */
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
		if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed, we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/* Its a begin or middle piece, we must mark all of it */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered
		 * across the send and sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue);
		} else {
			printf("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
3598
3599 /*
3600 * checks to see if the given address, sa, is one that is currently
3601 * known by the kernel
3602 * note: can't distinguish the same address on multiple interfaces and
3603 * doesn't handle multiple addresses with different zone/scope id's
3604 * note: ifa_ifwithaddr() compares the entire sockaddr struct
3605 */
3606 struct ifaddr *
3607 sctp_find_ifa_by_addr(struct sockaddr *sa)
3608 {
3609 struct ifnet *ifn;
3610 struct ifaddr *ifa;
3611
3612 /* go through all our known interfaces */
3613 TAILQ_FOREACH(ifn, &ifnet_list, if_list) {
3614 /* go through each interface addresses */
3615 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
3616 /* correct family? */
3617 if (ifa->ifa_addr->sa_family != sa->sa_family)
3618 continue;
3619
3620 #ifdef INET6
3621 if (ifa->ifa_addr->sa_family == AF_INET6) {
3622 /* IPv6 address */
3623 struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
3624 sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
3625 if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
3626 /* create a copy and clear scope */
3627 memcpy(&sin6_tmp, sin1,
3628 sizeof(struct sockaddr_in6));
3629 sin1 = &sin6_tmp;
3630 in6_clearscope(&sin1->sin6_addr);
3631 }
3632 sin2 = (struct sockaddr_in6 *)sa;
3633 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
3634 sizeof(struct in6_addr)) == 0) {
3635 /* found it */
3636 return (ifa);
3637 }
3638 } else
3639 #endif
3640 if (ifa->ifa_addr->sa_family == AF_INET) {
3641 /* IPv4 address */
3642 struct sockaddr_in *sin1, *sin2;
3643 sin1 = (struct sockaddr_in *)ifa->ifa_addr;
3644 sin2 = (struct sockaddr_in *)sa;
3645 if (sin1->sin_addr.s_addr ==
3646 sin2->sin_addr.s_addr) {
3647 /* found it */
3648 return (ifa);
3649 }
3650 }
3651 /* else, not AF_INET or AF_INET6, so skip */
3652 } /* end foreach ifa */
3653 } /* end foreach ifn */
3654 /* not found! */
3655 return (NULL);
3656 }
3657
3658
3659 #ifdef __APPLE__
3660 /*
3661 * here we hack in a fix for Apple's m_copym for the case where the first mbuf
3662 * in the chain is a M_PKTHDR and the length is zero
3663 */
3664 static void
3665 sctp_pkthdr_fix(struct mbuf *m)
3666 {
3667 struct mbuf *m_nxt;
3668
3669 if ((m->m_flags & M_PKTHDR) == 0) {
3670 /* not a PKTHDR */
3671 return;
3672 }
3673
3674 if (m->m_len != 0) {
3675 /* not a zero length PKTHDR mbuf */
3676 return;
3677 }
3678
3679 /* let's move in a word into the first mbuf... yes, ugly! */
3680 m_nxt = m->m_next;
3681 if (m_nxt == NULL) {
3682 /* umm... not a very useful mbuf chain... */
3683 return;
3684 }
3685 if ((size_t)m_nxt->m_len > sizeof(long)) {
3686 /* move over a long */
3687 bcopy(mtod(m_nxt, void *), mtod(m, void *), sizeof(long));
3688 /* update mbuf data pointers and lengths */
3689 m->m_len += sizeof(long);
3690 m_nxt->m_data += sizeof(long);
3691 m_nxt->m_len -= sizeof(long);
3692 }
3693 }
3694
inline struct mbuf *
sctp_m_copym(struct mbuf *m, int off, int len, int wait)
{
	struct mbuf *copy;

	/* apply the zero-length-PKTHDR workaround before copying */
	sctp_pkthdr_fix(m);
	copy = m_copym(m, off, len, wait);
	return (copy);
}
3701 #endif /* __APPLE__ */
3702