/*	$NetBSD: frag6.c,v 1.18 2002/05/28 03:04:05 itojun Exp $	*/
/*	$KAME: frag6.c,v 1.40 2002/05/27 21:40:31 itojun Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: frag6.c,v 1.18 2002/05/28 03:04:05 itojun Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>

#include <net/net_osdep.h>
/*
 * Define this to get correct per-interface statistics.
 * It costs an extra routing table lookup per fragment,
 * which may or may not be a performance hit.
 */
#define IN6_IFSTAT_STRICT

static void frag6_enq __P((struct ip6asfrag *, struct ip6asfrag *));
static void frag6_deq __P((struct ip6asfrag *));
static void frag6_insque __P((struct ip6q *, struct ip6q *));
static void frag6_remque __P((struct ip6q *));
static void frag6_freef __P((struct ip6q *));

static int ip6q_locked;
u_int frag6_nfragpackets;
u_int frag6_nfrags;
struct ip6q ip6q;	/* ip6 reassembly queue */

static __inline int ip6q_lock_try __P((void));
static __inline void ip6q_unlock __P((void));

static __inline int
ip6q_lock_try()
{
	int s;

	/*
	 * Use splvm() -- we're blocking things that would cause
	 * mbuf allocation.
	 */
	s = splvm();
	if (ip6q_locked) {
		splx(s);
		return (0);
	}
	ip6q_locked = 1;
	splx(s);
	return (1);
}

static __inline void
ip6q_unlock()
{
	int s;

	s = splvm();
	ip6q_locked = 0;
	splx(s);
}

#ifdef DIAGNOSTIC
#define IP6Q_LOCK()							\
do {									\
	if (ip6q_lock_try() == 0) {					\
		printf("%s:%d: ip6q already locked\n", __FILE__, __LINE__); \
		panic("ip6q_lock");					\
	}								\
} while (0)
#define IP6Q_LOCK_CHECK()						\
do {									\
	if (ip6q_locked == 0) {						\
		printf("%s:%d: ip6q lock not held\n", __FILE__, __LINE__); \
		panic("ip6q lock check");				\
	}								\
} while (0)
#else
#define IP6Q_LOCK()		(void) ip6q_lock_try()
#define IP6Q_LOCK_CHECK()	/* nothing */
#endif

#define IP6Q_UNLOCK()		ip6q_unlock()

#ifndef offsetof		/* XXX */
#define offsetof(type, member)	((size_t)(&((type *)0)->member))
#endif

/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init()
{
	struct timeval tv;

	/*
	 * In many cases, random() here does NOT return a random number,
	 * because initialization during bootstrap happens in a fixed order.
	 */
	microtime(&tv);
	ip6_id = random() ^ tv.tv_usec;
	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
}

/*
 * In RFC2460, the fragmentation and reassembly rules do not agree with
 * each other in how the next header field of the fragment header is handled.
 * While the sender will use the same value for all of the fragments,
 * the receiver is advised not to check them for consistency.
 *
 * fragment rule (p20):
 *	(2) A Fragment header containing:
 *	The Next Header value that identifies the first header of
 *	the Fragmentable Part of the original packet.
 *		-> next header field is same for all fragments
 *
 * reassembly rule (p21):
 *	The Next Header field of the last header of the Unfragmentable
 *	Part is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is going
 * to send different fragments with different next header fields.
 *
 * additional note (p22):
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
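/*
 * For reference, the Fragment header format (RFC2460 section 4.5),
 * parsed below as struct ip6_frag (<netinet/ip6.h>):
 *
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|  Next Header  |   Reserved    |      Fragment Offset    |Res|M|
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|                         Identification                        |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * The 13-bit Fragment Offset is expressed in 8-octet units and occupies
 * the high-order bits of ip6f_offlg, so ntohs(ip6f_offlg & IP6F_OFF_MASK)
 * below yields the fragment offset directly in bytes.
 */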
/*
 * Fragment input
 */
int
frag6_input(mp, offp, proto)
	struct mbuf **mp;
	int *offp, proto;
{
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be larger than u_int16_t */
	struct ifnet *dstifp;
#ifdef IN6_IFSTAT_STRICT
	static struct route_in6 ro;
	struct sockaddr_in6 *dst;
#endif

	ip6 = mtod(m, struct ip6_hdr *);
#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
#else
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;
#endif

	dstifp = NULL;
#ifdef IN6_IFSTAT_STRICT
	/* find the destination interface of the packet. */
	dst = (struct sockaddr_in6 *)&ro.ro_dst;
	if (ro.ro_rt
	 && ((ro.ro_rt->rt_flags & RTF_UP) == 0
	   || !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_dst))) {
		RTFREE(ro.ro_rt);
		ro.ro_rt = (struct rtentry *)0;
	}
	if (ro.ro_rt == NULL) {
		bzero(dst, sizeof(*dst));
		dst->sin6_family = AF_INET6;
		dst->sin6_len = sizeof(struct sockaddr_in6);
		dst->sin6_addr = ip6->ip6_dst;
	}
	rtalloc((struct route *)&ro);
	if (ro.ro_rt != NULL && ro.ro_rt->rt_ifa != NULL)
		dstifp = ((struct in6_ifaddr *)ro.ro_rt->rt_ifa)->ia_ifp;
#else
	/* we are violating the spec, this is not the destination interface */
	if ((m->m_flags & M_PKTHDR) != 0)
		dstifp = m->m_pkthdr.rcvif;
#endif

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	/*
	 * Check whether the fragment's payload length is a multiple of
	 * 8 octets.  Since sizeof(struct ip6_frag) == 8 and
	 * sizeof(struct ip6_hdr) == 40 are both multiples of 8, it is
	 * enough to test (ip6_plen - offset), where offset still points
	 * at the fragment header.
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	ip6stat.ip6s_fragments++;
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	IP6Q_LOCK();

	/*
	 * Enforce upper bound on number of fragments.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (frag6_nfrags >= (u_int)ip6_maxfrags)
		goto dropfrag;

	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (ip6_maxfragpackets < 0)
			;
		else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
			goto dropfrag;
		frag6_nfragpackets++;
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
		    M_DONTWAIT);
		if (q6 == NULL)
			goto dropfrag;
		bzero(q6, sizeof(*q6));

		frag6_insque(q6, &ip6q);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
		q6->ip6q_nxtp = (u_char *)nxtp;
#endif
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_arrive = 0; /* Is it used anywhere? */
		q6->ip6q_ttl = IPV6_FRAGTTL;
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
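	/*
	 * Note that offset has already been advanced past the fragment
	 * header, so the subtraction below gives the length of the
	 * extension headers between the fixed IPv6 header and the
	 * fragment header (the unfragmentable part, less the IPv6
	 * header itself).
	 */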
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
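	/*
	 * frgpartlen is the length of this fragment's data: the total
	 * packet length (sizeof(struct ip6_hdr) + ip6_plen) minus the
	 * offset of the data portion.  Together with the unfragmentable
	 * part and the fragment offset it must fit in the 16-bit payload
	 * length of the reassembled packet.
	 */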
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
				offsetof(struct ip6_frag, ip6f_offlg));
			IP6Q_UNLOCK();
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
			offsetof(struct ip6_frag, ip6f_offlg));
		IP6Q_UNLOCK();
		return (IPPROTO_DONE);
	}
	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		     af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct mbuf *merr = IP6_REASS_MBUF(af6);
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				free(af6, M_FTABLE);

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
	    M_DONTWAIT);
	if (ip6af == NULL)
		goto dropfrag;
	bzero(ip6af, sizeof(*ip6af));
	ip6af->ip6af_head = ip6->ip6_flow;
	ip6af->ip6af_len = ip6->ip6_plen;
	ip6af->ip6af_nxt = ip6->ip6_nxt;
	ip6af->ip6af_hlim = ip6->ip6_hlim;
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;

	if (first_frag) {
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

#if 0
	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
			if (i >= ip6af->ip6af_frglen)
				goto dropfrag;
			m_adj(IP6_REASS_MBUF(ip6af), i);
			ip6af->ip6af_off += i;
			ip6af->ip6af_frglen -= i;
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (af6 != (struct ip6asfrag *)q6 &&
	       ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i < af6->ip6af_frglen) {
			af6->ip6af_frglen -= i;
			af6->ip6af_off += i;
			m_adj(IP6_REASS_MBUF(af6), i);
			break;
		}
		af6 = af6->ip6af_down;
		m_freem(IP6_REASS_MBUF(af6->ip6af_up));
		frag6_deq(af6->ip6af_up);
	}
#else
	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop it, since it is dangerous to override
	 * existing fragments from a security point of view.
	 * We don't know which fragment is the bad guy - here we trust
	 * the fragment that came in earlier, with no real reason.
	 */
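	/*
	 * The first check below measures how far the preceding fragment
	 * extends past the start of the new one; the second measures how
	 * far the new one extends past the start of the fragment that
	 * follows it.  Any positive overlap drops the newcomer.
	 */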
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
#if 0				/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the previous fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			free(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
#if 0				/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the succeeding fragment",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			free(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
#endif

 insert:

	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
	frag6_nfrags++;
	q6->ip6q_nfrag++;
#if 0 /* xxx */
	if (q6 != ip6q.ip6q_next) {
		frag6_remque(q6);
		frag6_insque(q6, &ip6q);
	}
#endif
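	/*
	 * The fragments are kept sorted by offset, so the packet is
	 * complete once they are contiguous from offset 0 and the last
	 * fragment in the list arrived with the "more fragments" flag
	 * cleared.
	 */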
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			IP6Q_UNLOCK();
			return IPPROTO_DONE;
		}
		next += af6->ip6af_frglen;
	}
	if (af6->ip6af_up->ip6af_mff) {
		IP6Q_UNLOCK();
		return IPPROTO_DONE;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
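	/*
	 * The first fragment's mbuf chain becomes the reassembled packet;
	 * each following fragment has everything up to its data portion
	 * (ip6af_offset bytes of headers) trimmed off before being
	 * appended.
	 */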
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next)
			t = t->m_next;
		t->m_next = IP6_REASS_MBUF(af6);
		m_adj(t->m_next, af6->ip6af_offset);
		free(af6, M_FTABLE);
		af6 = af6dwn;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FTABLE);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	nxt = q6->ip6q_nxt;
#ifdef notyet
	*q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

	/*
	 * Delete the fragment header with as little cost as possible.
	 */
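	/*
	 * If the fragment header lies within the first mbuf, slide the
	 * preceding headers forward over it; otherwise split the chain
	 * at the start of the fragment header, trim the header from the
	 * second piece and rejoin the two.
	 */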
	if (offset < m->m_len) {
		ovbcopy((caddr_t)ip6, (caddr_t)ip6 + sizeof(struct ip6_frag),
			offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary is on cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			frag6_nfrags -= q6->ip6q_nfrag;
			free(q6, M_FTABLE);
			frag6_nfragpackets--;
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	/*
	 * Store NXT to the original.
	 */
	{
		char *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
		*prvnxtp = nxt;
	}

	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	free(q6, M_FTABLE);
	frag6_nfragpackets--;

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		int plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}

	ip6stat.ip6s_reassembled++;
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

	/*
	 * Tell launch routine the next header
	 */

	*mp = m;
	*offp = offset;

	IP6Q_UNLOCK();
	return nxt;

 dropfrag:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	ip6stat.ip6s_fragdropped++;
	m_freem(m);
	IP6Q_UNLOCK();
	return IPPROTO_DONE;
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
frag6_freef(q6)
	struct ip6q *q6;
{
	struct ip6asfrag *af6, *down6;

	IP6Q_LOCK_CHECK();

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = down6) {
		struct mbuf *m = IP6_REASS_MBUF(af6);

		down6 = af6->ip6af_down;
		frag6_deq(af6);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
				    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);
		free(af6, M_FTABLE);
	}
	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	free(q6, M_FTABLE);
	frag6_nfragpackets--;
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
void
frag6_enq(af6, up6)
	struct ip6asfrag *af6, *up6;
{

	IP6Q_LOCK_CHECK();

	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}

/*
 * To frag6_enq as remque is to insque.
 */
void
frag6_deq(af6)
	struct ip6asfrag *af6;
{

	IP6Q_LOCK_CHECK();

	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}

void
frag6_insque(new, old)
	struct ip6q *new, *old;
{

	IP6Q_LOCK_CHECK();

	new->ip6q_prev = old;
	new->ip6q_next = old->ip6q_next;
	old->ip6q_next->ip6q_prev = new;
	old->ip6q_next = new;
}

void
frag6_remque(p6)
	struct ip6q *p6;
{

	IP6Q_LOCK_CHECK();

	p6->ip6q_prev->ip6q_next = p6->ip6q_next;
	p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
}

/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
frag6_slowtimo()
{
	struct ip6q *q6;
	int s = splsoftnet();

	IP6Q_LOCK();
	q6 = ip6q.ip6q_next;
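	/*
	 * Advance q6 before checking and possibly freeing its predecessor,
	 * so that frag6_freef() on an expired queue cannot invalidate the
	 * iterator.
	 */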
	if (q6)
		while (q6 != &ip6q) {
			--q6->ip6q_ttl;
			q6 = q6->ip6q_next;
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				ip6stat.ip6s_fragtimeout++;
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev);
			}
		}
	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
	    ip6q.ip6q_prev) {
		ip6stat.ip6s_fragoverflow++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_prev);
	}
	IP6Q_UNLOCK();

#if 0
	/*
	 * Routing changes might produce a better route than we last used;
	 * make sure we notice eventually, even if forwarding only for one
	 * destination and the cache is never replaced.
	 */
	if (ip6_forward_rt.ro_rt) {
		RTFREE(ip6_forward_rt.ro_rt);
		ip6_forward_rt.ro_rt = 0;
	}
	if (ipsrcchk_rt.ro_rt) {
		RTFREE(ipsrcchk_rt.ro_rt);
		ipsrcchk_rt.ro_rt = 0;
	}
#endif

	splx(s);
}

/*
 * Drain off all datagram fragments.
 */
void
frag6_drain()
{

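	/*
	 * The drain hook may run while the reassembly queue is already
	 * locked by the code it interrupted; in that case just give up
	 * and leave the queue for a later drain rather than risk a
	 * deadlock.
	 */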
	if (ip6q_lock_try() == 0)
		return;
	while (ip6q.ip6q_next != &ip6q) {
		ip6stat.ip6s_fragdropped++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_next);
	}
	IP6Q_UNLOCK();
}