/*	$NetBSD: frag6.c,v 1.41 2008/01/14 04:14:37 dyoung Exp $	*/
/*	$KAME: frag6.c,v 1.40 2002/05/27 21:40:31 itojun Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: frag6.c,v 1.41 2008/01/14 04:14:37 dyoung Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>

#include <net/net_osdep.h>

static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *);
static void frag6_deq(struct ip6asfrag *);
static void frag6_insque(struct ip6q *, struct ip6q *);
static void frag6_remque(struct ip6q *);
static void frag6_freef(struct ip6q *);

static int ip6q_locked;
u_int frag6_nfragpackets;
u_int frag6_nfrags;
struct ip6q ip6q;	/* ip6 reassemble queue */

static inline int ip6q_lock_try(void);
static inline void ip6q_unlock(void);

static inline int
ip6q_lock_try(void)
{
	int s;

	/*
	 * Use splvm() -- we're blocking things that would cause
	 * mbuf allocation.
	 */
	s = splvm();
	if (ip6q_locked) {
		splx(s);
		return (0);
	}
	ip6q_locked = 1;
	splx(s);
	return (1);
}

static inline void
ip6q_unlock(void)
{
	int s;

	s = splvm();
	ip6q_locked = 0;
	splx(s);
}

#ifdef DIAGNOSTIC
#define	IP6Q_LOCK()							\
do {									\
	if (ip6q_lock_try() == 0) {					\
		printf("%s:%d: ip6q already locked\n", __FILE__, __LINE__); \
		panic("ip6q_lock");					\
	}								\
} while (/*CONSTCOND*/ 0)
#define	IP6Q_LOCK_CHECK()						\
do {									\
	if (ip6q_locked == 0) {						\
		printf("%s:%d: ip6q lock not held\n", __FILE__, __LINE__); \
		panic("ip6q lock check");				\
	}								\
} while (/*CONSTCOND*/ 0)
#else
#define	IP6Q_LOCK()		(void) ip6q_lock_try()
#define	IP6Q_LOCK_CHECK()	/* nothing */
#endif

#define	IP6Q_UNLOCK()		ip6q_unlock()

#ifndef offsetof		/* XXX */
#define	offsetof(type, member)	((size_t)(&((type *)0)->member))
#endif

/*
 * Initialise the reassembly queue.
 */
void
frag6_init(void)
{

	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
}

/*
 * In RFC 2460, the fragment and reassembly rules do not agree with each
 * other with respect to the handling of the next header field in the
 * fragment header.  While the sender will use the same value for all of
 * the fragmented packets, the receiver is advised not to check them for
 * consistency.
 *
 * fragment rule (p20):
 *	(2) A Fragment header containing:
 *	The Next Header value that identifies the first header of
 *	the Fragmentable Part of the original packet.
 *		-> next header field is same for all fragments
 *
 * reassembly rule (p21):
 *	The Next Header field of the last header of the Unfragmentable
 *	Part is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is going
 * to send different fragments with different next header fields.
 *
 * additional note (p22):
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
/*
 * Fragment input
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct rtentry *rt;
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be larger than u_int16_t */
	struct ifnet *dstifp;
	static struct route ro;
	union {
		struct sockaddr		dst;
		struct sockaddr_in6	dst6;
	} u;

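	/* get a pointer to the IPv6 header and pull up the fragment header */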
	ip6 = mtod(m, struct ip6_hdr *);
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;

	dstifp = NULL;
	/* find the destination interface of the packet. */
	sockaddr_in6_init(&u.dst6, &ip6->ip6_dst, 0, 0, 0);
	if ((rt = rtcache_lookup(&ro, &u.dst)) != NULL && rt->rt_ifa != NULL)
		dstifp = ((struct in6_ifaddr *)rt->rt_ifa)->ia_ifp;

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	/*
	 * check whether the length of the fragment data is a
	 * multiple of 8 octets.
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	ip6stat.ip6s_fragments++;
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	IP6Q_LOCK();

	/*
	 * Enforce upper bound on number of fragments.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (frag6_nfrags >= (u_int)ip6_maxfrags)
		goto dropfrag;

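	/*
	 * Look for an existing reassembly queue matching this fragment's
	 * <source, destination, fragment id> tuple.
	 */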
	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (ip6_maxfragpackets < 0)
			;
		else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
			goto dropfrag;
		frag6_nfragpackets++;
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
		    M_DONTWAIT);
		if (q6 == NULL)
			goto dropfrag;
		bzero(q6, sizeof(*q6));

		frag6_insque(q6, &ip6q);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
		q6->ip6q_nxtp = (u_char *)nxtp;
#endif
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_arrive = 0; /* Is it used anywhere? */
		q6->ip6q_ttl = IPV6_FRAGTTL;
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			IP6Q_UNLOCK();
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		IP6Q_UNLOCK();
		return (IPPROTO_DONE);
	}
	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		     af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct mbuf *merr = IP6_REASS_MBUF(af6);
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				free(af6, M_FTABLE);

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

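	/*
	 * Allocate a descriptor for this fragment and record its offset,
	 * length and the mbuf that carries it.
	 */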
	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
	    M_DONTWAIT);
	if (ip6af == NULL)
		goto dropfrag;
	bzero(ip6af, sizeof(*ip6af));
	ip6af->ip6af_head = ip6->ip6_flow;
	ip6af->ip6af_len = ip6->ip6_plen;
	ip6af->ip6af_nxt = ip6->ip6_nxt;
	ip6af->ip6af_hlim = ip6->ip6_hlim;
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;

	if (first_frag) {
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

#if 0
	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
			if (i >= ip6af->ip6af_frglen)
				goto dropfrag;
			m_adj(IP6_REASS_MBUF(ip6af), i);
			ip6af->ip6af_off += i;
			ip6af->ip6af_frglen -= i;
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (af6 != (struct ip6asfrag *)q6 &&
	       ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i < af6->ip6af_frglen) {
			af6->ip6af_frglen -= i;
			af6->ip6af_off += i;
			m_adj(IP6_REASS_MBUF(af6), i);
			break;
		}
		af6 = af6->ip6af_down;
		m_freem(IP6_REASS_MBUF(af6->ip6af_up));
		frag6_deq(af6->ip6af_up);
	}
#else
	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop it, since it is dangerous to overwrite
	 * existing fragments from a security point of view.
	 * We don't know which fragment is the bad guy - here we trust
	 * the fragment that came in earlier, with no real reason.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
#if 0	/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the previous fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			free(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
#if 0	/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the succeeding fragment",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			free(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
#endif

 insert:

	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
	frag6_nfrags++;
	q6->ip6q_nfrag++;
#if 0 /* xxx */
	if (q6 != ip6q.ip6q_next) {
		frag6_remque(q6);
		frag6_insque(q6, &ip6q);
	}
#endif
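	/*
	 * Reassembly is complete only when the fragments are contiguous
	 * from offset 0 and the last fragment in the chain has the
	 * "more fragments" flag clear.
	 */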
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			IP6Q_UNLOCK();
			return IPPROTO_DONE;
		}
		next += af6->ip6af_frglen;
	}
	if (af6->ip6af_up->ip6af_mff) {
		IP6Q_UNLOCK();
		return IPPROTO_DONE;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next)
			t = t->m_next;
		t->m_next = IP6_REASS_MBUF(af6);
		m_adj(t->m_next, af6->ip6af_offset);
		free(af6, M_FTABLE);
		af6 = af6dwn;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FTABLE);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	nxt = q6->ip6q_nxt;
#ifdef notyet
	*q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

	/*
	 * Delete the fragment header at as low a cost as possible.
	 */
	if (offset < m->m_len) {
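		/*
		 * The fragment header lies within the first mbuf: slide the
		 * preceding headers forward over it, then advance the
		 * mbuf's data pointer.
		 */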
		memmove((char *)ip6 + sizeof(struct ip6_frag), ip6, offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary is on a cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			frag6_nfrags -= q6->ip6q_nfrag;
			free(q6, M_FTABLE);
			frag6_nfragpackets--;
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	/*
	 * Store NXT to the original.
	 */
	{
		u_int8_t *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
		*prvnxtp = nxt;
	}

	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	free(q6, M_FTABLE);
	frag6_nfragpackets--;

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		int plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}

	ip6stat.ip6s_reassembled++;
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

	/*
	 * Tell launch routine the next header
	 */

	*mp = m;
	*offp = offset;

	IP6Q_UNLOCK();
	return nxt;

 dropfrag:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	ip6stat.ip6s_fragdropped++;
	m_freem(m);
	IP6Q_UNLOCK();
	return IPPROTO_DONE;
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
frag6_freef(struct ip6q *q6)
{
	struct ip6asfrag *af6, *down6;

	IP6Q_LOCK_CHECK();

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = down6) {
		struct mbuf *m = IP6_REASS_MBUF(af6);

		down6 = af6->ip6af_down;
		frag6_deq(af6);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);
		free(af6, M_FTABLE);
	}
	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	free(q6, M_FTABLE);
	frag6_nfragpackets--;
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
void
frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6)
{

	IP6Q_LOCK_CHECK();

	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}

/*
 * To frag6_enq as remque is to insque.
 */
void
frag6_deq(struct ip6asfrag *af6)
{

	IP6Q_LOCK_CHECK();

	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}

void
frag6_insque(struct ip6q *new, struct ip6q *old)
{

	IP6Q_LOCK_CHECK();

	new->ip6q_prev = old;
	new->ip6q_next = old->ip6q_next;
	old->ip6q_next->ip6q_prev = new;
	old->ip6q_next = new;
}

void
frag6_remque(struct ip6q *p6)
{

	IP6Q_LOCK_CHECK();

	p6->ip6q_prev->ip6q_next = p6->ip6q_next;
	p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
}

/*
 * IPv6 reassembly timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
frag6_slowtimo(void)
{
	struct ip6q *q6;
	int s = splsoftnet();

	IP6Q_LOCK();
	q6 = ip6q.ip6q_next;
	if (q6)
		while (q6 != &ip6q) {
			--q6->ip6q_ttl;
			q6 = q6->ip6q_next;
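			/*
			 * q6 has already been advanced, so the entry just
			 * aged (q6->ip6q_prev) can be freed safely below.
			 */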
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				ip6stat.ip6s_fragtimeout++;
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev);
			}
		}
	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
	    ip6q.ip6q_prev) {
		ip6stat.ip6s_fragoverflow++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_prev);
	}
	IP6Q_UNLOCK();

#if 0
	/*
	 * Routing changes might produce a better route than we last used;
	 * make sure we notice eventually, even if forwarding only for one
	 * destination and the cache is never replaced.
	 */
	rtcache_free(&ip6_forward_rt);
	rtcache_free(&ipsrcchk_rt);
#endif

	splx(s);
}

/*
 * Drain off all datagram fragments.
 */
void
frag6_drain(void)
{

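	/* if the reassembly queue is already locked, just give up; don't wait */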
	if (ip6q_lock_try() == 0)
		return;
	while (ip6q.ip6q_next != &ip6q) {
		ip6stat.ip6s_fragdropped++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_next);
	}
	IP6Q_UNLOCK();
}