/*	$NetBSD: frag6.c,v 1.70 2018/04/13 11:19:09 maxv Exp $	*/
/*	$KAME: frag6.c,v 1.40 2002/05/27 21:40:31 itojun Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: frag6.c,v 1.70 2018/04/13 11:19:09 maxv Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6_private.h>
#include <netinet/icmp6.h>

#include <net/net_osdep.h>
/*
 * IP6 reassembly queue structure.  Each fragment
 * being reassembled is attached to one of these structures.
 */
struct ip6q {
	u_int32_t	ip6q_head;
	u_int16_t	ip6q_len;
	u_int8_t	ip6q_nxt;	/* ip6f_nxt in first fragment */
	u_int8_t	ip6q_hlim;
	struct ip6asfrag *ip6q_down;
	struct ip6asfrag *ip6q_up;
	u_int32_t	ip6q_ident;
	u_int8_t	ip6q_ttl;
	struct in6_addr	ip6q_src, ip6q_dst;
	struct ip6q	*ip6q_next;
	struct ip6q	*ip6q_prev;
	int		ip6q_unfrglen;	/* len of unfragmentable part */
	int		ip6q_nfrag;	/* # of fragments */
};

struct ip6asfrag {
	u_int32_t	ip6af_head;
	u_int16_t	ip6af_len;
	u_int8_t	ip6af_nxt;
	u_int8_t	ip6af_hlim;
	/* the above members must not be overwritten during reassembly */
	struct ip6asfrag *ip6af_down;
	struct ip6asfrag *ip6af_up;
	struct mbuf	*ip6af_m;
	int		ip6af_offset;	/* offset in ip6af_m to next header */
	int		ip6af_frglen;	/* fragmentable part length */
	int		ip6af_off;	/* fragment offset */
	bool		ip6af_mff;	/* more fragment bit in frag off */
};
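
/*
 * Illustrative sketch of the reassembly data structures: the global
 * "ip6q" below heads a circular, doubly-linked list of per-packet ip6q
 * entries (ip6q_next/ip6q_prev), and each ip6q in turn heads a circular,
 * doubly-linked list of its fragments (ip6q_down/ip6q_up mirrored by
 * ip6af_down/ip6af_up), kept sorted by fragment offset:
 *
 *	ip6q (head) <-> ip6q #1 <-> ip6q #2 <-> ... <-> back to head
 *	                  |
 *	                  +-> ip6asfrag (off 0) <-> ip6asfrag (off n) <-> ...
 */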

static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *);
static void frag6_deq(struct ip6asfrag *);
static void frag6_insque(struct ip6q *, struct ip6q *);
static void frag6_remque(struct ip6q *);
static void frag6_freef(struct ip6q *);

static int frag6_drainwanted;

u_int frag6_nfragpackets;
u_int frag6_nfrags;
struct ip6q ip6q;	/* ip6 reassembly queue */

/* Protects ip6q */
static kmutex_t frag6_lock __cacheline_aligned;
/*
 * Initialise the reassembly queue and the lock that protects it.
 */
void
frag6_init(void)
{

	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
	mutex_init(&frag6_lock, MUTEX_DEFAULT, IPL_NET);
}

/*
 * IPv6 fragment input.
 *
 * In RFC 2460, the fragment and reassembly rules do not agree with each
 * other with respect to the handling of the next header field in the
 * fragment header.  While the sender will use the same value for all of
 * the fragmented packets, the receiver is advised not to check for
 * consistency.
 *
 * Fragment rule (p. 20):
 *	(2) A Fragment header containing:
 *	The Next Header value that identifies the first header of
 *	the Fragmentable Part of the original packet.
 *		-> the next header field is the same for all fragments
 *
 * Reassembly rule (p. 21):
 *	The Next Header field of the last header of the Unfragmentable
 *	Part is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is going
 * to send different fragments with different next header fields.
 *
 * Additional note (p. 22):
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 *
 * XXX: It would be better to use a pool, rather than kmem.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct rtentry *rt;
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be wider than u_int16_t */
	struct ifnet *dstifp;
	static struct route ro;
	union {
		struct sockaddr		dst;
		struct sockaddr_in6	dst6;
	} u;

	ip6 = mtod(m, struct ip6_hdr *);
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;

	dstifp = NULL;
	/* find the destination interface of the packet. */
	sockaddr_in6_init(&u.dst6, &ip6->ip6_dst, 0, 0, 0);
	if ((rt = rtcache_lookup(&ro, &u.dst)) != NULL && rt->rt_ifa != NULL)
		dstifp = ((struct in6_ifaddr *)rt->rt_ifa)->ia_ifp;

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		goto done;
	}

	/*
	 * Check that the fragment packet's fragment length is a non-zero
	 * multiple of 8 octets.
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) == 0) ||
	     ((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		goto done;
	}

	IP6_STATINC(IP6_STAT_FRAGMENTS);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	/*
	 * RFC 6946: A host that receives an IPv6 packet which includes
	 * a Fragment Header with the "Fragment Offset" equal to 0 and
	 * the "M" bit equal to 0 MUST process such a packet in isolation
	 * from any other packets/fragments.
	 */
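	/*
	 * Because IP6F_OFF_MASK selects the 13-bit fragment offset field
	 * without shifting it down, the masked value below is already the
	 * fragment offset in bytes (the field itself counts 8-octet units).
	 */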
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0 && !(ip6f->ip6f_offlg & IP6F_MORE_FRAG)) {
		IP6_STATINC(IP6_STAT_REASSEMBLED);
		in6_ifstat_inc(dstifp, ifs6_reass_ok);
		*offp = offset;
		rtcache_unref(rt, &ro);
		return ip6f->ip6f_nxt;
	}

	mutex_enter(&frag6_lock);

	/*
	 * Enforce upper bound on number of fragments.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (frag6_nfrags >= (u_int)ip6_maxfrags)
		goto dropfrag;

	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (ip6_maxfragpackets < 0)
			;
		else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
			goto dropfrag;
		frag6_nfragpackets++;

		q6 = kmem_intr_zalloc(sizeof(struct ip6q), KM_NOSLEEP);
		if (q6 == NULL) {
			goto dropfrag;
		}
		frag6_insque(q6, &ip6q);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_ttl = IPV6_FRAGTTL;
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.  If it would exceed, discard the fragment and return an
	 * ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			mutex_exit(&frag6_lock);
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			goto done;
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		mutex_exit(&frag6_lock);
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		goto done;
	}

	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		     af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct mbuf *merr = af6->ip6af_m;
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				kmem_intr_free(af6, sizeof(struct ip6asfrag));

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	ip6af = kmem_intr_zalloc(sizeof(struct ip6asfrag), KM_NOSLEEP);
	if (ip6af == NULL) {
		goto dropfrag;
	}
	ip6af->ip6af_head = ip6->ip6_flow;
	ip6af->ip6af_len = ip6->ip6_plen;
	ip6af->ip6af_nxt = ip6->ip6_nxt;
	ip6af->ip6af_hlim = ip6->ip6_hlim;
	ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) != 0;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	ip6af->ip6af_m = m;

	if (first_frag) {
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue - drop it as per RFC 5722.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
		    - ip6af->ip6af_off;
		if (i > 0) {
			kmem_intr_free(ip6af, sizeof(struct ip6asfrag));
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
			kmem_intr_free(ip6af, sizeof(struct ip6asfrag));
			goto dropfrag;
		}
	}

 insert:
	/*
	 * Stick new segment in its place.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
	frag6_nfrags++;
	q6->ip6q_nfrag++;

	/*
	 * Check for complete reassembly.
	 */
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			mutex_exit(&frag6_lock);
			goto done;
		}
		next += af6->ip6af_frglen;
	}
	if (af6->ip6af_up->ip6af_mff) {
		mutex_exit(&frag6_lock);
		goto done;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = q6->ip6q_down;
	t = m = ip6af->ip6af_m;
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next)
			t = t->m_next;
		t->m_next = af6->ip6af_m;
		m_adj(t->m_next, af6->ip6af_offset);
		m_pkthdr_remove(t->m_next);
		kmem_intr_free(af6, sizeof(struct ip6asfrag));
		af6 = af6dwn;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	kmem_intr_free(ip6af, sizeof(struct ip6asfrag));
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	nxt = q6->ip6q_nxt;

	/*
	 * Delete frag6 header.
	 */
	if (m->m_len >= offset + sizeof(struct ip6_frag)) {
		memmove((char *)ip6 + sizeof(struct ip6_frag), ip6, offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary lies within a cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			frag6_nfrags -= q6->ip6q_nfrag;
			kmem_intr_free(q6, sizeof(struct ip6q));
			frag6_nfragpackets--;
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	kmem_intr_free(q6, sizeof(struct ip6q));
	frag6_nfragpackets--;

	{
		KASSERT(m->m_flags & M_PKTHDR);
		int plen = 0;
		for (t = m; t; t = t->m_next) {
			plen += t->m_len;
		}
		m->m_pkthdr.len = plen;
		/* XXX XXX: clear csum_flags? */
	}

	/*
	 * Restore NXT to the original.
	 */
	{
		const int prvnxt = ip6_get_prevhdr(m, offset);
		uint8_t *prvnxtp;

		IP6_EXTHDR_GET(prvnxtp, uint8_t *, m, prvnxt,
		    sizeof(*prvnxtp));
		if (prvnxtp == NULL) {
			goto dropfrag;
		}
		*prvnxtp = nxt;
	}

	IP6_STATINC(IP6_STAT_REASSEMBLED);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);
	rtcache_unref(rt, &ro);
	mutex_exit(&frag6_lock);

	/*
	 * Tell launch routine the next header.
	 */
	*mp = m;
	*offp = offset;
	return nxt;

 dropfrag:
	mutex_exit(&frag6_lock);
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6_STATINC(IP6_STAT_FRAGDROPPED);
	m_freem(m);
 done:
	rtcache_unref(rt, &ro);
	return IPPROTO_DONE;
}

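/*
 * Convenience wrapper around frag6_input().  Returns 0 when a fully
 * reassembled packet is available in *mp; otherwise returns EINVAL and
 * sets *mp to NULL, the fragment having been consumed (queued for
 * reassembly, or dropped) by frag6_input().
 */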
int
ip6_reass_packet(struct mbuf **mp, int offset)
{

	if (frag6_input(mp, &offset, IPPROTO_IPV6) == IPPROTO_DONE) {
		*mp = NULL;
		return EINVAL;
	}
	return 0;
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
frag6_freef(struct ip6q *q6)
{
	struct ip6asfrag *af6, *down6;

	KASSERT(mutex_owned(&frag6_lock));

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = down6) {
		struct mbuf *m = af6->ip6af_m;

		down6 = af6->ip6af_down;
		frag6_deq(af6);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else {
			m_freem(m);
		}
		kmem_intr_free(af6, sizeof(struct ip6asfrag));
	}

	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	kmem_intr_free(q6, sizeof(struct ip6q));
	frag6_nfragpackets--;
}

/*
 * Put an IPv6 fragment on a reassembly chain.
 * Like insque, but the pointers are in the middle of the structure.
 */
void
frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6)
{

	KASSERT(mutex_owned(&frag6_lock));

	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}

/*
 * To frag6_enq as remque is to insque.
 */
void
frag6_deq(struct ip6asfrag *af6)
{

	KASSERT(mutex_owned(&frag6_lock));

	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}

/*
 * Insert newq after oldq.
 */
void
frag6_insque(struct ip6q *newq, struct ip6q *oldq)
{

	KASSERT(mutex_owned(&frag6_lock));

	newq->ip6q_prev = oldq;
	newq->ip6q_next = oldq->ip6q_next;
	oldq->ip6q_next->ip6q_prev = newq;
	oldq->ip6q_next = newq;
}

/*
 * Unlink p6.
 */
void
frag6_remque(struct ip6q *p6)
{

	KASSERT(mutex_owned(&frag6_lock));

	p6->ip6q_prev->ip6q_next = p6->ip6q_next;
	p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
}

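/*
 * Fast timeout routine: if a drain of the reassembly queue has been
 * requested via frag6_drainstub(), perform it now.
 */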
void
frag6_fasttimo(void)
{

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();

	if (frag6_drainwanted) {
		frag6_drain();
		frag6_drainwanted = 0;
	}

	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

/*
 * IPv6 reassembly timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
frag6_slowtimo(void)
{
	struct ip6q *q6;

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();

	mutex_enter(&frag6_lock);
	q6 = ip6q.ip6q_next;
	if (q6)
		while (q6 != &ip6q) {
			--q6->ip6q_ttl;
			q6 = q6->ip6q_next;
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				IP6_STATINC(IP6_STAT_FRAGTIMEOUT);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev);
			}
		}
	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
	    ip6q.ip6q_prev) {
		IP6_STATINC(IP6_STAT_FRAGOVERFLOW);
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_prev);
	}
	mutex_exit(&frag6_lock);

	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

#if 0
	/*
	 * Routing changes might produce a better route than we last used;
	 * make sure we notice eventually, even if forwarding only for one
	 * destination and the cache is never replaced.
	 */
	rtcache_free(&ip6_forward_rt);
	rtcache_free(&ipsrcchk_rt);
#endif

}

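/*
 * Request a drain of the reassembly queue; the actual work is deferred
 * to frag6_fasttimo().
 */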
void
frag6_drainstub(void)
{
	frag6_drainwanted = 1;
}

/*
 * Drain off all datagram fragments.
 */
void
frag6_drain(void)
{

	if (mutex_tryenter(&frag6_lock)) {
		while (ip6q.ip6q_next != &ip6q) {
			IP6_STATINC(IP6_STAT_FRAGDROPPED);
			/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
			frag6_freef(ip6q.ip6q_next);
		}
		mutex_exit(&frag6_lock);
	}
}