/*	$NetBSD: ip_reass.c,v 1.1 2010/07/13 22:16:10 rmind Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

/*
 * IP reassembly.
 *
 * Additive-Increase/Multiplicative-Decrease (AIMD) strategy for IP
 * reassembly queue buffer management.
 *
 * We keep a count of total IP fragments (NB: not fragmented packets)
 * awaiting reassembly (ip_nfrags) and a limit (ip_maxfrags) on fragments.
 * If ip_nfrags exceeds the ip_maxfrags limit, we drop half of the total
 * fragments in the reassembly queues.  This AIMD policy avoids repeatedly
 * deleting single packets under heavy fragmentation load (e.g., from lossy
 * NFS peers).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_reass.c,v 1.1 2010/07/13 22:16:10 rmind Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/pool.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_proto.h>
#include <netinet/ip_private.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

/*
 * IP datagram reassembly hashed queues, pool, lock and counters.
 */
#define	IPREASS_HASH_SHIFT	6
#define	IPREASS_HASH_SIZE	(1 << IPREASS_HASH_SHIFT)
#define	IPREASS_HASH_MASK	(IPREASS_HASH_SIZE - 1)
#define	IPREASS_HASH(x, y) \
	(((((x) & 0xf) | ((((x) >> 8) & 0xf) << 4)) ^ (y)) & IPREASS_HASH_MASK)
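/*
 * The hash folds two nibbles of the source address into an 8-bit value,
 * XORs that with the IP identification field, and masks the result down
 * to IPREASS_HASH_SIZE buckets.
 */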

struct ipqhead	ipq[IPREASS_HASH_SIZE];
struct pool	ipqent_pool;
static int	ipq_locked;

static int	ip_nfragpackets;	/* packets in reass queue */
static int	ip_nfrags;		/* total fragments in reass queues */

static int	ip_maxfragpackets;	/* limit on packets.  XXX sysctl */
static int	ip_maxfrags;		/* limit on fragments.  XXX sysctl */

/*
 * Cached copy of nmbclusters.  If nmbclusters is different,
 * recalculate IP parameters derived from nmbclusters.
 */
static int	ip_nmbclusters;		/* copy of nmbclusters */

/*
 * IP reassembly TTL machinery for multiplicative drop.
 */
static u_int	fragttl_histo[IPFRAGTTL + 1];
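
/*
 * fragttl_histo[ttl] counts the fragments whose reassembly queue has
 * 'ttl' ticks of lifetime left; it is rebuilt on every pass of
 * ip_reass_ttl_decr() and used to pick the median TTL for dropping.
 */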

void		sysctl_ip_reass_setup(void);
static void	ip_nmbclusters_changed(void);
static u_int	ip_reass_ttl_decr(u_int ticks);
static void	ip_reass_drophalf(void);

/*
 * ip_reass_init:
 *
 * Initialization of IP reassembly mechanism.
 */
void
ip_reass_init(void)
{
	int i;

	pool_init(&ipqent_pool, sizeof(struct ipqent), 0, 0, 0, "ipqepl",
	    NULL, IPL_VM);

	for (i = 0; i < IPREASS_HASH_SIZE; i++) {
		LIST_INIT(&ipq[i]);
	}
	ip_maxfragpackets = 200;
	ip_maxfrags = 0;
	ip_nmbclusters_changed();

	sysctl_ip_reass_setup();
}

static struct sysctllog *ip_reass_sysctllog;

void
sysctl_ip_reass_setup(void)
{

	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "net", NULL,
		NULL, 0, NULL, 0,
		CTL_NET, CTL_EOL);
	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "inet",
		SYSCTL_DESCR("PF_INET related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ip",
		SYSCTL_DESCR("IPv4 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxfragpackets",
		SYSCTL_DESCR("Maximum number of fragments to retain for "
			     "possible reassembly"),
		NULL, 0, &ip_maxfragpackets, 0,
		CTL_NET, PF_INET, IPPROTO_IP, IPCTL_MAXFRAGPACKETS, CTL_EOL);
}

#define CHECK_NMBCLUSTER_PARAMS() \
do { \
	if (__predict_false(ip_nmbclusters != nmbclusters)) \
		ip_nmbclusters_changed(); \
} while (/*CONSTCOND*/0)

/*
 * Compute IP limits derived from the value of nmbclusters.
 */
static void
ip_nmbclusters_changed(void)
{
	ip_maxfrags = nmbclusters / 4;
	ip_nmbclusters = nmbclusters;
}

static inline int	ipq_lock_try(void);
static inline void	ipq_unlock(void);

static inline int
ipq_lock_try(void)
{
	int s;

	/*
	 * Use splvm() -- we're blocking things that would cause
	 * mbuf allocation.
	 */
	s = splvm();
	if (ipq_locked) {
		splx(s);
		return (0);
	}
	ipq_locked = 1;
	splx(s);
	return (1);
}

static inline void
ipq_unlock(void)
{
	int s;

	s = splvm();
	ipq_locked = 0;
	splx(s);
}

#ifdef DIAGNOSTIC
#define IPQ_LOCK() \
do { \
	if (ipq_lock_try() == 0) { \
		printf("%s:%d: ipq already locked\n", __FILE__, __LINE__); \
		panic("ipq_lock"); \
	} \
} while (/*CONSTCOND*/ 0)
#define IPQ_LOCK_CHECK() \
do { \
	if (ipq_locked == 0) { \
		printf("%s:%d: ipq lock not held\n", __FILE__, __LINE__); \
		panic("ipq lock check"); \
	} \
} while (/*CONSTCOND*/ 0)
#else
#define IPQ_LOCK()		(void) ipq_lock_try()
#define IPQ_LOCK_CHECK()	/* nothing */
#endif

#define IPQ_UNLOCK()		ipq_unlock()
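
/*
 * Note: the "lock" is only a busy flag protected by splvm(); in the
 * non-DIAGNOSTIC case IPQ_LOCK() simply discards the result of the
 * try-lock rather than waiting for the flag to clear.
 */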

/*
 * ip_reass_lookup:
 *
 * Look for queue of fragments of this datagram.
 */
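/*
 * Note: returns with the fragment queue lock held; the caller releases it
 * with ip_reass_unlock(), or ip_reass() drops it before returning.
 */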
struct ipq *
ip_reass_lookup(struct ip *ip, u_int *hashp)
{
	struct ipq *fp;
	u_int hash;

	IPQ_LOCK();
	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	LIST_FOREACH(fp, &ipq[hash], ipq_q) {
		if (ip->ip_id != fp->ipq_id)
			continue;
		if (!in_hosteq(ip->ip_src, fp->ipq_src))
			continue;
		if (!in_hosteq(ip->ip_dst, fp->ipq_dst))
			continue;
		if (ip->ip_p != fp->ipq_p)
			continue;
		break;
	}
	*hashp = hash;
	return fp;
}

void
ip_reass_unlock(void)
{

	IPQ_UNLOCK();
}

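/*
 * ip_reass_getent:
 *
 * Allocate a queue entry for a fragment; may return NULL under memory
 * pressure, since the pool is used with PR_NOWAIT.
 */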
struct ipqent *
ip_reass_getent(void)
{
	struct ipqent *ipqe;
	int s;

	IP_STATINC(IP_STAT_FRAGMENTS);
	s = splvm();
	ipqe = pool_get(&ipqent_pool, PR_NOWAIT);
	splx(s);

	return ipqe;
}

/*
 * ip_reass:
 *
 * Take an incoming datagram fragment and try to reassemble it into a whole
 * datagram.  If a chain for reassembly of this datagram already exists,
 * then it is given as 'fp'; otherwise we have to make a chain.
 */
struct mbuf *
ip_reass(struct ipqent *ipqe, struct ipq *fp, u_int hash)
{
	struct ipqhead *ipqhead = &ipq[hash];
	const int hlen = ipqe->ipqe_ip->ip_hl << 2;
	struct mbuf *m = ipqe->ipqe_m, *t;
	struct ipqent *nq, *p, *q;
	struct ip *ip;
	int i, next, s;

	IPQ_LOCK_CHECK();

	/*
	 * Presence of header sizes in mbufs would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

#ifdef notyet
	/* Make sure fragment limit is up-to-date. */
	CHECK_NMBCLUSTER_PARAMS();

	/* If we have too many fragments, drop the older half. */
	if (ip_nfrags >= ip_maxfrags) {
		ip_reass_drophalf();
	}
#endif

	/*
	 * We are about to add a fragment; increment frag count.
	 */
	ip_nfrags++;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly: a) if maxfragpackets
		 * is 0, never accept fragments; b) if maxfragpackets is
		 * -1, accept all fragments without limitation.
		 */
		if (ip_maxfragpackets < 0)
			;
		else if (ip_nfragpackets >= ip_maxfragpackets) {
			goto dropfrag;
		}
		ip_nfragpackets++;
		fp = malloc(sizeof(struct ipq), M_FTABLE, M_NOWAIT);
		if (fp == NULL) {
			goto dropfrag;
		}
		LIST_INSERT_HEAD(ipqhead, fp, ipq_q);
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ipqe->ipqe_ip->ip_p;
		fp->ipq_id = ipqe->ipqe_ip->ip_id;
		fp->ipq_tos = ipqe->ipqe_ip->ip_tos;
		TAILQ_INIT(&fp->ipq_fragq);
		fp->ipq_src = ipqe->ipqe_ip->ip_src;
		fp->ipq_dst = ipqe->ipqe_ip->ip_dst;
		p = NULL;
		goto insert;
	} else {
		fp->ipq_nfrags++;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL;
	    p = q, q = TAILQ_NEXT(q, ipqe_q))
		if (ntohs(q->ipqe_ip->ip_off) > ntohs(ipqe->ipqe_ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of our
	 * data already.  If so, drop the data from the incoming segment.
	 * If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
		    ntohs(ipqe->ipqe_ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ipqe->ipqe_ip->ip_len)) {
				goto dropfrag;
			}
			m_adj(ipqe->ipqe_m, i);
			ipqe->ipqe_ip->ip_off =
			    htons(ntohs(ipqe->ipqe_ip->ip_off) + i);
			ipqe->ipqe_ip->ip_len =
			    htons(ntohs(ipqe->ipqe_ip->ip_len) - i);
		}
	}

	/*
	 * While we overlap succeeding segments trim them or, if they are
	 * completely covered, dequeue them.
	 */
	for (; q != NULL &&
	    ntohs(ipqe->ipqe_ip->ip_off) + ntohs(ipqe->ipqe_ip->ip_len) >
	    ntohs(q->ipqe_ip->ip_off); q = nq) {
		i = (ntohs(ipqe->ipqe_ip->ip_off) +
		    ntohs(ipqe->ipqe_ip->ip_len)) - ntohs(q->ipqe_ip->ip_off);
		if (i < ntohs(q->ipqe_ip->ip_len)) {
			q->ipqe_ip->ip_len =
			    htons(ntohs(q->ipqe_ip->ip_len) - i);
			q->ipqe_ip->ip_off =
			    htons(ntohs(q->ipqe_ip->ip_off) + i);
			m_adj(q->ipqe_m, i);
			break;
		}
		nq = TAILQ_NEXT(q, ipqe_q);
		m_freem(q->ipqe_m);
		TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
		s = splvm();
		pool_put(&ipqent_pool, q);
		splx(s);
		fp->ipq_nfrags--;
		ip_nfrags--;
	}

insert:
	/*
	 * Stick new segment in its place; check for complete reassembly.
	 */
	if (p == NULL) {
		TAILQ_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
	} else {
		TAILQ_INSERT_AFTER(&fp->ipq_fragq, p, ipqe, ipqe_q);
	}
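	/*
	 * Scan the fragment list: each fragment must begin exactly where
	 * the previous one ended ('next'), and the last fragment must not
	 * have the "more fragments" flag set; otherwise reassembly is not
	 * yet complete and we return NULL.
	 */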
	next = 0;
	for (p = NULL, q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL;
	    p = q, q = TAILQ_NEXT(q, ipqe_q)) {
		if (ntohs(q->ipqe_ip->ip_off) != next) {
			IPQ_UNLOCK();
			return NULL;
		}
		next += ntohs(q->ipqe_ip->ip_len);
	}
	if (p->ipqe_mff) {
		IPQ_UNLOCK();
		return NULL;
	}
	/*
	 * Reassembly is complete.  Check for a bogus message size and
	 * concatenate fragments.
	 */
	q = TAILQ_FIRST(&fp->ipq_fragq);
	ip = q->ipqe_ip;
	if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
		IP_STATINC(IP_STAT_TOOLONG);
		ip_freef(fp);
		IPQ_UNLOCK();
		return NULL;
	}
	m = q->ipqe_m;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = TAILQ_NEXT(q, ipqe_q);
	s = splvm();
	pool_put(&ipqent_pool, q);
	splx(s);
	for (q = nq; q != NULL; q = nq) {
		t = q->ipqe_m;
		nq = TAILQ_NEXT(q, ipqe_q);
		s = splvm();
		pool_put(&ipqent_pool, q);
		splx(s);
		m_cat(m, t);
	}
	ip_nfrags -= fp->ipq_nfrags;

	/*
	 * Create header for new packet by modifying header of first
	 * packet.  Dequeue and discard fragment reassembly header.  Make
	 * header visible.
	 */
	ip->ip_len = htons(next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	LIST_REMOVE(fp, ipq_q);
	free(fp, M_FTABLE);
	ip_nfragpackets--;
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
		int plen = 0;
		for (t = m; t; t = t->m_next) {
			plen += t->m_len;
		}
		m->m_pkthdr.len = plen;
		m->m_pkthdr.csum_flags = 0;
	}
	IPQ_UNLOCK();
	return m;

dropfrag:
	if (fp != NULL) {
		fp->ipq_nfrags--;
	}
	ip_nfrags--;
	IP_STATINC(IP_STAT_FRAGDROPPED);
	m_freem(m);
	s = splvm();
	pool_put(&ipqent_pool, ipqe);
	splx(s);
	IPQ_UNLOCK();
	return NULL;
}

/*
 * ip_freef:
 *
 * Free a fragment reassembly header and all associated datagrams.
 */
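/* Note: the caller must hold the fragment queue lock (see IPQ_LOCK_CHECK). */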
void
ip_freef(struct ipq *fp)
{
	struct ipqent *q, *p;
	u_int nfrags = 0;
	int s;

	IPQ_LOCK_CHECK();

	for (q = TAILQ_FIRST(&fp->ipq_fragq); q != NULL; q = p) {
		p = TAILQ_NEXT(q, ipqe_q);
		m_freem(q->ipqe_m);
		nfrags++;
		TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
		s = splvm();
		pool_put(&ipqent_pool, q);
		splx(s);
	}

	if (nfrags != fp->ipq_nfrags) {
		printf("ip_freef: nfrags %d != %d\n", fp->ipq_nfrags, nfrags);
	}
	ip_nfrags -= nfrags;
	LIST_REMOVE(fp, ipq_q);
	free(fp, M_FTABLE);
	ip_nfragpackets--;
}


/*
 * ip_reass_ttl_decr:
 *
 * Decrement TTL of all reassembly queue entries by `ticks'.  Count
 * number of distinct fragments (as opposed to partial, fragmented
 * datagrams) in the reassembly queue.  While we traverse the entire
 * reassembly queue, compute and return the median TTL over all
 * fragments.
 */
static u_int
ip_reass_ttl_decr(u_int ticks)
{
	u_int nfrags, median, dropfraction, keepfraction;
	struct ipq *fp, *nfp;
	int i;

	nfrags = 0;
	memset(fragttl_histo, 0, sizeof(fragttl_histo));

	for (i = 0; i < IPREASS_HASH_SIZE; i++) {
		for (fp = LIST_FIRST(&ipq[i]); fp != NULL; fp = nfp) {
			fp->ipq_ttl = ((fp->ipq_ttl <= ticks) ?
			    0 : fp->ipq_ttl - ticks);
			nfp = LIST_NEXT(fp, ipq_q);
			if (fp->ipq_ttl == 0) {
				IP_STATINC(IP_STAT_FRAGTIMEOUT);
				ip_freef(fp);
			} else {
				nfrags += fp->ipq_nfrags;
				fragttl_histo[fp->ipq_ttl] += fp->ipq_nfrags;
			}
		}
	}

	KASSERT(ip_nfrags == nfrags);

	/* Find median (or other drop fraction) in histogram. */
	dropfraction = (ip_nfrags / 2);
	keepfraction = ip_nfrags - dropfraction;
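	/*
	 * Walk the histogram from the highest remaining TTL downwards,
	 * accumulating fragment counts until roughly half of all fragments
	 * ('keepfraction') are covered.  The TTL reached at that point is
	 * the median; expiring everything at or below it drops about half
	 * of the queued fragments.
	 */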
	for (i = IPFRAGTTL, median = 0; i >= 0; i--) {
		median += fragttl_histo[i];
		if (median >= keepfraction)
			break;
	}

	/* Return TTL of median (or other fraction). */
	return (u_int)i;
}

static void
ip_reass_drophalf(void)
{
	u_int median_ticks;

	/*
	 * Compute median TTL of all fragments, and count frags
	 * with that TTL or lower (roughly half of all fragments).
	 */
	median_ticks = ip_reass_ttl_decr(0);

	/* Drop half. */
	median_ticks = ip_reass_ttl_decr(median_ticks);
}

/*
 * ip_reass_drain: drain off all datagram fragments.  Do not acquire
 * softnet_lock, as it can be called from hardware interrupt context.
 */
void
ip_reass_drain(void)
{

	/*
	 * We may be called from a device's interrupt context.  If
	 * the ipq is already busy, just bail out now.
	 */
	if (ipq_lock_try() != 0) {
		/*
		 * Drop half the total fragments now.  If more mbufs are
		 * needed, we will be called again soon.
		 */
		ip_reass_drophalf();
		IPQ_UNLOCK();
	}
}

/*
 * ip_reass_slowtimo:
 *
 * If a timer expires on a reassembly queue, discard it.
 */
void
ip_reass_slowtimo(void)
{
	static u_int dropscanidx = 0;
	u_int i, median_ttl;

	IPQ_LOCK();

	/* Age TTL of all fragments by 1 tick. */
	median_ttl = ip_reass_ttl_decr(1);

	/* Make sure fragment limit is up-to-date. */
	CHECK_NMBCLUSTER_PARAMS();

	/* If we have too many fragments, drop the older half. */
	if (ip_nfrags > ip_maxfrags) {
		ip_reass_ttl_decr(median_ttl);
	}

	/*
	 * If we are over the maximum number of fragmented packets (due to
	 * the limit being lowered), drain off enough to get down to the
	 * new limit.  Start draining from the reassembly hashqueue most
	 * recently drained.
	 */
	if (ip_maxfragpackets < 0)
		;
	else {
		int wrapped = 0;

		i = dropscanidx;
		while (ip_nfragpackets > ip_maxfragpackets && wrapped == 0) {
			while (LIST_FIRST(&ipq[i]) != NULL) {
				ip_freef(LIST_FIRST(&ipq[i]));
			}
			if (++i >= IPREASS_HASH_SIZE) {
				i = 0;
			}
			/*
			 * Do not scan forever even if fragment counters are
			 * wrong: stop after scanning entire reassembly queue.
			 */
			if (i == dropscanidx) {
				wrapped = 1;
			}
		}
		dropscanidx = i;
	}
	IPQ_UNLOCK();
}