/*	$NetBSD: ip_flow.c,v 1.59.8.1 2012/04/17 00:08:40 yamt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.59.8.1 2012/04/17 00:08:40 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */

struct ipflow {
	LIST_ENTRY(ipflow) ipf_list;	/* next in active list */
	LIST_ENTRY(ipflow) ipf_hash;	/* next ipflow in bucket */
	struct in_addr ipf_dst;		/* destination address */
	struct in_addr ipf_src;		/* source address */
	uint8_t ipf_tos;		/* type-of-service */
	struct route ipf_ro;		/* associated route entry */
	u_long ipf_uses;		/* number of uses in this period */
	u_long ipf_last_uses;		/* number of uses in last period */
	u_long ipf_dropped;		/* ENOBUFS returned by if_output */
	u_long ipf_errors;		/* other errors returned by if_output */
	u_int ipf_timer;		/* lifetime timer */
};

#define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */

static struct pool ipflow_pool;

LIST_HEAD(ipflowhead, ipflow);

#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)
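
/*
 * IPFLOW_TIMER is the idle lifetime of a cached flow expressed in
 * slow-timeout ticks (PR_SLOWHZ ticks per second), i.e. five seconds;
 * the timer is re-armed each time the flow forwards a packet.
 */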

static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

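/*
 * Each active flow is linked on two lists at once: the per-bucket hash
 * chain (ipf_hash) used by ipflow_lookup(), and the single global
 * ipflowlist (ipf_list) walked by the timeout and reaping code.  The
 * macros below keep the two in step; callers invoke them at splnet().
 */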
#define	IPFLOW_INSERT(bucket, ipf) \
do { \
	LIST_INSERT_HEAD((bucket), (ipf), ipf_hash); \
	LIST_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(ipf) \
do { \
	LIST_REMOVE((ipf), ipf_hash); \
	LIST_REMOVE((ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
int ip_maxflows = IPFLOW_MAX;
int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

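/*
 * Hash a flow by folding the type-of-service byte together with the
 * source and destination addresses, IPFLOW_HASHBITS bits at a time,
 * then masking the sum to the current table size.  With the default
 * IPFLOW_HASHBITS of 6, idx steps through 0, 6, 12, 18, 24 and 30, so
 * each round pulls differently aligned chunks of both addresses into
 * the low-order bits; presumably that is why IPFLOW_HASHBITS should
 * not be a multiple of 8.
 */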
static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize-1);
}

static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	hash = ipflow_hash(ip);

	LIST_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

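/*
 * Allocate (or re-allocate) the flow hash table.  Returns 0 on success
 * and 1 if the allocation fails, in which case the previous table, if
 * any, is left untouched.  Any existing flow entries are assumed to
 * have been freed already (as ipflow_invalidate_all() does), since the
 * global flow list is reinitialized here.
 */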
int
ipflow_init(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	LIST_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		LIST_INIT(&ipflowtable[i]);

	return 0;
}

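/*
 * Try to forward a packet using a cached flow.  Returns 1 if the packet
 * was handled here (handed to if_output(), successfully or not) and 0
 * if the caller should fall back to the normal forwarding path.
 */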
int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt;
	const struct sockaddr *dst;
	int error;
	int iplen;

	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		return 0;

	/*
	 * Was packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		return 0;

	/*
	 * IP header with no options and valid version and length
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		return 0;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		return 0;

	/*
	 * Verify the IP header checksum.
	 */
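	/*
	 * The mask below admits only what the receiving interface can
	 * legitimately report: M_CSUM_IPv4 if it advertises hardware
	 * IPv4 header checksumming, plus M_CSUM_IPv4_BAD if that check
	 * failed.  Any other combination falls through to a software
	 * in_cksum() over the header.
	 */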
	switch (m->m_pkthdr.csum_flags &
		((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		return (0);

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0)
			return (0);
		break;
	}

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0)
		return 0;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		return 0;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adding the checksum works on either endian CPU.
	 * If htons() is inlined, all the arithmetic is folded; otherwise
	 * the htons()s are combined by CSE due to the const attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
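	/*
	 * Illustrative example of the update below (assuming IPTTLDEC is
	 * 1): decrementing the TTL lowers the 16-bit header word holding
	 * it by 0x0100, so the stored checksum must grow by htons(0x0100)
	 * in one's-complement arithmetic.  When adding that constant
	 * would overflow the 16-bit sum, the code subtracts its one's
	 * complement instead, which gives the same result with the
	 * end-around carry folded in; otherwise it simply adds the
	 * constant.  This is the incremental update of RFC 1141/1624.
	 */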
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
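	/*
	 * (When the packet spans more than one mbuf, the length passed
	 * to m_adj() below is negative, so the excess is trimmed from
	 * the tail of the chain.)
	 */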
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS
	 */
	ipf->ipf_uses++;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	KERNEL_LOCK(1, NULL);
	if ((error = (*rt->rt_ifp->if_output)(rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	KERNEL_UNLOCK_ONE(NULL);
	return 1;
}

static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
		rt->rt_use += ipf->ipf_uses;

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{
	int s;
	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	s = splnet();
	IPFLOW_REMOVE(ipf);
	splx(s);
	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	s = splnet();
	pool_put(&ipflow_pool, ipf);
	splx(s);
}

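/*
 * Evict flow entries.  A flow whose cached route is no longer valid is
 * taken first; failing that, the entry with the earliest expiry and the
 * fewest recent uses is chosen.  With just_one set, that single entry
 * is unlinked and returned for reuse by ipflow_create(); otherwise
 * entries are freed until ipflow_inuse is back within ip_maxflows and
 * NULL is returned.
 */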
static struct ipflow *
ipflow_reap(bool just_one)
{
	while (just_one || ipflow_inuse > ip_maxflows) {
		struct ipflow *ipf, *maybe_ipf = NULL;
		int s;

		ipf = LIST_FIRST(&ipflowlist);
		while (ipf != NULL) {
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			if (rtcache_validate(&ipf->ipf_ro) == NULL)
				goto done;
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (maybe_ipf == NULL ||
			    ipf->ipf_timer < maybe_ipf->ipf_timer ||
			    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
			     ipf->ipf_last_uses + ipf->ipf_uses <
			     maybe_ipf->ipf_last_uses +
			     maybe_ipf->ipf_uses))
				maybe_ipf = ipf;
			ipf = LIST_NEXT(ipf, ipf_list);
		}
		ipf = maybe_ipf;
	    done:
		/*
		 * Remove the entry from the flow table.
		 */
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		if (just_one)
			return ipf;
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

void
ipflow_prune(void)
{

	(void) ipflow_reap(false);
}

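/*
 * Slow-timeout handler: free flows whose timer has expired or whose
 * cached route has gone away, and fold the per-flow use counts of the
 * survivors into the route and the global IP statistics.
 */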
void
ipflow_slowtimo(void)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}

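/*
 * Create or refresh the flow entry for this packet, caching the given
 * route so that later packets in the same flow can be sent through
 * ipflow_fastforward().
 */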
void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;
	int s;

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		return;
	/*
	 * See if an existing flow struct exists.  If so remove it from its
	 * list and free the old route.  If not, try to malloc a new one
	 * (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			s = splnet();
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			splx(s);
			if (ipf == NULL)
				return;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	s = splnet();
	IPFLOW_INSERT(&ipflowtable[hash], ipf);
	splx(s);
}

int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int s, error;

	error = 0;
	s = splnet();
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_init(new_size);
	splx(s);

	return error;
}