/*	$NetBSD: ip_flow.c,v 1.65.2.3 2016/10/05 20:56:09 skrll Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.65.2.3 2016/10/05 20:56:09 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */

#define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */

static struct pool ipflow_pool;

TAILQ_HEAD(ipflowhead, ipflow);

#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)

/*
 * ip_flow.c internal lock.
 * Using softnet_lock here would cause a recursive lock.
 *
 * This is a tentative workaround; it should be made scalable somehow
 * in the future.
 */
static kmutex_t ipflow_lock;
static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define	IPFLOW_INSERT(hashidx, ipf) \
do { \
	(ipf)->ipf_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(hashidx, ipf) \
do { \
	TAILQ_REMOVE(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_REMOVE(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static struct ipflow *ipflow_reap(bool);
static void ipflow_sysctl_init(struct sysctllog **);

static void ipflow_slowtimo_work(struct work *, void *);
static struct workqueue *ipflow_slowtimo_wq;
static struct work ipflow_slowtimo_wk;

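/*
 * Hash the source/destination addresses and TOS of an IP header into an
 * index into ipflowtable.
 */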
static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize-1);
}

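/*
 * Look up the flow that matches the source/destination addresses and TOS
 * of the given IP header, or return NULL if there is none.
 * Must be called with ipflow_lock held.
 */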
static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	hash = ipflow_hash(ip);

	TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

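/*
 * Initialize the pool from which ipflow entries are allocated.
 */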
void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

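/*
 * (Re)allocate the flow hash table with table_size buckets and reset the
 * flow list.  Returns 0 on success, 1 if the table could not be allocated.
 * Must be called with ipflow_lock held.
 */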
static int
ipflow_reinit(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ipflow_lock));

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	TAILQ_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		TAILQ_INIT(&ipflowtable[i]);

	return 0;
}

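/*
 * Initialize the fast-forwarding module: the slow-timeout workqueue, the
 * internal lock, the flow hash table and the sysctl nodes.
 */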
void
ipflow_init(void)
{
	int error;

	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ipflow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ipflow_lock);
	(void)ipflow_reinit(ip_hashsize);
	mutex_exit(&ipflow_lock);
	ipflow_sysctl_init(NULL);
}

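/*
 * Try to forward a received packet using a cached flow.  Returns 1 if the
 * packet was handed to the interface output routine along a cached route,
 * or 0 if it must take the normal ip_input()/ip_forward() path.
 */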
int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt;
	const struct sockaddr *dst;
	int error;
	int iplen;
	struct ifnet *ifp;
	int s;
	int ret = 0;

	mutex_enter(&ipflow_lock);
	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		goto out;

	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward it.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

	/*
	 * IP header with no options and a valid version and length.
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		goto out;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		goto out;

	ifp = m_get_rcvif(m, &s);
	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
	    ((ifp->if_csum_flags_rx & M_CSUM_IPv4) |
	     M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		m_put_rcvif(ifp, &s);
		goto out;

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0) {
			m_put_rcvif(ifp, &s);
			goto out;
		}
		break;
	}
	m_put_rcvif(ifp, &s);

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & (RTF_BLACKHOLE | RTF_BROADCAST)) != 0)
		goto out;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		goto out;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adding the checksum works on either endian CPU.
	 * If htons() is inlined, all the arithmetic is folded; otherwise
	 * the htons()s are combined by CSE due to the const attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;

#if 0
	/*
	 * Sorting the list is too heavy for the fast path (the packet
	 * processing path); it degrades performance by about 10%.  So we
	 * do not sort ipflowtable and use FIFO cache replacement instead
	 * of LRU.
	 */
	/*
	 * Move to the head (LRU) of ipflowlist; ipflowtable does not
	 * maintain LRU order.
	 */
	TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
	TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
#endif

	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	ret = 1;
 out:
	mutex_exit(&ipflow_lock);
	return ret;
}

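/*
 * Charge the per-flow counters of a flow that is being freed or recycled
 * to its cached route and to the global IP statistics.
 */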
static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
		rt->rt_use += ipf->ipf_uses;

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

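/*
 * Remove a flow from the table, account its statistics and release it
 * back to the pool.  Must be called with ipflow_lock held.
 */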
static void
ipflow_free(struct ipflow *ipf)
{

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	pool_put(&ipflow_pool, ipf);
}

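/*
 * Reclaim flow entries.  If just_one is true, detach and return a single
 * victim for reuse by the caller; otherwise free entries until no more
 * than ip_maxflows remain in use and return NULL.
 * Must be called with ipflow_lock held.
 */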
static struct ipflow *
ipflow_reap(bool just_one)
{
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * This case must remove exactly one ipflow.  Furthermore, it is
	 * used in the fast path (the packet processing path), so simply
	 * remove the TAILQ_LAST entry.
	 */
	if (just_one) {
		ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
		KASSERT(ipf != NULL);

		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		return ipf;
	}

	/*
	 * This case is used in the slow path (sysctl).  First reclaim any
	 * ipflow whose rtcache is no longer valid; otherwise remove the
	 * entry that is least recently used, judged by its timer and by
	 * comparing last_uses.
	 */
	while (ipflow_inuse > ip_maxflows) {
		struct ipflow *maybe_ipf = TAILQ_LAST(&ipflowlist, ipflowhead);

		TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			if (rtcache_validate(&ipf->ipf_ro) == NULL)
				goto done;
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (ipf->ipf_timer < maybe_ipf->ipf_timer
			    || ((ipf->ipf_timer == maybe_ipf->ipf_timer)
				&& (ipf->ipf_last_uses + ipf->ipf_uses
				    < maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses)))
				maybe_ipf = ipf;
		}
		ipf = maybe_ipf;
	    done:
		/*
		 * Remove the entry from the flow table.
		 */
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

static unsigned int ipflow_work_enqueued = 0;

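/*
 * Periodic work: free expired or stale flows, and charge the accumulated
 * per-flow use counts to the cached routes and the IP statistics.
 */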
static void
ipflow_slowtimo_work(struct work *wk, void *arg)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	/* We can allow another work item to be enqueued at this point */
	atomic_swap_uint(&ipflow_work_enqueued, 0);

	mutex_enter(softnet_lock);
	mutex_enter(&ipflow_lock);
	KERNEL_LOCK(1, NULL);
	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(&ipflow_lock);
	mutex_exit(softnet_lock);
}

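/*
 * Schedule the slow-timeout work, enqueuing at most one work item at a
 * time.
 */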
void
ipflow_slowtimo(void)
{

	/* Avoid enqueuing another work item when one is already enqueued */
	if (atomic_swap_uint(&ipflow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
}

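/*
 * Create or refresh the flow entry for a packet we have just forwarded,
 * caching its route so that later packets can take the fast path.
 */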
void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;

	mutex_enter(&ipflow_lock);

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP) {
		mutex_exit(&ipflow_lock);
		return;
	}

	KERNEL_LOCK(1, NULL);

	/*
	 * See if an existing flow exists.  If so, remove it from its
	 * list and free the old route.  If not, try to allocate a new
	 * flow entry (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			if (ipf == NULL)
				goto out;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	IPFLOW_INSERT(hash, ipf);

 out:
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(&ipflow_lock);
}

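/*
 * Free every cached flow and, if new_size is non-zero, resize the hash
 * table.  Returns 0 on success, non-zero if the new table could not be
 * allocated.
 */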
int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int error;

	error = 0;

	mutex_enter(&ipflow_lock);

	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_reinit(new_size);

	mutex_exit(&ipflow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet.ip.maxflows.
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

	mutex_enter(softnet_lock);
	mutex_enter(&ipflow_lock);
	KERNEL_LOCK(1, NULL);

	ipflow_reap(false);

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(&ipflow_lock);
	mutex_exit(softnet_lock);

	return (0);
}

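/*
 * sysctl helper routine for net.inet.ip.hashsize.  The new size must be
 * a non-zero power of 2; the flow cache is flushed and the hash table
 * reallocated.
 */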
static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc()
		 */
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);

		error = ipflow_invalidate_all(tmp);

		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);

	} else {
		/*
		 * EINVAL if not a power of 2
		 */
		error = EINVAL;
	}

	return error;
}

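/*
 * Create the net.inet.ip.maxflows and net.inet.ip.hashsize sysctl nodes.
 */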
static void
ipflow_sysctl_init(struct sysctllog **clog)
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet",
		       SYSCTL_DESCR("PF_INET related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ip",
		       SYSCTL_DESCR("IPv4 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxflows",
		       SYSCTL_DESCR("Number of flows for fast forwarding"),
		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFLOWS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "hashsize",
		       SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
		       sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       CTL_CREATE, CTL_EOL);
}