/* $NetBSD: ip_flow.c,v 1.82 2018/04/11 08:29:19 maxv Exp $ */

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am"). It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.82 2018/04/11 08:29:19 maxv Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */
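
/*
 * Each ipflow entry caches the (src, dst, tos) key of a forwarded IPv4
 * flow together with a route cache (ipf_ro), so that subsequent packets
 * of the same flow can be sent without taking the full forwarding path.
 * Entries are linked into a hash table (ipflowtable) for lookup and onto
 * a single list (ipflowlist) for expiry and reclamation.
 */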

#define IPFLOW_HASHBITS 6 /* should not be a multiple of 8 */

static struct pool ipflow_pool;

TAILQ_HEAD(ipflowhead, ipflow);

#define IPFLOW_TIMER (5 * PR_SLOWHZ)
#define IPFLOW_DEFAULT_HASHSIZE (1 << IPFLOW_HASHBITS)

/*
 * ip_flow.c internal lock.
 * Using softnet_lock here would cause recursive locking.
 *
 * This is a tentative workaround.
 * We should make it scalable somehow in the future.
 */
static kmutex_t ipflow_lock;
static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define IPFLOW_INSERT(hashidx, ipf) \
do { \
        (ipf)->ipf_hashidx = (hashidx); \
        TAILQ_INSERT_HEAD(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
        TAILQ_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define IPFLOW_REMOVE(hashidx, ipf) \
do { \
        TAILQ_REMOVE(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
        TAILQ_REMOVE(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define IPFLOW_MAX 256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static struct ipflow *ipflow_reap(bool);
static void ipflow_sysctl_init(struct sysctllog **);

static void ipflow_slowtimo_work(struct work *, void *);
static struct workqueue *ipflow_slowtimo_wq;
static struct work ipflow_slowtimo_wk;

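/*
 * Hash a packet's flow key: the source and destination addresses are
 * folded in IPFLOW_HASHBITS-sized chunks together with the TOS byte, and
 * the result is masked down to the current hash table size.
 */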
static size_t
ipflow_hash(const struct ip *ip)
{
        size_t hash = ip->ip_tos;
        size_t idx;

        for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
                hash += (ip->ip_dst.s_addr >> (32 - idx)) +
                    (ip->ip_src.s_addr >> idx);
        }

        return hash & (ip_hashsize-1);
}

static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
        size_t hash;
        struct ipflow *ipf;

        KASSERT(mutex_owned(&ipflow_lock));

        hash = ipflow_hash(ip);

        TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
                if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
                    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
                    && ip->ip_tos == ipf->ipf_tos)
                        break;
        }
        return ipf;
}

void
ipflow_poolinit(void)
{

        pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
            NULL, IPL_NET);
}

static int
ipflow_reinit(int table_size)
{
        struct ipflowhead *new_table;
        size_t i;

        KASSERT(mutex_owned(&ipflow_lock));

        new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
            table_size, M_RTABLE, M_NOWAIT);

        if (new_table == NULL)
                return 1;

        if (ipflowtable != NULL)
                free(ipflowtable, M_RTABLE);

        ipflowtable = new_table;
        ip_hashsize = table_size;

        TAILQ_INIT(&ipflowlist);
        for (i = 0; i < ip_hashsize; i++)
                TAILQ_INIT(&ipflowtable[i]);

        return 0;
}

void
ipflow_init(void)
{
        int error;

        error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
            ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
        if (error != 0)
                panic("%s: workqueue_create failed (%d)\n", __func__, error);

        mutex_init(&ipflow_lock, MUTEX_DEFAULT, IPL_NONE);

        mutex_enter(&ipflow_lock);
        (void)ipflow_reinit(ip_hashsize);
        mutex_exit(&ipflow_lock);
        ipflow_sysctl_init(NULL);
}

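/*
 * Attempt to forward a packet using a cached flow. Returns 1 if the
 * packet was handled here (sent, or dropped by the output routine), and
 * 0 if it could not be fast-forwarded and must take the normal path.
 */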
int
ipflow_fastforward(struct mbuf *m)
{
        struct ip *ip;
        struct ip ip_store;
        struct ipflow *ipf;
        struct rtentry *rt = NULL;
        const struct sockaddr *dst;
        int error;
        int iplen;
        struct ifnet *ifp;
        int s;
        int ret = 0;

        mutex_enter(&ipflow_lock);
        /*
         * Are we forwarding packets? Big enough for an IP packet?
         */
        if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
                goto out;

        /*
         * Was the packet received as a link-level multicast or broadcast?
         * If so, don't try to fast forward it.
         */
        if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
                goto out;

        /*
         * IP header with no options and valid version and length
         */
        if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
                ip = mtod(m, struct ip *);
        else {
                memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
                ip = &ip_store;
        }
        iplen = ntohs(ip->ip_len);
        if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
            iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
                goto out;
        /*
         * Find a flow.
         */
        if ((ipf = ipflow_lookup(ip)) == NULL)
                goto out;

        ifp = m_get_rcvif(m, &s);
        if (__predict_false(ifp == NULL))
                goto out_unref;
        /*
         * Verify the IP header checksum.
         */
        switch (m->m_pkthdr.csum_flags &
            ((ifp->if_csum_flags_rx & M_CSUM_IPv4) |
             M_CSUM_IPv4_BAD)) {
        case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
                m_put_rcvif(ifp, &s);
                goto out_unref;

        case M_CSUM_IPv4:
                /* Checksum was okay. */
                break;

        default:
                /* Must compute it ourselves. */
                if (in_cksum(m, sizeof(struct ip)) != 0) {
                        m_put_rcvif(ifp, &s);
                        goto out_unref;
                }
                break;
        }
        m_put_rcvif(ifp, &s);

        /*
         * Route and interface still up?
         */
        rt = rtcache_validate(&ipf->ipf_ro);
        if (rt == NULL || (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
            (rt->rt_flags & (RTF_BLACKHOLE | RTF_BROADCAST)) != 0)
                goto out_unref;

        /*
         * Packet size OK? TTL?
         */
        if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
                goto out_unref;

        /*
         * Clear any in-bound checksum flags for this packet.
         */
        m->m_pkthdr.csum_flags = 0;

        /*
         * Everything checks out and so we can forward this packet.
         * Modify the TTL and incrementally change the checksum.
         *
         * This method of adding the checksum works on either endian CPU.
         * If htons() is inlined, all the arithmetic is folded; otherwise
         * the htons()s are combined by CSE due to the const attribute.
         *
         * Don't bother using HW checksumming here -- the incremental
         * update is pretty fast.
         */
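        /*
         * Note on the carry handling below: the addition must be done in
         * one's-complement arithmetic. When ip_sum is large enough that
         * adding htons(IPTTLDEC << 8) would overflow 16 bits, subtracting
         * the complement of the increment instead gives the same result
         * as adding the increment and folding the end-around carry back in.
         */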
        ip->ip_ttl -= IPTTLDEC;
        if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
                ip->ip_sum -= ~htons(IPTTLDEC << 8);
        else
                ip->ip_sum += htons(IPTTLDEC << 8);

        /*
         * Done modifying the header; copy it back, if necessary.
         *
         * XXX Use m_copyback_cow(9) here? --dyoung
         */
        if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
                memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

        /*
         * Trim the packet in case it's too long.
         */
        if (m->m_pkthdr.len > iplen) {
                if (m->m_len == m->m_pkthdr.len) {
                        m->m_len = iplen;
                        m->m_pkthdr.len = iplen;
                } else
                        m_adj(m, iplen - m->m_pkthdr.len);
        }

        /*
         * Send the packet on its way. All we can get back is ENOBUFS.
         */
        ipf->ipf_uses++;

#if 0
        /*
         * Sorting the list is too heavy for the fast path (packet
         * processing path); it degrades performance by about 10%. So we
         * do not sort ipflowtable, and use FIFO cache replacement instead
         * of LRU.
         */
        /*
         * Move to the head of ipflowlist (LRU); ipflowtable does not
         * maintain LRU order.
         */
        TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
        TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
#endif

        PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

        if (rt->rt_flags & RTF_GATEWAY)
                dst = rt->rt_gateway;
        else
                dst = rtcache_getdst(&ipf->ipf_ro);

        if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
                if (error == ENOBUFS)
                        ipf->ipf_dropped++;
                else
                        ipf->ipf_errors++;
        }
        ret = 1;
 out_unref:
        rtcache_unref(rt, &ipf->ipf_ro);
 out:
        mutex_exit(&ipflow_lock);
        return ret;
}

static void
ipflow_addstats(struct ipflow *ipf)
{
        struct rtentry *rt;
        uint64_t *ips;

        rt = rtcache_validate(&ipf->ipf_ro);
        if (rt != NULL) {
                rt->rt_use += ipf->ipf_uses;
                rtcache_unref(rt, &ipf->ipf_ro);
        }

        ips = IP_STAT_GETREF();
        ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
        ips[IP_STAT_TOTAL] += ipf->ipf_uses;
        ips[IP_STAT_FORWARD] += ipf->ipf_uses;
        ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
        IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{

        KASSERT(mutex_owned(&ipflow_lock));

        /*
         * Remove the flow from the hash table (at elevated IPL).
         * Once it's off the list, we can deal with it at normal
         * network IPL.
         */
        IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

        ipflow_addstats(ipf);
        rtcache_free(&ipf->ipf_ro);
        ipflow_inuse--;
        pool_put(&ipflow_pool, ipf);
}

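/*
 * Reclaim flow entries. With just_one set (fast path), detach a single
 * entry and hand it back to the caller for reuse; otherwise free entries
 * until ipflow_inuse drops to ip_maxflows and return NULL.
 */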
static struct ipflow *
ipflow_reap(bool just_one)
{
        struct ipflow *ipf;

        KASSERT(mutex_owned(&ipflow_lock));

        /*
         * This case must remove one ipflow. Furthermore, it is used in the
         * fast path (packet processing path), so simply remove the
         * TAILQ_LAST entry.
         */
        if (just_one) {
                ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
                KASSERT(ipf != NULL);

                IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

                ipflow_addstats(ipf);
                rtcache_free(&ipf->ipf_ro);
                return ipf;
        }

        /*
         * This case is used in the slow path (sysctl). First reclaim any
         * ipflow whose rtcache is no longer valid; otherwise remove the
         * entry judged least recently used, starting from TAILQ_LAST and
         * comparing timers and last_uses.
         */
        while (ipflow_inuse > ip_maxflows) {
                struct ipflow *maybe_ipf = TAILQ_LAST(&ipflowlist, ipflowhead);

                TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
                        struct rtentry *rt;
                        /*
                         * If this no longer points to a valid route,
                         * reclaim it.
                         */
                        rt = rtcache_validate(&ipf->ipf_ro);
                        if (rt == NULL)
                                goto done;
                        rtcache_unref(rt, &ipf->ipf_ro);
                        /*
                         * Choose the one that's been least recently
                         * used or has had the least uses in the
                         * last 1.5 intervals.
                         */
                        if (ipf->ipf_timer < maybe_ipf->ipf_timer
                            || ((ipf->ipf_timer == maybe_ipf->ipf_timer)
                            && (ipf->ipf_last_uses + ipf->ipf_uses
                            < maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses)))
                                maybe_ipf = ipf;
                }
                ipf = maybe_ipf;
 done:
                /*
                 * Remove the entry from the flow table.
                 */
                IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

                ipflow_addstats(ipf);
                rtcache_free(&ipf->ipf_ro);
                pool_put(&ipflow_pool, ipf);
                ipflow_inuse--;
        }
        return NULL;
}

static unsigned int ipflow_work_enqueued = 0;

static void
ipflow_slowtimo_work(struct work *wk, void *arg)
{
        struct rtentry *rt;
        struct ipflow *ipf, *next_ipf;
        uint64_t *ips;

        /* We can allow enqueuing another work item at this point */
        atomic_swap_uint(&ipflow_work_enqueued, 0);

        SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
        mutex_enter(&ipflow_lock);
        for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
                next_ipf = TAILQ_NEXT(ipf, ipf_list);
                if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
                    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
                        ipflow_free(ipf);
                } else {
                        ipf->ipf_last_uses = ipf->ipf_uses;
                        rt->rt_use += ipf->ipf_uses;
                        rtcache_unref(rt, &ipf->ipf_ro);
                        ips = IP_STAT_GETREF();
                        ips[IP_STAT_TOTAL] += ipf->ipf_uses;
                        ips[IP_STAT_FORWARD] += ipf->ipf_uses;
                        ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
                        IP_STAT_PUTREF();
                        ipf->ipf_uses = 0;
                }
        }
        mutex_exit(&ipflow_lock);
        SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

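/*
 * Periodic entry point (presumably driven from the IP slow-timeout path).
 * It only schedules the expiry scan, which runs from the workqueue above
 * so it can take the softnet/kernel lock; the ipflow_work_enqueued flag
 * keeps at most one work item queued at a time.
 */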
void
ipflow_slowtimo(void)
{

        /* Avoid enqueuing another work item when one is already enqueued */
        if (atomic_swap_uint(&ipflow_work_enqueued, 1) == 1)
                return;

        workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
}

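/*
 * Record a flow keyed on (src, dst, tos) and cache the route that was just
 * used for it, so that subsequent packets of the same flow can be handled
 * by ipflow_fastforward().
 */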
void
ipflow_create(struct route *ro, struct mbuf *m)
{
        const struct ip *const ip = mtod(m, const struct ip *);
        struct ipflow *ipf;
        size_t hash;

        KERNEL_LOCK_UNLESS_NET_MPSAFE();
        mutex_enter(&ipflow_lock);

        /*
         * Don't create cache entries for ICMP messages.
         */
        if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
                goto out;

        /*
         * See if an existing flow struct exists. If so, remove it from its
         * list and free the old route. If not, try to allocate a new one
         * (if we aren't at our limit).
         */
        ipf = ipflow_lookup(ip);
        if (ipf == NULL) {
                if (ipflow_inuse >= ip_maxflows) {
                        ipf = ipflow_reap(true);
                } else {
                        ipf = pool_get(&ipflow_pool, PR_NOWAIT);
                        if (ipf == NULL)
                                goto out;
                        ipflow_inuse++;
                }
                memset(ipf, 0, sizeof(*ipf));
        } else {
                IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

                ipflow_addstats(ipf);
                rtcache_free(&ipf->ipf_ro);
                ipf->ipf_uses = ipf->ipf_last_uses = 0;
                ipf->ipf_errors = ipf->ipf_dropped = 0;
        }

        /*
         * Fill in the updated information.
         */
        rtcache_copy(&ipf->ipf_ro, ro);
        ipf->ipf_dst = ip->ip_dst;
        ipf->ipf_src = ip->ip_src;
        ipf->ipf_tos = ip->ip_tos;
        PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

        /*
         * Insert into the appropriate bucket of the flow table.
         */
        hash = ipflow_hash(ip);
        IPFLOW_INSERT(hash, ipf);

 out:
        mutex_exit(&ipflow_lock);
        KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

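/*
 * Free every cached flow and, if new_size is non-zero, resize the hash
 * table. Used by the hashsize sysctl handler below.
 */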
int
ipflow_invalidate_all(int new_size)
{
        struct ipflow *ipf, *next_ipf;
        int error;

        error = 0;

        mutex_enter(&ipflow_lock);

        for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
                next_ipf = TAILQ_NEXT(ipf, ipf_list);
                ipflow_free(ipf);
        }

        if (new_size)
                error = ipflow_reinit(new_size);

        mutex_exit(&ipflow_lock);

        return error;
}

/*
 * sysctl helper routine for net.inet.ip.maxflows.
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
        int error;

        error = sysctl_lookup(SYSCTLFN_CALL(rnode));
        if (error || newp == NULL)
                return (error);

        SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
        mutex_enter(&ipflow_lock);

        ipflow_reap(false);

        mutex_exit(&ipflow_lock);
        SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

        return (0);
}

static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
        int error, tmp;
        struct sysctlnode node;

        node = *rnode;
        tmp = ip_hashsize;
        node.sysctl_data = &tmp;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return (error);

        if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
                /*
                 * Can only fail due to malloc()
                 */
                SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
                error = ipflow_invalidate_all(tmp);
                SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
        } else {
                /*
                 * EINVAL if not a power of 2
                 */
                error = EINVAL;
        }

        return error;
}

static void
ipflow_sysctl_init(struct sysctllog **clog)
{
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "inet",
            SYSCTL_DESCR("PF_INET related settings"),
            NULL, 0, NULL, 0,
            CTL_NET, PF_INET, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "ip",
            SYSCTL_DESCR("IPv4 related settings"),
            NULL, 0, NULL, 0,
            CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, "maxflows",
            SYSCTL_DESCR("Number of flows for fast forwarding"),
            sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
            CTL_NET, PF_INET, IPPROTO_IP,
            IPCTL_MAXFLOWS, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, "hashsize",
            SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
            sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
            CTL_NET, PF_INET, IPPROTO_IP,
            CTL_CREATE, CTL_EOL);
}