/*	$NetBSD: ip_flow.c,v 1.73.2.3 2017/01/07 08:56:51 pgoyette Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am"). It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.73.2.3 2017/01/07 08:56:51 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */

#define IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */

static struct pool ipflow_pool;

TAILQ_HEAD(ipflowhead, ipflow);

#define IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)


/*
 * ip_flow.c internal lock.
 * Using softnet_lock here would lead to a recursive lock.
 *
 * This is a tentative workaround; we should make it scalable somehow
 * in the future.
 */
static kmutex_t ipflow_lock;
static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define IPFLOW_INSERT(hashidx, ipf) \
do { \
        (ipf)->ipf_hashidx = (hashidx); \
        TAILQ_INSERT_HEAD(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
        TAILQ_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define IPFLOW_REMOVE(hashidx, ipf) \
do { \
        TAILQ_REMOVE(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
        TAILQ_REMOVE(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define IPFLOW_MAX		256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static struct ipflow *ipflow_reap(bool);
static void ipflow_sysctl_init(struct sysctllog **);

static void ipflow_slowtimo_work(struct work *, void *);
static struct workqueue *ipflow_slowtimo_wq;
static struct work ipflow_slowtimo_wk;

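/*
 * Hash an IPv4 header down to a flow-table index: fold the source and
 * destination addresses (plus the TOS) into the sum in strides of
 * IPFLOW_HASHBITS bits.  A stride that is not a multiple of 8 keeps
 * whole bytes of the two addresses from lining up on top of each
 * other, so more of the address bits influence the final index.
 */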
static size_t
ipflow_hash(const struct ip *ip)
{
        size_t hash = ip->ip_tos;
        size_t idx;

        for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
                hash += (ip->ip_dst.s_addr >> (32 - idx)) +
                    (ip->ip_src.s_addr >> idx);
        }

        return hash & (ip_hashsize - 1);
}

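/*
 * Look up the flow that exactly matches the packet's source address,
 * destination address and TOS; return NULL if there is none.
 */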
static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
        size_t hash;
        struct ipflow *ipf;

        KASSERT(mutex_owned(&ipflow_lock));

        hash = ipflow_hash(ip);

        TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
                if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
                    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
                    && ip->ip_tos == ipf->ipf_tos)
                        break;
        }
        return ipf;
}

void
ipflow_poolinit(void)
{

        pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
            NULL, IPL_NET);
}

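/*
 * (Re)allocate the flow hash table with table_size buckets and reset
 * all list heads.  Any existing flow entries must already have been
 * freed by the caller; the old table itself is released here.
 * Returns 0 on success, 1 if the allocation fails.
 */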
static int
ipflow_reinit(int table_size)
{
        struct ipflowhead *new_table;
        size_t i;

        KASSERT(mutex_owned(&ipflow_lock));

        new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
            table_size, M_RTABLE, M_NOWAIT);

        if (new_table == NULL)
                return 1;

        if (ipflowtable != NULL)
                free(ipflowtable, M_RTABLE);

        ipflowtable = new_table;
        ip_hashsize = table_size;

        TAILQ_INIT(&ipflowlist);
        for (i = 0; i < ip_hashsize; i++)
                TAILQ_INIT(&ipflowtable[i]);

        return 0;
}

void
ipflow_init(void)
{
        int error;

        error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
            ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
        if (error != 0)
                panic("%s: workqueue_create failed (%d)\n", __func__, error);

        mutex_init(&ipflow_lock, MUTEX_DEFAULT, IPL_NONE);

        mutex_enter(&ipflow_lock);
        (void)ipflow_reinit(ip_hashsize);
        mutex_exit(&ipflow_lock);
        ipflow_sysctl_init(NULL);
}

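/*
 * Try to forward the packet along a cached flow, bypassing the full
 * ip_input()/ip_forward() path.  Returns 1 if the packet was handled
 * here (forwarded, or dropped after accounting) and 0 if the caller
 * should fall back to the slow path.
 */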
int
ipflow_fastforward(struct mbuf *m)
{
        struct ip *ip;
        struct ip ip_store;
        struct ipflow *ipf;
        struct rtentry *rt = NULL;
        const struct sockaddr *dst;
        int error;
        int iplen;
        struct ifnet *ifp;
        int s;
        int ret = 0;

        mutex_enter(&ipflow_lock);
        /*
         * Are we forwarding packets?  Big enough for an IP packet?
         */
        if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
                goto out;

        /*
         * Was the packet received as a link-level multicast or broadcast?
         * If so, don't try to fast-forward it.
         */
        if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
                goto out;

        /*
         * IP header with no option and valid version and length
         */
        if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
                ip = mtod(m, struct ip *);
        else {
                memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
                ip = &ip_store;
        }
        iplen = ntohs(ip->ip_len);
        if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
            iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
                goto out;
        /*
         * Find a flow.
         */
        if ((ipf = ipflow_lookup(ip)) == NULL)
                goto out;

        ifp = m_get_rcvif(m, &s);
        /*
         * Verify the IP header checksum.
         */
        switch (m->m_pkthdr.csum_flags &
            ((ifp->if_csum_flags_rx & M_CSUM_IPv4) |
             M_CSUM_IPv4_BAD)) {
        case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
                m_put_rcvif(ifp, &s);
                goto out_unref;

        case M_CSUM_IPv4:
                /* Checksum was okay. */
                break;

        default:
                /* Must compute it ourselves. */
                if (in_cksum(m, sizeof(struct ip)) != 0) {
                        m_put_rcvif(ifp, &s);
                        goto out_unref;
                }
                break;
        }
        m_put_rcvif(ifp, &s);

        /*
         * Route and interface still up?
         */
        rt = rtcache_validate(&ipf->ipf_ro);
        if (rt == NULL || (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
            (rt->rt_flags & (RTF_BLACKHOLE | RTF_BROADCAST)) != 0)
                goto out_unref;

        /*
         * Packet size OK?  TTL?
         */
        if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
                goto out_unref;

        /*
         * Clear any in-bound checksum flags for this packet.
         */
        m->m_pkthdr.csum_flags = 0;

        /*
         * Everything checks out, so we can forward this packet.
         * Modify the TTL and incrementally change the checksum.
         *
         * This method of adjusting the checksum works on a CPU of
         * either endianness.  If htons() is inlined, all the arithmetic
         * is folded; otherwise the htons()s are combined by CSE due to
         * the const attribute.
         *
         * Don't bother using HW checksumming here -- the incremental
         * update is pretty fast.
         */
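        /*
         * Sketch of the adjustment: the TTL occupies the high-order
         * byte of the 16-bit word it shares in the header, so
         * decrementing it by IPTTLDEC changes the ones'-complement sum
         * by htons(IPTTLDEC << 8); the comparison below handles the
         * ones'-complement wrap-around (cf. RFC 1141, incremental
         * checksum updates).
         */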
        ip->ip_ttl -= IPTTLDEC;
        if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
                ip->ip_sum -= ~htons(IPTTLDEC << 8);
        else
                ip->ip_sum += htons(IPTTLDEC << 8);

        /*
         * Done modifying the header; copy it back, if necessary.
         *
         * XXX Use m_copyback_cow(9) here? --dyoung
         */
        if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
                memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

        /*
         * Trim the packet in case it's too long.
         */
        if (m->m_pkthdr.len > iplen) {
                if (m->m_len == m->m_pkthdr.len) {
                        m->m_len = iplen;
                        m->m_pkthdr.len = iplen;
                } else
                        m_adj(m, iplen - m->m_pkthdr.len);
        }

        /*
         * Send the packet on its way.  All we can get back is ENOBUFS.
         */
        ipf->ipf_uses++;

#if 0
        /*
         * Sorting the list is too heavy for the fast path (the packet
         * processing path): it costs about 10% in performance.  So we
         * do not sort ipflowtable, and use FIFO cache replacement
         * instead of LRU.
         */
        /* Move to the head of ipflowlist (LRU).  ipflowtable does not
         * maintain LRU order. */
        TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
        TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
#endif

        PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

        if (rt->rt_flags & RTF_GATEWAY)
                dst = rt->rt_gateway;
        else
                dst = rtcache_getdst(&ipf->ipf_ro);

        if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
                if (error == ENOBUFS)
                        ipf->ipf_dropped++;
                else
                        ipf->ipf_errors++;
        }
        ret = 1;
 out_unref:
        rtcache_unref(rt, &ipf->ipf_ro);
 out:
        mutex_exit(&ipflow_lock);
        return ret;
}

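/*
 * Fold the flow's accumulated counters into the cached route's use
 * count and the global IP statistics; called just before a flow's
 * route cache is torn down.
 */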
static void
ipflow_addstats(struct ipflow *ipf)
{
        struct rtentry *rt;
        uint64_t *ips;

        rt = rtcache_validate(&ipf->ipf_ro);
        if (rt != NULL) {
                rt->rt_use += ipf->ipf_uses;
                rtcache_unref(rt, &ipf->ipf_ro);
        }

        ips = IP_STAT_GETREF();
        ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
        ips[IP_STAT_TOTAL] += ipf->ipf_uses;
        ips[IP_STAT_FORWARD] += ipf->ipf_uses;
        ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
        IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{

        KASSERT(mutex_owned(&ipflow_lock));

        /*
         * Remove the flow from the hash table (at elevated IPL).
         * Once it's off the list, we can deal with it at normal
         * network IPL.
         */
        IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

        ipflow_addstats(ipf);
        rtcache_free(&ipf->ipf_ro);
        ipflow_inuse--;
        pool_put(&ipflow_pool, ipf);
}

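/*
 * Reclaim flow entries.  With just_one set, detach a single victim
 * and hand it back to the caller for reuse; otherwise free entries
 * until we are back under ip_maxflows, then return NULL.
 */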
static struct ipflow *
ipflow_reap(bool just_one)
{
        struct ipflow *ipf;

        KASSERT(mutex_owned(&ipflow_lock));

        /*
         * This case must remove exactly one ipflow, and it is used in
         * the fast path (the packet processing path), so simply take
         * the TAILQ_LAST entry.
         */
        if (just_one) {
                ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
                KASSERT(ipf != NULL);

                IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

                ipflow_addstats(ipf);
                rtcache_free(&ipf->ipf_ro);
                return ipf;
        }

        /*
         * This case is used in the slow path (sysctl).  First reclaim
         * any ipflow whose rtcache is no longer valid; failing that,
         * remove the candidate that comparing timers and last_uses
         * shows to be least recently used.
         */
        while (ipflow_inuse > ip_maxflows) {
                struct ipflow *maybe_ipf = TAILQ_LAST(&ipflowlist, ipflowhead);

                TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
                        struct rtentry *rt;
                        /*
                         * If this no longer points to a valid route
                         * reclaim it.
                         */
                        rt = rtcache_validate(&ipf->ipf_ro);
                        if (rt == NULL)
                                goto done;
                        rtcache_unref(rt, &ipf->ipf_ro);
                        /*
                         * choose the one that's been least recently
                         * used or has had the least uses in the
                         * last 1.5 intervals.
                         */
                        if (ipf->ipf_timer < maybe_ipf->ipf_timer
                            || ((ipf->ipf_timer == maybe_ipf->ipf_timer)
                                && (ipf->ipf_last_uses + ipf->ipf_uses
                                    < maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses)))
                                maybe_ipf = ipf;
                }
                ipf = maybe_ipf;
 done:
                /*
                 * Remove the entry from the flow table.
                 */
                IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

                ipflow_addstats(ipf);
                rtcache_free(&ipf->ipf_ro);
                pool_put(&ipflow_pool, ipf);
                ipflow_inuse--;
        }
        return NULL;
}

static unsigned int ipflow_work_enqueued = 0;

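/*
 * Periodic scan of the flow list: free flows whose timer has expired
 * or whose cached route is no longer valid, and fold the usage
 * counters of the surviving flows into the statistics.
 */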
static void
ipflow_slowtimo_work(struct work *wk, void *arg)
{
        struct rtentry *rt;
        struct ipflow *ipf, *next_ipf;
        uint64_t *ips;

        /* We can allow enqueuing another work at this point */
        atomic_swap_uint(&ipflow_work_enqueued, 0);

#ifndef NET_MPSAFE
        mutex_enter(softnet_lock);
        KERNEL_LOCK(1, NULL);
#endif
        mutex_enter(&ipflow_lock);
        for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
                next_ipf = TAILQ_NEXT(ipf, ipf_list);
                if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
                    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
                        ipflow_free(ipf);
                } else {
                        ipf->ipf_last_uses = ipf->ipf_uses;
                        rt->rt_use += ipf->ipf_uses;
                        rtcache_unref(rt, &ipf->ipf_ro);
                        ips = IP_STAT_GETREF();
                        ips[IP_STAT_TOTAL] += ipf->ipf_uses;
                        ips[IP_STAT_FORWARD] += ipf->ipf_uses;
                        ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
                        IP_STAT_PUTREF();
                        ipf->ipf_uses = 0;
                }
        }
        mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
        KERNEL_UNLOCK_ONE(NULL);
        mutex_exit(softnet_lock);
#endif
}

void
ipflow_slowtimo(void)
{

        /* Avoid enqueuing another work when one is already enqueued */
        if (atomic_swap_uint(&ipflow_work_enqueued, 1) == 1)
                return;

        workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
}

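/*
 * Create or refresh the flow entry for a packet that has just been
 * forwarded the slow way, so that subsequent packets of the same
 * (src, dst, TOS) flow can take ipflow_fastforward().
 */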
void
ipflow_create(struct route *ro, struct mbuf *m)
{
        const struct ip *const ip = mtod(m, const struct ip *);
        struct ipflow *ipf;
        size_t hash;

#ifndef NET_MPSAFE
        KERNEL_LOCK(1, NULL);
#endif
        mutex_enter(&ipflow_lock);

        /*
         * Don't create cache entries for ICMP messages.
         */
        if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
                goto out;

        /*
         * See if a flow struct already exists.  If so, remove it from
         * its list and free the old route.  If not, try to allocate a
         * new one (if we aren't at our limit).
         */
        ipf = ipflow_lookup(ip);
        if (ipf == NULL) {
                if (ipflow_inuse >= ip_maxflows) {
                        ipf = ipflow_reap(true);
                } else {
                        ipf = pool_get(&ipflow_pool, PR_NOWAIT);
                        if (ipf == NULL)
                                goto out;
                        ipflow_inuse++;
                }
                memset(ipf, 0, sizeof(*ipf));
        } else {
                IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

                ipflow_addstats(ipf);
                rtcache_free(&ipf->ipf_ro);
                ipf->ipf_uses = ipf->ipf_last_uses = 0;
                ipf->ipf_errors = ipf->ipf_dropped = 0;
        }

        /*
         * Fill in the updated information.
         */
        rtcache_copy(&ipf->ipf_ro, ro);
        ipf->ipf_dst = ip->ip_dst;
        ipf->ipf_src = ip->ip_src;
        ipf->ipf_tos = ip->ip_tos;
        PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

        /*
         * Insert into the appropriate bucket of the flow table.
         */
        hash = ipflow_hash(ip);
        IPFLOW_INSERT(hash, ipf);

 out:
        mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
        KERNEL_UNLOCK_ONE(NULL);
#endif
}

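/*
 * Free every cached flow.  If new_size is nonzero, also reallocate
 * the hash table with that many buckets; 0 keeps the current table.
 */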
int
ipflow_invalidate_all(int new_size)
{
        struct ipflow *ipf, *next_ipf;
        int error;

        error = 0;

        mutex_enter(&ipflow_lock);

        for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
                next_ipf = TAILQ_NEXT(ipf, ipf_list);
                ipflow_free(ipf);
        }

        if (new_size)
                error = ipflow_reinit(new_size);

        mutex_exit(&ipflow_lock);

        return error;
}

/*
 * sysctl helper routine for net.inet.ip.maxflows.
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
        int error;

        error = sysctl_lookup(SYSCTLFN_CALL(rnode));
        if (error || newp == NULL)
                return (error);

#ifndef NET_MPSAFE
        mutex_enter(softnet_lock);
        KERNEL_LOCK(1, NULL);
#endif
        mutex_enter(&ipflow_lock);

        ipflow_reap(false);

        mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
        KERNEL_UNLOCK_ONE(NULL);
        mutex_exit(softnet_lock);
#endif

        return (0);
}

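/*
 * sysctl helper routine for net.inet.ip.hashsize.  The new value must
 * be a nonzero power of two; the flow table is then flushed and
 * reallocated at the new size.
 */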
static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
        int error, tmp;
        struct sysctlnode node;

        node = *rnode;
        tmp = ip_hashsize;
        node.sysctl_data = &tmp;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return (error);

        if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
                /*
                 * Can only fail due to malloc()
                 */
#ifndef NET_MPSAFE
                mutex_enter(softnet_lock);
                KERNEL_LOCK(1, NULL);
#endif
                error = ipflow_invalidate_all(tmp);
#ifndef NET_MPSAFE
                KERNEL_UNLOCK_ONE(NULL);
                mutex_exit(softnet_lock);
#endif
        } else {
                /*
                 * EINVAL if not a power of 2
                 */
                error = EINVAL;
        }

        return error;
}

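/*
 * Register the net.inet.ip.maxflows and net.inet.ip.hashsize sysctl
 * knobs, creating the parent inet/ip nodes if need be.
 */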
static void
ipflow_sysctl_init(struct sysctllog **clog)
{
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT,
                       CTLTYPE_NODE, "inet",
                       SYSCTL_DESCR("PF_INET related settings"),
                       NULL, 0, NULL, 0,
                       CTL_NET, PF_INET, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT,
                       CTLTYPE_NODE, "ip",
                       SYSCTL_DESCR("IPv4 related settings"),
                       NULL, 0, NULL, 0,
                       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                       CTLTYPE_INT, "maxflows",
                       SYSCTL_DESCR("Number of flows for fast forwarding"),
                       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
                       CTL_NET, PF_INET, IPPROTO_IP,
                       IPCTL_MAXFLOWS, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                       CTLTYPE_INT, "hashsize",
                       SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
                       sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
                       CTL_NET, PF_INET, IPPROTO_IP,
                       CTL_CREATE, CTL_EOL);
}
711