/*	$NetBSD: ip_flow.c,v 1.64.2.1 2017/05/12 05:44:10 snj Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.64.2.1 2017/05/12 05:44:10 snj Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */

#define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */
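
/*
 * ipflow_hash() below folds the source and destination addresses into
 * the bucket index in IPFLOW_HASHBITS-bit steps.  A step that is not a
 * multiple of 8 keeps the shifts from being byte-aligned, so each
 * address byte influences several bits of the index.
 */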

static struct pool ipflow_pool;

LIST_HEAD(ipflowhead, ipflow);

#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)

static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define	IPFLOW_INSERT(bucket, ipf) \
do { \
	LIST_INSERT_HEAD((bucket), (ipf), ipf_hash); \
	LIST_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(ipf) \
do { \
	LIST_REMOVE((ipf), ipf_hash); \
	LIST_REMOVE((ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)
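
/*
 * A flow is linked both into its hash bucket (via ipf_hash) and onto
 * the global ipflowlist (via ipf_list); the macros above keep the two
 * memberships in sync.  Lookups walk a bucket, while the timeout and
 * reaping code walk the global list.
 */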

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static void ipflow_sysctl_init(struct sysctllog **);

static void ipflow_slowtimo_work(struct work *, void *);
static struct workqueue	*ipflow_slowtimo_wq;
static struct work	ipflow_slowtimo_wk;

static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

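	/*
	 * ip_hashsize is kept a power of two (the hashsize sysctl
	 * handler rejects anything else), so masking with
	 * ip_hashsize - 1 selects a valid bucket.
	 */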
	return hash & (ip_hashsize-1);
}

static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	hash = ipflow_hash(ip);

	LIST_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

void
ipflow_poolinit(void)
{
	int error;

	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

static int
ipflow_reinit(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	LIST_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		LIST_INIT(&ipflowtable[i]);

	return 0;
}

void
ipflow_init(void)
{
	(void)ipflow_reinit(ip_hashsize);
	ipflow_sysctl_init(NULL);
}

int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt;
	const struct sockaddr *dst;
	int error;
	int iplen;

	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		return 0;

	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		return 0;

	/*
	 * IP header with no option and valid version and length
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		return 0;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		return 0;

	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
		((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		return (0);

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0)
			return (0);
		break;
	}

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0)
		return 0;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		return 0;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adding the checksum works on either endian CPU.
	 * If htons() is inlined, all the arithmetic is folded; otherwise
	 * the htons()s are combined by CSE due to the const attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);
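
	/*
	 * Example: with IPTTLDEC == 1, the 16-bit header word holding
	 * TTL and protocol drops by 0x0100 when read in network byte
	 * order, so the one's complement checksum must grow by
	 * htons(0x0100).  The comparison above supplies the end-around
	 * carry: x - ~y is x + y + 1 modulo 2^16, i.e. one's
	 * complement addition.
	 */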

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	KERNEL_LOCK(1, NULL);
	if ((error = (*rt->rt_ifp->if_output)(rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	KERNEL_UNLOCK_ONE(NULL);
	return 1;
}

static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
		rt->rt_use += ipf->ipf_uses;

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{
	int s;
	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	s = splnet();
	IPFLOW_REMOVE(ipf);
	splx(s);
	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	s = splnet();
	pool_put(&ipflow_pool, ipf);
	splx(s);
}

struct ipflow *
ipflow_reap(bool just_one)
{
	while (just_one || ipflow_inuse > ip_maxflows) {
		struct ipflow *ipf, *maybe_ipf = NULL;
		int s;

		ipf = LIST_FIRST(&ipflowlist);
		while (ipf != NULL) {
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			if (rtcache_validate(&ipf->ipf_ro) == NULL)
				goto done;
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (maybe_ipf == NULL ||
			    ipf->ipf_timer < maybe_ipf->ipf_timer ||
			    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
			     ipf->ipf_last_uses + ipf->ipf_uses <
			         maybe_ipf->ipf_last_uses +
			         maybe_ipf->ipf_uses))
				maybe_ipf = ipf;
			ipf = LIST_NEXT(ipf, ipf_list);
		}
		ipf = maybe_ipf;
	    done:
		/*
		 * Remove the entry from the flow table.
		 */
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		if (just_one)
			return ipf;
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

static bool ipflow_work_enqueued = false;

static void
ipflow_slowtimo_work(struct work *wk, void *arg)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	ipflow_work_enqueued = false;
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}

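/*
 * The expiry scan above runs from a workqueue rather than directly in
 * the slow timeout so that it can safely block taking softnet_lock;
 * ipflow_slowtimo() only schedules the work, and ipflow_work_enqueued
 * keeps at most one work item pending at a time.
 */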
void
ipflow_slowtimo(void)
{

	/* Avoid enqueueing another work item while one is still pending. */
	KERNEL_LOCK(1, NULL);
	if (ipflow_work_enqueued) {
		KERNEL_UNLOCK_ONE(NULL);
		return;
	}
	ipflow_work_enqueued = true;
	KERNEL_UNLOCK_ONE(NULL);

	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
}

void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;
	int s;

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		return;

	KERNEL_LOCK(1, NULL);

	/*
	 * See if an existing flow struct exists.  If so, remove it from
	 * its list and free the old route.  If not, try to allocate a
	 * new one (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			s = splnet();
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			splx(s);
			if (ipf == NULL)
				goto out;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	s = splnet();
	IPFLOW_INSERT(&ipflowtable[hash], ipf);
	splx(s);

 out:
	KERNEL_UNLOCK_ONE(NULL);
}

int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int s, error;

	error = 0;
	s = splnet();
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_reinit(new_size);
	splx(s);

	return error;
}

#ifdef GATEWAY
/*
 * sysctl helper routine for net.inet.ip.maxflows.
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	ipflow_reap(false);

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);

	return (0);
}

static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc()
		 */
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);

		error = ipflow_invalidate_all(tmp);

		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);

	} else {
		/*
		 * EINVAL if not a power of 2
		 */
		error = EINVAL;
	}

	return error;
}
#endif /* GATEWAY */

static void
ipflow_sysctl_init(struct sysctllog **clog)
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet",
		       SYSCTL_DESCR("PF_INET related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ip",
		       SYSCTL_DESCR("IPv4 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

#ifdef GATEWAY
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxflows",
		       SYSCTL_DESCR("Number of flows for fast forwarding"),
		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFLOWS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "hashsize",
		       SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
		       sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       CTL_CREATE, CTL_EOL);
#endif /* GATEWAY */
}
    631