/*	$NetBSD: ip_flow.c,v 1.65.2.5 2017/02/05 13:40:59 skrll Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.65.2.5 2017/02/05 13:40:59 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */
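
/*
 * An ipflow caches the forwarding decision for a flow identified by
 * the (ip_src, ip_dst, ip_tos) triple.  Once the normal forwarding
 * path has handled the first packet of a flow and ipflow_create() has
 * recorded the route, later packets can be sent by
 * ipflow_fastforward() without taking the full ip_forward() path.
 */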

#define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */

static struct pool ipflow_pool;

TAILQ_HEAD(ipflowhead, ipflow);

#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)

/*
 * ip_flow.c internal lock.
 * Using softnet_lock here would cause lock recursion.
 *
 * This is a tentative workaround; we should make it scalable somehow
 * in the future.
 */
static kmutex_t ipflow_lock;
static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define	IPFLOW_INSERT(hashidx, ipf) \
do { \
	(ipf)->ipf_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(hashidx, ipf) \
do { \
	TAILQ_REMOVE(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_REMOVE(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static struct ipflow *ipflow_reap(bool);
static void ipflow_sysctl_init(struct sysctllog **);

static void ipflow_slowtimo_work(struct work *, void *);
static struct workqueue	*ipflow_slowtimo_wq;
static struct work	ipflow_slowtimo_wk;

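/*
 * Fold the source and destination addresses, at shifts staggered by
 * IPFLOW_HASHBITS, together with the TOS into a bucket index.
 * IPFLOW_HASHBITS should not be a multiple of 8, presumably so the
 * shifts do not stay aligned on octet boundaries and the address
 * octets mix more thoroughly.
 */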
static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		/*
		 * Skip the ip_dst term on the first iteration: shifting
		 * a 32-bit value by 32 bits is undefined behavior.
		 */
		hash += (idx != 0 ? (ip->ip_dst.s_addr >> (32 - idx)) : 0) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize - 1);
}

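/*
 * Look up the flow for the given IP header.  Scans the hash bucket
 * for an entry matching the (ip_src, ip_dst, ip_tos) triple and
 * returns NULL if the flow is not cached.  The caller must hold
 * ipflow_lock.
 */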
static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	hash = ipflow_hash(ip);

	TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

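/*
 * (Re)allocate the flow hash table and reset the flow lists.  Both
 * ip_hashsize and the lists are clobbered, so any existing flows must
 * have been freed beforehand.  Returns ENOMEM if the new table cannot
 * be allocated.  The caller must hold ipflow_lock.
 */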
static int
ipflow_reinit(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ipflow_lock));

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return ENOMEM;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	TAILQ_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		TAILQ_INIT(&ipflowtable[i]);

	return 0;
}

void
ipflow_init(void)
{
	int error;

	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ipflow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ipflow_lock);
	(void)ipflow_reinit(ip_hashsize);
	mutex_exit(&ipflow_lock);
	ipflow_sysctl_init(NULL);
}

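/*
 * Fast-forward an mbuf along a cached flow.  Returns 1 if the packet
 * was handled (sent or dropped) and 0 if the caller must pass it
 * along the normal input/forwarding path, e.g. because there is no
 * cached flow, the header fails validation, the route has gone stale,
 * the packet exceeds the MTU, or the TTL is exhausted.
 */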
int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt = NULL;
	const struct sockaddr *dst;
	int error;
	int iplen;
	struct ifnet *ifp;
	int s;
	int ret = 0;

	mutex_enter(&ipflow_lock);
	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		goto out;

	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast-forward it.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

	/*
	 * IP header with no options, and valid version and length.
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		goto out;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		goto out;

	ifp = m_get_rcvif(m, &s);
	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
		((ifp->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		m_put_rcvif(ifp, &s);
		goto out_unref;

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0) {
			m_put_rcvif(ifp, &s);
			goto out_unref;
		}
		break;
	}
	m_put_rcvif(ifp, &s);

	/*
	 * Route and interface still up?
	 */
	rt = rtcache_validate(&ipf->ipf_ro);
	if (rt == NULL || (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & (RTF_BLACKHOLE | RTF_BROADCAST)) != 0)
		goto out_unref;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		goto out_unref;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adding the checksum works on either endian CPU.
	 * If htons() is inlined, all the arithmetic is folded; otherwise
	 * the htons()s are combined by CSE due to the const attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
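	/*
	 * This is the standard incremental-update identity (cf. RFC 1141):
	 * add htons(IPTTLDEC << 8) to ip_sum with an end-around carry,
	 * expressed as a subtraction of the one's complement whenever the
	 * plain addition would wrap past 16 bits.
	 */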
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}


	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;

#if 0
	/*
	 * Sorting the list is too heavy for the fast path (the packet
	 * processing path); it costs about 10% in performance.  So we
	 * do not sort ipflowtable, and use FIFO cache replacement
	 * instead of LRU.
	 */
	/* Move to head (LRU) of ipflowlist; ipflowtable does not track LRU. */
	TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
	TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
#endif

	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	ret = 1;
out_unref:
	rtcache_unref(rt, &ipf->ipf_ro);
out:
	mutex_exit(&ipflow_lock);
	return ret;
}

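/*
 * Fold this flow's pending counters into the route's use count and
 * the global IP statistics before the flow is torn down or recycled.
 */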
static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	rt = rtcache_validate(&ipf->ipf_ro);
	if (rt != NULL) {
		rt->rt_use += ipf->ipf_uses;
		rtcache_unref(rt, &ipf->ipf_ro);
	}

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	pool_put(&ipflow_pool, ipf);
}

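/*
 * Evict flows.  With just_one set (the fast path), detach and return
 * the TAILQ_LAST entry so the caller can reuse it; its statistics are
 * flushed but the pool entry is kept.  Otherwise (the slow path),
 * repeatedly free entries until ipflow_inuse no longer exceeds
 * ip_maxflows, preferring stale rtcaches and least recently used
 * flows, and return NULL.
 */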
static struct ipflow *
ipflow_reap(bool just_one)
{
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * We must remove exactly one ipflow here, and this path is used
	 * in the fast path (the packet processing path), so simply
	 * remove the TAILQ_LAST entry.
	 */
	if (just_one) {
		ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
		KASSERT(ipf != NULL);

		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		return ipf;
	}

	/*
	 * This path is used in the slow path (sysctl).  First reclaim
	 * any ipflow whose rtcache is no longer valid; failing that,
	 * remove the entry believed least recently used, judged by
	 * comparing timers and last_uses.
	 */
	while (ipflow_inuse > ip_maxflows) {
		struct ipflow *maybe_ipf = TAILQ_LAST(&ipflowlist, ipflowhead);

		TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
			struct rtentry *rt;
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			rt = rtcache_validate(&ipf->ipf_ro);
			if (rt == NULL)
				goto done;
			rtcache_unref(rt, &ipf->ipf_ro);
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (ipf->ipf_timer < maybe_ipf->ipf_timer
			    || ((ipf->ipf_timer == maybe_ipf->ipf_timer)
				&& (ipf->ipf_last_uses + ipf->ipf_uses
				    < maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses)))
				maybe_ipf = ipf;
		}
		ipf = maybe_ipf;
	    done:
		/*
		 * Remove the entry from the flow table.
		 */
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

static unsigned int ipflow_work_enqueued = 0;

static void
ipflow_slowtimo_work(struct work *wk, void *arg)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	/* We can allow another work item to be enqueued at this point. */
	atomic_swap_uint(&ipflow_work_enqueued, 0);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);
	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			rtcache_unref(rt, &ipf->ipf_ro);
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif
}

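/*
 * Periodic slow-timer hook.  The scan itself runs from a workqueue so
 * that timer context only pays for an atomic flag test; the flag
 * keeps at most one work item in flight at a time.
 */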
void
ipflow_slowtimo(void)
{

	/* Avoid enqueueing another work item when one is already enqueued. */
	if (atomic_swap_uint(&ipflow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
}

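/*
 * Called from the forwarding path to record (or refresh) the flow for
 * a packet we have just forwarded, caching a copy of the route *ro
 * for later use by ipflow_fastforward().
 */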
void
ipflow_create(struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;

#ifndef NET_MPSAFE
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		goto out;

	/*
	 * See if an existing flow struct exists.  If so, remove it from
	 * its list and free the old route.  If not, try to allocate a
	 * new one (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			if (ipf == NULL)
				goto out;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	IPFLOW_INSERT(hash, ipf);

 out:
	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
#endif
}

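/*
 * Free every cached flow and, if new_size is nonzero, rebuild the
 * hash table with that many buckets.  Returns 0 on success or the
 * error from ipflow_reinit().
 */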
int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int error;

	error = 0;

	mutex_enter(&ipflow_lock);

	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_reinit(new_size);

	mutex_exit(&ipflow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet.ip.maxflows.
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);

	ipflow_reap(false);

	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif

	return (0);
}

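/*
 * sysctl helper routine for net.inet.ip.hashsize.  The new size must
 * be a nonzero power of two; all cached flows are invalidated when
 * the table is resized.
 */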
static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc().
		 */
#ifndef NET_MPSAFE
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);
#endif
		error = ipflow_invalidate_all(tmp);
#ifndef NET_MPSAFE
		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);
#endif
	} else {
		/*
		 * EINVAL if not a power of 2.
		 */
		error = EINVAL;
	}

	return error;
}

static void
ipflow_sysctl_init(struct sysctllog **clog)
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet",
		       SYSCTL_DESCR("PF_INET related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ip",
		       SYSCTL_DESCR("IPv4 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxflows",
		       SYSCTL_DESCR("Number of flows for fast forwarding"),
		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFLOWS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "hashsize",
		       SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
		       sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       CTL_CREATE, CTL_EOL);
}