/*	$NetBSD: ip_flow.c,v 1.65.2.4 2016/12/05 10:55:28 skrll Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.65.2.4 2016/12/05 10:55:28 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */

#define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */

static struct pool ipflow_pool;

TAILQ_HEAD(ipflowhead, ipflow);

#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)
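
/*
 * Flow timers count in slow-timeout ticks (PR_SLOWHZ ticks per second),
 * so IPFLOW_TIMER keeps an idle flow alive for roughly five seconds
 * before the slow-timeout scan reclaims it.
 */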

/*
 * ip_flow.c internal lock.
 * Using softnet_lock here would cause a recursive lock, hence this
 * file-private lock.
 *
 * This is a tentative workaround; we should make it scalable somehow
 * in the future.
 */
static kmutex_t ipflow_lock;
static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define	IPFLOW_INSERT(hashidx, ipf) \
do { \
	(ipf)->ipf_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(hashidx, ipf) \
do { \
	TAILQ_REMOVE(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_REMOVE(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)
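
/*
 * Each ipflow is on two lists at once: its bucket chain in ipflowtable
 * (via ipf_hash), used for lookups, and the global ipflowlist (via
 * ipf_list), used for timer scans and reaping.  The macros above keep
 * the two views consistent, so insertion and removal should always go
 * through them rather than through bare TAILQ operations.
 */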

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static struct ipflow *ipflow_reap(bool);
static void ipflow_sysctl_init(struct sysctllog **);

static void ipflow_slowtimo_work(struct work *, void *);
static struct workqueue	*ipflow_slowtimo_wq;
static struct work	ipflow_slowtimo_wk;

static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize-1);
}
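
/*
 * The loop above adds differently shifted windows of the source and
 * destination addresses in IPFLOW_HASHBITS-sized steps, so every part
 * of both addresses can reach the low-order bits retained by the mask.
 * Keeping IPFLOW_HASHBITS away from a multiple of 8 makes the windows
 * straddle octet boundaries rather than line up with them.  The final
 * mask assumes ip_hashsize is a power of two, which
 * sysctl_net_inet_ip_hashsize enforces.
 */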

static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	hash = ipflow_hash(ip);

	TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

static int
ipflow_reinit(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ipflow_lock));

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	TAILQ_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		TAILQ_INIT(&ipflowtable[i]);

	return 0;
}

void
ipflow_init(void)
{
	int error;

	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ipflow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ipflow_lock);
	(void)ipflow_reinit(ip_hashsize);
	mutex_exit(&ipflow_lock);
	ipflow_sysctl_init(NULL);
}

int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt;
	const struct sockaddr *dst;
	int error;
	int iplen;
	struct ifnet *ifp;
	int s;
	int ret = 0;

	mutex_enter(&ipflow_lock);
	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		goto out;

	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

	/*
	 * IP header with no options and valid version and length
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		goto out;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		goto out;

	ifp = m_get_rcvif(m, &s);
	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
		((ifp->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		m_put_rcvif(ifp, &s);
		goto out;

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0) {
			m_put_rcvif(ifp, &s);
			goto out;
		}
		break;
	}
	m_put_rcvif(ifp, &s);

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & (RTF_BLACKHOLE | RTF_BROADCAST)) != 0)
		goto out;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		goto out;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adding the checksum works on either endian CPU.
	 * If htons() is inlined, all the arithmetic is folded; otherwise
	 * the htons()s are combined by CSE due to the const attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);
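
	/*
	 * A worked example of the update above: the TTL is the high
	 * octet of the 16-bit word at offset 8, so decrementing it by
	 * IPTTLDEC lowers the one's-complement sum of the header by
	 * htons(IPTTLDEC << 8), and ip_sum (the complement of that sum)
	 * must grow by the same amount.  The comparison implements the
	 * end-around carry: when ip_sum + htons(IPTTLDEC << 8) would
	 * exceed 0xffff, subtracting the complement ~htons(IPTTLDEC << 8)
	 * yields the carry-corrected result (cf. RFC 1141).
	 */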

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;

#if 0
	/*
	 * Sorting the list is too heavy for the fast path (the packet
	 * processing path); it degrades performance by about 10%.  So we
	 * do not sort ipflowlist here and use FIFO cache replacement
	 * instead of LRU.
	 */
	/* Move to head of ipflowlist (LRU).  ipflowtable does not do LRU. */
	TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
	TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
#endif

	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	ret = 1;
 out:
	mutex_exit(&ipflow_lock);
	return ret;
}

static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
		rt->rt_use += ipf->ipf_uses;

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	pool_put(&ipflow_pool, ipf);
}

static struct ipflow *
ipflow_reap(bool just_one)
{
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * This case must remove exactly one ipflow, and it runs in the
	 * fast path (the packet processing path), so simply remove the
	 * TAILQ_LAST entry.
	 */
	if (just_one) {
		ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
		KASSERT(ipf != NULL);

		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		return ipf;
	}

	/*
	 * This case is used in the slow path (sysctl).  First reclaim
	 * any ipflow whose rtcache is no longer valid; failing that,
	 * evict the entry judged least recently used, starting from
	 * TAILQ_LAST and comparing timers and last_uses counts.
	 */
	while (ipflow_inuse > ip_maxflows) {
		struct ipflow *maybe_ipf = TAILQ_LAST(&ipflowlist, ipflowhead);

		TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			if (rtcache_validate(&ipf->ipf_ro) == NULL)
				goto done;
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (ipf->ipf_timer < maybe_ipf->ipf_timer
			    || ((ipf->ipf_timer == maybe_ipf->ipf_timer)
				&& (ipf->ipf_last_uses + ipf->ipf_uses
				    < maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses)))
				maybe_ipf = ipf;
		}
		ipf = maybe_ipf;
	    done:
		/*
		 * Remove the entry from the flow table.
		 */
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

static unsigned int ipflow_work_enqueued = 0;

static void
ipflow_slowtimo_work(struct work *wk, void *arg)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	/* From this point on, another work item may be enqueued */
	atomic_swap_uint(&ipflow_work_enqueued, 0);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);
	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif
}

void
ipflow_slowtimo(void)
{

	/* Avoid enqueuing another work item when one is already pending */
	if (atomic_swap_uint(&ipflow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
}
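
/*
 * The gating above relies on atomic_swap_uint() returning the previous
 * value: of several concurrent callers, only the one that swaps 0 -> 1
 * enqueues the work.  The worker resets the flag to 0 before it scans,
 * so a tick arriving during the scan schedules the next pass instead
 * of being lost.
 */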

void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;

#ifndef NET_MPSAFE
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		goto out;

	/*
	 * See if a flow struct already exists.  If so, remove it from
	 * its list and free the old route.  If not, try to allocate a
	 * new one from the pool (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			if (ipf == NULL)
				goto out;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	IPFLOW_INSERT(hash, ipf);

 out:
	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
#endif
}

int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int error;

	error = 0;

	mutex_enter(&ipflow_lock);

	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_reinit(new_size);

	mutex_exit(&ipflow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet.ip.maxflows.
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);

	ipflow_reap(false);

	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif

	return (0);
}

static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

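	/*
	 * tmp is a power of two iff exactly one bit is set, i.e. iff
	 * clearing the lowest set bit leaves zero: 64 & 63 == 0, but
	 * 48 & 47 == 32.
	 */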
	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc().
		 */
#ifndef NET_MPSAFE
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);
#endif
		error = ipflow_invalidate_all(tmp);
#ifndef NET_MPSAFE
		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);
#endif
	} else {
		/*
		 * EINVAL if not a power of 2.
		 */
		error = EINVAL;
	}

	return error;
}

static void
ipflow_sysctl_init(struct sysctllog **clog)
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet",
		       SYSCTL_DESCR("PF_INET related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ip",
		       SYSCTL_DESCR("IPv4 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxflows",
		       SYSCTL_DESCR("Number of flows for fast forwarding"),
		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFLOWS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "hashsize",
		       SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
		       sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       CTL_CREATE, CTL_EOL);
}