/*	$NetBSD: ip_flow.c,v 1.65 2014/10/18 08:33:29 snj Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.65 2014/10/18 08:33:29 snj Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */

#define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */

static struct pool ipflow_pool;

LIST_HEAD(ipflowhead, ipflow);

#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)
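
/*
 * IPFLOW_TIMER is expressed in protocol slow-timeout ticks (PR_SLOWHZ
 * ticks per second), so 5 * PR_SLOWHZ arms a flow for roughly five
 * seconds of idle time before ipflow_slowtimo() may reclaim it.  The
 * default table has 1 << IPFLOW_HASHBITS == 64 buckets and can be
 * resized at run time through the net.inet.ip.hashsize sysctl handler
 * defined below.
 */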

static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define	IPFLOW_INSERT(bucket, ipf) \
do { \
	LIST_INSERT_HEAD((bucket), (ipf), ipf_hash); \
	LIST_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(ipf) \
do { \
	LIST_REMOVE((ipf), ipf_hash); \
	LIST_REMOVE((ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)
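
/*
 * Each active flow is on two lists at once: the per-bucket ipf_hash
 * list that ipflow_lookup() searches for a given (src, dst, tos)
 * tuple, and the global ipflowlist (ipf_list) that lets
 * ipflow_slowtimo(), ipflow_reap() and ipflow_invalidate_all() walk
 * every flow without scanning all of the hash buckets.  The macros
 * above keep the two memberships in sync.
 */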

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static void ipflow_sysctl_init(struct sysctllog **);

static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize-1);
}
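
/*
 * The hash above folds both 32-bit addresses into the low
 * IPFLOW_HASHBITS bits: each iteration adds a slice of the source
 * address shifted right by idx and a slice of the destination address
 * shifted from the opposite end, starting from the TOS byte.  Because
 * the stride (6) is not a multiple of 8, successive slices straddle
 * octet boundaries, so every byte of both addresses influences the
 * bucket index.  The final mask assumes ip_hashsize is a power of two,
 * which sysctl_net_inet_ip_hashsize() enforces.
 */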

static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	hash = ipflow_hash(ip);

	LIST_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

static int
ipflow_reinit(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	LIST_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		LIST_INIT(&ipflowtable[i]);

	return 0;
}

void
ipflow_init(void)
{
	(void)ipflow_reinit(ip_hashsize);
	ipflow_sysctl_init(NULL);
}

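/*
 * Attempt to forward one packet using a cached flow.  Returns 1 when
 * the packet has been consumed (handed to the outgoing interface, or
 * accounted as dropped on an output error) and 0 when the caller
 * should fall back to the normal input/forwarding path -- e.g. when
 * forwarding is disabled, no matching flow exists, the header fails
 * validation, or the cached route has gone stale.
 */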
int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt;
	const struct sockaddr *dst;
	int error;
	int iplen;

	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		return 0;

	/*
	 * Was packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward..
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		return 0;

	/*
	 * IP header with no option and valid version and length
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		return 0;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		return 0;

	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
		((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		return (0);

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0)
			return (0);
		break;
	}
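
	/*
	 * Note that the switch above only trusts the M_CSUM_IPv4
	 * ("hardware verified the header checksum") bit when the
	 * receiving interface advertises IPv4 receive checksum offload
	 * in if_csum_flags_rx; M_CSUM_IPv4_BAD always pushes the packet
	 * back to the normal (slow) path, and anything else falls
	 * through to an in_cksum() over the header.
	 */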

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0)
		return 0;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		return 0;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adding the checksum works on either endian CPU.
	 * If htons() is inlined, all the arithmetic is folded; otherwise
	 * the htons()s are combined by CSE due to the const attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);
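
	/*
	 * Worked example of the incremental update: the TTL occupies
	 * the high-order byte of one 16-bit header word, so decrementing
	 * it by IPTTLDEC lowers that word by htons(IPTTLDEC << 8) as
	 * seen in network byte order, and the one's-complement checksum
	 * must rise by the same amount.  The comparison against
	 * ~htons(IPTTLDEC << 8) detects when that addition would wrap
	 * past 0xffff; subtracting the complement in that case performs
	 * the addition together with the required end-around carry.
	 * This is the incremental-update technique of RFC 1141/1624.
	 */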

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long..
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS
	 */
	ipf->ipf_uses++;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	KERNEL_LOCK(1, NULL);
	if ((error = (*rt->rt_ifp->if_output)(rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	KERNEL_UNLOCK_ONE(NULL);
	return 1;
}

static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
		rt->rt_use += ipf->ipf_uses;

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{
	int s;
	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	s = splnet();
	IPFLOW_REMOVE(ipf);
	splx(s);
	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	s = splnet();
	pool_put(&ipflow_pool, ipf);
	splx(s);
}

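/*
 * Reclaim flow entries.  With just_one == true a single victim is
 * unlinked and returned (not put back in the pool) so that
 * ipflow_create() can recycle its storage for a new flow; with
 * just_one == false, flows are freed until the count drops back to
 * ip_maxflows, which is how the net.inet.ip.maxflows sysctl handler
 * trims the table after the limit has been lowered.
 */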
struct ipflow *
ipflow_reap(bool just_one)
{
	while (just_one || ipflow_inuse > ip_maxflows) {
		struct ipflow *ipf, *maybe_ipf = NULL;
		int s;

		ipf = LIST_FIRST(&ipflowlist);
		while (ipf != NULL) {
			/*
			 * If this no longer points to a valid route
			 * reclaim it.
			 */
			if (rtcache_validate(&ipf->ipf_ro) == NULL)
				goto done;
			/*
			 * choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (maybe_ipf == NULL ||
			    ipf->ipf_timer < maybe_ipf->ipf_timer ||
			    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
			     ipf->ipf_last_uses + ipf->ipf_uses <
			         maybe_ipf->ipf_last_uses +
			         maybe_ipf->ipf_uses))
				maybe_ipf = ipf;
			ipf = LIST_NEXT(ipf, ipf_list);
		}
		ipf = maybe_ipf;
	    done:
		/*
		 * Remove the entry from the flow table.
		 */
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		if (just_one)
			return ipf;
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

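/*
 * Slow timeout: run from the protocol slow-timeout machinery, this
 * frees flows that have expired or whose cached route has gone away,
 * and folds the per-flow use counters of the survivors into the
 * global IP statistics before resetting them for the next interval.
 */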
void
ipflow_slowtimo(void)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}

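/*
 * Record a flow for a packet that the regular (slow) forwarding path
 * has just routed, using the route supplied by the caller, so that
 * subsequent packets of the same flow can take the fast path above.
 * ICMP packets are deliberately never cached.
 */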
void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;
	int s;

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		return;

	KERNEL_LOCK(1, NULL);

	/*
	 * See if an existing flow struct exists.  If so remove it from its
	 * list and free the old route.  If not, try to malloc a new one
	 * (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			s = splnet();
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			splx(s);
			if (ipf == NULL)
				goto out;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	s = splnet();
	IPFLOW_INSERT(&ipflowtable[hash], ipf);
	splx(s);

 out:
	KERNEL_UNLOCK_ONE(NULL);
}

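/*
 * Throw away every cached flow and, if new_size is non-zero, resize
 * the hash table as well.  This is the workhorse behind the
 * net.inet.ip.hashsize sysctl handler below; it can only fail if the
 * new table cannot be allocated.
 */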
int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int s, error;

	error = 0;
	s = splnet();
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_reinit(new_size);
	splx(s);

	return error;
}

#ifdef GATEWAY
/*
 * sysctl helper routine for net.inet.ip.maxflows.
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	ipflow_reap(false);

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);

	return (0);
}

static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

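	/*
	 * (tmp & (tmp - 1)) == 0 is the usual power-of-two test: a
	 * power of two has a single bit set, so clearing its lowest
	 * set bit yields zero.  Reject zero explicitly, since the
	 * bucket mask in ipflow_hash() needs at least one bucket.
	 */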
	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc()
		 */
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);

		error = ipflow_invalidate_all(tmp);

		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);

	} else {
		/*
		 * EINVAL if not a power of 2
		 */
		error = EINVAL;
	}

	return error;
}
#endif /* GATEWAY */

static void
ipflow_sysctl_init(struct sysctllog **clog)
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet",
		       SYSCTL_DESCR("PF_INET related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ip",
		       SYSCTL_DESCR("IPv4 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

#ifdef GATEWAY
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxflows",
		       SYSCTL_DESCR("Number of flows for fast forwarding"),
		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFLOWS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			CTLTYPE_INT, "hashsize",
			SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
			sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
			CTL_NET, PF_INET, IPPROTO_IP,
			CTL_CREATE, CTL_EOL);
#endif /* GATEWAY */
}
    601