/*	$NetBSD: ip_flow.c,v 1.65.2.4 2016/12/05 10:55:28 skrll Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.65.2.4 2016/12/05 10:55:28 skrll Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */

#define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */

static struct pool ipflow_pool;

TAILQ_HEAD(ipflowhead, ipflow);

#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)
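
/*
 * Note: IPFLOW_TIMER is measured in slow-timeout ticks, so with the
 * customary PR_SLOWHZ of 2 a flow expires after roughly five seconds
 * of disuse (the PR_SLOWHZ value is an assumption, not taken from
 * this file).
 */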

/*
 * ip_flow.c internal lock.
 * If we used softnet_lock here, it would lead to a recursive lock.
 *
 * This is a tentative workaround.
 * We should make it scalable somehow in the future.
 */
static kmutex_t ipflow_lock;
static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

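/*
 * Each cached flow is linked on two lists: its hash bucket in
 * ipflowtable, used by ipflow_lookup(), and the global ipflowlist,
 * used for expiry and eviction.  The macros below keep the two in
 * sync.
 */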
#define	IPFLOW_INSERT(hashidx, ipf) \
do { \
	(ipf)->ipf_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(hashidx, ipf) \
do { \
	TAILQ_REMOVE(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_REMOVE(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static struct ipflow *ipflow_reap(bool);
static void ipflow_sysctl_init(struct sysctllog **);

static void ipflow_slowtimo_work(struct work *, void *);
static struct workqueue	*ipflow_slowtimo_wq;
static struct work	ipflow_slowtimo_wk;

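/*
 * Hash a flow key (source address, destination address, TOS) into a
 * bucket index.  The 32-bit addresses are folded into the sum in
 * IPFLOW_HASHBITS-sized steps so every address bit can affect the low
 * bits kept by the final mask; e.g. with IPFLOW_HASHBITS == 6 the
 * source address is added shifted right by 0, 6, 12, 18, 24 and 30
 * bits.
 */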
static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize-1);
}

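/*
 * Find the cached flow, if any, that matches the given IP header's
 * source, destination and TOS.  Called with ipflow_lock held.
 */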
static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	hash = ipflow_hash(ip);

	TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

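/*
 * (Re)allocate the flow hash table and reset all buckets.  Returns
 * nonzero if the new table cannot be allocated, in which case the old
 * table (if any) is left in place.  Called with ipflow_lock held.
 */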
static int
ipflow_reinit(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ipflow_lock));

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	TAILQ_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		TAILQ_INIT(&ipflowtable[i]);

	return 0;
}

void
ipflow_init(void)
{
	int error;

	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ipflow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ipflow_lock);
	(void)ipflow_reinit(ip_hashsize);
	mutex_exit(&ipflow_lock);
	ipflow_sysctl_init(NULL);
}
    204      1.64     rmind 
    205       1.1      matt int
    206      1.29     perry ipflow_fastforward(struct mbuf *m)
    207       1.1      matt {
    208      1.51    dyoung 	struct ip *ip;
    209      1.51    dyoung 	struct ip ip_store;
    210       1.1      matt 	struct ipflow *ipf;
    211       1.1      matt 	struct rtentry *rt;
    212      1.40    dyoung 	const struct sockaddr *dst;
    213       1.1      matt 	int error;
    214       1.6  sommerfe 	int iplen;
    215  1.65.2.2     skrll 	struct ifnet *ifp;
    216  1.65.2.2     skrll 	int s;
    217  1.65.2.2     skrll 	int ret = 0;
    218       1.1      matt 
    219  1.65.2.2     skrll 	mutex_enter(&ipflow_lock);
    220       1.1      matt 	/*
    221       1.1      matt 	 * Are we forwarding packets?  Big enough for an IP packet?
    222       1.1      matt 	 */
    223       1.3      matt 	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
    224  1.65.2.2     skrll 		goto out;
    225      1.14  sommerfe 
    226      1.14  sommerfe 	/*
    227      1.19       wiz 	 * Was packet received as a link-level multicast or broadcast?
    228      1.14  sommerfe 	 * If so, don't try to fast forward..
    229      1.14  sommerfe 	 */
    230      1.14  sommerfe 	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
    231  1.65.2.2     skrll 		goto out;
    232      1.24    itojun 
    233       1.1      matt 	/*
    234       1.1      matt 	 * IP header with no option and valid version and length
    235       1.1      matt 	 */
    236      1.51    dyoung 	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
    237      1.25   thorpej 		ip = mtod(m, struct ip *);
    238      1.25   thorpej 	else {
    239      1.51    dyoung 		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
    240      1.25   thorpej 		ip = &ip_store;
    241      1.25   thorpej 	}
    242       1.6  sommerfe 	iplen = ntohs(ip->ip_len);
    243       1.5   thorpej 	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
    244      1.13     proff 	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
    245  1.65.2.2     skrll 		goto out;
    246       1.1      matt 	/*
    247       1.1      matt 	 * Find a flow.
    248       1.1      matt 	 */
    249       1.1      matt 	if ((ipf = ipflow_lookup(ip)) == NULL)
    250  1.65.2.2     skrll 		goto out;
    251       1.1      matt 
    252  1.65.2.2     skrll 	ifp = m_get_rcvif(m, &s);
    253       1.1      matt 	/*
    254      1.18   thorpej 	 * Verify the IP header checksum.
    255       1.2   thorpej 	 */
    256      1.18   thorpej 	switch (m->m_pkthdr.csum_flags &
    257  1.65.2.2     skrll 		((ifp->if_csum_flags_rx & M_CSUM_IPv4) |
    258      1.18   thorpej 		 M_CSUM_IPv4_BAD)) {
    259      1.18   thorpej 	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
    260  1.65.2.2     skrll 		m_put_rcvif(ifp, &s);
    261  1.65.2.2     skrll 		goto out;
    262      1.18   thorpej 
    263      1.18   thorpej 	case M_CSUM_IPv4:
    264      1.18   thorpej 		/* Checksum was okay. */
    265      1.18   thorpej 		break;
    266      1.18   thorpej 
    267      1.18   thorpej 	default:
    268      1.18   thorpej 		/* Must compute it ourselves. */
    269  1.65.2.2     skrll 		if (in_cksum(m, sizeof(struct ip)) != 0) {
    270  1.65.2.2     skrll 			m_put_rcvif(ifp, &s);
    271  1.65.2.2     skrll 			goto out;
    272  1.65.2.2     skrll 		}
    273      1.18   thorpej 		break;
    274      1.18   thorpej 	}
    275  1.65.2.2     skrll 	m_put_rcvif(ifp, &s);
    276       1.2   thorpej 
    277       1.2   thorpej 	/*
    278       1.1      matt 	 * Route and interface still up?
    279       1.1      matt 	 */
    280      1.50    dyoung 	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
    281  1.65.2.1     skrll 	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
    282  1.65.2.1     skrll 	    (rt->rt_flags & (RTF_BLACKHOLE | RTF_BROADCAST)) != 0)
    283  1.65.2.2     skrll 		goto out;
    284       1.1      matt 
    285       1.1      matt 	/*
    286       1.1      matt 	 * Packet size OK?  TTL?
    287       1.1      matt 	 */
    288       1.1      matt 	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
    289  1.65.2.2     skrll 		goto out;
    290       1.1      matt 
    291       1.1      matt 	/*
    292      1.18   thorpej 	 * Clear any in-bound checksum flags for this packet.
    293      1.18   thorpej 	 */
    294      1.18   thorpej 	m->m_pkthdr.csum_flags = 0;
    295      1.18   thorpej 
    296      1.18   thorpej 	/*
    297       1.1      matt 	 * Everything checks out and so we can forward this packet.
    298       1.1      matt 	 * Modify the TTL and incrementally change the checksum.
    299      1.24    itojun 	 *
    300       1.9   mycroft 	 * This method of adding the checksum works on either endian CPU.
    301       1.9   mycroft 	 * If htons() is inlined, all the arithmetic is folded; otherwise
    302      1.32     perry 	 * the htons()s are combined by CSE due to the const attribute.
    303      1.18   thorpej 	 *
    304      1.18   thorpej 	 * Don't bother using HW checksumming here -- the incremental
    305      1.18   thorpej 	 * update is pretty fast.
    306       1.1      matt 	 */
    307       1.1      matt 	ip->ip_ttl -= IPTTLDEC;
    308      1.12     itohy 	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
    309      1.11   mycroft 		ip->ip_sum -= ~htons(IPTTLDEC << 8);
    310       1.8   mycroft 	else
    311       1.2   thorpej 		ip->ip_sum += htons(IPTTLDEC << 8);
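	/*
	 * A worked instance of the update (illustration only): the TTL
	 * shares a 16-bit header word with the protocol field, so
	 * decrementing the TTL by IPTTLDEC lowers that word by
	 * IPTTLDEC << 8.  Since ip_sum is the one's complement of the
	 * header sum, it must grow by htons(IPTTLDEC << 8) to match,
	 * with the comparison above supplying the end-around carry
	 * (cf. the incremental-update arithmetic of RFC 1624).
	 */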

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS
	 */
	ipf->ipf_uses++;

#if 0
	/*
	 * Sorting the list is too heavy for the fast path (the packet
	 * processing path); it degrades performance by about 10%.  So we
	 * do not sort ipflowtable, and use FIFO cache replacement instead
	 * of LRU.
	 */
	/* Move to the head of ipflowlist (LRU).  ipflowtable does not
	 * maintain LRU order. */
	TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
	TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
#endif

	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	ret = 1;
 out:
	mutex_exit(&ipflow_lock);
	return ret;
}

static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
		rt->rt_use += ipf->ipf_uses;

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

    383       1.1      matt 
    384       1.1      matt static void
    385      1.29     perry ipflow_free(struct ipflow *ipf)
    386       1.1      matt {
    387  1.65.2.2     skrll 
    388  1.65.2.2     skrll 	KASSERT(mutex_owned(&ipflow_lock));
    389  1.65.2.2     skrll 
    390       1.1      matt 	/*
    391       1.1      matt 	 * Remove the flow from the hash table (at elevated IPL).
    392       1.1      matt 	 * Once it's off the list, we can deal with it at normal
    393       1.1      matt 	 * network IPL.
    394       1.1      matt 	 */
    395  1.65.2.3     skrll 	IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);
    396  1.65.2.2     skrll 
    397       1.1      matt 	ipflow_addstats(ipf);
    398      1.38     joerg 	rtcache_free(&ipf->ipf_ro);
    399       1.1      matt 	ipflow_inuse--;
    400       1.7   thorpej 	pool_put(&ipflow_pool, ipf);
    401       1.1      matt }
    402       1.1      matt 
    403  1.65.2.2     skrll static struct ipflow *
    404      1.53   thorpej ipflow_reap(bool just_one)
    405       1.1      matt {
    406  1.65.2.3     skrll 	struct ipflow *ipf;
    407  1.65.2.2     skrll 
    408  1.65.2.2     skrll 	KASSERT(mutex_owned(&ipflow_lock));
    409  1.65.2.2     skrll 
    410  1.65.2.3     skrll 	/*
    411  1.65.2.3     skrll 	 * This case must remove one ipflow. Furthermore, this case is used in
    412  1.65.2.3     skrll 	 * fast path(packet processing path). So, simply remove TAILQ_LAST one.
    413  1.65.2.3     skrll 	 */
    414  1.65.2.3     skrll 	if (just_one) {
    415  1.65.2.3     skrll 		ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
    416  1.65.2.3     skrll 		KASSERT(ipf != NULL);
    417  1.65.2.3     skrll 
    418  1.65.2.3     skrll 		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);
    419  1.65.2.3     skrll 
    420  1.65.2.3     skrll 		ipflow_addstats(ipf);
    421  1.65.2.3     skrll 		rtcache_free(&ipf->ipf_ro);
    422  1.65.2.3     skrll 		return ipf;
    423  1.65.2.3     skrll 	}
    424  1.65.2.3     skrll 
    425  1.65.2.3     skrll 	/*
    426  1.65.2.3     skrll 	 * This case is used in slow path(sysctl).
    427  1.65.2.3     skrll 	 * At first, remove invalid rtcache ipflow, and then remove TAILQ_LAST
    428  1.65.2.3     skrll 	 * ipflow if it is ensured least recently used by comparing last_uses.
    429  1.65.2.3     skrll 	 */
    430  1.65.2.3     skrll 	while (ipflow_inuse > ip_maxflows) {
    431  1.65.2.3     skrll 		struct ipflow *maybe_ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
    432       1.3      matt 
    433  1.65.2.3     skrll 		TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
    434       1.5   thorpej 			/*
    435       1.5   thorpej 			 * If this no longer points to a valid route
    436       1.5   thorpej 			 * reclaim it.
    437       1.5   thorpej 			 */
    438      1.50    dyoung 			if (rtcache_validate(&ipf->ipf_ro) == NULL)
    439       1.5   thorpej 				goto done;
    440       1.5   thorpej 			/*
    441       1.5   thorpej 			 * choose the one that's been least recently
    442       1.5   thorpej 			 * used or has had the least uses in the
    443       1.5   thorpej 			 * last 1.5 intervals.
    444       1.5   thorpej 			 */
    445  1.65.2.3     skrll 			if (ipf->ipf_timer < maybe_ipf->ipf_timer
    446  1.65.2.3     skrll 			    || ((ipf->ipf_timer == maybe_ipf->ipf_timer)
    447  1.65.2.3     skrll 				&& (ipf->ipf_last_uses + ipf->ipf_uses
    448  1.65.2.3     skrll 				    < maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses)))
    449       1.5   thorpej 				maybe_ipf = ipf;
    450       1.1      matt 		}
    451       1.3      matt 		ipf = maybe_ipf;
    452       1.3      matt 	    done:
    453       1.3      matt 		/*
    454       1.3      matt 		 * Remove the entry from the flow table.
    455       1.3      matt 		 */
    456  1.65.2.3     skrll 		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);
    457  1.65.2.2     skrll 
    458       1.3      matt 		ipflow_addstats(ipf);
    459      1.38     joerg 		rtcache_free(&ipf->ipf_ro);
    460       1.7   thorpej 		pool_put(&ipflow_pool, ipf);
    461       1.3      matt 		ipflow_inuse--;
    462       1.1      matt 	}
    463       1.3      matt 	return NULL;
    464       1.1      matt }
    465       1.1      matt 
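/*
 * Nonzero while an ipflow_slowtimo work item is pending, so that at
 * most one instance is ever on the workqueue at a time.
 */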
static unsigned int ipflow_work_enqueued = 0;

static void
ipflow_slowtimo_work(struct work *wk, void *arg)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	/* We can allow another work item to be enqueued at this point */
	atomic_swap_uint(&ipflow_work_enqueued, 0);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);
	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif
}

void
ipflow_slowtimo(void)
{

	/* Avoid enqueuing another work item while one is already pending */
	if (atomic_swap_uint(&ipflow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
}

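/*
 * Cache a forwarding decision for this flow so that subsequent packets
 * can take the fast path.  The route in "ro" has presumably just been
 * used by the slow forwarding path.
 */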
void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;

#ifndef NET_MPSAFE
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		goto out;

	/*
	 * See if an existing flow exists.  If so, remove it from its
	 * list and free the old route.  If not, try to allocate a new
	 * one (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			if (ipf == NULL)
				goto out;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	IPFLOW_INSERT(hash, ipf);

 out:
	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
#endif
}

int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int error;

	error = 0;

	mutex_enter(&ipflow_lock);

	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_reinit(new_size);

	mutex_exit(&ipflow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet.ip.maxflows.
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);

	ipflow_reap(false);

	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif

	return (0);
}

static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc()
		 */
#ifndef NET_MPSAFE
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);
#endif
		error = ipflow_invalidate_all(tmp);
#ifndef NET_MPSAFE
		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);
#endif
	} else {
		/*
		 * EINVAL if not a power of 2
		 */
		error = EINVAL;
	}

	return error;
}
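
/*
 * Example (illustrative only): the nodes created below appear as
 * net.inet.ip.maxflows and net.inet.ip.hashsize, so e.g.
 *
 *	sysctl -w net.inet.ip.hashsize=128
 *
 * flushes all cached flows and resizes the table; values that are not
 * a power of 2 are rejected with EINVAL.
 */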

static void
ipflow_sysctl_init(struct sysctllog **clog)
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet",
		       SYSCTL_DESCR("PF_INET related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ip",
		       SYSCTL_DESCR("IPv4 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxflows",
		       SYSCTL_DESCR("Number of flows for fast forwarding"),
		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFLOWS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "hashsize",
		       SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
		       sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       CTL_CREATE, CTL_EOL);
}