ip_flow.c revision 1.64.2.1
      1  1.64.2.1       snj /*	$NetBSD: ip_flow.c,v 1.64.2.1 2017/05/12 05:44:10 snj Exp $	*/
      2       1.1      matt 
      3       1.1      matt /*-
      4       1.1      matt  * Copyright (c) 1998 The NetBSD Foundation, Inc.
      5       1.1      matt  * All rights reserved.
      6       1.1      matt  *
      7       1.1      matt  * This code is derived from software contributed to The NetBSD Foundation
      8       1.1      matt  * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
      9       1.1      matt  *
     10       1.1      matt  * Redistribution and use in source and binary forms, with or without
     11       1.1      matt  * modification, are permitted provided that the following conditions
     12       1.1      matt  * are met:
     13       1.1      matt  * 1. Redistributions of source code must retain the above copyright
     14       1.1      matt  *    notice, this list of conditions and the following disclaimer.
     15       1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     16       1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     17       1.1      matt  *    documentation and/or other materials provided with the distribution.
     18       1.1      matt  *
     19       1.1      matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20       1.1      matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21       1.1      matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22       1.1      matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23       1.1      matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24       1.1      matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25       1.1      matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26       1.1      matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27       1.1      matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28       1.1      matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29       1.1      matt  * POSSIBILITY OF SUCH DAMAGE.
     30       1.1      matt  */
     31      1.22     lukem 
     32      1.22     lukem #include <sys/cdefs.h>
     33  1.64.2.1       snj __KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.64.2.1 2017/05/12 05:44:10 snj Exp $");
     34       1.1      matt 
     35       1.1      matt #include <sys/param.h>
     36       1.1      matt #include <sys/systm.h>
     37       1.1      matt #include <sys/malloc.h>
     38       1.1      matt #include <sys/mbuf.h>
     39       1.1      matt #include <sys/domain.h>
     40       1.1      matt #include <sys/protosw.h>
     41       1.1      matt #include <sys/socket.h>
     42       1.1      matt #include <sys/socketvar.h>
     43       1.1      matt #include <sys/errno.h>
     44       1.1      matt #include <sys/time.h>
     45       1.1      matt #include <sys/kernel.h>
     46       1.7   thorpej #include <sys/pool.h>
     47       1.1      matt #include <sys/sysctl.h>
     48  1.64.2.1       snj #include <sys/workqueue.h>
     49       1.1      matt 
     50       1.1      matt #include <net/if.h>
     51       1.1      matt #include <net/if_dl.h>
     52       1.1      matt #include <net/route.h>
     53       1.1      matt #include <net/pfil.h>
     54       1.1      matt 
     55       1.1      matt #include <netinet/in.h>
     56       1.1      matt #include <netinet/in_systm.h>
     57       1.1      matt #include <netinet/ip.h>
     58       1.1      matt #include <netinet/in_pcb.h>
     59       1.1      matt #include <netinet/in_var.h>
     60       1.1      matt #include <netinet/ip_var.h>
     61      1.54   thorpej #include <netinet/ip_private.h>
     62       1.1      matt 
     63      1.44  liamjfoy /*
      64      1.44  liamjfoy  * Similar code is commented in more detail in netinet6/ip6_flow.c
     65      1.44  liamjfoy  */
     66      1.44  liamjfoy 
     67      1.53   thorpej #define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */
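/*
 * Note on the value above: the hash below folds the source and
 * destination addresses in strides of IPFLOW_HASHBITS bits.  Keeping the
 * stride off a multiple of 8 makes the folds straddle octet boundaries,
 * so each bucket index mixes bits from several bytes of the addresses.
 */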
     68      1.53   thorpej 
     69      1.57     pooka static struct pool ipflow_pool;
     70       1.7   thorpej 
     71       1.5   thorpej LIST_HEAD(ipflowhead, ipflow);
     72       1.5   thorpej 
     73       1.1      matt #define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
     74      1.43  liamjfoy #define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)
     75       1.5   thorpej 
     76      1.43  liamjfoy static struct ipflowhead *ipflowtable = NULL;
     77       1.5   thorpej static struct ipflowhead ipflowlist;
     78       1.1      matt static int ipflow_inuse;
     79       1.5   thorpej 
     80       1.5   thorpej #define	IPFLOW_INSERT(bucket, ipf) \
     81       1.5   thorpej do { \
     82       1.5   thorpej 	LIST_INSERT_HEAD((bucket), (ipf), ipf_hash); \
     83       1.5   thorpej 	LIST_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
     84      1.26     perry } while (/*CONSTCOND*/ 0)
     85       1.5   thorpej 
     86       1.5   thorpej #define	IPFLOW_REMOVE(ipf) \
     87       1.5   thorpej do { \
     88       1.5   thorpej 	LIST_REMOVE((ipf), ipf_hash); \
     89       1.5   thorpej 	LIST_REMOVE((ipf), ipf_list); \
     90      1.26     perry } while (/*CONSTCOND*/ 0)
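/*
 * Every flow is linked on two lists at once: its hash bucket (via
 * ipf_hash) and the global ipflowlist (via ipf_list) that the timer and
 * the reaper walk, so the two macros above always update both.
 */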
     91       1.5   thorpej 
     92       1.3      matt #ifndef IPFLOW_MAX
     93       1.1      matt #define	IPFLOW_MAX		256
     94       1.3      matt #endif
     95      1.64     rmind static int ip_maxflows = IPFLOW_MAX;
     96      1.64     rmind static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;
     97      1.64     rmind 
     98      1.64     rmind static void ipflow_sysctl_init(struct sysctllog **);
     99       1.1      matt 
    100  1.64.2.1       snj static void ipflow_slowtimo_work(struct work *, void *);
    101  1.64.2.1       snj static struct workqueue	*ipflow_slowtimo_wq;
    102  1.64.2.1       snj static struct work	ipflow_slowtimo_wk;
    103  1.64.2.1       snj 
    104      1.45  liamjfoy static size_t
    105      1.51    dyoung ipflow_hash(const struct ip *ip)
    106       1.1      matt {
    107      1.45  liamjfoy 	size_t hash = ip->ip_tos;
    108      1.45  liamjfoy 	size_t idx;
    109      1.45  liamjfoy 
    110      1.45  liamjfoy 	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
    111      1.45  liamjfoy 		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
    112      1.45  liamjfoy 		    (ip->ip_src.s_addr >> idx);
    113      1.45  liamjfoy 	}
    114      1.45  liamjfoy 
    115      1.43  liamjfoy 	return hash & (ip_hashsize-1);
    116       1.1      matt }
    117       1.1      matt 
    118       1.1      matt static struct ipflow *
    119      1.51    dyoung ipflow_lookup(const struct ip *ip)
    120       1.1      matt {
    121      1.45  liamjfoy 	size_t hash;
    122       1.1      matt 	struct ipflow *ipf;
    123       1.1      matt 
    124      1.45  liamjfoy 	hash = ipflow_hash(ip);
    125       1.1      matt 
    126      1.30  christos 	LIST_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
    127       1.1      matt 		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
    128       1.1      matt 		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
    129       1.1      matt 		    && ip->ip_tos == ipf->ipf_tos)
    130       1.1      matt 			break;
    131       1.1      matt 	}
    132       1.1      matt 	return ipf;
    133       1.1      matt }
    134       1.1      matt 
    135      1.57     pooka void
    136      1.58    cegger ipflow_poolinit(void)
    137      1.57     pooka {
    138  1.64.2.1       snj 	int error;
    139  1.64.2.1       snj 
    140  1.64.2.1       snj 	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
    141  1.64.2.1       snj 	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
    142  1.64.2.1       snj 	if (error != 0)
    143  1.64.2.1       snj 		panic("%s: workqueue_create failed (%d)\n", __func__, error);
    144      1.57     pooka 
    145      1.57     pooka 	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
    146      1.57     pooka 	    NULL, IPL_NET);
    147      1.57     pooka }
    148      1.57     pooka 
    149      1.64     rmind static int
    150      1.64     rmind ipflow_reinit(int table_size)
    151       1.7   thorpej {
    152      1.43  liamjfoy 	struct ipflowhead *new_table;
    153      1.45  liamjfoy 	size_t i;
    154       1.7   thorpej 
    155      1.43  liamjfoy 	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
    156      1.43  liamjfoy 	    table_size, M_RTABLE, M_NOWAIT);
    157      1.43  liamjfoy 
    158      1.43  liamjfoy 	if (new_table == NULL)
    159      1.43  liamjfoy 		return 1;
    160      1.43  liamjfoy 
    161      1.43  liamjfoy 	if (ipflowtable != NULL)
    162      1.43  liamjfoy 		free(ipflowtable, M_RTABLE);
    163      1.43  liamjfoy 
    164      1.43  liamjfoy 	ipflowtable = new_table;
    165      1.43  liamjfoy 	ip_hashsize = table_size;
    166      1.43  liamjfoy 
    167       1.7   thorpej 	LIST_INIT(&ipflowlist);
    168      1.43  liamjfoy 	for (i = 0; i < ip_hashsize; i++)
    169       1.7   thorpej 		LIST_INIT(&ipflowtable[i]);
    170      1.43  liamjfoy 
    171      1.43  liamjfoy 	return 0;
    172       1.7   thorpej }
    173       1.7   thorpej 
    174      1.64     rmind void
    175      1.64     rmind ipflow_init(void)
    176      1.64     rmind {
    177      1.64     rmind 	(void)ipflow_reinit(ip_hashsize);
    178      1.64     rmind 	ipflow_sysctl_init(NULL);
    179      1.64     rmind }
    180      1.64     rmind 
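/*
 * Fast-forward path: if the packet matches a cached flow and passes the
 * sanity checks below, transmit it directly via the cached route and
 * return 1; return 0 to let the caller continue with the normal IP
 * input/forwarding path.
 */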
    181       1.1      matt int
    182      1.29     perry ipflow_fastforward(struct mbuf *m)
    183       1.1      matt {
    184      1.51    dyoung 	struct ip *ip;
    185      1.51    dyoung 	struct ip ip_store;
    186       1.1      matt 	struct ipflow *ipf;
    187       1.1      matt 	struct rtentry *rt;
    188      1.40    dyoung 	const struct sockaddr *dst;
    189       1.1      matt 	int error;
    190       1.6  sommerfe 	int iplen;
    191       1.1      matt 
    192       1.1      matt 	/*
    193       1.1      matt 	 * Are we forwarding packets?  Big enough for an IP packet?
    194       1.1      matt 	 */
    195       1.3      matt 	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
    196       1.1      matt 		return 0;
    197      1.14  sommerfe 
    198      1.14  sommerfe 	/*
    199      1.19       wiz 	 * Was packet received as a link-level multicast or broadcast?
     200      1.14  sommerfe 	 * If so, don't try to fast-forward it.
    201      1.14  sommerfe 	 */
    202      1.14  sommerfe 	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
    203      1.14  sommerfe 		return 0;
    204      1.24    itojun 
    205       1.1      matt 	/*
     206       1.1      matt 	 * IP header with no options and valid version and length
    207       1.1      matt 	 */
    208      1.51    dyoung 	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
    209      1.25   thorpej 		ip = mtod(m, struct ip *);
    210      1.25   thorpej 	else {
    211      1.51    dyoung 		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
    212      1.25   thorpej 		ip = &ip_store;
    213      1.25   thorpej 	}
    214       1.6  sommerfe 	iplen = ntohs(ip->ip_len);
    215       1.5   thorpej 	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
    216      1.13     proff 	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
    217       1.1      matt 		return 0;
    218       1.1      matt 	/*
    219       1.1      matt 	 * Find a flow.
    220       1.1      matt 	 */
    221       1.1      matt 	if ((ipf = ipflow_lookup(ip)) == NULL)
    222       1.1      matt 		return 0;
    223       1.1      matt 
    224       1.1      matt 	/*
    225      1.18   thorpej 	 * Verify the IP header checksum.
    226       1.2   thorpej 	 */
    227      1.18   thorpej 	switch (m->m_pkthdr.csum_flags &
    228      1.20   thorpej 		((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
    229      1.18   thorpej 		 M_CSUM_IPv4_BAD)) {
    230      1.18   thorpej 	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
    231      1.18   thorpej 		return (0);
    232      1.18   thorpej 
    233      1.18   thorpej 	case M_CSUM_IPv4:
    234      1.18   thorpej 		/* Checksum was okay. */
    235      1.18   thorpej 		break;
    236      1.18   thorpej 
    237      1.18   thorpej 	default:
    238      1.18   thorpej 		/* Must compute it ourselves. */
    239      1.18   thorpej 		if (in_cksum(m, sizeof(struct ip)) != 0)
    240      1.18   thorpej 			return (0);
    241      1.18   thorpej 		break;
    242      1.18   thorpej 	}
    243       1.2   thorpej 
    244       1.2   thorpej 	/*
    245       1.1      matt 	 * Route and interface still up?
    246       1.1      matt 	 */
    247      1.50    dyoung 	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
    248      1.48    dyoung 	    (rt->rt_ifp->if_flags & IFF_UP) == 0)
    249       1.1      matt 		return 0;
    250       1.1      matt 
    251       1.1      matt 	/*
    252       1.1      matt 	 * Packet size OK?  TTL?
    253       1.1      matt 	 */
    254       1.1      matt 	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
    255       1.1      matt 		return 0;
    256       1.1      matt 
    257       1.1      matt 	/*
    258      1.18   thorpej 	 * Clear any in-bound checksum flags for this packet.
    259      1.18   thorpej 	 */
    260      1.18   thorpej 	m->m_pkthdr.csum_flags = 0;
    261      1.18   thorpej 
    262      1.18   thorpej 	/*
    263       1.1      matt 	 * Everything checks out and so we can forward this packet.
    264       1.1      matt 	 * Modify the TTL and incrementally change the checksum.
    265      1.24    itojun 	 *
     266       1.9   mycroft 	 * This method of updating the checksum works on CPUs of either endianness.
    267       1.9   mycroft 	 * If htons() is inlined, all the arithmetic is folded; otherwise
    268      1.32     perry 	 * the htons()s are combined by CSE due to the const attribute.
    269      1.18   thorpej 	 *
    270      1.18   thorpej 	 * Don't bother using HW checksumming here -- the incremental
    271      1.18   thorpej 	 * update is pretty fast.
    272       1.1      matt 	 */
    273       1.1      matt 	ip->ip_ttl -= IPTTLDEC;
    274      1.12     itohy 	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
    275      1.11   mycroft 		ip->ip_sum -= ~htons(IPTTLDEC << 8);
    276       1.8   mycroft 	else
    277       1.2   thorpej 		ip->ip_sum += htons(IPTTLDEC << 8);
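	/*
	 * In one's complement terms: TTL occupies the high-order byte of
	 * a 16-bit header word, so decrementing it by IPTTLDEC lowers the
	 * header sum by IPTTLDEC << 8 in network byte order, and ip_sum,
	 * the complement of that sum, must rise by htons(IPTTLDEC << 8).
	 * The comparison above supplies the end-around carry when the
	 * addition would otherwise wrap.
	 */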
    278      1.25   thorpej 
    279      1.25   thorpej 	/*
    280      1.25   thorpej 	 * Done modifying the header; copy it back, if necessary.
    281      1.51    dyoung 	 *
    282      1.51    dyoung 	 * XXX Use m_copyback_cow(9) here? --dyoung
    283      1.25   thorpej 	 */
    284      1.41  christos 	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
    285      1.41  christos 		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));
    286       1.6  sommerfe 
    287       1.6  sommerfe 	/*
     288      1.24    itojun 	 * Trim the packet in case it's too long.
    289       1.6  sommerfe 	 */
    290       1.6  sommerfe 	if (m->m_pkthdr.len > iplen) {
    291       1.6  sommerfe 		if (m->m_len == m->m_pkthdr.len) {
    292       1.6  sommerfe 			m->m_len = iplen;
    293       1.6  sommerfe 			m->m_pkthdr.len = iplen;
    294       1.6  sommerfe 		} else
    295       1.6  sommerfe 			m_adj(m, iplen - m->m_pkthdr.len);
    296       1.2   thorpej 	}
    297       1.1      matt 
    298       1.1      matt 	/*
     299       1.1      matt 	 * Send the packet on its way.  All we can get back is ENOBUFS.
    300       1.1      matt 	 */
    301       1.1      matt 	ipf->ipf_uses++;
    302       1.5   thorpej 	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);
    303      1.16   thorpej 
    304      1.16   thorpej 	if (rt->rt_flags & RTF_GATEWAY)
    305      1.16   thorpej 		dst = rt->rt_gateway;
    306      1.16   thorpej 	else
    307      1.40    dyoung 		dst = rtcache_getdst(&ipf->ipf_ro);
    308      1.16   thorpej 
    309      1.59       tls 	KERNEL_LOCK(1, NULL);
    310      1.16   thorpej 	if ((error = (*rt->rt_ifp->if_output)(rt->rt_ifp, m, dst, rt)) != 0) {
    311       1.1      matt 		if (error == ENOBUFS)
    312       1.1      matt 			ipf->ipf_dropped++;
    313       1.1      matt 		else
    314       1.1      matt 			ipf->ipf_errors++;
    315       1.1      matt 	}
    316      1.59       tls 	KERNEL_UNLOCK_ONE(NULL);
    317       1.1      matt 	return 1;
    318       1.1      matt }
    319       1.1      matt 
    320       1.1      matt static void
    322       1.1      matt ipflow_addstats(struct ipflow *ipf)
    323      1.49    dyoung {
    324      1.54   thorpej 	struct rtentry *rt;
    325      1.49    dyoung 	uint64_t *ips;
    326      1.50    dyoung 
    327      1.49    dyoung 	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
    328      1.54   thorpej 		rt->rt_use += ipf->ipf_uses;
    329      1.54   thorpej 
    330      1.54   thorpej 	ips = IP_STAT_GETREF();
    331      1.54   thorpej 	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
    332      1.54   thorpej 	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
    333      1.54   thorpej 	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
    334      1.54   thorpej 	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
    335       1.1      matt 	IP_STAT_PUTREF();
    336       1.1      matt }
    337       1.1      matt 
    338      1.29     perry static void
    339       1.1      matt ipflow_free(struct ipflow *ipf)
    340       1.1      matt {
    341       1.1      matt 	int s;
    342       1.1      matt 	/*
    343       1.1      matt 	 * Remove the flow from the hash table (at elevated IPL).
    344       1.1      matt 	 * Once it's off the list, we can deal with it at normal
    345       1.1      matt 	 * network IPL.
    346      1.17   thorpej 	 */
    347       1.5   thorpej 	s = splnet();
    348       1.1      matt 	IPFLOW_REMOVE(ipf);
    349       1.1      matt 	splx(s);
    350      1.38     joerg 	ipflow_addstats(ipf);
    351       1.1      matt 	rtcache_free(&ipf->ipf_ro);
    352      1.35       tls 	ipflow_inuse--;
    353       1.7   thorpej 	s = splnet();
    354      1.35       tls 	pool_put(&ipflow_pool, ipf);
    355       1.1      matt 	splx(s);
    356       1.1      matt }
    357      1.62  liamjfoy 
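/*
 * Reclaim flow entries, preferring ones whose cached route has gone
 * stale and otherwise the least recently / least heavily used.  With
 * just_one set, one victim is detached and returned to the caller for
 * reuse; otherwise entries are freed until the count is back down to
 * ip_maxflows and NULL is returned.
 */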
    358      1.53   thorpej struct ipflow *
    359       1.1      matt ipflow_reap(bool just_one)
    360       1.3      matt {
    361       1.3      matt 	while (just_one || ipflow_inuse > ip_maxflows) {
    362       1.3      matt 		struct ipflow *ipf, *maybe_ipf = NULL;
    363       1.3      matt 		int s;
    364       1.5   thorpej 
    365       1.5   thorpej 		ipf = LIST_FIRST(&ipflowlist);
    366       1.5   thorpej 		while (ipf != NULL) {
    367       1.5   thorpej 			/*
     368       1.5   thorpej 			 * If this no longer points to a valid route,
    369       1.5   thorpej 			 * reclaim it.
    370      1.50    dyoung 			 */
    371       1.5   thorpej 			if (rtcache_validate(&ipf->ipf_ro) == NULL)
    372       1.5   thorpej 				goto done;
    373       1.5   thorpej 			/*
     374       1.5   thorpej 			 * Choose the one that's been least recently
     375       1.5   thorpej 			 * used or has had the fewest uses in the
    376       1.5   thorpej 			 * last 1.5 intervals.
    377       1.5   thorpej 			 */
    378       1.5   thorpej 			if (maybe_ipf == NULL ||
    379       1.5   thorpej 			    ipf->ipf_timer < maybe_ipf->ipf_timer ||
    380       1.5   thorpej 			    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
    381       1.5   thorpej 			     ipf->ipf_last_uses + ipf->ipf_uses <
    382       1.5   thorpej 			         maybe_ipf->ipf_last_uses +
    383       1.5   thorpej 			         maybe_ipf->ipf_uses))
    384       1.5   thorpej 				maybe_ipf = ipf;
    385       1.1      matt 			ipf = LIST_NEXT(ipf, ipf_list);
    386       1.3      matt 		}
    387       1.3      matt 		ipf = maybe_ipf;
    388       1.3      matt 	    done:
    389       1.3      matt 		/*
    390       1.3      matt 		 * Remove the entry from the flow table.
    391      1.17   thorpej 		 */
    392       1.5   thorpej 		s = splnet();
    393       1.3      matt 		IPFLOW_REMOVE(ipf);
    394       1.3      matt 		splx(s);
    395      1.38     joerg 		ipflow_addstats(ipf);
    396       1.3      matt 		rtcache_free(&ipf->ipf_ro);
    397       1.3      matt 		if (just_one)
    398       1.7   thorpej 			return ipf;
    399       1.3      matt 		pool_put(&ipflow_pool, ipf);
    400       1.1      matt 		ipflow_inuse--;
    401       1.3      matt 	}
    402       1.1      matt 	return NULL;
    403       1.1      matt }
    404  1.64.2.1       snj 
    405  1.64.2.1       snj static bool ipflow_work_enqueued = false;
    406  1.64.2.1       snj 
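/*
 * Periodic cleanup, run from the workqueue: expire flows whose timer has
 * run out or whose cached route is gone, fold the per-flow counters into
 * the route and IP statistics, and reset the usage counts for the next
 * interval.
 */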
    407  1.64.2.1       snj static void
    408       1.1      matt ipflow_slowtimo_work(struct work *wk, void *arg)
    409      1.49    dyoung {
    410       1.5   thorpej 	struct rtentry *rt;
    411      1.54   thorpej 	struct ipflow *ipf, *next_ipf;
    412       1.2   thorpej 	uint64_t *ips;
    413      1.55        ad 
    414      1.55        ad 	mutex_enter(softnet_lock);
    415      1.30  christos 	KERNEL_LOCK(1, NULL);
    416       1.5   thorpej 	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
    417      1.37    dyoung 		next_ipf = LIST_NEXT(ipf, ipf_list);
    418      1.50    dyoung 		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
    419       1.5   thorpej 		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
    420       1.5   thorpej 			ipflow_free(ipf);
    421       1.5   thorpej 		} else {
    422      1.49    dyoung 			ipf->ipf_last_uses = ipf->ipf_uses;
    423      1.54   thorpej 			rt->rt_use += ipf->ipf_uses;
    424      1.54   thorpej 			ips = IP_STAT_GETREF();
    425      1.54   thorpej 			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
    426      1.54   thorpej 			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
    427      1.54   thorpej 			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
    428       1.5   thorpej 			IP_STAT_PUTREF();
    429       1.1      matt 			ipf->ipf_uses = 0;
    430       1.1      matt 		}
    431  1.64.2.1       snj 	}
    432      1.55        ad 	ipflow_work_enqueued = false;
    433      1.55        ad 	KERNEL_UNLOCK_ONE(NULL);
    434       1.1      matt 	mutex_exit(softnet_lock);
    435       1.1      matt }
    436       1.1      matt 
    437  1.64.2.1       snj void
    438  1.64.2.1       snj ipflow_slowtimo(void)
    439  1.64.2.1       snj {
    440  1.64.2.1       snj 
     441  1.64.2.1       snj 	/* Avoid enqueueing another work item when one is already pending. */
    442  1.64.2.1       snj 	KERNEL_LOCK(1, NULL);
    443  1.64.2.1       snj 	if (ipflow_work_enqueued) {
    444  1.64.2.1       snj 		KERNEL_UNLOCK_ONE(NULL);
    445  1.64.2.1       snj 		return;
    446  1.64.2.1       snj 	}
    447  1.64.2.1       snj 	ipflow_work_enqueued = true;
    448  1.64.2.1       snj 	KERNEL_UNLOCK_ONE(NULL);
    449  1.64.2.1       snj 
    450  1.64.2.1       snj 	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
    451  1.64.2.1       snj }
    452  1.64.2.1       snj 
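/*
 * Record (or refresh) the cache entry for this packet's source,
 * destination and TOS, so that subsequent packets of the same flow can
 * be handled by ipflow_fastforward().
 */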
    453      1.29     perry void
    454       1.1      matt ipflow_create(const struct route *ro, struct mbuf *m)
    455      1.51    dyoung {
    456       1.1      matt 	const struct ip *const ip = mtod(m, const struct ip *);
    457      1.45  liamjfoy 	struct ipflow *ipf;
    458       1.1      matt 	size_t hash;
    459       1.1      matt 	int s;
    460       1.1      matt 
    461       1.1      matt 	/*
    462       1.1      matt 	 * Don't create cache entries for ICMP messages.
    463       1.3      matt 	 */
    464       1.1      matt 	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
    465      1.63     pooka 		return;
    466      1.63     pooka 
    467      1.63     pooka 	KERNEL_LOCK(1, NULL);
    468       1.1      matt 
    469       1.1      matt 	/*
     470       1.1      matt 	 * See if a flow struct already exists.  If so, remove it from its
    471       1.1      matt 	 * list and free the old route.  If not, try to malloc a new one
    472       1.1      matt 	 * (if we aren't at our limit).
    473       1.1      matt 	 */
    474       1.1      matt 	ipf = ipflow_lookup(ip);
    475       1.3      matt 	if (ipf == NULL) {
    476      1.53   thorpej 		if (ipflow_inuse >= ip_maxflows) {
    477       1.1      matt 			ipf = ipflow_reap(true);
    478      1.36       mrg 		} else {
    479       1.7   thorpej 			s = splnet();
    480      1.35       tls 			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
    481       1.1      matt 			splx(s);
    482      1.63     pooka 			if (ipf == NULL)
    483       1.1      matt 				goto out;
    484       1.1      matt 			ipflow_inuse++;
    485      1.39    dyoung 		}
    486       1.1      matt 		memset(ipf, 0, sizeof(*ipf));
    487      1.17   thorpej 	} else {
    488       1.5   thorpej 		s = splnet();
    489       1.1      matt 		IPFLOW_REMOVE(ipf);
    490       1.1      matt 		splx(s);
    491      1.38     joerg 		ipflow_addstats(ipf);
    492       1.1      matt 		rtcache_free(&ipf->ipf_ro);
    493       1.1      matt 		ipf->ipf_uses = ipf->ipf_last_uses = 0;
    494       1.1      matt 		ipf->ipf_errors = ipf->ipf_dropped = 0;
    495       1.1      matt 	}
    496       1.1      matt 
    497       1.1      matt 	/*
    498       1.1      matt 	 * Fill in the updated information.
    499      1.46    dyoung 	 */
    500       1.1      matt 	rtcache_copy(&ipf->ipf_ro, ro);
    501       1.1      matt 	ipf->ipf_dst = ip->ip_dst;
    502       1.1      matt 	ipf->ipf_src = ip->ip_src;
    503       1.5   thorpej 	ipf->ipf_tos = ip->ip_tos;
    504      1.60  liamjfoy 	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);
    505       1.1      matt 
    506       1.1      matt 	/*
     507       1.1      matt 	 * Insert into the appropriate bucket of the flow table.
    508      1.45  liamjfoy 	 */
    509      1.17   thorpej 	hash = ipflow_hash(ip);
    510       1.5   thorpej 	s = splnet();
    511      1.27       scw 	IPFLOW_INSERT(&ipflowtable[hash], ipf);
    512      1.63     pooka 	splx(s);
    513      1.63     pooka 
    514      1.63     pooka  out:
    515      1.27       scw 	KERNEL_UNLOCK_ONE(NULL);
    516      1.27       scw }
    517      1.43  liamjfoy 
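/*
 * Free every cached flow and, if new_size is non-zero, reallocate the
 * hash table at that size (used by the hashsize sysctl below).
 */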
    518      1.43  liamjfoy int
    519      1.27       scw ipflow_invalidate_all(int new_size)
    520      1.27       scw {
    521      1.43  liamjfoy 	struct ipflow *ipf, *next_ipf;
    522      1.27       scw 	int s, error;
    523      1.43  liamjfoy 
    524      1.27       scw 	error = 0;
    525      1.27       scw 	s = splnet();
    526      1.27       scw 	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
    527      1.27       scw 		next_ipf = LIST_NEXT(ipf, ipf_list);
    528      1.27       scw 		ipflow_free(ipf);
    529      1.43  liamjfoy 	}
    530      1.43  liamjfoy 
    531      1.64     rmind 	if (new_size)
    532       1.1      matt 		error = ipflow_reinit(new_size);
    533      1.43  liamjfoy 	splx(s);
    534      1.43  liamjfoy 
    535       1.1      matt 	return error;
    536      1.64     rmind }
    537      1.64     rmind 
    538      1.64     rmind #ifdef GATEWAY
    539      1.64     rmind /*
    540      1.64     rmind  * sysctl helper routine for net.inet.ip.maxflows.
    541      1.64     rmind  */
    542      1.64     rmind static int
    543      1.64     rmind sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
    544      1.64     rmind {
    545      1.64     rmind 	int error;
    546      1.64     rmind 
    547      1.64     rmind 	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
    548      1.64     rmind 	if (error || newp == NULL)
    549      1.64     rmind 		return (error);
    550      1.64     rmind 
    551      1.64     rmind 	mutex_enter(softnet_lock);
    552      1.64     rmind 	KERNEL_LOCK(1, NULL);
    553      1.64     rmind 
    554      1.64     rmind 	ipflow_reap(false);
    555      1.64     rmind 
    556      1.64     rmind 	KERNEL_UNLOCK_ONE(NULL);
    557      1.64     rmind 	mutex_exit(softnet_lock);
    558      1.64     rmind 
    559      1.64     rmind 	return (0);
    560      1.64     rmind }
    561      1.64     rmind 
    562      1.64     rmind static int
    563      1.64     rmind sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
    564      1.64     rmind {
    565      1.64     rmind 	int error, tmp;
    566      1.64     rmind 	struct sysctlnode node;
    567      1.64     rmind 
    568      1.64     rmind 	node = *rnode;
    569      1.64     rmind 	tmp = ip_hashsize;
    570      1.64     rmind 	node.sysctl_data = &tmp;
    571      1.64     rmind 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
    572      1.64     rmind 	if (error || newp == NULL)
    573      1.64     rmind 		return (error);
    574      1.64     rmind 
    575      1.64     rmind 	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
    576      1.64     rmind 		/*
    577      1.64     rmind 		 * Can only fail due to malloc()
    578      1.64     rmind 		 */
    579      1.64     rmind 		mutex_enter(softnet_lock);
    580      1.64     rmind 		KERNEL_LOCK(1, NULL);
    581      1.64     rmind 
    582      1.64     rmind 		error = ipflow_invalidate_all(tmp);
    583      1.64     rmind 
    584      1.64     rmind 		KERNEL_UNLOCK_ONE(NULL);
    585      1.64     rmind 		mutex_exit(softnet_lock);
    586      1.64     rmind 
    587      1.64     rmind 	} else {
    588      1.64     rmind 		/*
    589      1.64     rmind 		 * EINVAL if not a power of 2
     590      1.64     rmind 		 */
    591      1.64     rmind 		error = EINVAL;
    592      1.64     rmind 	}
    593      1.64     rmind 
    594      1.64     rmind 	return error;
    595      1.64     rmind }
    596      1.64     rmind #endif /* GATEWAY */
    597      1.64     rmind 
    598      1.64     rmind static void
    599      1.64     rmind ipflow_sysctl_init(struct sysctllog **clog)
    600      1.64     rmind {
    601      1.64     rmind 	sysctl_createv(clog, 0, NULL, NULL,
    602      1.64     rmind 		       CTLFLAG_PERMANENT,
    603      1.64     rmind 		       CTLTYPE_NODE, "inet",
    604      1.64     rmind 		       SYSCTL_DESCR("PF_INET related settings"),
    605      1.64     rmind 		       NULL, 0, NULL, 0,
    606      1.64     rmind 		       CTL_NET, PF_INET, CTL_EOL);
    607      1.64     rmind 	sysctl_createv(clog, 0, NULL, NULL,
    608      1.64     rmind 		       CTLFLAG_PERMANENT,
    609      1.64     rmind 		       CTLTYPE_NODE, "ip",
    610      1.64     rmind 		       SYSCTL_DESCR("IPv4 related settings"),
    611      1.64     rmind 		       NULL, 0, NULL, 0,
    612      1.64     rmind 		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);
    613      1.64     rmind 
    614      1.64     rmind #ifdef GATEWAY
    615      1.64     rmind 	sysctl_createv(clog, 0, NULL, NULL,
    616      1.64     rmind 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    617      1.64     rmind 		       CTLTYPE_INT, "maxflows",
    618      1.64     rmind 		       SYSCTL_DESCR("Number of flows for fast forwarding"),
    619      1.64     rmind 		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
    620      1.64     rmind 		       CTL_NET, PF_INET, IPPROTO_IP,
    621      1.64     rmind 		       IPCTL_MAXFLOWS, CTL_EOL);
    622      1.64     rmind 	sysctl_createv(clog, 0, NULL, NULL,
    623      1.64     rmind 			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
    624      1.64     rmind 			CTLTYPE_INT, "hashsize",
    625      1.64     rmind 			SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
    626      1.64     rmind 			sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
    627      1.64     rmind 			CTL_NET, PF_INET, IPPROTO_IP,
    628      1.64     rmind 			CTL_CREATE, CTL_EOL);
    629      1.64     rmind #endif /* GATEWAY */
    630                     }
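
/*
 * Illustrative usage (assumes a kernel built with "options GATEWAY",
 * which is what exposes these knobs):
 *
 *	sysctl -w net.inet.ip.maxflows=1024	# cap the flow cache
 *	sysctl -w net.inet.ip.hashsize=128	# must be a power of 2
 */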
    631