/*	$NetBSD: ip_flow.c,v 1.77 2016/10/18 07:30:31 ozaki-r Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.77 2016/10/18 07:30:31 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>
67 1.1 matt
68 1.44 liamjfoy /*
69 1.44 liamjfoy * Similar code is very well commented in netinet6/ip6_flow.c
70 1.44 liamjfoy */
71 1.44 liamjfoy
72 1.53 thorpej #define IPFLOW_HASHBITS 6 /* should not be a multiple of 8 */
73 1.53 thorpej
74 1.57 pooka static struct pool ipflow_pool;
75 1.7 thorpej
76 1.76 knakahar TAILQ_HEAD(ipflowhead, ipflow);
77 1.5 thorpej
78 1.1 matt #define IPFLOW_TIMER (5 * PR_SLOWHZ)
79 1.43 liamjfoy #define IPFLOW_DEFAULT_HASHSIZE (1 << IPFLOW_HASHBITS)
80 1.5 thorpej
81 1.70 knakahar /*
82 1.70 knakahar * ip_flow.c internal lock.
83 1.70 knakahar * If we use softnet_lock, it would cause recursive lock.
84 1.70 knakahar *
85 1.70 knakahar * This is a tentative workaround.
86 1.70 knakahar * We should make it scalable somehow in the future.
87 1.70 knakahar */
88 1.70 knakahar static kmutex_t ipflow_lock;
89 1.43 liamjfoy static struct ipflowhead *ipflowtable = NULL;
90 1.5 thorpej static struct ipflowhead ipflowlist;
91 1.1 matt static int ipflow_inuse;
92 1.5 thorpej
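/*
 * Each cached flow is linked on both a per-bucket hash chain
 * (ipflowtable[ipf_hashidx]) and the global ipflowlist, so lookups stay
 * cheap while reaping and the slow timer can walk every flow in one pass.
 */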
#define	IPFLOW_INSERT(hashidx, ipf) \
do { \
	(ipf)->ipf_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(hashidx, ipf) \
do { \
	TAILQ_REMOVE(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_REMOVE(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static struct ipflow *ipflow_reap(bool);
static void ipflow_sysctl_init(struct sysctllog **);

static void ipflow_slowtimo_work(struct work *, void *);
static struct workqueue	*ipflow_slowtimo_wq;
static struct work	ipflow_slowtimo_wk;

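/*
 * Hash the packet's TOS byte together with its source and destination
 * addresses, folding the addresses IPFLOW_HASHBITS bits at a time, and
 * mask the result to the current table size (ip_hashsize is always a
 * power of two).
 */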
static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize - 1);
}

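/*
 * Look up a cached flow matching the packet's source address, destination
 * address and TOS.  The caller must hold ipflow_lock.
 */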
static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	hash = ipflow_hash(ip);

	TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

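/*
 * (Re)allocate the flow hash table with table_size buckets and reset the
 * per-bucket chains and the global flow list.  Returns 1 if the allocation
 * fails (the old table is kept), 0 on success.
 */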
static int
ipflow_reinit(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ipflow_lock));

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	TAILQ_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		TAILQ_INIT(&ipflowtable[i]);

	return 0;
}

void
ipflow_init(void)
{
	int error;

	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ipflow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ipflow_lock);
	(void)ipflow_reinit(ip_hashsize);
	mutex_exit(&ipflow_lock);
	ipflow_sysctl_init(NULL);
}

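/*
 * Fast-forward a received packet along a cached flow.  Returns 1 if the
 * packet was consumed by the fast path (forwarded, or dropped after an
 * output error), 0 if it must take the normal ip_input()/ip_forward()
 * path instead.
 */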
int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt;
	const struct sockaddr *dst;
	int error;
	int iplen;
	struct ifnet *ifp;
	int s;
	int ret = 0;

	mutex_enter(&ipflow_lock);
	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		goto out;

	/*
	 * Was packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

	/*
	 * IP header with no option and valid version and length
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		goto out;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		goto out;

	ifp = m_get_rcvif(m, &s);
	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
	    ((ifp->if_csum_flags_rx & M_CSUM_IPv4) |
	     M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		m_put_rcvif(ifp, &s);
		goto out;

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0) {
			m_put_rcvif(ifp, &s);
			goto out;
		}
		break;
	}
	m_put_rcvif(ifp, &s);

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & (RTF_BLACKHOLE | RTF_BROADCAST)) != 0)
		goto out;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		goto out;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adding the checksum works on either endian CPU.
	 * If htons() is inlined, all the arithmetic is folded; otherwise
	 * the htons()s are combined by CSE due to the const attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;

#if 0
	/*
	 * Sorting the list is too heavy for the fast path (the packet
	 * processing path); it costs about 10% of performance.  So we do
	 * not sort ipflowtable and use FIFO cache replacement instead of
	 * LRU.
	 */
	/* Move to head (LRU) of ipflowlist; ipflowtable does not keep LRU order. */
	TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
	TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
#endif

	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	ret = 1;
 out:
	mutex_exit(&ipflow_lock);
	return ret;
}

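/*
 * Fold a flow's accumulated counters into its cached route and the global
 * IP statistics, typically just before the flow is freed or recycled.
 */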
static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
		rt->rt_use += ipf->ipf_uses;

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	pool_put(&ipflow_pool, ipf);
}

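/*
 * Evict flow entries.  With just_one == true (fast path) detach and return
 * a single victim for the caller to reuse; otherwise (slow path, sysctl)
 * keep freeing entries until ipflow_inuse is no more than ip_maxflows and
 * return NULL.
 */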
static struct ipflow *
ipflow_reap(bool just_one)
{
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * This case must remove one ipflow.  Furthermore, it is used in the
	 * fast path (the packet processing path), so simply remove the
	 * TAILQ_LAST entry.
	 */
	if (just_one) {
		ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
		KASSERT(ipf != NULL);

		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		return ipf;
	}

	/*
	 * This case is used in the slow path (sysctl).  On each pass, free
	 * a flow whose rtcache is no longer valid if one exists; otherwise
	 * free the TAILQ_LAST flow once comparing timers and last_uses
	 * confirms it is the least recently used.
	 */
	while (ipflow_inuse > ip_maxflows) {
		struct ipflow *maybe_ipf = TAILQ_LAST(&ipflowlist, ipflowhead);

		TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
			/*
			 * If this no longer points to a valid route
			 * reclaim it.
			 */
			if (rtcache_validate(&ipf->ipf_ro) == NULL)
				goto done;
			/*
			 * choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (ipf->ipf_timer < maybe_ipf->ipf_timer
			    || ((ipf->ipf_timer == maybe_ipf->ipf_timer)
				&& (ipf->ipf_last_uses + ipf->ipf_uses
				    < maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses)))
				maybe_ipf = ipf;
		}
		ipf = maybe_ipf;
	done:
		/*
		 * Remove the entry from the flow table.
		 */
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

static unsigned int ipflow_work_enqueued = 0;

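/*
 * Periodic housekeeping, run from a workqueue: free flows whose timer has
 * expired or whose cached route has gone stale, and fold the remaining
 * flows' use counts into the route and global IP statistics.
 */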
static void
ipflow_slowtimo_work(struct work *wk, void *arg)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	/* A new work item may be enqueued again from this point on. */
	atomic_swap_uint(&ipflow_work_enqueued, 0);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);
	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif
}

void
ipflow_slowtimo(void)
{

	/* Avoid enqueuing another work item while one is already pending. */
	if (atomic_swap_uint(&ipflow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
}

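/*
 * Record (or refresh) a cached flow for a packet that was just forwarded
 * along route ro, so that subsequent packets of the same flow can take
 * ipflow_fastforward().
 */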
void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;

#ifndef NET_MPSAFE
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		goto out;

	/*
	 * See if an existing flow struct exists.  If so remove it from its
	 * list and free the old route.  If not, try to malloc a new one
	 * (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			if (ipf == NULL)
				goto out;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	IPFLOW_INSERT(hash, ipf);

 out:
	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
#endif
}

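/*
 * Free every cached flow and, if new_size is nonzero, reallocate the hash
 * table with new_size buckets.  Returns nonzero if the new table could not
 * be allocated.
 */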
int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int error;

	error = 0;

	mutex_enter(&ipflow_lock);

	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_reinit(new_size);

	mutex_exit(&ipflow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet.ip.maxflows.
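 * After an update, flows above the new limit are reaped immediately.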
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);

	ipflow_reap(false);

	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif

	return (0);
}

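/*
 * sysctl helper routine for net.inet.ip.hashsize.  The new size must be a
 * nonzero power of two; the whole flow cache is invalidated when the table
 * is resized.
 */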
static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc()
		 */
#ifndef NET_MPSAFE
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);
#endif
		error = ipflow_invalidate_all(tmp);
#ifndef NET_MPSAFE
		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);
#endif
	} else {
		/*
		 * EINVAL if not a power of 2
		 */
		error = EINVAL;
	}

	return error;
}

static void
ipflow_sysctl_init(struct sysctllog **clog)
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet",
		       SYSCTL_DESCR("PF_INET related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ip",
		       SYSCTL_DESCR("IPv4 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxflows",
		       SYSCTL_DESCR("Number of flows for fast forwarding"),
		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFLOWS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "hashsize",
		       SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
		       sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       CTL_CREATE, CTL_EOL);
}