/*	$NetBSD: ip6_flow.c,v 1.19.6.4 2017/12/03 11:39:04 jdolecek Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Liam J. Foy
 * <liamjfoy@netbsd.org> and Matt Thomas <matt@netbsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * IPv6 version was developed by Liam J. Foy. Original source existed in IPv4
 * format developed by Matt Thomas. Thanks to Joerg Sonnenberger, Matt
 * Thomas and Christos Zoulas.
 *
 * Thanks to Liverpool John Moores University, especially Dr. David
 * Llewellyn-Jones for providing resources (to test) and Professor
 * Madjid Merabti.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip6_flow.c,v 1.19.6.4 2017/12/03 11:39:04 jdolecek Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6_private.h>

/*
 * IPv6 Fast Forward caches/hashes flows from one source to a destination.
 *
 * Upon a successful forward, IPv6FF caches and hashes details such as the
 * route, source and destination.  Once another packet is received matching
 * the cached source, destination and flow label, the packet is passed
 * straight to if_output using the cached details.
 *
 * Example:
 *	ether/fddi_input -> ip6flow_fastforward -> if_output
 */
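
/*
 * A minimal sketch of the intended call pattern (illustrative only; the
 * real callers are the link-layer input paths named above):
 *
 *	if (ip6flow_fastforward(&m))
 *		return;		(packet consumed: sent or dropped)
 *	(otherwise continue into the normal ip6_input() path)
 */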

static struct pool ip6flow_pool;

TAILQ_HEAD(ip6flowhead, ip6flow);

/*
 * We could use IPv4 defines (IPFLOW_HASHBITS) but we'll
 * use our own (possibly for future expansion).
 */
#define	IP6FLOW_TIMER			(5 * PR_SLOWHZ)
#define	IP6FLOW_DEFAULT_HASHSIZE	(1 << IP6FLOW_HASHBITS)
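
/*
 * A flow that goes unused for IP6FLOW_TIMER slow-timeout ticks is
 * reclaimed by ip6flow_slowtimo_work().  PR_SLOWHZ is the number of
 * slow-timeout ticks per second, so this amounts to a five-second
 * idle lifetime.
 */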

/*
 * ip6_flow.c internal lock.
 * Using softnet_lock here would cause a recursive lock.
 *
 * This is a tentative workaround.
 * We should make it scalable somehow in the future.
 */
static kmutex_t ip6flow_lock;
static struct ip6flowhead *ip6flowtable = NULL;
static struct ip6flowhead ip6flowlist;
static int ip6flow_inuse;

static void ip6flow_slowtimo_work(struct work *, void *);
static struct workqueue *ip6flow_slowtimo_wq;
static struct work ip6flow_slowtimo_wk;

static int sysctl_net_inet6_ip6_hashsize(SYSCTLFN_PROTO);
static int sysctl_net_inet6_ip6_maxflows(SYSCTLFN_PROTO);
static void ip6flow_sysctl_init(struct sysctllog **);

/*
 * Insert an ip6flow into the list.
 */
#define	IP6FLOW_INSERT(hashidx, ip6f) \
do { \
	(ip6f)->ip6f_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ip6flowtable[(hashidx)], (ip6f), ip6f_hash); \
	TAILQ_INSERT_HEAD(&ip6flowlist, (ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)

/*
 * Remove an ip6flow from the list.
 */
#define	IP6FLOW_REMOVE(hashidx, ip6f) \
do { \
	TAILQ_REMOVE(&ip6flowtable[(hashidx)], (ip6f), ip6f_hash); \
	TAILQ_REMOVE(&ip6flowlist, (ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)
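
/*
 * Each ip6flow is linked on two lists at once: the per-bucket hash
 * chain (ip6f_hash) used by ip6flow_lookup(), and the global
 * ip6flowlist (ip6f_list) walked for timeout and reclamation.  The
 * INSERT/REMOVE macros above keep the two consistent and must always
 * be used together with ip6f_hashidx.
 */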

#ifndef IP6FLOW_DEFAULT
#define	IP6FLOW_DEFAULT		256
#endif

int ip6_maxflows = IP6FLOW_DEFAULT;
int ip6_hashsize = IP6FLOW_DEFAULT_HASHSIZE;

/*
 * Calculate the hash table position for a packet.  The four 32-bit
 * words of the source and destination addresses are summed, then
 * folded into the hash IP6FLOW_HASHBITS bits at a time - the
 * destination sum from the top of the word, the source sum from the
 * bottom - together with the flow word (version, traffic class and
 * flow label).  The result is masked to the table size, which is
 * kept a power of two.
 */
static size_t
ip6flow_hash(const struct ip6_hdr *ip6)
{
	size_t hash;
	uint32_t dst_sum, src_sum;
	size_t idx;

	src_sum = ip6->ip6_src.s6_addr32[0] + ip6->ip6_src.s6_addr32[1]
	    + ip6->ip6_src.s6_addr32[2] + ip6->ip6_src.s6_addr32[3];
	dst_sum = ip6->ip6_dst.s6_addr32[0] + ip6->ip6_dst.s6_addr32[1]
	    + ip6->ip6_dst.s6_addr32[2] + ip6->ip6_dst.s6_addr32[3];

	hash = ip6->ip6_flow;

	for (idx = 0; idx < 32; idx += IP6FLOW_HASHBITS) {
		/*
		 * Skip the dst_sum term on the first iteration: shifting
		 * a 32-bit value by 32 is undefined behaviour in C.
		 */
		hash += (idx ? (dst_sum >> (32 - idx)) : 0) + (src_sum >> idx);
	}

	return hash & (ip6_hashsize - 1);
}

/*
 * Check to see if a flow already exists - if so return it.
 */
static struct ip6flow *
ip6flow_lookup(const struct ip6_hdr *ip6)
{
	size_t hash;
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	hash = ip6flow_hash(ip6);

	TAILQ_FOREACH(ip6f, &ip6flowtable[hash], ip6f_hash) {
		if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6f->ip6f_dst)
		    && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6f->ip6f_src)
		    && ip6f->ip6f_flow == ip6->ip6_flow) {
			/* A cached flow has been found. */
			return ip6f;
		}
	}

	return NULL;
}

void
ip6flow_poolinit(void)
{

	pool_init(&ip6flow_pool, sizeof(struct ip6flow), 0, 0, 0, "ip6flowpl",
	    NULL, IPL_NET);
}
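
/*
 * Note that the pool is initialised separately from the hash table:
 * ip6flow_init_locked() can be re-run later (via the
 * net.inet6.ip6.hashsize sysctl) to resize the table without
 * recreating the pool.
 */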

/*
 * Allocate memory and initialise lists.  This function is called from
 * ip6_init and thereafter to resize the hash table.  If a newly sized
 * table cannot be malloc'ed we just continue to use the old one.
 */
static int
ip6flow_init_locked(int table_size)
{
	struct ip6flowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ip6flow_lock));

	new_table = (struct ip6flowhead *)malloc(sizeof(struct ip6flowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ip6flowtable != NULL)
		free(ip6flowtable, M_RTABLE);

	ip6flowtable = new_table;
	ip6_hashsize = table_size;

	TAILQ_INIT(&ip6flowlist);
	for (i = 0; i < ip6_hashsize; i++)
		TAILQ_INIT(&ip6flowtable[i]);

	return 0;
}

int
ip6flow_init(int table_size)
{
	int ret, error;

	error = workqueue_create(&ip6flow_slowtimo_wq, "ip6flow_slowtimo",
	    ip6flow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ip6flow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ip6flow_lock);
	ret = ip6flow_init_locked(table_size);
	mutex_exit(&ip6flow_lock);
	ip6flow_sysctl_init(NULL);

	return ret;
}

/*
 * IPv6 Fast Forward routine.  Attempt to forward the packet -
 * if any problems are found, return to the main IPv6 input
 * routine to deal with them.
 */
int
ip6flow_fastforward(struct mbuf **mp)
{
	struct ip6flow *ip6f;
	struct ip6_hdr *ip6;
	struct rtentry *rt = NULL;
	struct mbuf *m;
	const struct sockaddr *dst;
	int error;
	int ret = 0;

	mutex_enter(&ip6flow_lock);

	/*
	 * Are we forwarding packets and have flows?
	 */
	if (!ip6_forwarding || ip6flow_inuse == 0)
		goto out;

	m = *mp;
	/*
	 * At least the size of an IPv6 header?
	 */
	if (m->m_len < sizeof(struct ip6_hdr))
		goto out;
	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

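	/*
	 * Ensure the IPv6 header is both aligned and contiguous in the
	 * first mbuf before it is dereferenced: m_copyup() re-copies
	 * the header into aligned storage, while m_pullup() only
	 * guarantees contiguity.
	 */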
	if (IP6_HDR_ALIGNED_P(mtod(m, const void *)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			goto out;
		}
		*mp = m;
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			goto out;
		}
		*mp = m;
	}

	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		/* Bad version. */
		goto out;
	}

	/*
	 * If we have a hop-by-hop extension we must process it.
	 * We just leave this up to ip6_input to deal with.
	 */
	if (ip6->ip6_nxt == IPPROTO_HOPOPTS)
		goto out;

	/*
	 * Attempt to find a flow.
	 */
	if ((ip6f = ip6flow_lookup(ip6)) == NULL) {
		/* No flow found. */
		goto out;
	}

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & RTF_BLACKHOLE) != 0)
		goto out_unref;

	/*
	 * Packet size greater than MTU?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu) {
		/* Return to main IPv6 input function. */
		goto out_unref;
	}

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

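	/*
	 * If the hop limit is too small to decrement, bail out and let
	 * the full input path generate the ICMPv6 time-exceeded error.
	 */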
	if (ip6->ip6_hlim <= IPV6_HLIMDEC)
		goto out_unref;

	/* Decrement the hop limit (the IPv6 counterpart of IPv4's TTL). */
	ip6->ip6_hlim -= IPV6_HLIMDEC;

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ip6f->ip6f_ro);

	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	ip6f->ip6f_uses++;

#if 0
	/*
	 * We use FIFO cache replacement instead of LRU, the same as
	 * ip_flow.c.
	 */
	/*
	 * Move to the head of ip6flowlist (LRU); ip6flowtable does not
	 * maintain LRU order.
	 */
	TAILQ_REMOVE(&ip6flowlist, ip6f, ip6f_list);
	TAILQ_INSERT_HEAD(&ip6flowlist, ip6f, ip6f_list);
#endif

	/* Send on its way - straight to the interface output routine. */
	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		ip6f->ip6f_dropped++;
	} else {
		ip6f->ip6f_forwarded++;
	}
	ret = 1;
out_unref:
	rtcache_unref(rt, &ip6f->ip6f_ro);
out:
	mutex_exit(&ip6flow_lock);
	return ret;
}

/*
 * Add the IPv6 flow statistics to the main IPv6 statistics.
 */
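/*
 * Note that IP6_STAT_FASTFORWARDFLOWS is assigned rather than
 * accumulated: it reports the current number of cached flows, while
 * the remaining counters are running totals.
 */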
static void
ip6flow_addstats_rt(struct rtentry *rt, struct ip6flow *ip6f)
{
	uint64_t *ip6s;

	if (rt != NULL)
		rt->rt_use += ip6f->ip6f_uses;
	ip6s = IP6_STAT_GETREF();
	ip6s[IP6_STAT_FASTFORWARDFLOWS] = ip6flow_inuse;
	ip6s[IP6_STAT_CANTFORWARD] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_ODROPPED] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_TOTAL] += ip6f->ip6f_uses;
	ip6s[IP6_STAT_FORWARD] += ip6f->ip6f_forwarded;
	ip6s[IP6_STAT_FASTFORWARD] += ip6f->ip6f_forwarded;
	IP6_STAT_PUTREF();
}

static void
ip6flow_addstats(struct ip6flow *ip6f)
{
	struct rtentry *rt;

	rt = rtcache_validate(&ip6f->ip6f_ro);
	ip6flow_addstats_rt(rt, ip6f);
	rtcache_unref(rt, &ip6f->ip6f_ro);
}

/*
 * Add statistics and free the flow.
 */
static void
ip6flow_free(struct ip6flow *ip6f)
{

	KASSERT(mutex_owned(&ip6flow_lock));

	/*
	 * Remove the flow from the hash table and the global list
	 * before releasing its resources, so that no lookup can
	 * find it while it is being torn down.
	 */
	IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

	ip6flow_inuse--;
	ip6flow_addstats(ip6f);
	rtcache_free(&ip6f->ip6f_ro);
	pool_put(&ip6flow_pool, ip6f);
}

static struct ip6flow *
ip6flow_reap_locked(int just_one)
{
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	/*
	 * This path must remove exactly one ip6flow, and it is used on
	 * the fast path (packet processing), so simply remove the
	 * TAILQ_LAST entry.
	 */
	if (just_one) {
		ip6f = TAILQ_LAST(&ip6flowlist, ip6flowhead);
		KASSERT(ip6f != NULL);

		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		ip6flow_addstats(ip6f);
		rtcache_free(&ip6f->ip6f_ro);
		return ip6f;
	}

	/*
	 * This path is used on the slow path (sysctl).  First reclaim
	 * any ip6flow whose rtcache is no longer valid; failing that,
	 * remove the TAILQ_LAST ip6flow once it is known to be least
	 * recently used, by comparing timers and last_uses.
	 */
	while (ip6flow_inuse > ip6_maxflows) {
		struct ip6flow *maybe_ip6f = TAILQ_LAST(&ip6flowlist, ip6flowhead);

		TAILQ_FOREACH(ip6f, &ip6flowlist, ip6f_list) {
			struct rtentry *rt;
			/*
			 * If this no longer points to a valid route -
			 * reclaim it.
			 */
			if ((rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL)
				goto done;
			rtcache_unref(rt, &ip6f->ip6f_ro);
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (ip6f->ip6f_timer < maybe_ip6f->ip6f_timer
			    || ((ip6f->ip6f_timer == maybe_ip6f->ip6f_timer)
			    && (ip6f->ip6f_last_uses + ip6f->ip6f_uses
			    < maybe_ip6f->ip6f_last_uses + maybe_ip6f->ip6f_uses)))
				maybe_ip6f = ip6f;
		}
		ip6f = maybe_ip6f;
done:
		/*
		 * Remove the entry from the flow table.
		 */
		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		rtcache_free(&ip6f->ip6f_ro);
		ip6flow_inuse--;
		ip6flow_addstats(ip6f);
		pool_put(&ip6flow_pool, ip6f);
	}
	return NULL;
}

/*
 * Reap one or more flows - ip6flow_reap may remove
 * multiple flows if net.inet6.ip6.maxflows is reduced.
 * With just_one set, the single reclaimed (now unlinked) entry is
 * returned to the caller for reuse; otherwise NULL is returned.
 */
struct ip6flow *
ip6flow_reap(int just_one)
{
	struct ip6flow *ip6f;

	mutex_enter(&ip6flow_lock);
	ip6f = ip6flow_reap_locked(just_one);
	mutex_exit(&ip6flow_lock);
	return ip6f;
}

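/*
 * Gate to ensure that at most one slowtimo work item is enqueued at a
 * time; see ip6flow_slowtimo() and ip6flow_slowtimo_work().
 */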
static unsigned int ip6flow_work_enqueued = 0;

void
ip6flow_slowtimo_work(struct work *wk, void *arg)
{
	struct ip6flow *ip6f, *next_ip6f;

	/* We can allow enqueuing another work at this point */
	atomic_swap_uint(&ip6flow_work_enqueued, 0);

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
	mutex_enter(&ip6flow_lock);

	for (ip6f = TAILQ_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		struct rtentry *rt = NULL;
		next_ip6f = TAILQ_NEXT(ip6f, ip6f_list);
		if (PRT_SLOW_ISEXPIRED(ip6f->ip6f_timer) ||
		    (rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL) {
			ip6flow_free(ip6f);
		} else {
			ip6f->ip6f_last_uses = ip6f->ip6f_uses;
			ip6flow_addstats_rt(rt, ip6f);
			ip6f->ip6f_uses = 0;
			ip6f->ip6f_dropped = 0;
			ip6f->ip6f_forwarded = 0;
		}
		rtcache_unref(rt, &ip6f->ip6f_ro);
	}

	mutex_exit(&ip6flow_lock);
	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

void
ip6flow_slowtimo(void)
{

	/* Avoid enqueuing another work when one is already enqueued */
	if (atomic_swap_uint(&ip6flow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ip6flow_slowtimo_wq, &ip6flow_slowtimo_wk, NULL);
}

/*
 * We have successfully forwarded a packet using the normal
 * IPv6 stack.  Now create/update a flow.
 */
void
ip6flow_create(struct route *ro, struct mbuf *m)
{
	const struct ip6_hdr *ip6;
	struct ip6flow *ip6f;
	size_t hash;

	ip6 = mtod(m, const struct ip6_hdr *);

	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	mutex_enter(&ip6flow_lock);

	/*
	 * If IPv6 Fast Forward is disabled, don't create a flow.
	 * It can be disabled by setting net.inet6.ip6.maxflows to 0.
	 *
	 * Don't create a flow for ICMPv6 messages.
	 */
	if (ip6_maxflows == 0 || ip6->ip6_nxt == IPPROTO_IPV6_ICMP)
		goto out;

	/*
	 * See if a flow already exists.  If so:
	 *	- Remove the flow
	 *	- Add flow statistics
	 *	- Free the route
	 *	- Reset statistics
	 *
	 * If a flow doesn't exist, allocate a new one if
	 * ip6_maxflows hasn't reached its limit.  If it has
	 * been reached, reap some flows.
	 */
	ip6f = ip6flow_lookup(ip6);
	if (ip6f == NULL) {
		if (ip6flow_inuse >= ip6_maxflows) {
			ip6f = ip6flow_reap_locked(1);
		} else {
			ip6f = pool_get(&ip6flow_pool, PR_NOWAIT);
			if (ip6f == NULL)
				goto out;
			ip6flow_inuse++;
		}
		memset(ip6f, 0, sizeof(*ip6f));
	} else {
		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		ip6flow_addstats(ip6f);
		rtcache_free(&ip6f->ip6f_ro);
		ip6f->ip6f_uses = 0;
		ip6f->ip6f_last_uses = 0;
		ip6f->ip6f_dropped = 0;
		ip6f->ip6f_forwarded = 0;
	}

	/*
	 * Fill in the updated/new details.
	 */
	rtcache_copy(&ip6f->ip6f_ro, ro);
	ip6f->ip6f_dst = ip6->ip6_dst;
	ip6f->ip6f_src = ip6->ip6_src;
	ip6f->ip6f_flow = ip6->ip6_flow;
	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ip6flow_hash(ip6);
	IP6FLOW_INSERT(hash, ip6f);

out:
	mutex_exit(&ip6flow_lock);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

/*
 * Invalidate/remove all flows - if new_size is positive we
 * resize the hash table.
 */
int
ip6flow_invalidate_all(int new_size)
{
	struct ip6flow *ip6f, *next_ip6f;
	int error;

	error = 0;

	mutex_enter(&ip6flow_lock);

	for (ip6f = TAILQ_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		next_ip6f = TAILQ_NEXT(ip6f, ip6f_list);
		ip6flow_free(ip6f);
	}

	if (new_size)
		error = ip6flow_init_locked(new_size);

	mutex_exit(&ip6flow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet6.ip6.maxflows.  Since the value
 * may have been reduced, call ip6flow_reap().
 */
static int
sysctl_net_inet6_ip6_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();

	ip6flow_reap(0);

	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

	return (0);
}

static int
sysctl_net_inet6_ip6_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip6_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

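	/*
	 * (tmp & (tmp - 1)) == 0 holds exactly when at most one bit of
	 * tmp is set, i.e. when tmp is a power of two or zero; the
	 * "tmp != 0" test excludes zero.
	 */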
	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc().
		 */
		SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
		error = ip6flow_invalidate_all(tmp);
		SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
	} else {
		/*
		 * EINVAL if not a power of 2.
		 */
		error = EINVAL;
	}

	return error;
}

static void
ip6flow_sysctl_init(struct sysctllog **clog)
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "inet6",
		SYSCTL_DESCR("PF_INET6 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET6, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ip6",
		SYSCTL_DESCR("IPv6 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxflows",
		SYSCTL_DESCR("Number of flows for fast forwarding (IPv6)"),
		sysctl_net_inet6_ip6_maxflows, 0, &ip6_maxflows, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "hashsize",
		SYSCTL_DESCR("Size of hash table for fast forwarding (IPv6)"),
		sysctl_net_inet6_ip6_hashsize, 0, &ip6_hashsize, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6,
		CTL_CREATE, CTL_EOL);
}