/*	$NetBSD: ip6_flow.c,v 1.28.2.4 2017/03/20 06:57:51 pgoyette Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Liam J. Foy
 * <liamjfoy (at) netbsd.org> and Matt Thomas <matt (at) netbsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The IPv6 version was developed by Liam J. Foy; the original IPv4 code
 * was developed by Matt Thomas.  Thanks to Joerg Sonnenberger, Matt
 * Thomas and Christos Zoulas.
 *
 * Thanks to Liverpool John Moores University, especially Dr. David
 * Llewellyn-Jones for providing resources (to test) and Professor
 * Madjid Merabti.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip6_flow.c,v 1.28.2.4 2017/03/20 06:57:51 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6_private.h>

/*
 * IPv6 Fast Forward caches/hashes flows from one source to destination.
 *
 * Upon a successful forward, IPv6FF caches and hashes details such as the
 * route, source and destination.  Once another packet is received matching
 * the source and destination, the packet is forwarded straight to if_output
 * using the cached details.
 *
 * Example:
 * ether/fddi_input -> ip6flow_fastforward -> if_output
 */

static struct pool ip6flow_pool;

LIST_HEAD(ip6flowhead, ip6flow);

/*
 * We could use IPv4 defines (IPFLOW_HASHBITS) but we'll
 * use our own (possibly for future expansion).
 */
#define	IP6FLOW_TIMER			(5 * PR_SLOWHZ)
#define	IP6FLOW_DEFAULT_HASHSIZE	(1 << IP6FLOW_HASHBITS)

/*
 * ip6_flow.c internal lock.
 * Using softnet_lock here would cause a lock recursion.
 *
 * This is a tentative workaround.
 * We should make it scalable somehow in the future.
 */
static kmutex_t ip6flow_lock;
static struct ip6flowhead *ip6flowtable = NULL;
static struct ip6flowhead ip6flowlist;
static int ip6flow_inuse;

static void ip6flow_slowtimo_work(struct work *, void *);
static struct workqueue *ip6flow_slowtimo_wq;
static struct work ip6flow_slowtimo_wk;

static int sysctl_net_inet6_ip6_hashsize(SYSCTLFN_PROTO);
static int sysctl_net_inet6_ip6_maxflows(SYSCTLFN_PROTO);
static void ip6flow_sysctl_init(struct sysctllog **);

/*
 * Insert an ip6flow into the list.
 */
#define	IP6FLOW_INSERT(bucket, ip6f) \
do { \
	LIST_INSERT_HEAD((bucket), (ip6f), ip6f_hash); \
	LIST_INSERT_HEAD(&ip6flowlist, (ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)

/*
 * Remove an ip6flow from the list.
 */
#define	IP6FLOW_REMOVE(ip6f) \
do { \
	LIST_REMOVE((ip6f), ip6f_hash); \
	LIST_REMOVE((ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IP6FLOW_DEFAULT
#define	IP6FLOW_DEFAULT		256
#endif

int ip6_maxflows = IP6FLOW_DEFAULT;
int ip6_hashsize = IP6FLOW_DEFAULT_HASHSIZE;

/*
 * Calculate hash table position.
 */
static size_t
ip6flow_hash(const struct ip6_hdr *ip6)
{
	size_t hash;
	uint32_t dst_sum, src_sum;
	size_t idx;

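	/*
	 * Fold each 128-bit address into a 32-bit sum, then mix both
	 * sums into the flow label IP6FLOW_HASHBITS bits at a time.
	 */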
	src_sum = ip6->ip6_src.s6_addr32[0] + ip6->ip6_src.s6_addr32[1]
	    + ip6->ip6_src.s6_addr32[2] + ip6->ip6_src.s6_addr32[3];
	dst_sum = ip6->ip6_dst.s6_addr32[0] + ip6->ip6_dst.s6_addr32[1]
	    + ip6->ip6_dst.s6_addr32[2] + ip6->ip6_dst.s6_addr32[3];

	hash = ip6->ip6_flow;

	for (idx = 0; idx < 32; idx += IP6FLOW_HASHBITS)
		hash += (dst_sum >> (32 - idx)) + (src_sum >> idx);

	return hash & (ip6_hashsize - 1);
}

/*
 * Check to see if a flow already exists - if so return it.
 */
static struct ip6flow *
ip6flow_lookup(const struct ip6_hdr *ip6)
{
	size_t hash;
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	hash = ip6flow_hash(ip6);

	LIST_FOREACH(ip6f, &ip6flowtable[hash], ip6f_hash) {
		if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6f->ip6f_dst)
		    && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6f->ip6f_src)
		    && ip6f->ip6f_flow == ip6->ip6_flow) {
			/* A cached flow has been found. */
			return ip6f;
		}
	}

	return NULL;
}

void
ip6flow_poolinit(void)
{

	pool_init(&ip6flow_pool, sizeof(struct ip6flow), 0, 0, 0, "ip6flowpl",
	    NULL, IPL_NET);
}

/*
 * Allocate memory and initialise lists.  This function is called
 * from ip6_init and called thereafter to resize the hash table.
 * If a newly sized table cannot be malloc'ed we just continue
 * to use the old one.
 */
static int
ip6flow_init_locked(int table_size)
{
	struct ip6flowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ip6flow_lock));

	new_table = (struct ip6flowhead *)malloc(sizeof(struct ip6flowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ip6flowtable != NULL)
		free(ip6flowtable, M_RTABLE);

	ip6flowtable = new_table;
	ip6_hashsize = table_size;

	LIST_INIT(&ip6flowlist);
	for (i = 0; i < ip6_hashsize; i++)
		LIST_INIT(&ip6flowtable[i]);

	return 0;
}

int
ip6flow_init(int table_size)
{
	int ret, error;

	error = workqueue_create(&ip6flow_slowtimo_wq, "ip6flow_slowtimo",
	    ip6flow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ip6flow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ip6flow_lock);
	ret = ip6flow_init_locked(table_size);
	mutex_exit(&ip6flow_lock);
	ip6flow_sysctl_init(NULL);

	return ret;
}

/*
 * IPv6 Fast Forward routine.  Attempt to forward the packet -
 * if any problems are found return to the main IPv6 input
 * routine to deal with them.
 */
int
ip6flow_fastforward(struct mbuf **mp)
{
	struct ip6flow *ip6f;
	struct ip6_hdr *ip6;
	struct rtentry *rt = NULL;
	struct mbuf *m;
	const struct sockaddr *dst;
	int error;
	int ret = 0;

	mutex_enter(&ip6flow_lock);

	/*
	 * Are we forwarding packets and have flows?
	 */
	if (!ip6_forwarding || ip6flow_inuse == 0)
		goto out;

	m = *mp;
	/*
	 * At least the size of an IPv6 header?
	 */
	if (m->m_len < sizeof(struct ip6_hdr))
		goto out;
	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

	if (IP6_HDR_ALIGNED_P(mtod(m, const void *)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			goto out;
		}
		*mp = m;
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			goto out;
		}
		*mp = m;
	}

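	/* The IPv6 header is now contiguous and aligned in the first mbuf. */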
	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		/* Bad version. */
		goto out;
	}

	/*
	 * If we have a hop-by-hop extension we must process it.
	 * We just leave this up to ip6_input to deal with.
	 */
	if (ip6->ip6_nxt == IPPROTO_HOPOPTS)
		goto out;

	/*
	 * Attempt to find a flow.
	 */
	if ((ip6f = ip6flow_lookup(ip6)) == NULL) {
		/* No flow found. */
		goto out;
	}

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & RTF_BLACKHOLE) != 0)
		goto out_unref;

	/*
	 * Packet size greater than MTU?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu) {
		/* Return to main IPv6 input function. */
		goto out_unref;
	}

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

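	/*
	 * Hop limit too low to forward - hand the packet back to the
	 * slow path so it can generate the ICMPv6 error.
	 */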
	if (ip6->ip6_hlim <= IPV6_HLIMDEC)
		goto out_unref;

	/* Decrement hop limit (same as TTL) */
	ip6->ip6_hlim -= IPV6_HLIMDEC;

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ip6f->ip6f_ro);

	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	ip6f->ip6f_uses++;

	/* Send on its way - straight to the interface output routine. */
	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		ip6f->ip6f_dropped++;
	} else {
		ip6f->ip6f_forwarded++;
	}
	ret = 1;
 out_unref:
	rtcache_unref(rt, &ip6f->ip6f_ro);
 out:
	mutex_exit(&ip6flow_lock);
	return ret;
}

/*
 * Add the IPv6 flow statistics to the main IPv6 statistics.
 */
static void
ip6flow_addstats_rt(struct rtentry *rt, struct ip6flow *ip6f)
{
	uint64_t *ip6s;

	if (rt != NULL)
		rt->rt_use += ip6f->ip6f_uses;
	ip6s = IP6_STAT_GETREF();
	ip6s[IP6_STAT_FASTFORWARDFLOWS] = ip6flow_inuse;
	ip6s[IP6_STAT_CANTFORWARD] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_ODROPPED] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_TOTAL] += ip6f->ip6f_uses;
	ip6s[IP6_STAT_FORWARD] += ip6f->ip6f_forwarded;
	ip6s[IP6_STAT_FASTFORWARD] += ip6f->ip6f_forwarded;
	IP6_STAT_PUTREF();
}

static void
ip6flow_addstats(struct ip6flow *ip6f)
{
	struct rtentry *rt;

	rt = rtcache_validate(&ip6f->ip6f_ro);
	ip6flow_addstats_rt(rt, ip6f);
	rtcache_unref(rt, &ip6f->ip6f_ro);
}

/*
 * Add statistics and free the flow.
 */
static void
ip6flow_free(struct ip6flow *ip6f)
{

	KASSERT(mutex_owned(&ip6flow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IP6FLOW_REMOVE(ip6f);

	ip6flow_inuse--;
	ip6flow_addstats(ip6f);
	rtcache_free(&ip6f->ip6f_ro);
	pool_put(&ip6flow_pool, ip6f);
}

static struct ip6flow *
ip6flow_reap_locked(int just_one)
{
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	while (just_one || ip6flow_inuse > ip6_maxflows) {
		struct ip6flow *maybe_ip6f = NULL;

		ip6f = LIST_FIRST(&ip6flowlist);
		while (ip6f != NULL) {
			struct rtentry *rt;
			/*
			 * If this no longer points to a valid route -
			 * reclaim it.
			 */
			if ((rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL)
				goto done;
			rtcache_unref(rt, &ip6f->ip6f_ro);
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (maybe_ip6f == NULL ||
			    ip6f->ip6f_timer < maybe_ip6f->ip6f_timer ||
			    (ip6f->ip6f_timer == maybe_ip6f->ip6f_timer &&
			     ip6f->ip6f_last_uses + ip6f->ip6f_uses <
			     maybe_ip6f->ip6f_last_uses +
			     maybe_ip6f->ip6f_uses))
				maybe_ip6f = ip6f;
			ip6f = LIST_NEXT(ip6f, ip6f_list);
		}
		ip6f = maybe_ip6f;
 done:
		/*
		 * Remove the entry from the flow table.
		 */
		IP6FLOW_REMOVE(ip6f);

		rtcache_free(&ip6f->ip6f_ro);
		if (just_one) {
			ip6flow_addstats(ip6f);
			return ip6f;
		}
		ip6flow_inuse--;
		ip6flow_addstats(ip6f);
		pool_put(&ip6flow_pool, ip6f);
	}
	return NULL;
}

/*
 * Reap one or more flows - ip6flow_reap may remove
 * multiple flows if net.inet6.ip6.maxflows is reduced.
 */
struct ip6flow *
ip6flow_reap(int just_one)
{
	struct ip6flow *ip6f;

	mutex_enter(&ip6flow_lock);
	ip6f = ip6flow_reap_locked(just_one);
	mutex_exit(&ip6flow_lock);
	return ip6f;
}

static unsigned int ip6flow_work_enqueued = 0;

static void
ip6flow_slowtimo_work(struct work *wk, void *arg)
{
	struct ip6flow *ip6f, *next_ip6f;

	/* We can allow enqueuing another work at this point */
	atomic_swap_uint(&ip6flow_work_enqueued, 0);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ip6flow_lock);

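	/*
	 * Free flows whose timer has expired or whose cached route is no
	 * longer valid; otherwise fold this interval's counters into the
	 * global statistics and reset them for the next interval.
	 */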
	for (ip6f = LIST_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		struct rtentry *rt = NULL;
		next_ip6f = LIST_NEXT(ip6f, ip6f_list);
		if (PRT_SLOW_ISEXPIRED(ip6f->ip6f_timer) ||
		    (rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL) {
			ip6flow_free(ip6f);
		} else {
			ip6f->ip6f_last_uses = ip6f->ip6f_uses;
			ip6flow_addstats_rt(rt, ip6f);
			ip6f->ip6f_uses = 0;
			ip6f->ip6f_dropped = 0;
			ip6f->ip6f_forwarded = 0;
		}
		rtcache_unref(rt, &ip6f->ip6f_ro);
	}

	mutex_exit(&ip6flow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif
}

void
ip6flow_slowtimo(void)
{

	/* Avoid enqueuing another work when one is already enqueued */
	if (atomic_swap_uint(&ip6flow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ip6flow_slowtimo_wq, &ip6flow_slowtimo_wk, NULL);
}

/*
 * We have successfully forwarded a packet using the normal
 * IPv6 stack.  Now create/update a flow.
 */
void
ip6flow_create(struct route *ro, struct mbuf *m)
{
	const struct ip6_hdr *ip6;
	struct ip6flow *ip6f;
	size_t hash;

	ip6 = mtod(m, const struct ip6_hdr *);

#ifndef NET_MPSAFE
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ip6flow_lock);

	/*
	 * If IPv6 Fast Forward is disabled, don't create a flow.
	 * It can be disabled by setting net.inet6.ip6.maxflows to 0.
	 *
	 * Don't create a flow for ICMPv6 messages.
	 */
	if (ip6_maxflows == 0 || ip6->ip6_nxt == IPPROTO_IPV6_ICMP)
		goto out;

	/*
	 * See if an existing flow exists.  If so:
	 *	- Remove the flow
	 *	- Add flow statistics
	 *	- Free the route
	 *	- Reset statistics
	 *
	 * If a flow doesn't exist allocate a new one if
	 * ip6_maxflows hasn't reached its limit.  If it has
	 * been reached, reap some flows.
	 */
	ip6f = ip6flow_lookup(ip6);
	if (ip6f == NULL) {
		if (ip6flow_inuse >= ip6_maxflows) {
			ip6f = ip6flow_reap_locked(1);
		} else {
			ip6f = pool_get(&ip6flow_pool, PR_NOWAIT);
			if (ip6f == NULL)
				goto out;
			ip6flow_inuse++;
		}
		memset(ip6f, 0, sizeof(*ip6f));
	} else {
		IP6FLOW_REMOVE(ip6f);

		ip6flow_addstats(ip6f);
		rtcache_free(&ip6f->ip6f_ro);
		ip6f->ip6f_uses = 0;
		ip6f->ip6f_last_uses = 0;
		ip6f->ip6f_dropped = 0;
		ip6f->ip6f_forwarded = 0;
	}

	/*
	 * Fill in the updated/new details.
	 */
	rtcache_copy(&ip6f->ip6f_ro, ro);
	ip6f->ip6f_dst = ip6->ip6_dst;
	ip6f->ip6f_src = ip6->ip6_src;
	ip6f->ip6f_flow = ip6->ip6_flow;
	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ip6flow_hash(ip6);
	IP6FLOW_INSERT(&ip6flowtable[hash], ip6f);

 out:
	mutex_exit(&ip6flow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
#endif
}

/*
 * Invalidate/remove all flows - if new_size is positive we
 * resize the hash table.
 */
int
ip6flow_invalidate_all(int new_size)
{
	struct ip6flow *ip6f, *next_ip6f;
	int error;

	error = 0;

	mutex_enter(&ip6flow_lock);

	for (ip6f = LIST_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		next_ip6f = LIST_NEXT(ip6f, ip6f_list);
		ip6flow_free(ip6f);
	}

	if (new_size)
		error = ip6flow_init_locked(new_size);

	mutex_exit(&ip6flow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet6.ip6.maxflows.  Since
 * we could reduce this value, call ip6flow_reap().
 */
static int
sysctl_net_inet6_ip6_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif

	ip6flow_reap(0);

#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif

	return (0);
}

static int
sysctl_net_inet6_ip6_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip6_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

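	/*
	 * Only accept a non-zero power of two, so that ip6flow_hash()
	 * can use (ip6_hashsize - 1) as a mask.
	 */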
	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc().
		 */
#ifndef NET_MPSAFE
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);
#endif
		error = ip6flow_invalidate_all(tmp);
#ifndef NET_MPSAFE
		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);
#endif
	} else {
		/*
		 * EINVAL if not a power of 2.
		 */
		error = EINVAL;
	}

	return error;
}

static void
ip6flow_sysctl_init(struct sysctllog **clog)
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "inet6",
		SYSCTL_DESCR("PF_INET6 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET6, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ip6",
		SYSCTL_DESCR("IPv6 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxflows",
		SYSCTL_DESCR("Number of flows for fast forwarding (IPv6)"),
		sysctl_net_inet6_ip6_maxflows, 0, &ip6_maxflows, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "hashsize",
		SYSCTL_DESCR("Size of hash table for fast forwarding (IPv6)"),
		sysctl_net_inet6_ip6_hashsize, 0, &ip6_hashsize, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6,
		CTL_CREATE, CTL_EOL);
}