/*	$NetBSD: ip6_flow.c,v 1.32 2016/10/18 07:30:31 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am"). It was developed by Liam J. Foy
 * <liamjfoy (at) netbsd.org> and Matt Thomas <matt (at) netbsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * IPv6 version was developed by Liam J. Foy. Original source existed in IPv4
 * format developed by Matt Thomas. Thanks to Joerg Sonnenberger, Matt
 * Thomas and Christos Zoulas.
 *
 * Thanks to Liverpool John Moores University, especially Dr. David Llewellyn-Jones
 * for providing resources (to test) and Professor Madjid Merabti.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip6_flow.c,v 1.32 2016/10/18 07:30:31 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6_private.h>
/*
 * IPv6 Fast Forward caches/hashes flows from one source to destination.
 *
 * Upon a successful forward, IPv6FF caches and hashes details such as the
 * route, source and destination. Once another packet matching that source
 * and destination is received, it is passed straight to if_output using
 * the cached details.
 *
 * Example:
 *	ether/fddi_input -> ip6flow_fastforward -> if_output
 */

static struct pool ip6flow_pool;

TAILQ_HEAD(ip6flowhead, ip6flow);

/*
 * We could use IPv4 defines (IPFLOW_HASHBITS) but we'll
 * use our own (possibly for future expansion).
 */
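/* A cached flow is reclaimed IP6FLOW_TIMER ticks (5 seconds) after its last use. */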
#define IP6FLOW_TIMER		(5 * PR_SLOWHZ)
#define IP6FLOW_DEFAULT_HASHSIZE	(1 << IP6FLOW_HASHBITS)

/*
 * ip6_flow.c internal lock.
 * Using softnet_lock here would cause a recursive lock.
 *
 * This is a tentative workaround.
 * We should make it scalable somehow in the future.
 */
static kmutex_t ip6flow_lock;
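/* The flow hash table, the list of all active flows and their count. */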
static struct ip6flowhead *ip6flowtable = NULL;
static struct ip6flowhead ip6flowlist;
static int ip6flow_inuse;

static void ip6flow_slowtimo_work(struct work *, void *);
static struct workqueue *ip6flow_slowtimo_wq;
static struct work ip6flow_slowtimo_wk;

static int sysctl_net_inet6_ip6_hashsize(SYSCTLFN_PROTO);
static int sysctl_net_inet6_ip6_maxflows(SYSCTLFN_PROTO);
static void ip6flow_sysctl_init(struct sysctllog **);

/*
 * Insert an ip6flow into the list.
 */
#define IP6FLOW_INSERT(hashidx, ip6f) \
do { \
	(ip6f)->ip6f_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ip6flowtable[(hashidx)], (ip6f), ip6f_hash); \
	TAILQ_INSERT_HEAD(&ip6flowlist, (ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)

/*
 * Remove an ip6flow from the list.
 */
#define IP6FLOW_REMOVE(hashidx, ip6f) \
do { \
	TAILQ_REMOVE(&ip6flowtable[(hashidx)], (ip6f), ip6f_hash); \
	TAILQ_REMOVE(&ip6flowlist, (ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)

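/* Default limit on cached flows; the run-time value is ip6_maxflows. */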
#ifndef IP6FLOW_DEFAULT
#define IP6FLOW_DEFAULT		256
#endif

int ip6_maxflows = IP6FLOW_DEFAULT;
int ip6_hashsize = IP6FLOW_DEFAULT_HASHSIZE;

/*
 * Calculate hash table position.
 */
static size_t
ip6flow_hash(const struct ip6_hdr *ip6)
{
	size_t hash;
	uint32_t dst_sum, src_sum;
	size_t idx;

	src_sum = ip6->ip6_src.s6_addr32[0] + ip6->ip6_src.s6_addr32[1]
	    + ip6->ip6_src.s6_addr32[2] + ip6->ip6_src.s6_addr32[3];
	dst_sum = ip6->ip6_dst.s6_addr32[0] + ip6->ip6_dst.s6_addr32[1]
	    + ip6->ip6_dst.s6_addr32[2] + ip6->ip6_dst.s6_addr32[3];

	hash = ip6->ip6_flow;

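	/*
	 * Fold the source and destination sums into the hash,
	 * IP6FLOW_HASHBITS bits at a time, using complementary
	 * shifts so the two sums are mixed in differently.
	 */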
	for (idx = 0; idx < 32; idx += IP6FLOW_HASHBITS)
		hash += (dst_sum >> (32 - idx)) + (src_sum >> idx);

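	/* ip6_hashsize is a power of two (enforced by the hashsize sysctl). */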
	return hash & (ip6_hashsize-1);
}

/*
 * Check to see if a flow already exists - if so return it.
 */
static struct ip6flow *
ip6flow_lookup(const struct ip6_hdr *ip6)
{
	size_t hash;
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	hash = ip6flow_hash(ip6);

	TAILQ_FOREACH(ip6f, &ip6flowtable[hash], ip6f_hash) {
		if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6f->ip6f_dst)
		    && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6f->ip6f_src)
		    && ip6f->ip6f_flow == ip6->ip6_flow) {
			/* A cached flow has been found. */
			return ip6f;
		}
	}

	return NULL;
}

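/* Initialize the pool from which ip6flow entries are allocated. */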
void
ip6flow_poolinit(void)
{

	pool_init(&ip6flow_pool, sizeof(struct ip6flow), 0, 0, 0, "ip6flowpl",
	    NULL, IPL_NET);
}

/*
 * Allocate memory and initialise lists. This function is called
 * from ip6_init and thereafter to resize the hash table.
 * If a newly sized table cannot be malloc'ed we just continue
 * to use the old one.
 */
static int
ip6flow_init_locked(int table_size)
{
	struct ip6flowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ip6flow_lock));

	new_table = (struct ip6flowhead *)malloc(sizeof(struct ip6flowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ip6flowtable != NULL)
		free(ip6flowtable, M_RTABLE);

	ip6flowtable = new_table;
	ip6_hashsize = table_size;

	TAILQ_INIT(&ip6flowlist);
	for (i = 0; i < ip6_hashsize; i++)
		TAILQ_INIT(&ip6flowtable[i]);

	return 0;
}

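/*
 * Create the slowtimo workqueue and the internal lock, then size the
 * hash table and register the sysctl variables.
 */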
int
ip6flow_init(int table_size)
{
	int ret, error;

	error = workqueue_create(&ip6flow_slowtimo_wq, "ip6flow_slowtimo",
	    ip6flow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ip6flow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ip6flow_lock);
	ret = ip6flow_init_locked(table_size);
	mutex_exit(&ip6flow_lock);
	ip6flow_sysctl_init(NULL);

	return ret;
}

/*
 * IPv6 Fast Forward routine. Attempt to forward the packet -
 * if any problems are found, return to the main IPv6 input
 * routine, which will deal with the packet.
 */
int
ip6flow_fastforward(struct mbuf **mp)
{
	struct ip6flow *ip6f;
	struct ip6_hdr *ip6;
	struct rtentry *rt;
	struct mbuf *m;
	const struct sockaddr *dst;
	int error;
	int ret = 0;

	mutex_enter(&ip6flow_lock);

	/*
	 * Are we forwarding packets and have flows?
	 */
	if (!ip6_forwarding || ip6flow_inuse == 0)
		goto out;

	m = *mp;
	/*
	 * At least size of IPv6 Header?
	 */
	if (m->m_len < sizeof(struct ip6_hdr))
		goto out;
	/*
	 * Was packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

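	/*
	 * Make sure the IPv6 header is contiguous in the first mbuf
	 * and properly aligned before it is dereferenced below.
	 */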
	if (IP6_HDR_ALIGNED_P(mtod(m, const void *)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			goto out;
		}
		*mp = m;
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			goto out;
		}
		*mp = m;
	}

	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		/* Bad version. */
		goto out;
	}

	/*
	 * If we have a hop-by-hop extension we must process it.
	 * We just leave this up to ip6_input to deal with.
	 */
	if (ip6->ip6_nxt == IPPROTO_HOPOPTS)
		goto out;

	/*
	 * Attempt to find a flow.
	 */
	if ((ip6f = ip6flow_lookup(ip6)) == NULL) {
		/* No flow found. */
		goto out;
	}

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & RTF_BLACKHOLE) != 0)
		goto out;

	/*
	 * Packet size greater than MTU?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu) {
		/* Return to main IPv6 input function. */
		goto out;
	}

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

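	/*
	 * If decrementing the hop limit would take it to zero, punt
	 * to the slow path so it can generate an ICMPv6 time
	 * exceeded error.
	 */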
	if (ip6->ip6_hlim <= IPV6_HLIMDEC)
		goto out;

	/* Decrement hop limit (same as TTL) */
	ip6->ip6_hlim -= IPV6_HLIMDEC;

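	/* Send to the gateway for indirect routes, else to the final destination. */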
	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ip6f->ip6f_ro);

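	/* The flow is in use again - rearm its expiry timer. */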
	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	ip6f->ip6f_uses++;

#if 0
	/*
	 * We use FIFO cache replacement instead of LRU, the same as
	 * ip_flow.c.
	 */
	/* Move to head (LRU) of ip6flowlist. ip6flowtable does not do LRU. */
	TAILQ_REMOVE(&ip6flowlist, ip6f, ip6f_list);
	TAILQ_INSERT_HEAD(&ip6flowlist, ip6f, ip6f_list);
#endif

	/* Send on its way - straight to the interface output routine. */
	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		ip6f->ip6f_dropped++;
	} else {
		ip6f->ip6f_forwarded++;
	}
	ret = 1;
 out:
	mutex_exit(&ip6flow_lock);
	return ret;
}

/*
 * Add the IPv6 flow statistics to the main IPv6 statistics.
 */
static void
ip6flow_addstats(const struct ip6flow *ip6f)
{
	struct rtentry *rt;
	uint64_t *ip6s;

	if ((rt = rtcache_validate(&ip6f->ip6f_ro)) != NULL)
		rt->rt_use += ip6f->ip6f_uses;
	ip6s = IP6_STAT_GETREF();
	ip6s[IP6_STAT_FASTFORWARDFLOWS] = ip6flow_inuse;
	ip6s[IP6_STAT_CANTFORWARD] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_ODROPPED] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_TOTAL] += ip6f->ip6f_uses;
	ip6s[IP6_STAT_FORWARD] += ip6f->ip6f_forwarded;
	ip6s[IP6_STAT_FASTFORWARD] += ip6f->ip6f_forwarded;
	IP6_STAT_PUTREF();
}

/*
 * Add statistics and free the flow.
 */
static void
ip6flow_free(struct ip6flow *ip6f)
{

	KASSERT(mutex_owned(&ip6flow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

	ip6flow_inuse--;
	ip6flow_addstats(ip6f);
	rtcache_free(&ip6f->ip6f_ro);
	pool_put(&ip6flow_pool, ip6f);
}

static struct ip6flow *
ip6flow_reap_locked(int just_one)
{
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	/*
	 * This case must remove just one ip6flow. Furthermore, it is
	 * used in the fast path (packet processing path), so simply
	 * remove the TAILQ_LAST entry.
	 */
	if (just_one) {
		ip6f = TAILQ_LAST(&ip6flowlist, ip6flowhead);
		KASSERT(ip6f != NULL);

		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		ip6flow_addstats(ip6f);
		rtcache_free(&ip6f->ip6f_ro);
		return ip6f;
	}

	/*
	 * This case is used in the slow path (sysctl). First reclaim
	 * any ip6flow whose rtcache is no longer valid; otherwise remove
	 * the ip6flow known to be least recently used, found by starting
	 * from TAILQ_LAST and comparing timers and last_uses.
	 */
	while (ip6flow_inuse > ip6_maxflows) {
		struct ip6flow *maybe_ip6f = TAILQ_LAST(&ip6flowlist, ip6flowhead);

		TAILQ_FOREACH(ip6f, &ip6flowlist, ip6f_list) {
			/*
			 * If this no longer points to a valid route -
			 * reclaim it.
			 */
			if (rtcache_validate(&ip6f->ip6f_ro) == NULL)
				goto done;
			/*
			 * choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (ip6f->ip6f_timer < maybe_ip6f->ip6f_timer
			    || ((ip6f->ip6f_timer == maybe_ip6f->ip6f_timer)
			    && (ip6f->ip6f_last_uses + ip6f->ip6f_uses
			    < maybe_ip6f->ip6f_last_uses + maybe_ip6f->ip6f_uses)))
				maybe_ip6f = ip6f;
		}
		ip6f = maybe_ip6f;
	done:
		/*
		 * Remove the entry from the flow table
		 */
		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		rtcache_free(&ip6f->ip6f_ro);
		ip6flow_inuse--;
		ip6flow_addstats(ip6f);
		pool_put(&ip6flow_pool, ip6f);
	}
	return NULL;
}

/*
 * Reap one or more flows - ip6flow_reap may remove
 * multiple flows if net.inet6.ip6.maxflows is reduced.
 */
struct ip6flow *
ip6flow_reap(int just_one)
{
	struct ip6flow *ip6f;

	mutex_enter(&ip6flow_lock);
	ip6f = ip6flow_reap_locked(just_one);
	mutex_exit(&ip6flow_lock);
	return ip6f;
}

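/* Nonzero while a slowtimo work item is queued; prevents double enqueue. */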
static unsigned int ip6flow_work_enqueued = 0;

void
ip6flow_slowtimo_work(struct work *wk, void *arg)
{
	struct ip6flow *ip6f, *next_ip6f;

	/* We can allow enqueuing another work at this point */
	atomic_swap_uint(&ip6flow_work_enqueued, 0);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ip6flow_lock);

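	/*
	 * Walk all flows: free those that have expired or whose cached
	 * route is no longer valid; for the rest, fold the per-interval
	 * counters into the global statistics and reset them.
	 */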
	for (ip6f = TAILQ_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		next_ip6f = TAILQ_NEXT(ip6f, ip6f_list);
		if (PRT_SLOW_ISEXPIRED(ip6f->ip6f_timer) ||
		    rtcache_validate(&ip6f->ip6f_ro) == NULL) {
			ip6flow_free(ip6f);
		} else {
			ip6f->ip6f_last_uses = ip6f->ip6f_uses;
			ip6flow_addstats(ip6f);
			ip6f->ip6f_uses = 0;
			ip6f->ip6f_dropped = 0;
			ip6f->ip6f_forwarded = 0;
		}
	}

	mutex_exit(&ip6flow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif
}

void
ip6flow_slowtimo(void)
{

	/* Avoid enqueuing another work when one is already enqueued */
	if (atomic_swap_uint(&ip6flow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ip6flow_slowtimo_wq, &ip6flow_slowtimo_wk, NULL);
}

/*
 * We have successfully forwarded a packet using the normal
 * IPv6 stack. Now create/update a flow.
 */
void
ip6flow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip6_hdr *ip6;
	struct ip6flow *ip6f;
	size_t hash;

	ip6 = mtod(m, const struct ip6_hdr *);

#ifndef NET_MPSAFE
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ip6flow_lock);

	/*
	 * If IPv6 Fast Forward is disabled, don't create a flow.
	 * It can be disabled by setting net.inet6.ip6.maxflows to 0.
	 *
	 * Don't create a flow for ICMPv6 messages.
	 */
	if (ip6_maxflows == 0 || ip6->ip6_nxt == IPPROTO_IPV6_ICMP)
		goto out;

	/*
	 * See if a flow already exists. If so:
	 * - Remove the flow
	 * - Add flow statistics
	 * - Free the route
	 * - Reset statistics
	 *
	 * If a flow doesn't exist, allocate a new one, provided
	 * ip6_maxflows hasn't been reached. If it has been
	 * reached, reap some flows.
	 */
	ip6f = ip6flow_lookup(ip6);
	if (ip6f == NULL) {
		if (ip6flow_inuse >= ip6_maxflows) {
			ip6f = ip6flow_reap_locked(1);
		} else {
			ip6f = pool_get(&ip6flow_pool, PR_NOWAIT);
			if (ip6f == NULL)
				goto out;
			ip6flow_inuse++;
		}
		memset(ip6f, 0, sizeof(*ip6f));
	} else {
		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		ip6flow_addstats(ip6f);
		rtcache_free(&ip6f->ip6f_ro);
		ip6f->ip6f_uses = 0;
		ip6f->ip6f_last_uses = 0;
		ip6f->ip6f_dropped = 0;
		ip6f->ip6f_forwarded = 0;
	}

	/*
	 * Fill in the updated/new details.
	 */
	rtcache_copy(&ip6f->ip6f_ro, ro);
	ip6f->ip6f_dst = ip6->ip6_dst;
	ip6f->ip6f_src = ip6->ip6_src;
	ip6f->ip6f_flow = ip6->ip6_flow;
	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ip6flow_hash(ip6);
	IP6FLOW_INSERT(hash, ip6f);

 out:
	mutex_exit(&ip6flow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
#endif
}

/*
 * Invalidate/remove all flows - if new_size is positive we
 * resize the hash table.
 */
int
ip6flow_invalidate_all(int new_size)
{
	struct ip6flow *ip6f, *next_ip6f;
	int error;

	error = 0;

	mutex_enter(&ip6flow_lock);

	for (ip6f = TAILQ_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		next_ip6f = TAILQ_NEXT(ip6f, ip6f_list);
		ip6flow_free(ip6f);
	}

	if (new_size)
		error = ip6flow_init_locked(new_size);

	mutex_exit(&ip6flow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet6.ip6.maxflows. Since
 * the value could have been reduced, call ip6flow_reap().
 */
static int
sysctl_net_inet6_ip6_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif

	ip6flow_reap(0);

#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif

	return (0);
}

static int
sysctl_net_inet6_ip6_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip6_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc()
		 */
#ifndef NET_MPSAFE
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);
#endif
		error = ip6flow_invalidate_all(tmp);
#ifndef NET_MPSAFE
		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);
#endif
	} else {
		/*
		 * EINVAL if not a power of 2
		 */
		error = EINVAL;
	}

	return error;
}

static void
ip6flow_sysctl_init(struct sysctllog **clog)
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "inet6",
		SYSCTL_DESCR("PF_INET6 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET6, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ip6",
		SYSCTL_DESCR("IPv6 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxflows",
		SYSCTL_DESCR("Number of flows for fast forwarding (IPv6)"),
		sysctl_net_inet6_ip6_maxflows, 0, &ip6_maxflows, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "hashsize",
		SYSCTL_DESCR("Size of hash table for fast forwarding (IPv6)"),
		sysctl_net_inet6_ip6_hashsize, 0, &ip6_hashsize, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6,
		CTL_CREATE, CTL_EOL);
}