ip_mroute.c revision 1.160 1 /* $NetBSD: ip_mroute.c,v 1.160 2018/06/21 10:37:50 knakahara Exp $ */
2
3 /*
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Stephen Deering of Stanford University.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)ip_mroute.c 8.2 (Berkeley) 11/15/93
35 */
36
37 /*
38 * Copyright (c) 1989 Stephen Deering
39 *
40 * This code is derived from software contributed to Berkeley by
41 * Stephen Deering of Stanford University.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by the University of
54 * California, Berkeley and its contributors.
55 * 4. Neither the name of the University nor the names of its contributors
56 * may be used to endorse or promote products derived from this software
57 * without specific prior written permission.
58 *
59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
70 *
71 * @(#)ip_mroute.c 8.2 (Berkeley) 11/15/93
72 */
73
74 /*
75 * IP multicast forwarding procedures
76 *
77 * Written by David Waitzman, BBN Labs, August 1988.
78 * Modified by Steve Deering, Stanford, February 1989.
79 * Modified by Mark J. Steiglitz, Stanford, May, 1991
80 * Modified by Van Jacobson, LBL, January 1993
81 * Modified by Ajit Thyagarajan, PARC, August 1993
82 * Modified by Bill Fenner, PARC, April 1994
83 * Modified by Charles M. Hannum, NetBSD, May 1995.
84 * Modified by Ahmed Helmy, SGI, June 1996
85 * Modified by George Edmond Eddy (Rusty), ISI, February 1998
86 * Modified by Pavlin Radoslavov, USC/ISI, May 1998, August 1999, October 2000
87 * Modified by Hitoshi Asaeda, WIDE, August 2000
88 * Modified by Pavlin Radoslavov, ICSI, October 2002
89 *
90 * MROUTING Revision: 1.2
91 * and PIM-SMv2 and PIM-DM support, advanced API support,
92 * bandwidth metering and signaling
93 */
94
95 #include <sys/cdefs.h>
96 __KERNEL_RCSID(0, "$NetBSD: ip_mroute.c,v 1.160 2018/06/21 10:37:50 knakahara Exp $");
97
98 #ifdef _KERNEL_OPT
99 #include "opt_inet.h"
100 #include "opt_ipsec.h"
101 #include "opt_pim.h"
102 #endif
103
104 #ifdef PIM
105 #define _PIM_VT 1
106 #endif
107
108 #include <sys/param.h>
109 #include <sys/systm.h>
110 #include <sys/callout.h>
111 #include <sys/mbuf.h>
112 #include <sys/socket.h>
113 #include <sys/socketvar.h>
114 #include <sys/errno.h>
115 #include <sys/time.h>
116 #include <sys/kernel.h>
117 #include <sys/kmem.h>
118 #include <sys/ioctl.h>
119 #include <sys/syslog.h>
120
121 #include <net/if.h>
122 #include <net/raw_cb.h>
123
124 #include <netinet/in.h>
125 #include <netinet/in_var.h>
126 #include <netinet/in_systm.h>
127 #include <netinet/ip.h>
128 #include <netinet/ip_var.h>
129 #include <netinet/in_pcb.h>
130 #include <netinet/udp.h>
131 #include <netinet/igmp.h>
132 #include <netinet/igmp_var.h>
133 #include <netinet/ip_mroute.h>
134 #ifdef PIM
135 #include <netinet/pim.h>
136 #include <netinet/pim_var.h>
137 #endif
138 #include <netinet/ip_encap.h>
139
140 #ifdef IPSEC
141 #include <netipsec/ipsec.h>
142 #include <netipsec/key.h>
143 #endif
144
145 #define IP_MULTICASTOPTS 0
146 #define M_PULLUP(m, len) \
147 do { \
148 if ((m) && ((m)->m_flags & M_EXT || (m)->m_len < (len))) \
149 (m) = m_pullup((m), (len)); \
150 } while (/*CONSTCOND*/ 0)
151
152 /*
153 * Globals. All but ip_mrouter and ip_mrtproto could be static,
154 * except for netstat or debugging purposes.
155 */
156 struct socket *ip_mrouter = NULL;
157 int ip_mrtproto = IGMP_DVMRP; /* for netstat only */
158
159 #define MFCHASH(a, g) \
160 ((((a).s_addr >> 20) ^ ((a).s_addr >> 10) ^ (a).s_addr ^ \
161 ((g).s_addr >> 20) ^ ((g).s_addr >> 10) ^ (g).s_addr) & mfchash)
162 LIST_HEAD(mfchashhdr, mfc) *mfchashtbl;
163 u_long mfchash;
164
165 u_char nexpire[MFCTBLSIZ];
166 struct vif viftable[MAXVIFS];
167 struct mrtstat mrtstat;
168 u_int mrtdebug = 0; /* debug level */
169 #define DEBUG_MFC 0x02
170 #define DEBUG_FORWARD 0x04
171 #define DEBUG_EXPIRE 0x08
172 #define DEBUG_XMIT 0x10
173 #define DEBUG_PIM 0x20
174
175 #define VIFI_INVALID ((vifi_t) -1)
176
177 u_int tbfdebug = 0; /* tbf debug level */
178
179 /* vif attachment using sys/netinet/ip_encap.c */
180 static void vif_input(struct mbuf *, int, int, void *);
181 static int vif_encapcheck(struct mbuf *, int, int, void *);
182
183 static const struct encapsw vif_encapsw = {
184 .encapsw4 = {
185 .pr_input = vif_input,
186 .pr_ctlinput = NULL,
187 }
188 };
189
190 #define EXPIRE_TIMEOUT (hz / 4) /* 4x / second */
191 #define UPCALL_EXPIRE 6 /* number of timeouts */
192
193 /*
194 * Define the token bucket filter structures
195 */
196
197 #define TBF_REPROCESS (hz / 100) /* 100x / second */
198
199 static int get_sg_cnt(struct sioc_sg_req *);
200 static int get_vif_cnt(struct sioc_vif_req *);
201 static int ip_mrouter_init(struct socket *, int);
202 static int set_assert(int);
203 static int add_vif(struct vifctl *);
204 static int del_vif(vifi_t *);
205 static void update_mfc_params(struct mfc *, struct mfcctl2 *);
206 static void init_mfc_params(struct mfc *, struct mfcctl2 *);
207 static void expire_mfc(struct mfc *);
208 static int add_mfc(struct sockopt *);
209 #ifdef UPCALL_TIMING
210 static void collate(struct timeval *);
211 #endif
212 static int del_mfc(struct sockopt *);
213 static int set_api_config(struct sockopt *); /* chose API capabilities */
214 static int socket_send(struct socket *, struct mbuf *, struct sockaddr_in *);
215 static void expire_upcalls(void *);
216 static int ip_mdq(struct mbuf *, struct ifnet *, struct mfc *);
217 static void phyint_send(struct ip *, struct vif *, struct mbuf *);
218 static void encap_send(struct ip *, struct vif *, struct mbuf *);
219 static void tbf_control(struct vif *, struct mbuf *, struct ip *, u_int32_t);
220 static void tbf_queue(struct vif *, struct mbuf *);
221 static void tbf_process_q(struct vif *);
222 static void tbf_reprocess_q(void *);
223 static int tbf_dq_sel(struct vif *, struct ip *);
224 static void tbf_send_packet(struct vif *, struct mbuf *);
225 static void tbf_update_tokens(struct vif *);
226 static int priority(struct vif *, struct ip *);
227
228 /*
229 * Bandwidth monitoring
230 */
231 static void free_bw_list(struct bw_meter *);
232 static int add_bw_upcall(struct bw_upcall *);
233 static int del_bw_upcall(struct bw_upcall *);
234 static void bw_meter_receive_packet(struct bw_meter *, int , struct timeval *);
235 static void bw_meter_prepare_upcall(struct bw_meter *, struct timeval *);
236 static void bw_upcalls_send(void);
237 static void schedule_bw_meter(struct bw_meter *, struct timeval *);
238 static void unschedule_bw_meter(struct bw_meter *);
239 static void bw_meter_process(void);
240 static void expire_bw_upcalls_send(void *);
241 static void expire_bw_meter_process(void *);
242
243 #ifdef PIM
244 static int pim_register_send(struct ip *, struct vif *,
245 struct mbuf *, struct mfc *);
246 static int pim_register_send_rp(struct ip *, struct vif *,
247 struct mbuf *, struct mfc *);
248 static int pim_register_send_upcall(struct ip *, struct vif *,
249 struct mbuf *, struct mfc *);
250 static struct mbuf *pim_register_prepare(struct ip *, struct mbuf *);
251 #endif
252
253 #define ENCAP_TTL 64
254 #define ENCAP_PROTO IPPROTO_IPIP
255
256 /* prototype IP hdr for encapsulated packets */
257 static const struct ip multicast_encap_iphdr = {
258 .ip_hl = sizeof(struct ip) >> 2,
259 .ip_v = IPVERSION,
260 .ip_len = sizeof(struct ip),
261 .ip_ttl = ENCAP_TTL,
262 .ip_p = ENCAP_PROTO,
263 };
264
265 /*
266 * Bandwidth meter variables and constants
267 */
268
269 /*
270 * Pending timeouts are stored in a hash table, the key being the
271 * expiration time. Periodically, the entries are analysed and processed.
272 */
273 #define BW_METER_BUCKETS 1024
274 static struct bw_meter *bw_meter_timers[BW_METER_BUCKETS];
275 struct callout bw_meter_ch;
276 #define BW_METER_PERIOD (hz) /* periodical handling of bw meters */
277
278 /*
279 * Pending upcalls are stored in a vector which is flushed when
280 * full, or periodically
281 */
282 static struct bw_upcall bw_upcalls[BW_UPCALLS_MAX];
283 static u_int bw_upcalls_n; /* # of pending upcalls */
284 struct callout bw_upcalls_ch;
285 #define BW_UPCALLS_PERIOD (hz) /* periodical flush of bw upcalls */
286
287 #ifdef PIM
288 struct pimstat pimstat;
289
290 /*
291 * Note: the PIM Register encapsulation adds the following in front of a
292 * data packet:
293 *
294 * struct pim_encap_hdr {
295 * struct ip ip;
296 * struct pim_encap_pimhdr pim;
297 * }
298 */
299
300 struct pim_encap_pimhdr {
301 struct pim pim;
302 uint32_t flags;
303 };
304
305 static struct ip pim_encap_iphdr = {
306 .ip_v = IPVERSION,
307 .ip_hl = sizeof(struct ip) >> 2,
308 .ip_len = sizeof(struct ip),
309 .ip_ttl = ENCAP_TTL,
310 .ip_p = IPPROTO_PIM,
311 };
312
313 static struct pim_encap_pimhdr pim_encap_pimhdr = {
314 {
315 PIM_MAKE_VT(PIM_VERSION, PIM_REGISTER), /* PIM vers and message type */
316 0, /* reserved */
317 0, /* checksum */
318 },
319 0 /* flags */
320 };
321
322 static struct ifnet multicast_register_if;
323 static vifi_t reg_vif_num = VIFI_INVALID;
324 #endif /* PIM */
325
326
327 /*
328 * Private variables.
329 */
330 static vifi_t numvifs = 0;
331
332 static struct callout expire_upcalls_ch;
333
334 /*
335 * whether or not special PIM assert processing is enabled.
336 */
337 static int pim_assert;
338 /*
339 * Rate limit for assert notification messages, in usec
340 */
341 #define ASSERT_MSG_TIME 3000000
342
343 /*
344 * Kernel multicast routing API capabilities and setup.
345 * If more API capabilities are added to the kernel, they should be
346 * recorded in `mrt_api_support'.
347 */
348 static const u_int32_t mrt_api_support = (MRT_MFC_FLAGS_DISABLE_WRONGVIF |
349 MRT_MFC_FLAGS_BORDER_VIF |
350 MRT_MFC_RP |
351 MRT_MFC_BW_UPCALL);
352 static u_int32_t mrt_api_config = 0;
353
354 /*
355 * Find a route for a given origin IP address and Multicast group address
356 * Type of service parameter to be added in the future!!!
357 * Statistics are updated by the caller if needed
358 * (mrtstat.mrts_mfc_lookups and mrtstat.mrts_mfc_misses)
359 */
360 static struct mfc *
361 mfc_find(struct in_addr *o, struct in_addr *g)
362 {
363 struct mfc *rt;
364
365 LIST_FOREACH(rt, &mfchashtbl[MFCHASH(*o, *g)], mfc_hash) {
366 if (in_hosteq(rt->mfc_origin, *o) &&
367 in_hosteq(rt->mfc_mcastgrp, *g) &&
368 (rt->mfc_stall == NULL))
369 break;
370 }
371
372 return rt;
373 }
374
375 /*
376 * Macros to compute elapsed time efficiently
377 * Borrowed from Van Jacobson's scheduling code
378 */
379 #define TV_DELTA(a, b, delta) do { \
380 int xxs; \
381 delta = (a).tv_usec - (b).tv_usec; \
382 xxs = (a).tv_sec - (b).tv_sec; \
383 switch (xxs) { \
384 case 2: \
385 delta += 1000000; \
386 /* fall through */ \
387 case 1: \
388 delta += 1000000; \
389 /* fall through */ \
390 case 0: \
391 break; \
392 default: \
393 delta += (1000000 * xxs); \
394 break; \
395 } \
396 } while (/*CONSTCOND*/ 0)
397
398 #ifdef UPCALL_TIMING
399 u_int32_t upcall_data[51];
400 #endif /* UPCALL_TIMING */
401
402 /*
403 * Handle MRT setsockopt commands to modify the multicast routing tables.
404 */
405 int
406 ip_mrouter_set(struct socket *so, struct sockopt *sopt)
407 {
408 int error;
409 int optval;
410 struct vifctl vifc;
411 vifi_t vifi;
412 struct bw_upcall bwuc;
413
414 if (sopt->sopt_name != MRT_INIT && so != ip_mrouter)
415 error = ENOPROTOOPT;
416 else {
417 switch (sopt->sopt_name) {
418 case MRT_INIT:
419 error = sockopt_getint(sopt, &optval);
420 if (error)
421 break;
422
423 error = ip_mrouter_init(so, optval);
424 break;
425 case MRT_DONE:
426 error = ip_mrouter_done();
427 break;
428 case MRT_ADD_VIF:
429 error = sockopt_get(sopt, &vifc, sizeof(vifc));
430 if (error)
431 break;
432 error = add_vif(&vifc);
433 break;
434 case MRT_DEL_VIF:
435 error = sockopt_get(sopt, &vifi, sizeof(vifi));
436 if (error)
437 break;
438 error = del_vif(&vifi);
439 break;
440 case MRT_ADD_MFC:
441 error = add_mfc(sopt);
442 break;
443 case MRT_DEL_MFC:
444 error = del_mfc(sopt);
445 break;
446 case MRT_ASSERT:
447 error = sockopt_getint(sopt, &optval);
448 if (error)
449 break;
450 error = set_assert(optval);
451 break;
452 case MRT_API_CONFIG:
453 error = set_api_config(sopt);
454 break;
455 case MRT_ADD_BW_UPCALL:
456 error = sockopt_get(sopt, &bwuc, sizeof(bwuc));
457 if (error)
458 break;
459 error = add_bw_upcall(&bwuc);
460 break;
461 case MRT_DEL_BW_UPCALL:
462 error = sockopt_get(sopt, &bwuc, sizeof(bwuc));
463 if (error)
464 break;
465 error = del_bw_upcall(&bwuc);
466 break;
467 default:
468 error = ENOPROTOOPT;
469 break;
470 }
471 }
472 return error;
473 }
474
475 /*
476 * Handle MRT getsockopt commands
477 */
478 int
479 ip_mrouter_get(struct socket *so, struct sockopt *sopt)
480 {
481 int error;
482
483 if (so != ip_mrouter)
484 error = ENOPROTOOPT;
485 else {
486 switch (sopt->sopt_name) {
487 case MRT_VERSION:
488 error = sockopt_setint(sopt, 0x0305); /* XXX !!!! */
489 break;
490 case MRT_ASSERT:
491 error = sockopt_setint(sopt, pim_assert);
492 break;
493 case MRT_API_SUPPORT:
494 error = sockopt_set(sopt, &mrt_api_support,
495 sizeof(mrt_api_support));
496 break;
497 case MRT_API_CONFIG:
498 error = sockopt_set(sopt, &mrt_api_config,
499 sizeof(mrt_api_config));
500 break;
501 default:
502 error = ENOPROTOOPT;
503 break;
504 }
505 }
506 return error;
507 }
508
509 /*
510 * Handle ioctl commands to obtain information from the cache
511 */
512 int
513 mrt_ioctl(struct socket *so, u_long cmd, void *data)
514 {
515 int error;
516
517 if (so != ip_mrouter)
518 error = EINVAL;
519 else
520 switch (cmd) {
521 case SIOCGETVIFCNT:
522 error = get_vif_cnt((struct sioc_vif_req *)data);
523 break;
524 case SIOCGETSGCNT:
525 error = get_sg_cnt((struct sioc_sg_req *)data);
526 break;
527 default:
528 error = EINVAL;
529 break;
530 }
531
532 return error;
533 }
534
535 /*
536 * returns the packet, byte, rpf-failure count for the source group provided
537 */
538 static int
539 get_sg_cnt(struct sioc_sg_req *req)
540 {
541 int s;
542 struct mfc *rt;
543
544 s = splsoftnet();
545 rt = mfc_find(&req->src, &req->grp);
546 if (rt == NULL) {
547 splx(s);
548 req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff;
549 return EADDRNOTAVAIL;
550 }
551 req->pktcnt = rt->mfc_pkt_cnt;
552 req->bytecnt = rt->mfc_byte_cnt;
553 req->wrong_if = rt->mfc_wrong_if;
554 splx(s);
555
556 return 0;
557 }
558
559 /*
560 * returns the input and output packet and byte counts on the vif provided
561 */
562 static int
563 get_vif_cnt(struct sioc_vif_req *req)
564 {
565 vifi_t vifi = req->vifi;
566
567 if (vifi >= numvifs)
568 return EINVAL;
569
570 req->icount = viftable[vifi].v_pkt_in;
571 req->ocount = viftable[vifi].v_pkt_out;
572 req->ibytes = viftable[vifi].v_bytes_in;
573 req->obytes = viftable[vifi].v_bytes_out;
574
575 return 0;
576 }
577
578 /*
579 * Enable multicast routing
580 */
/*
 * Enable multicast routing (MRT_INIT).
 *
 * `so' must be a raw IGMP socket and `v' -- the API version requested
 * by the routing daemon -- must be 1.  On success `so' becomes the
 * registered router socket (ip_mrouter), the MFC hash table is
 * allocated, and the periodic callouts for upcall expiry and
 * bandwidth metering are armed.
 *
 * Returns 0 on success, EOPNOTSUPP for a wrong socket type/protocol,
 * EINVAL for an unsupported version, or EADDRINUSE when a router
 * socket is already registered.
 */
static int
ip_mrouter_init(struct socket *so, int v)
{
	if (mrtdebug)
		log(LOG_DEBUG,
		    "ip_mrouter_init: so_type = %d, pr_protocol = %d\n",
		    so->so_type, so->so_proto->pr_protocol);

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_IGMP)
		return EOPNOTSUPP;

	if (v != 1)
		return EINVAL;

	/* Only one routing daemon may be registered at a time. */
	if (ip_mrouter != NULL)
		return EADDRINUSE;

	ip_mrouter = so;

	mfchashtbl = hashinit(MFCTBLSIZ, HASH_LIST, true, &mfchash);
	memset((void *)nexpire, 0, sizeof(nexpire));

	pim_assert = 0;

	/* Arm the periodic timers: upcall expiry, bw upcall flush,
	 * and bw meter processing. */
	callout_init(&expire_upcalls_ch, 0);
	callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT,
	    expire_upcalls, NULL);

	callout_init(&bw_upcalls_ch, 0);
	callout_reset(&bw_upcalls_ch, BW_UPCALLS_PERIOD,
	    expire_bw_upcalls_send, NULL);

	callout_init(&bw_meter_ch, 0);
	callout_reset(&bw_meter_ch, BW_METER_PERIOD,
	    expire_bw_meter_process, NULL);

	if (mrtdebug)
		log(LOG_DEBUG, "ip_mrouter_init\n");

	return 0;
}
623
624 /*
625 * Disable multicast routing
626 */
/*
 * Disable multicast routing (MRT_DONE).
 *
 * Tears down everything ip_mrouter_init() and subsequent daemon
 * configuration set up: resets every active vif, stops the periodic
 * callouts, frees every MFC entry (including queued upcall packets
 * via expire_mfc()), releases the hash table, clears the bandwidth
 * meter/upcall state and finally unregisters the router socket.
 * Always returns 0.
 */
int
ip_mrouter_done(void)
{
	vifi_t vifi;
	struct vif *vifp;
	int i;
	int s;

	s = splsoftnet();

	/* Clear out all the vifs currently in use. */
	for (vifi = 0; vifi < numvifs; vifi++) {
		vifp = &viftable[vifi];
		if (!in_nullhost(vifp->v_lcl_addr))
			reset_vif(vifp);
	}

	numvifs = 0;
	pim_assert = 0;
	mrt_api_config = 0;

	callout_stop(&expire_upcalls_ch);
	callout_stop(&bw_upcalls_ch);
	callout_stop(&bw_meter_ch);

	/*
	 * Free all multicast forwarding cache entries.
	 */
	for (i = 0; i < MFCTBLSIZ; i++) {
		struct mfc *rt, *nrt;

		/* Grab the successor first: expire_mfc() frees rt. */
		for (rt = LIST_FIRST(&mfchashtbl[i]); rt; rt = nrt) {
			nrt = LIST_NEXT(rt, mfc_hash);

			expire_mfc(rt);
		}
	}

	memset((void *)nexpire, 0, sizeof(nexpire));
	hashdone(mfchashtbl, HASH_LIST, mfchash);
	mfchashtbl = NULL;

	bw_upcalls_n = 0;
	memset(bw_meter_timers, 0, sizeof(bw_meter_timers));

	/* Reset de-encapsulation cache. */

	ip_mrouter = NULL;

	splx(s);

	if (mrtdebug)
		log(LOG_DEBUG, "ip_mrouter_done\n");

	return 0;
}
683
/*
 * Called when interface `ifp' goes away: drop every reference the
 * multicast router keeps to it.  Any vif bound to the interface is
 * reset, and pending upcall packets that arrived on it have their
 * ifp pointer cleared so they are not forwarded through a dead
 * interface when (or if) the route resolves.
 */
void
ip_mrouter_detach(struct ifnet *ifp)
{
	int vifi, i;
	struct vif *vifp;
	struct mfc *rt;
	struct rtdetq *rte;

	/* XXX not sure about side effect to userland routing daemon */
	for (vifi = 0; vifi < numvifs; vifi++) {
		vifp = &viftable[vifi];
		if (vifp->v_ifp == ifp)
			reset_vif(vifp);
	}
	/* nexpire[i] != 0 means bucket i holds unresolved entries
	 * with stalled packets that may still reference ifp. */
	for (i = 0; i < MFCTBLSIZ; i++) {
		if (nexpire[i] == 0)
			continue;
		LIST_FOREACH(rt, &mfchashtbl[i], mfc_hash) {
			for (rte = rt->mfc_stall; rte; rte = rte->next) {
				if (rte->ifp == ifp)
					rte->ifp = NULL;
			}
		}
	}
}
709
710 /*
711 * Set PIM assert processing global
712 */
713 static int
714 set_assert(int i)
715 {
716 pim_assert = !!i;
717 return 0;
718 }
719
720 /*
721 * Configure API capabilities
722 */
723 static int
724 set_api_config(struct sockopt *sopt)
725 {
726 u_int32_t apival;
727 int i, error;
728
729 /*
730 * We can set the API capabilities only if it is the first operation
731 * after MRT_INIT. I.e.:
732 * - there are no vifs installed
733 * - pim_assert is not enabled
734 * - the MFC table is empty
735 */
736 error = sockopt_get(sopt, &apival, sizeof(apival));
737 if (error)
738 return error;
739 if (numvifs > 0)
740 return EPERM;
741 if (pim_assert)
742 return EPERM;
743 for (i = 0; i < MFCTBLSIZ; i++) {
744 if (LIST_FIRST(&mfchashtbl[i]) != NULL)
745 return EPERM;
746 }
747
748 mrt_api_config = apival & mrt_api_support;
749 return 0;
750 }
751
752 /*
753 * Add a vif to the vif table
754 */
/*
 * Add a vif to the vif table (MRT_ADD_VIF).
 *
 * Depending on vifc_flags the vif is backed by:
 *  - VIFF_TUNNEL:   an IP-in-IP decapsulator registered with
 *                   ip_encap plus a fake "mdecapN" ifnet;
 *  - VIFF_REGISTER: the static PIM register pseudo-interface
 *                   (PIM kernels only);
 *  - otherwise:     a real multicast-capable interface owning
 *                   vifc_lcl_addr, put into promiscuous multicast
 *                   reception via SIOCADDMULTI.
 *
 * Returns 0 on success; EINVAL, EADDRNOTAVAIL, EADDRINUSE or
 * EOPNOTSUPP on the various validation failures.
 */
static int
add_vif(struct vifctl *vifcp)
{
	struct vif *vifp;
	struct ifnet *ifp;
	int error, s;
	struct sockaddr_in sin;

	if (vifcp->vifc_vifi >= MAXVIFS)
		return EINVAL;
	if (in_nullhost(vifcp->vifc_lcl_addr))
		return EADDRNOTAVAIL;

	/* A non-null local address marks the slot as in use. */
	vifp = &viftable[vifcp->vifc_vifi];
	if (!in_nullhost(vifp->v_lcl_addr))
		return EADDRINUSE;

	/* Find the interface with an address in AF_INET family. */
#ifdef PIM
	if (vifcp->vifc_flags & VIFF_REGISTER) {
		/*
		 * XXX: Because VIFF_REGISTER does not really need a valid
		 * local interface (e.g. it could be 127.0.0.2), we don't
		 * check its address.
		 */
		ifp = NULL;
	} else
#endif
	{
		struct ifaddr *ifa;

		sockaddr_in_init(&sin, &vifcp->vifc_lcl_addr, 0);
		s = pserialize_read_enter();
		ifa = ifa_ifwithaddr(sintosa(&sin));
		if (ifa == NULL) {
			pserialize_read_exit(s);
			return EADDRNOTAVAIL;
		}
		ifp = ifa->ifa_ifp;
		/* FIXME NOMPSAFE: ifp is used after the read section ends. */
		pserialize_read_exit(s);
	}

	if (vifcp->vifc_flags & VIFF_TUNNEL) {
		if (vifcp->vifc_flags & VIFF_SRCRT) {
			log(LOG_ERR, "source routed tunnels not supported\n");
			return EOPNOTSUPP;
		}

		/* attach this vif to decapsulator dispatch table */
		/*
		 * XXX Use addresses in registration so that matching
		 * can be done with radix tree in decapsulator. But,
		 * we need to check inner header for multicast, so
		 * this requires both radix tree lookup and then a
		 * function to check, and this is not supported yet.
		 */
		error = encap_lock_enter();
		if (error)
			return error;
		vifp->v_encap_cookie = encap_attach_func(AF_INET, IPPROTO_IPV4,
		    vif_encapcheck, &vif_encapsw, vifp);
		encap_lock_exit();
		if (!vifp->v_encap_cookie)
			return EINVAL;

		/* Create a fake encapsulation interface. */
		ifp = malloc(sizeof(*ifp), M_MRTABLE, M_WAITOK|M_ZERO);
		snprintf(ifp->if_xname, sizeof(ifp->if_xname),
		    "mdecap%d", vifcp->vifc_vifi);

		/* Prepare cached route entry. */
		memset(&vifp->v_route, 0, sizeof(vifp->v_route));
#ifdef PIM
	} else if (vifcp->vifc_flags & VIFF_REGISTER) {
		/* All register vifs share the one static pseudo-ifnet;
		 * it is (re)initialized only for the first register vif. */
		ifp = &multicast_register_if;
		if (mrtdebug)
			log(LOG_DEBUG, "Adding a register vif, ifp: %p\n",
			    (void *)ifp);
		if (reg_vif_num == VIFI_INVALID) {
			memset(ifp, 0, sizeof(*ifp));
			snprintf(ifp->if_xname, sizeof(ifp->if_xname),
			    "register_vif");
			ifp->if_flags = IFF_LOOPBACK;
			memset(&vifp->v_route, 0, sizeof(vifp->v_route));
			reg_vif_num = vifcp->vifc_vifi;
		}
#endif
	} else {
		/* Make sure the interface supports multicast. */
		if ((ifp->if_flags & IFF_MULTICAST) == 0)
			return EOPNOTSUPP;

		/* Enable promiscuous reception of all IP multicasts. */
		sockaddr_in_init(&sin, &zeroin_addr, 0);
		error = if_mcast_op(ifp, SIOCADDMULTI, sintosa(&sin));
		if (error)
			return error;
	}

	s = splsoftnet();

	/* Define parameters for the tbf structure. */
	vifp->tbf_q = NULL;
	vifp->tbf_t = &vifp->tbf_q;
	microtime(&vifp->tbf_last_pkt_t);
	vifp->tbf_n_tok = 0;
	vifp->tbf_q_len = 0;
	vifp->tbf_max_q_len = MAXQSIZE;

	vifp->v_flags = vifcp->vifc_flags;
	vifp->v_threshold = vifcp->vifc_threshold;
	/* scaling up here allows division by 1024 in critical code */
	vifp->v_rate_limit = vifcp->vifc_rate_limit * 1024 / 1000;
	vifp->v_lcl_addr = vifcp->vifc_lcl_addr;
	vifp->v_rmt_addr = vifcp->vifc_rmt_addr;
	vifp->v_ifp = ifp;
	/* Initialize per vif pkt counters. */
	vifp->v_pkt_in = 0;
	vifp->v_pkt_out = 0;
	vifp->v_bytes_in = 0;
	vifp->v_bytes_out = 0;

	callout_init(&vifp->v_repq_ch, 0);

	splx(s);

	/* Adjust numvifs up if the vifi is higher than numvifs. */
	if (numvifs <= vifcp->vifc_vifi)
		numvifs = vifcp->vifc_vifi + 1;

	if (mrtdebug)
		log(LOG_DEBUG, "add_vif #%d, lcladdr %x, %s %x, thresh %x, rate %d\n",
		    vifcp->vifc_vifi,
		    ntohl(vifcp->vifc_lcl_addr.s_addr),
		    (vifcp->vifc_flags & VIFF_TUNNEL) ? "rmtaddr" : "mask",
		    ntohl(vifcp->vifc_rmt_addr.s_addr),
		    vifcp->vifc_threshold,
		    vifcp->vifc_rate_limit);

	return 0;
}
897
/*
 * Tear down a single vif: stop its reprocess callout, detach its
 * decapsulator registration, free any packets still queued in the
 * token-bucket filter, release the resources specific to its type
 * (fake tunnel ifnet / register vif slot / multicast membership)
 * and finally zero the table slot, which marks it free.
 */
void
reset_vif(struct vif *vifp)
{
	struct mbuf *m, *n;
	struct ifnet *ifp;
	struct sockaddr_in sin;

	callout_stop(&vifp->v_repq_ch);

	/* detach this vif from decapsulator dispatch table */
	encap_lock_enter();
	encap_detach(vifp->v_encap_cookie);
	encap_lock_exit();
	vifp->v_encap_cookie = NULL;

	/*
	 * Free packets queued at the interface
	 */
	for (m = vifp->tbf_q; m != NULL; m = n) {
		n = m->m_nextpkt;
		m_freem(m);
	}

	if (vifp->v_flags & VIFF_TUNNEL)
		/* v_ifp is the fake "mdecapN" ifnet malloc'ed by add_vif(). */
		free(vifp->v_ifp, M_MRTABLE);
	else if (vifp->v_flags & VIFF_REGISTER) {
#ifdef PIM
		reg_vif_num = VIFI_INVALID;
#endif
	} else {
		/* Undo the SIOCADDMULTI done in add_vif(). */
		sockaddr_in_init(&sin, &zeroin_addr, 0);
		ifp = vifp->v_ifp;
		if_mcast_op(ifp, SIOCDELMULTI, sintosa(&sin));
	}
	memset((void *)vifp, 0, sizeof(*vifp));
}
934
935 /*
936 * Delete a vif from the vif table
937 */
938 static int
939 del_vif(vifi_t *vifip)
940 {
941 struct vif *vifp;
942 vifi_t vifi;
943 int s;
944
945 if (*vifip >= numvifs)
946 return EINVAL;
947
948 vifp = &viftable[*vifip];
949 if (in_nullhost(vifp->v_lcl_addr))
950 return EADDRNOTAVAIL;
951
952 s = splsoftnet();
953
954 reset_vif(vifp);
955
956 /* Adjust numvifs down */
957 for (vifi = numvifs; vifi > 0; vifi--)
958 if (!in_nullhost(viftable[vifi - 1].v_lcl_addr))
959 break;
960 numvifs = vifi;
961
962 splx(s);
963
964 if (mrtdebug)
965 log(LOG_DEBUG, "del_vif %d, numvifs %d\n", *vifip, numvifs);
966
967 return 0;
968 }
969
970 /*
971 * update an mfc entry without resetting counters and S,G addresses.
972 */
973 static void
974 update_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
975 {
976 int i;
977
978 rt->mfc_parent = mfccp->mfcc_parent;
979 for (i = 0; i < numvifs; i++) {
980 rt->mfc_ttls[i] = mfccp->mfcc_ttls[i];
981 rt->mfc_flags[i] = mfccp->mfcc_flags[i] & mrt_api_config &
982 MRT_MFC_FLAGS_ALL;
983 }
984 /* set the RP address */
985 if (mrt_api_config & MRT_MFC_RP)
986 rt->mfc_rp = mfccp->mfcc_rp;
987 else
988 rt->mfc_rp = zeroin_addr;
989 }
990
991 /*
992 * fully initialize an mfc entry from the parameter.
993 */
994 static void
995 init_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
996 {
997 rt->mfc_origin = mfccp->mfcc_origin;
998 rt->mfc_mcastgrp = mfccp->mfcc_mcastgrp;
999
1000 update_mfc_params(rt, mfccp);
1001
1002 /* initialize pkt counters per src-grp */
1003 rt->mfc_pkt_cnt = 0;
1004 rt->mfc_byte_cnt = 0;
1005 rt->mfc_wrong_if = 0;
1006 timerclear(&rt->mfc_last_assert);
1007 }
1008
1009 static void
1010 expire_mfc(struct mfc *rt)
1011 {
1012 struct rtdetq *rte, *nrte;
1013
1014 free_bw_list(rt->mfc_bw_meter);
1015
1016 for (rte = rt->mfc_stall; rte != NULL; rte = nrte) {
1017 nrte = rte->next;
1018 m_freem(rte->m);
1019 free(rte, M_MRTABLE);
1020 }
1021
1022 LIST_REMOVE(rt, mfc_hash);
1023 free(rt, M_MRTABLE);
1024 }
1025
1026 /*
1027 * Add an mfc entry
1028 */
/*
 * Add an MFC entry (MRT_ADD_MFC).
 *
 * Three cases, in order:
 *  1. A resolved entry for (origin, group) already exists: just
 *     refresh its parameters.
 *  2. One or more unresolved upcall entries (mfc_stall != NULL)
 *     exist in the hash chain: resolve them, cancel their expiry,
 *     and forward-then-free every packet that was queued while the
 *     daemon decided the route.
 *  3. No entry at all: install a fresh one.
 *
 * Returns 0, a sockopt_get() error, or ENOBUFS.
 */
static int
add_mfc(struct sockopt *sopt)
{
	struct mfcctl2 mfcctl2;
	struct mfcctl2 *mfccp;
	struct mfc *rt;
	u_int32_t hash = 0;
	struct rtdetq *rte, *nrte;
	u_short nstl;
	int s;
	int error;

	/*
	 * select data size depending on API version.
	 * With the advanced API the daemon passes a struct mfcctl2;
	 * otherwise the smaller legacy struct mfcctl (the zeroed
	 * tail of mfcctl2 covers the extra fields).
	 */
	mfccp = &mfcctl2;
	memset(&mfcctl2, 0, sizeof(mfcctl2));

	if (mrt_api_config & MRT_API_FLAGS_ALL)
		error = sockopt_get(sopt, mfccp, sizeof(struct mfcctl2));
	else
		error = sockopt_get(sopt, mfccp, sizeof(struct mfcctl));

	if (error)
		return error;

	s = splsoftnet();
	rt = mfc_find(&mfccp->mfcc_origin, &mfccp->mfcc_mcastgrp);

	/* If an entry already exists, just update the fields */
	if (rt) {
		if (mrtdebug & DEBUG_MFC)
			log(LOG_DEBUG, "add_mfc update o %x g %x p %x\n",
			    ntohl(mfccp->mfcc_origin.s_addr),
			    ntohl(mfccp->mfcc_mcastgrp.s_addr),
			    mfccp->mfcc_parent);

		update_mfc_params(rt, mfccp);

		splx(s);
		return 0;
	}

	/*
	 * Find the entry for which the upcall was made and update
	 */
	nstl = 0;
	hash = MFCHASH(mfccp->mfcc_origin, mfccp->mfcc_mcastgrp);
	LIST_FOREACH(rt, &mfchashtbl[hash], mfc_hash) {
		if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
		    in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp) &&
		    rt->mfc_stall != NULL) {
			/* More than one unresolved entry for the same S,G
			 * should not happen; log it but resolve anyway. */
			if (nstl++)
				log(LOG_ERR, "add_mfc %s o %x g %x p %x dbx %p\n",
				    "multiple kernel entries",
				    ntohl(mfccp->mfcc_origin.s_addr),
				    ntohl(mfccp->mfcc_mcastgrp.s_addr),
				    mfccp->mfcc_parent, rt->mfc_stall);

			if (mrtdebug & DEBUG_MFC)
				log(LOG_DEBUG, "add_mfc o %x g %x p %x dbg %p\n",
				    ntohl(mfccp->mfcc_origin.s_addr),
				    ntohl(mfccp->mfcc_mcastgrp.s_addr),
				    mfccp->mfcc_parent, rt->mfc_stall);

			/* Detach the stall queue before resolving. */
			rte = rt->mfc_stall;
			init_mfc_params(rt, mfccp);
			rt->mfc_stall = NULL;

			rt->mfc_expire = 0; /* Don't clean this guy up */
			nexpire[hash]--;

			/* free packets Qed at the end of this entry */
			for (; rte != NULL; rte = nrte) {
				nrte = rte->next;
				if (rte->ifp) {
					/* Forward the queued packet now that
					 * the route is known. */
					ip_mdq(rte->m, rte->ifp, rt);
				}
				m_freem(rte->m);
#ifdef UPCALL_TIMING
				collate(&rte->t);
#endif /* UPCALL_TIMING */
				free(rte, M_MRTABLE);
			}
		}
	}

	/*
	 * It is possible that an entry is being inserted without an upcall
	 */
	if (nstl == 0) {
		/*
		 * No mfc; make a new one
		 */
		if (mrtdebug & DEBUG_MFC)
			log(LOG_DEBUG, "add_mfc no upcall o %x g %x p %x\n",
			    ntohl(mfccp->mfcc_origin.s_addr),
			    ntohl(mfccp->mfcc_mcastgrp.s_addr),
			    mfccp->mfcc_parent);

		/* Reuse a matching entry (e.g. one whose stall queue was
		 * already drained) rather than inserting a duplicate. */
		LIST_FOREACH(rt, &mfchashtbl[hash], mfc_hash) {
			if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
			    in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp)) {
				init_mfc_params(rt, mfccp);
				if (rt->mfc_expire)
					nexpire[hash]--;
				rt->mfc_expire = 0;
				break; /* XXX */
			}
		}
		if (rt == NULL) { /* no upcall, so make a new entry */
			rt = malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
			if (rt == NULL) {
				splx(s);
				return ENOBUFS;
			}

			init_mfc_params(rt, mfccp);
			rt->mfc_expire = 0;
			rt->mfc_stall = NULL;
			rt->mfc_bw_meter = NULL;

			/* insert new entry at head of hash chain */
			LIST_INSERT_HEAD(&mfchashtbl[hash], rt, mfc_hash);
		}
	}

	splx(s);
	return 0;
}
1159
#ifdef UPCALL_TIMING
/*
 * Collect delay statistics on the upcalls: bucket the elapsed time
 * between the recorded upcall timestamp and now (in ~1ms steps,
 * saturating at bucket 50) into the upcall_data[] histogram.
 */
static void
collate(struct timeval *t)
{
	struct timeval now;
	u_int32_t usec_delta;
	u_int32_t bucket;

	microtime(&now);

	/* Only count timestamps that are actually in the past. */
	if (!timercmp(t, &now, <))
		return;

	TV_DELTA(now, *t, usec_delta);

	bucket = usec_delta >> 10;
	if (bucket > 50)
		bucket = 50;

	++upcall_data[bucket];
}
#endif /* UPCALL_TIMING */
1184
1185 /*
1186 * Delete an mfc entry
1187 */
1188 static int
1189 del_mfc(struct sockopt *sopt)
1190 {
1191 struct mfcctl2 mfcctl2;
1192 struct mfcctl2 *mfccp;
1193 struct mfc *rt;
1194 int s;
1195 int error;
1196
1197 /*
1198 * XXX: for deleting MFC entries the information in entries
1199 * of size "struct mfcctl" is sufficient.
1200 */
1201
1202 mfccp = &mfcctl2;
1203 memset(&mfcctl2, 0, sizeof(mfcctl2));
1204
1205 error = sockopt_get(sopt, mfccp, sizeof(struct mfcctl));
1206 if (error) {
1207 /* Try with the size of mfcctl2. */
1208 error = sockopt_get(sopt, mfccp, sizeof(struct mfcctl2));
1209 if (error)
1210 return error;
1211 }
1212
1213 if (mrtdebug & DEBUG_MFC)
1214 log(LOG_DEBUG, "del_mfc origin %x mcastgrp %x\n",
1215 ntohl(mfccp->mfcc_origin.s_addr),
1216 ntohl(mfccp->mfcc_mcastgrp.s_addr));
1217
1218 s = splsoftnet();
1219
1220 rt = mfc_find(&mfccp->mfcc_origin, &mfccp->mfcc_mcastgrp);
1221 if (rt == NULL) {
1222 splx(s);
1223 return EADDRNOTAVAIL;
1224 }
1225
1226 /*
1227 * free the bw_meter entries
1228 */
1229 free_bw_list(rt->mfc_bw_meter);
1230 rt->mfc_bw_meter = NULL;
1231
1232 LIST_REMOVE(rt, mfc_hash);
1233 free(rt, M_MRTABLE);
1234
1235 splx(s);
1236 return 0;
1237 }
1238
1239 static int
1240 socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src)
1241 {
1242 if (s) {
1243 if (sbappendaddr(&s->so_rcv, sintosa(src), mm, NULL) != 0) {
1244 sorwakeup(s);
1245 return 0;
1246 }
1247 soroverflow(s);
1248 }
1249 m_freem(mm);
1250 return -1;
1251 }
1252
1253 /*
1254 * IP multicast forwarding function. This function assumes that the packet
1255 * pointed to by "ip" has arrived on (or is about to be sent to) the interface
1256 * pointed to by "ifp", and the packet is to be relayed to other networks
1257 * that have members of the packet's destination IP multicast group.
1258 *
1259 * The packet is returned unscathed to the caller, unless it is
1260 * erroneous, in which case a non-zero return value tells the caller to
1261 * discard it.
1262 */
1263
1264 #define IP_HDR_LEN 20 /* # bytes of fixed IP header (excluding options) */
1265 #define TUNNEL_LEN 12 /* # bytes of IP option for tunnel encapsulation */
1266
int
ip_mforward(struct mbuf *m, struct ifnet *ifp)
{
	struct ip *ip = mtod(m, struct ip *);
	struct mfc *rt;
	static int srctun = 0;	/* rate-limits the source-route log message */
	struct mbuf *mm;
	struct sockaddr_in sin;
	int s;
	vifi_t vifi;

	if (mrtdebug & DEBUG_FORWARD)
		log(LOG_DEBUG, "ip_mforward: src %x, dst %x, ifp %p\n",
		    ntohl(ip->ip_src.s_addr), ntohl(ip->ip_dst.s_addr), ifp);

	/*
	 * XXX XXX: Why do we check [1] against IPOPT_LSRR? Because we
	 * expect [0] to be IPOPT_NOP, maybe? In all cases that doesn't
	 * make a lot of sense, a forged packet can just put two IPOPT_NOPs
	 * followed by one IPOPT_LSRR, and bypass the check.
	 */
	if (ip->ip_hl < (IP_HDR_LEN + TUNNEL_LEN) >> 2 ||
	    ((u_char *)(ip + 1))[1] != IPOPT_LSRR) {
		/*
		 * Packet arrived via a physical interface or
		 * an encapsulated tunnel or a register_vif.
		 */
	} else {
		/*
		 * Packet arrived through a source-route tunnel.
		 * Source-route tunnels are no longer supported.
		 */
		if ((srctun++ % 1000) == 0)
			log(LOG_ERR,
			    "ip_mforward: received source-routed packet from %x\n",
			    ntohl(ip->ip_src.s_addr));
		return EOPNOTSUPP;
	}

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Don't forward a packet with time-to-live of zero or one,
	 * or a packet destined to a local-only group.
	 */
	if (ip->ip_ttl <= 1 || IN_LOCAL_GROUP(ip->ip_dst.s_addr))
		return 0;

	/*
	 * Determine forwarding vifs from the forwarding cache table
	 */
	s = splsoftnet();
	++mrtstat.mrts_mfc_lookups;
	rt = mfc_find(&ip->ip_src, &ip->ip_dst);

	/* Entry exists, so forward if necessary */
	if (rt != NULL) {
		splx(s);
		return ip_mdq(m, ifp, rt);
	} else {
		/*
		 * If we don't have a route for packet's origin, make a copy
		 * of the packet and send message to routing daemon.
		 */

		struct mbuf *mb0;
		struct rtdetq *rte;
		u_int32_t hash;
		const int hlen = ip->ip_hl << 2;
#ifdef UPCALL_TIMING
		struct timeval tp;
		microtime(&tp);
#endif

		++mrtstat.mrts_mfc_misses;

		mrtstat.mrts_no_route++;
		if (mrtdebug & (DEBUG_FORWARD | DEBUG_MFC))
			log(LOG_DEBUG, "ip_mforward: no rte s %x g %x\n",
			    ntohl(ip->ip_src.s_addr),
			    ntohl(ip->ip_dst.s_addr));

		/*
		 * Allocate mbufs early so that we don't do extra work if we are
		 * just going to fail anyway. Make sure to pullup the header so
		 * that other people can't step on it.
		 */
		rte = malloc(sizeof(*rte), M_MRTABLE, M_NOWAIT);
		if (rte == NULL) {
			splx(s);
			return ENOBUFS;
		}
		mb0 = m_copypacket(m, M_DONTWAIT);
		M_PULLUP(mb0, hlen);
		if (mb0 == NULL) {
			free(rte, M_MRTABLE);
			splx(s);
			return ENOBUFS;
		}

		/* is there an upcall waiting for this flow? */
		hash = MFCHASH(ip->ip_src, ip->ip_dst);
		LIST_FOREACH(rt, &mfchashtbl[hash], mfc_hash) {
			/* A non-NULL mfc_stall marks a pending upcall entry. */
			if (in_hosteq(ip->ip_src, rt->mfc_origin) &&
			    in_hosteq(ip->ip_dst, rt->mfc_mcastgrp) &&
			    rt->mfc_stall != NULL)
				break;
		}

		if (rt == NULL) {
			/* No pending upcall: create one and notify mrouted. */
			int i;
			struct igmpmsg *im;

			/*
			 * Locate the vifi for the incoming interface for
			 * this packet.
			 * If none found, drop packet.
			 */
			for (vifi = 0; vifi < numvifs &&
				 viftable[vifi].v_ifp != ifp; vifi++)
				;
			if (vifi >= numvifs)	/* vif not found, drop packet */
				goto non_fatal;

			/* no upcall, so make a new entry */
			rt = malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
			if (rt == NULL)
				goto fail;

			/*
			 * Make a copy of the header to send to the user level
			 * process
			 */
			mm = m_copym(m, 0, hlen, M_DONTWAIT);
			M_PULLUP(mm, hlen);
			if (mm == NULL)
				goto fail1;

			/*
			 * Send message to routing daemon to install
			 * a route into the kernel table
			 */

			im = mtod(mm, struct igmpmsg *);
			im->im_msgtype = IGMPMSG_NOCACHE;
			im->im_mbz = 0;
			im->im_vif = vifi;

			mrtstat.mrts_upcalls++;

			sockaddr_in_init(&sin, &ip->ip_src, 0);
			if (socket_send(ip_mrouter, mm, &sin) < 0) {
				log(LOG_WARNING,
				    "ip_mforward: ip_mrouter socket queue full\n");
				++mrtstat.mrts_upq_sockfull;
				/* Error unwind; entered via gotos above too. */
			fail1:
				free(rt, M_MRTABLE);
			fail:
				free(rte, M_MRTABLE);
				m_freem(mb0);
				splx(s);
				return ENOBUFS;
			}

			/* insert new entry at head of hash chain */
			rt->mfc_origin = ip->ip_src;
			rt->mfc_mcastgrp = ip->ip_dst;
			rt->mfc_pkt_cnt = 0;
			rt->mfc_byte_cnt = 0;
			rt->mfc_wrong_if = 0;
			/* Entry self-destructs via expire_upcalls() if mrouted
			 * never answers. */
			rt->mfc_expire = UPCALL_EXPIRE;
			nexpire[hash]++;
			for (i = 0; i < numvifs; i++) {
				rt->mfc_ttls[i] = 0;
				rt->mfc_flags[i] = 0;
			}
			rt->mfc_parent = -1;

			/* clear the RP address */
			rt->mfc_rp = zeroin_addr;

			rt->mfc_bw_meter = NULL;

			/* link into table */
			LIST_INSERT_HEAD(&mfchashtbl[hash], rt, mfc_hash);
			/* Add this entry to the end of the queue */
			rt->mfc_stall = rte;
		} else {
			/* determine if q has overflowed */
			struct rtdetq **p;
			int npkts = 0;

			/*
			 * XXX ouch! we need to append to the list, but we
			 * only have a pointer to the front, so we have to
			 * scan the entire list every time.
			 */
			for (p = &rt->mfc_stall; *p != NULL; p = &(*p)->next)
				if (++npkts > MAX_UPQ) {
					mrtstat.mrts_upq_ovflw++;
					/* Also the "no vif" drop path. */
				non_fatal:
					free(rte, M_MRTABLE);
					m_freem(mb0);
					splx(s);
					return 0;
				}

			/* Add this entry to the end of the queue */
			*p = rte;
		}

		/* Stash the copied packet on the stall queue entry; it is
		 * replayed by add_mfc() once mrouted installs the route. */
		rte->next = NULL;
		rte->m = mb0;
		rte->ifp = ifp;
#ifdef UPCALL_TIMING
		rte->t = tp;
#endif

		splx(s);

		return 0;
	}
}
1493
/*ARGSUSED*/
/*
 * Periodic callout: age out MFC entries that are still waiting for an
 * answer from the routing daemon (mfc_expire > 0) and never got one.
 */
static void
expire_upcalls(void *v)
{
	int i;

	/* XXX NOMPSAFE still need softnet_lock */
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);

	for (i = 0; i < MFCTBLSIZ; i++) {
		struct mfc *rt, *nrt;

		/* nexpire[i] counts pending-upcall entries in bucket i;
		 * skip buckets with none. */
		if (nexpire[i] == 0)
			continue;

		for (rt = LIST_FIRST(&mfchashtbl[i]); rt; rt = nrt) {
			/* Save the successor: expire_mfc() frees rt. */
			nrt = LIST_NEXT(rt, mfc_hash);

			/* Resolved entries (expire == 0) are skipped;
			 * pending ones are aged by one tick. */
			if (rt->mfc_expire == 0 || --rt->mfc_expire > 0)
				continue;
			nexpire[i]--;

			/*
			 * free the bw_meter entries
			 */
			/*
			 * NOTE(review): meters are freed here without
			 * unschedule_bw_meter(), unlike free_bw_list().
			 * Presumably a pending-upcall entry can never have
			 * a scheduled ("<=") meter attached — confirm.
			 */
			while (rt->mfc_bw_meter != NULL) {
				struct bw_meter *x = rt->mfc_bw_meter;

				rt->mfc_bw_meter = x->bm_mfc_next;
				kmem_intr_free(x, sizeof(*x));
			}

			++mrtstat.mrts_cache_cleanups;
			if (mrtdebug & DEBUG_EXPIRE)
				log(LOG_DEBUG,
				    "expire_upcalls: expiring (%x %x)\n",
				    ntohl(rt->mfc_origin.s_addr),
				    ntohl(rt->mfc_mcastgrp.s_addr));

			expire_mfc(rt);
		}
	}

	/* Re-arm ourselves; this callout runs for the mrouter's lifetime. */
	callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT,
	    expire_upcalls, NULL);

	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}
1544
1545 /*
1546 * Macro to send packet on vif.
1547 */
1548 #define MC_SEND(ip, vifp, m) do { \
1549 if ((vifp)->v_flags & VIFF_TUNNEL) \
1550 encap_send((ip), (vifp), (m)); \
1551 else \
1552 phyint_send((ip), (vifp), (m)); \
1553 } while (/*CONSTCOND*/ 0)
1554
1555 /*
1556 * Packet forwarding routine once entry in the cache is made
1557 */
static int
ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt)
{
	struct ip *ip = mtod(m, struct ip *);
	vifi_t vifi;
	struct vif *vifp;
	struct sockaddr_in sin;
	const int plen = ntohs(ip->ip_len) - (ip->ip_hl << 2);	/* payload bytes */

	/*
	 * Don't forward if it didn't arrive from the parent vif for its origin.
	 */
	vifi = rt->mfc_parent;
	if ((vifi >= numvifs) || (viftable[vifi].v_ifp != ifp)) {
		/* came in the wrong interface */
		if (mrtdebug & DEBUG_FORWARD)
			log(LOG_DEBUG, "wrong if: ifp %p vifi %d vififp %p\n",
			    ifp, vifi,
			    vifi >= numvifs ? 0 : viftable[vifi].v_ifp);
		++mrtstat.mrts_wrong_if;
		++rt->mfc_wrong_if;

		/*
		 * If we are doing PIM assert processing, send a message
		 * to the routing daemon.
		 *
		 * XXX: A PIM-SM router needs the WRONGVIF detection so it
		 * can complete the SPT switch, regardless of the type
		 * of the iif (broadcast media, GRE tunnel, etc).
		 */
		if (pim_assert && (vifi < numvifs) && viftable[vifi].v_ifp) {
			struct timeval now;
			u_int32_t delta;

#ifdef PIM
			if (ifp == &multicast_register_if)
				pimstat.pims_rcv_registers_wrongiif++;
#endif

			/* Get vifi for the incoming packet */
			for (vifi = 0;
			     vifi < numvifs && viftable[vifi].v_ifp != ifp;
			     vifi++)
				;
			if (vifi >= numvifs) {
				/* The iif is not found: ignore the packet. */
				return 0;
			}

			if (rt->mfc_flags[vifi] &
			    MRT_MFC_FLAGS_DISABLE_WRONGVIF) {
				/* WRONGVIF disabled: ignore the packet */
				return 0;
			}

			microtime(&now);

			/* Rate-limit WRONGVIF upcalls: at most one per
			 * ASSERT_MSG_TIME per MFC entry. */
			TV_DELTA(rt->mfc_last_assert, now, delta);

			if (delta > ASSERT_MSG_TIME) {
				struct igmpmsg *im;
				const int hlen = ip->ip_hl << 2;
				struct mbuf *mm =
				    m_copym(m, 0, hlen, M_DONTWAIT);

				M_PULLUP(mm, hlen);
				if (mm == NULL)
					return ENOBUFS;

				rt->mfc_last_assert = now;

				/* Overlay the igmpmsg upcall header on the
				 * copied IP header. */
				im = mtod(mm, struct igmpmsg *);
				im->im_msgtype = IGMPMSG_WRONGVIF;
				im->im_mbz = 0;
				im->im_vif = vifi;

				mrtstat.mrts_upcalls++;

				sockaddr_in_init(&sin, &im->im_src, 0);
				if (socket_send(ip_mrouter, mm, &sin) < 0) {
					log(LOG_WARNING,
					    "ip_mforward: ip_mrouter socket queue full\n");
					++mrtstat.mrts_upq_sockfull;
					return ENOBUFS;
				}
			}
		}
		return 0;
	}

	/* If I sourced this packet, it counts as output, else it was input. */
	if (in_hosteq(ip->ip_src, viftable[vifi].v_lcl_addr)) {
		viftable[vifi].v_pkt_out++;
		viftable[vifi].v_bytes_out += plen;
	} else {
		viftable[vifi].v_pkt_in++;
		viftable[vifi].v_bytes_in += plen;
	}
	rt->mfc_pkt_cnt++;
	rt->mfc_byte_cnt += plen;

	/*
	 * For each vif, decide if a copy of the packet should be forwarded.
	 * Forward if:
	 *	- the ttl exceeds the vif's threshold
	 *	- there are group members downstream on interface
	 */
	for (vifp = viftable, vifi = 0; vifi < numvifs; vifp++, vifi++) {
		if ((rt->mfc_ttls[vifi] > 0) &&
		    (ip->ip_ttl > rt->mfc_ttls[vifi])) {
			vifp->v_pkt_out++;
			vifp->v_bytes_out += plen;
#ifdef PIM
			if (vifp->v_flags & VIFF_REGISTER)
				pim_register_send(ip, vifp, m, rt);
			else
#endif
			MC_SEND(ip, vifp, m);
		}
	}

	/*
	 * Perform upcall-related bw measuring.
	 */
	if (rt->mfc_bw_meter != NULL) {
		struct bw_meter *x;
		struct timeval now;

		microtime(&now);
		for (x = rt->mfc_bw_meter; x != NULL; x = x->bm_mfc_next)
			bw_meter_receive_packet(x, plen, &now);
	}

	return 0;
}
1693
1694 static void
1695 phyint_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
1696 {
1697 struct mbuf *mb_copy;
1698 const int hlen = ip->ip_hl << 2;
1699
1700 /*
1701 * Make a new reference to the packet; make sure that
1702 * the IP header is actually copied, not just referenced,
1703 * so that ip_output() only scribbles on the copy.
1704 */
1705 mb_copy = m_copypacket(m, M_DONTWAIT);
1706 M_PULLUP(mb_copy, hlen);
1707 if (mb_copy == NULL)
1708 return;
1709
1710 if (vifp->v_rate_limit <= 0)
1711 tbf_send_packet(vifp, mb_copy);
1712 else
1713 tbf_control(vifp, mb_copy, mtod(mb_copy, struct ip *),
1714 ntohs(ip->ip_len));
1715 }
1716
/*
 * Send a packet over a tunnel vif by prepending an encapsulating
 * IPIP header addressed from the vif's local to its remote endpoint.
 */
static void
encap_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
{
	struct mbuf *mb_copy;
	struct ip *ip_copy;
	int i, len = ntohs(ip->ip_len) + sizeof(multicast_encap_iphdr);

	/* Take care of delayed checksums */
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		in_delayed_cksum(m);
		m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
	}

	/*
	 * copy the old packet & pullup its IP header into the
	 * new mbuf so we can modify it.  Try to fill the new
	 * mbuf since if we don't the ethernet driver will.
	 */
	MGETHDR(mb_copy, M_DONTWAIT, MT_DATA);
	if (mb_copy == NULL)
		return;
	mb_copy->m_data += max_linkhdr;
	mb_copy->m_pkthdr.len = len;
	mb_copy->m_len = sizeof(multicast_encap_iphdr);

	if ((mb_copy->m_next = m_copypacket(m, M_DONTWAIT)) == NULL) {
		m_freem(mb_copy);
		return;
	}
	/* Pull up min(len, MHLEN - max_linkhdr) bytes so the outer and
	 * (part of the) inner header are contiguous. */
	i = MHLEN - max_linkhdr;
	if (i > len)
		i = len;
	mb_copy = m_pullup(mb_copy, i);
	if (mb_copy == NULL)
		return;

	/*
	 * fill in the encapsulating IP header.
	 */
	ip_copy = mtod(mb_copy, struct ip *);
	*ip_copy = multicast_encap_iphdr;
	if (len < IP_MINFRAGSIZE)
		ip_copy->ip_id = 0;	/* no real ID needed: can't fragment */
	else
		ip_copy->ip_id = ip_newid(NULL);
	ip_copy->ip_len = htons(len);
	ip_copy->ip_src = vifp->v_lcl_addr;
	ip_copy->ip_dst = vifp->v_rmt_addr;

	/*
	 * turn the encapsulated IP header back into a valid one.
	 */
	ip = (struct ip *)((char *)ip_copy + sizeof(multicast_encap_iphdr));
	--ip->ip_ttl;
	ip->ip_sum = 0;
	/* Temporarily advance m_data so in_cksum() covers only the inner
	 * header, then restore it. */
	mb_copy->m_data += sizeof(multicast_encap_iphdr);
	ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
	mb_copy->m_data -= sizeof(multicast_encap_iphdr);

	if (vifp->v_rate_limit <= 0)
		tbf_send_packet(vifp, mb_copy);
	else
		tbf_control(vifp, mb_copy, ip, ntohs(ip_copy->ip_len));
}
1781
1782 /*
1783 * De-encapsulate a packet and feed it back through ip input.
1784 */
1785 static void
1786 vif_input(struct mbuf *m, int off, int proto, void *eparg)
1787 {
1788 struct vif *vifp = eparg;
1789
1790 KASSERT(vifp != NULL);
1791
1792 if (proto != ENCAP_PROTO) {
1793 m_freem(m);
1794 mrtstat.mrts_bad_tunnel++;
1795 return;
1796 }
1797
1798 m_adj(m, off);
1799 m_set_rcvif(m, vifp->v_ifp);
1800
1801 if (__predict_false(!pktq_enqueue(ip_pktq, m, 0))) {
1802 m_freem(m);
1803 }
1804 }
1805
1806 /*
1807 * Check if the packet should be received on the vif denoted by arg.
1808 * (The encap selection code will call this once per vif since each is
1809 * registered separately.)
1810 */
static int
vif_encapcheck(struct mbuf *m, int off, int proto, void *arg)
{
	struct vif *vifp;
	struct ip ip;

#ifdef DIAGNOSTIC
	if (!arg || proto != IPPROTO_IPV4)
		panic("unexpected arg in vif_encapcheck");
#endif

	/*
	 * Accept the packet only if the inner header is multicast
	 * and the outer header matches a tunnel-mode vif.  Order
	 * checks in the hope that common non-matching packets will be
	 * rejected quickly.  Assume that unicast IPv4 traffic in a
	 * parallel tunnel (e.g. gif(4)) is unlikely.
	 */

	/* Obtain the outer IP header and the vif pointer. */
	m_copydata(m, 0, sizeof(ip), (void *)&ip);
	vifp = (struct vif *)arg;

	/*
	 * The outer source must match the vif's remote peer address.
	 * For a multicast router with several tunnels, this is the
	 * only check that will fail on packets in other tunnels,
	 * assuming the local address is the same.
	 */
	if (!in_hosteq(vifp->v_rmt_addr, ip.ip_src))
		return 0;

	/* The outer destination must match the vif's local address. */
	if (!in_hosteq(vifp->v_lcl_addr, ip.ip_dst))
		return 0;

	/* The vif must be of tunnel type. */
	if ((vifp->v_flags & VIFF_TUNNEL) == 0)
		return 0;

	/* Check that the inner destination is multicast. */
	if (off + sizeof(ip) > m->m_pkthdr.len)
		return 0;	/* truncated: no complete inner header */
	m_copydata(m, off, sizeof(ip), (void *)&ip);
	if (!IN_MULTICAST(ip.ip_dst.s_addr))
		return 0;

	/*
	 * We have checked that both the outer src and dst addresses
	 * match the vif, and that the inner destination is multicast
	 * (224/5).  By claiming more than 64, we intend to
	 * preferentially take packets that also match a parallel
	 * gif(4).
	 */
	return 32 + 32 + 5;
}
1867
1868 /*
1869 * Token bucket filter module
1870 */
1871 static void
1872 tbf_control(struct vif *vifp, struct mbuf *m, struct ip *ip, u_int32_t len)
1873 {
1874
1875 if (len > MAX_BKT_SIZE) {
1876 /* drop if packet is too large */
1877 mrtstat.mrts_pkt2large++;
1878 m_freem(m);
1879 return;
1880 }
1881
1882 tbf_update_tokens(vifp);
1883
1884 /*
1885 * If there are enough tokens, and the queue is empty, send this packet
1886 * out immediately. Otherwise, try to insert it on this vif's queue.
1887 */
1888 if (vifp->tbf_q_len == 0) {
1889 if (len <= vifp->tbf_n_tok) {
1890 vifp->tbf_n_tok -= len;
1891 tbf_send_packet(vifp, m);
1892 } else {
1893 /* queue packet and timeout till later */
1894 tbf_queue(vifp, m);
1895 callout_reset(&vifp->v_repq_ch, TBF_REPROCESS,
1896 tbf_reprocess_q, vifp);
1897 }
1898 } else {
1899 if (vifp->tbf_q_len >= vifp->tbf_max_q_len &&
1900 !tbf_dq_sel(vifp, ip)) {
1901 /* queue full, and couldn't make room */
1902 mrtstat.mrts_q_overflow++;
1903 m_freem(m);
1904 } else {
1905 /* queue length low enough, or made room */
1906 tbf_queue(vifp, m);
1907 tbf_process_q(vifp);
1908 }
1909 }
1910 }
1911
1912 /*
1913 * adds a packet to the queue at the interface
1914 */
1915 static void
1916 tbf_queue(struct vif *vifp, struct mbuf *m)
1917 {
1918 int s = splsoftnet();
1919
1920 /* insert at tail */
1921 *vifp->tbf_t = m;
1922 vifp->tbf_t = &m->m_nextpkt;
1923 vifp->tbf_q_len++;
1924
1925 splx(s);
1926 }
1927
1928 /*
1929 * processes the queue at the interface
1930 */
1931 static void
1932 tbf_process_q(struct vif *vifp)
1933 {
1934 struct mbuf *m;
1935 int len;
1936 int s = splsoftnet();
1937
1938 /*
1939 * Loop through the queue at the interface and send as many packets
1940 * as possible.
1941 */
1942 for (m = vifp->tbf_q; m != NULL; m = vifp->tbf_q) {
1943 len = ntohs(mtod(m, struct ip *)->ip_len);
1944
1945 /* determine if the packet can be sent */
1946 if (len <= vifp->tbf_n_tok) {
1947 /* if so,
1948 * reduce no of tokens, dequeue the packet,
1949 * send the packet.
1950 */
1951 if ((vifp->tbf_q = m->m_nextpkt) == NULL)
1952 vifp->tbf_t = &vifp->tbf_q;
1953 --vifp->tbf_q_len;
1954
1955 m->m_nextpkt = NULL;
1956 vifp->tbf_n_tok -= len;
1957 tbf_send_packet(vifp, m);
1958 } else
1959 break;
1960 }
1961 splx(s);
1962 }
1963
1964 static void
1965 tbf_reprocess_q(void *arg)
1966 {
1967 struct vif *vifp = arg;
1968
1969 if (ip_mrouter == NULL)
1970 return;
1971
1972 tbf_update_tokens(vifp);
1973 tbf_process_q(vifp);
1974
1975 if (vifp->tbf_q_len != 0)
1976 callout_reset(&vifp->v_repq_ch, TBF_REPROCESS,
1977 tbf_reprocess_q, vifp);
1978 }
1979
1980 /* function that will selectively discard a member of the queue
1981 * based on the precedence value and the priority
1982 */
static int
tbf_dq_sel(struct vif *vifp, struct ip *ip)
{
	u_int p;
	struct mbuf **mp, *m;
	int s = splsoftnet();

	/* Priority of the packet that wants to get in. */
	p = priority(vifp, ip);

	/*
	 * Walk the queue and drop the first packet whose priority is
	 * strictly lower than the newcomer's.  mp trails one link
	 * behind m so the victim can be unlinked in place.
	 */
	for (mp = &vifp->tbf_q, m = *mp;
	     m != NULL;
	     mp = &m->m_nextpkt, m = *mp) {
		if (p > priority(vifp, mtod(m, struct ip *))) {
			/* Keep the tail pointer valid if we drop the tail. */
			if ((*mp = m->m_nextpkt) == NULL)
				vifp->tbf_t = mp;
			--vifp->tbf_q_len;

			m_freem(m);
			mrtstat.mrts_drop_sel++;
			splx(s);
			return 1;	/* made room */
		}
	}
	splx(s);
	return 0;	/* nothing lower-priority to evict */
}
2009
/*
 * Actually transmit a (rate-limit-approved) packet on the vif.
 */
static void
tbf_send_packet(struct vif *vifp, struct mbuf *m)
{
	int error;
	int s = splsoftnet();

	if (vifp->v_flags & VIFF_TUNNEL) {
		/* If tunnel options */
		/* Packet was encapsulated earlier (encap_send); just
		 * route it towards the tunnel endpoint. */
		ip_output(m, NULL, &vifp->v_route, IP_FORWARDING, NULL, NULL);
	} else {
		/* if physical interface option, extract the options and then send */
		struct ip_moptions imo;

		imo.imo_multicast_if_index = if_get_index(vifp->v_ifp);
		/* The TTL was not decremented earlier; do it on output. */
		imo.imo_multicast_ttl = mtod(m, struct ip *)->ip_ttl - 1;
		imo.imo_multicast_loop = 1;

		error = ip_output(m, NULL, NULL, IP_FORWARDING|IP_MULTICASTOPTS,
		    &imo, NULL);

		if (mrtdebug & DEBUG_XMIT)
			log(LOG_DEBUG, "phyint_send on vif %ld err %d\n",
			    (long)(vifp - viftable), error);
	}
	splx(s);
}
2036
2037 /* determine the current time and then
2038 * the elapsed time (between the last time and time now)
2039 * in milliseconds & update the no. of tokens in the bucket
2040 */
2041 static void
2042 tbf_update_tokens(struct vif *vifp)
2043 {
2044 struct timeval tp;
2045 u_int32_t tm;
2046 int s = splsoftnet();
2047
2048 microtime(&tp);
2049
2050 TV_DELTA(tp, vifp->tbf_last_pkt_t, tm);
2051
2052 /*
2053 * This formula is actually
2054 * "time in seconds" * "bytes/second".
2055 *
2056 * (tm / 1000000) * (v_rate_limit * 1000 * (1000/1024) / 8)
2057 *
2058 * The (1000/1024) was introduced in add_vif to optimize
2059 * this divide into a shift.
2060 */
2061 vifp->tbf_n_tok += tm * vifp->v_rate_limit / 8192;
2062 vifp->tbf_last_pkt_t = tp;
2063
2064 if (vifp->tbf_n_tok > MAX_BKT_SIZE)
2065 vifp->tbf_n_tok = MAX_BKT_SIZE;
2066
2067 splx(s);
2068 }
2069
2070 static int
2071 priority(struct vif *vifp, struct ip *ip)
2072 {
2073 int prio = 50; /* the lowest priority -- default case */
2074
2075 /* temporary hack; may add general packet classifier some day */
2076
2077 /*
2078 * XXX XXX: We're reading the UDP header, but we didn't ensure
2079 * it was present in the packet.
2080 */
2081
2082 /*
2083 * The UDP port space is divided up into four priority ranges:
2084 * [0, 16384) : unclassified - lowest priority
2085 * [16384, 32768) : audio - highest priority
2086 * [32768, 49152) : whiteboard - medium priority
2087 * [49152, 65536) : video - low priority
2088 */
2089 if (ip->ip_p == IPPROTO_UDP) {
2090 struct udphdr *udp = (struct udphdr *)(((char *)ip) + (ip->ip_hl << 2));
2091
2092 switch (ntohs(udp->uh_dport) & 0xc000) {
2093 case 0x4000:
2094 prio = 70;
2095 break;
2096 case 0x8000:
2097 prio = 60;
2098 break;
2099 case 0xc000:
2100 prio = 55;
2101 break;
2102 }
2103
2104 if (tbfdebug > 1)
2105 log(LOG_DEBUG, "port %x prio %d\n",
2106 ntohs(udp->uh_dport), prio);
2107 }
2108
2109 return prio;
2110 }
2111
2112 /*
2113 * Code for bandwidth monitors
2114 */
2115
2116 /*
2117 * Define common interface for timeval-related methods
2118 */
2119 #define BW_TIMEVALCMP(tvp, uvp, cmp) timercmp((tvp), (uvp), cmp)
2120 #define BW_TIMEVALDECR(vvp, uvp) timersub((vvp), (uvp), (vvp))
2121 #define BW_TIMEVALADD(vvp, uvp) timeradd((vvp), (uvp), (vvp))
2122
2123 static uint32_t
2124 compute_bw_meter_flags(struct bw_upcall *req)
2125 {
2126 uint32_t flags = 0;
2127
2128 if (req->bu_flags & BW_UPCALL_UNIT_PACKETS)
2129 flags |= BW_METER_UNIT_PACKETS;
2130 if (req->bu_flags & BW_UPCALL_UNIT_BYTES)
2131 flags |= BW_METER_UNIT_BYTES;
2132 if (req->bu_flags & BW_UPCALL_GEQ)
2133 flags |= BW_METER_GEQ;
2134 if (req->bu_flags & BW_UPCALL_LEQ)
2135 flags |= BW_METER_LEQ;
2136
2137 return flags;
2138 }
2139
2140 /*
2141 * Add a bw_meter entry
2142 */
static int
add_bw_upcall(struct bw_upcall *req)
{
	int s;
	struct mfc *mfc;
	struct timeval delta = { BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC,
		    BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC };
	struct timeval now;
	struct bw_meter *x;
	uint32_t flags;

	if (!(mrt_api_config & MRT_MFC_BW_UPCALL))
		return EOPNOTSUPP;

	/* Test if the flags are valid */
	/* Exactly one unit (packets/bytes may combine) and exactly one
	 * of GEQ/LEQ must be requested. */
	if (!(req->bu_flags & (BW_UPCALL_UNIT_PACKETS | BW_UPCALL_UNIT_BYTES)))
		return EINVAL;
	if (!(req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ)))
		return EINVAL;
	if ((req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
	    == (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
		return EINVAL;

	/* Test if the threshold time interval is valid */
	if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &delta, <))
		return EINVAL;

	flags = compute_bw_meter_flags(req);

	/*
	 * Find if we have already same bw_meter entry
	 */
	s = splsoftnet();
	mfc = mfc_find(&req->bu_src, &req->bu_dst);
	if (mfc == NULL) {
		splx(s);
		return EADDRNOTAVAIL;
	}
	for (x = mfc->mfc_bw_meter; x != NULL; x = x->bm_mfc_next) {
		/* A duplicate is one with identical threshold and user
		 * flags; installing it again is a silent no-op. */
		if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
		    &req->bu_threshold.b_time, ==)) &&
		    (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
		    (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
		    (x->bm_flags & BW_METER_USER_FLAGS) == flags) {
			splx(s);
			return 0;		/* XXX Already installed */
		}
	}

	/* Allocate the new bw_meter entry */
	x = kmem_intr_alloc(sizeof(*x), KM_NOSLEEP);
	if (x == NULL) {
		splx(s);
		return ENOBUFS;
	}

	/* Set the new bw_meter entry */
	x->bm_threshold.b_time = req->bu_threshold.b_time;
	microtime(&now);
	x->bm_start_time = now;
	x->bm_threshold.b_packets = req->bu_threshold.b_packets;
	x->bm_threshold.b_bytes = req->bu_threshold.b_bytes;
	x->bm_measured.b_packets = 0;
	x->bm_measured.b_bytes = 0;
	x->bm_flags = flags;
	x->bm_time_next = NULL;
	/* BW_METER_BUCKETS as hash means "not on any timer list yet". */
	x->bm_time_hash = BW_METER_BUCKETS;

	/* Add the new bw_meter entry to the front of entries for this MFC */
	x->bm_mfc = mfc;
	x->bm_mfc_next = mfc->mfc_bw_meter;
	mfc->mfc_bw_meter = x;
	schedule_bw_meter(x, &now);
	splx(s);

	return 0;
}
2220
2221 static void
2222 free_bw_list(struct bw_meter *list)
2223 {
2224 while (list != NULL) {
2225 struct bw_meter *x = list;
2226
2227 list = list->bm_mfc_next;
2228 unschedule_bw_meter(x);
2229 kmem_intr_free(x, sizeof(*x));
2230 }
2231 }
2232
2233 /*
2234 * Delete one or multiple bw_meter entries
2235 */
static int
del_bw_upcall(struct bw_upcall *req)
{
	int s;
	struct mfc *mfc;
	struct bw_meter *x;

	if (!(mrt_api_config & MRT_MFC_BW_UPCALL))
		return EOPNOTSUPP;

	s = splsoftnet();
	/* Find the corresponding MFC entry */
	mfc = mfc_find(&req->bu_src, &req->bu_dst);
	if (mfc == NULL) {
		splx(s);
		return EADDRNOTAVAIL;
	} else if (req->bu_flags & BW_UPCALL_DELETE_ALL) {
		/*
		 * Delete all bw_meter entries for this mfc
		 */
		struct bw_meter *list;

		/* Detach the whole list first, then free it. */
		list = mfc->mfc_bw_meter;
		mfc->mfc_bw_meter = NULL;
		free_bw_list(list);
		splx(s);
		return 0;
	} else {			/* Delete a single bw_meter entry */
		struct bw_meter *prev;
		uint32_t flags = 0;

		flags = compute_bw_meter_flags(req);

		/* Find the bw_meter entry to delete */
		/* A meter matches only if both threshold and user flags
		 * agree with the request. */
		for (prev = NULL, x = mfc->mfc_bw_meter; x != NULL;
		     prev = x, x = x->bm_mfc_next) {
			if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
			    &req->bu_threshold.b_time, ==)) &&
			    (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
			    (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
			    (x->bm_flags & BW_METER_USER_FLAGS) == flags)
				break;
		}
		if (x != NULL) { /* Delete entry from the list for this MFC */
			if (prev != NULL)
				prev->bm_mfc_next = x->bm_mfc_next;	/* remove from middle*/
			else
				x->bm_mfc->mfc_bw_meter = x->bm_mfc_next;/* new head of list */

			unschedule_bw_meter(x);
			splx(s);
			/* Free the bw_meter entry */
			kmem_intr_free(x, sizeof(*x));
			return 0;
		} else {
			splx(s);
			return EINVAL;
		}
	}
	/* NOTREACHED */
}
2297
2298 /*
2299 * Perform bandwidth measurement processing that may result in an upcall
2300 */
static void
bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
{
	struct timeval delta;

	/* Time elapsed since the start of the current measuring interval. */
	delta = *nowp;
	BW_TIMEVALDECR(&delta, &x->bm_start_time);

	if (x->bm_flags & BW_METER_GEQ) {
		/*
		 * Processing for ">=" type of bw_meter entry
		 */
		if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
			/* Reset the bw_meter entry */
			x->bm_start_time = *nowp;
			x->bm_measured.b_packets = 0;
			x->bm_measured.b_bytes = 0;
			x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
		}

		/* Record that a packet is received */
		x->bm_measured.b_packets++;
		x->bm_measured.b_bytes += plen;

		/*
		 * Test if we should deliver an upcall
		 */
		/* At most one upcall is delivered per measuring interval;
		 * the flag is cleared on interval reset above. */
		if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) {
			if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
			    (x->bm_measured.b_packets >= x->bm_threshold.b_packets)) ||
			    ((x->bm_flags & BW_METER_UNIT_BYTES) &&
			    (x->bm_measured.b_bytes >= x->bm_threshold.b_bytes))) {
				/* Prepare an upcall for delivery */
				bw_meter_prepare_upcall(x, nowp);
				x->bm_flags |= BW_METER_UPCALL_DELIVERED;
			}
		}
	} else if (x->bm_flags & BW_METER_LEQ) {
		/*
		 * Processing for "<=" type of bw_meter entry
		 */
		if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
			/*
			 * We are behind time with the multicast forwarding table
			 * scanning for "<=" type of bw_meter entries, so test now
			 * if we should deliver an upcall.
			 */
			if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
			    (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
			    ((x->bm_flags & BW_METER_UNIT_BYTES) &&
			    (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
				/* Prepare an upcall for delivery */
				bw_meter_prepare_upcall(x, nowp);
			}
			/* Reschedule the bw_meter entry */
			unschedule_bw_meter(x);
			schedule_bw_meter(x, nowp);
		}

		/* Record that a packet is received */
		x->bm_measured.b_packets++;
		x->bm_measured.b_bytes += plen;

		/*
		 * Test if we should restart the measuring interval
		 */
		if ((x->bm_flags & BW_METER_UNIT_PACKETS &&
		    x->bm_measured.b_packets <= x->bm_threshold.b_packets) ||
		    (x->bm_flags & BW_METER_UNIT_BYTES &&
		    x->bm_measured.b_bytes <= x->bm_threshold.b_bytes)) {
			/* Don't restart the measuring interval */
		} else {
			/* Do restart the measuring interval */
			/*
			 * XXX: note that we don't unschedule and schedule, because this
			 * might be too much overhead per packet. Instead, when we process
			 * all entries for a given timer hash bin, we check whether it is
			 * really a timeout. If not, we reschedule at that time.
			 */
			x->bm_start_time = *nowp;
			x->bm_measured.b_packets = 0;
			x->bm_measured.b_bytes = 0;
			x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
		}
	}
}
2387
2388 /*
2389 * Prepare a bandwidth-related upcall
2390 */
2391 static void
2392 bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp)
2393 {
2394 struct timeval delta;
2395 struct bw_upcall *u;
2396
2397 /*
2398 * Compute the measured time interval
2399 */
2400 delta = *nowp;
2401 BW_TIMEVALDECR(&delta, &x->bm_start_time);
2402
2403 /*
2404 * If there are too many pending upcalls, deliver them now
2405 */
2406 if (bw_upcalls_n >= BW_UPCALLS_MAX)
2407 bw_upcalls_send();
2408
2409 /*
2410 * Set the bw_upcall entry
2411 */
2412 u = &bw_upcalls[bw_upcalls_n++];
2413 u->bu_src = x->bm_mfc->mfc_origin;
2414 u->bu_dst = x->bm_mfc->mfc_mcastgrp;
2415 u->bu_threshold.b_time = x->bm_threshold.b_time;
2416 u->bu_threshold.b_packets = x->bm_threshold.b_packets;
2417 u->bu_threshold.b_bytes = x->bm_threshold.b_bytes;
2418 u->bu_measured.b_time = delta;
2419 u->bu_measured.b_packets = x->bm_measured.b_packets;
2420 u->bu_measured.b_bytes = x->bm_measured.b_bytes;
2421 u->bu_flags = 0;
2422 if (x->bm_flags & BW_METER_UNIT_PACKETS)
2423 u->bu_flags |= BW_UPCALL_UNIT_PACKETS;
2424 if (x->bm_flags & BW_METER_UNIT_BYTES)
2425 u->bu_flags |= BW_UPCALL_UNIT_BYTES;
2426 if (x->bm_flags & BW_METER_GEQ)
2427 u->bu_flags |= BW_UPCALL_GEQ;
2428 if (x->bm_flags & BW_METER_LEQ)
2429 u->bu_flags |= BW_UPCALL_LEQ;
2430 }
2431
2432 /*
2433 * Send the pending bandwidth-related upcalls
2434 */
2435 static void
2436 bw_upcalls_send(void)
2437 {
2438 struct mbuf *m;
2439 int len = bw_upcalls_n * sizeof(bw_upcalls[0]);
2440 struct sockaddr_in k_igmpsrc = {
2441 .sin_len = sizeof(k_igmpsrc),
2442 .sin_family = AF_INET,
2443 };
2444 static struct igmpmsg igmpmsg = {
2445 0, /* unused1 */
2446 0, /* unused2 */
2447 IGMPMSG_BW_UPCALL,/* im_msgtype */
2448 0, /* im_mbz */
2449 0, /* im_vif */
2450 0, /* unused3 */
2451 { 0 }, /* im_src */
2452 { 0 } /* im_dst */
2453 };
2454
2455 if (bw_upcalls_n == 0)
2456 return; /* No pending upcalls */
2457
2458 bw_upcalls_n = 0;
2459
2460 /*
2461 * Allocate a new mbuf, initialize it with the header and
2462 * the payload for the pending calls.
2463 */
2464 MGETHDR(m, M_DONTWAIT, MT_HEADER);
2465 if (m == NULL) {
2466 log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n");
2467 return;
2468 }
2469
2470 m->m_len = m->m_pkthdr.len = 0;
2471 m_copyback(m, 0, sizeof(struct igmpmsg), (void *)&igmpmsg);
2472 m_copyback(m, sizeof(struct igmpmsg), len, (void *)&bw_upcalls[0]);
2473
2474 /*
2475 * Send the upcalls
2476 * XXX do we need to set the address in k_igmpsrc ?
2477 */
2478 mrtstat.mrts_upcalls++;
2479 if (socket_send(ip_mrouter, m, &k_igmpsrc) < 0) {
2480 log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n");
2481 ++mrtstat.mrts_upq_sockfull;
2482 }
2483 }
2484
/*
 * Compute the timeout hash value for the bw_meter entries
 *
 * The hash is the entry's absolute expiry time (bm_start_time plus the
 * threshold interval) in whole seconds, rounded up when there is a
 * fractional second so the entry never times out early, taken modulo
 * BW_METER_BUCKETS.  bw_meter_process() visits one bucket per elapsed
 * second, so the entry is examined once its interval has expired.
 */
#define BW_METER_TIMEHASH(bw_meter, hash)				\
    do {								\
	struct timeval next_timeval = (bw_meter)->bm_start_time;	\
	BW_TIMEVALADD(&next_timeval, &(bw_meter)->bm_threshold.b_time); \
	(hash) = next_timeval.tv_sec;					\
	if (next_timeval.tv_usec)					\
	    (hash)++; /* XXX: make sure we don't timeout early */	\
	(hash) %= BW_METER_BUCKETS;					\
    } while (/*CONSTCOND*/ 0)
2497
2498 /*
2499 * Schedule a timer to process periodically bw_meter entry of type "<="
2500 * by linking the entry in the proper hash bucket.
2501 */
2502 static void
2503 schedule_bw_meter(struct bw_meter *x, struct timeval *nowp)
2504 {
2505 int time_hash;
2506
2507 if (!(x->bm_flags & BW_METER_LEQ))
2508 return; /* XXX: we schedule timers only for "<=" entries */
2509
2510 /*
2511 * Reset the bw_meter entry
2512 */
2513 x->bm_start_time = *nowp;
2514 x->bm_measured.b_packets = 0;
2515 x->bm_measured.b_bytes = 0;
2516 x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
2517
2518 /*
2519 * Compute the timeout hash value and insert the entry
2520 */
2521 BW_METER_TIMEHASH(x, time_hash);
2522 x->bm_time_next = bw_meter_timers[time_hash];
2523 bw_meter_timers[time_hash] = x;
2524 x->bm_time_hash = time_hash;
2525 }
2526
2527 /*
2528 * Unschedule the periodic timer that processes bw_meter entry of type "<="
2529 * by removing the entry from the proper hash bucket.
2530 */
2531 static void
2532 unschedule_bw_meter(struct bw_meter *x)
2533 {
2534 int time_hash;
2535 struct bw_meter *prev, *tmp;
2536
2537 if (!(x->bm_flags & BW_METER_LEQ))
2538 return; /* XXX: we schedule timers only for "<=" entries */
2539
2540 /*
2541 * Compute the timeout hash value and delete the entry
2542 */
2543 time_hash = x->bm_time_hash;
2544 if (time_hash >= BW_METER_BUCKETS)
2545 return; /* Entry was not scheduled */
2546
2547 for (prev = NULL, tmp = bw_meter_timers[time_hash];
2548 tmp != NULL; prev = tmp, tmp = tmp->bm_time_next)
2549 if (tmp == x)
2550 break;
2551
2552 if (tmp == NULL)
2553 panic("unschedule_bw_meter: bw_meter entry not found");
2554
2555 if (prev != NULL)
2556 prev->bm_time_next = x->bm_time_next;
2557 else
2558 bw_meter_timers[time_hash] = x->bm_time_next;
2559
2560 x->bm_time_next = NULL;
2561 x->bm_time_hash = BW_METER_BUCKETS;
2562 }
2563
/*
 * Process all "<=" type of bw_meter that should be processed now,
 * and for each entry prepare an upcall if necessary. Each processed
 * entry is rescheduled again for the (periodic) processing.
 *
 * This is run periodically (once per second normally). On each round,
 * all the potentially matching entries are in the hash slot that we are
 * looking at.
 */
static void
bw_meter_process(void)
{
    int s;
    static uint32_t last_tv_sec;	/* last time we processed this */

    uint32_t loops;
    int i;
    struct timeval now, process_endtime;

    microtime(&now);
    if (last_tv_sec == now.tv_sec)
	return;		/* nothing to do */

    /* Number of seconds (hence buckets) elapsed since the last run */
    loops = now.tv_sec - last_tv_sec;
    last_tv_sec = now.tv_sec;
    if (loops > BW_METER_BUCKETS)
	loops = BW_METER_BUCKETS;	/* hash wraps; one full sweep suffices */

    s = splsoftnet();
    /*
     * Process all bins of bw_meter entries from the one after the last
     * processed to the current one. On entry, i points to the last bucket
     * visited, so we need to increment i at the beginning of the loop.
     */
    for (i = (now.tv_sec - loops) % BW_METER_BUCKETS; loops > 0; loops--) {
	struct bw_meter *x, *tmp_list;

	if (++i >= BW_METER_BUCKETS)
	    i = 0;

	/* Disconnect the list of bw_meter entries from the bin */
	tmp_list = bw_meter_timers[i];
	bw_meter_timers[i] = NULL;

	/* Process the list of bw_meter entries */
	while (tmp_list != NULL) {
	    x = tmp_list;
	    tmp_list = tmp_list->bm_time_next;

	    /* Test if the time interval is over */
	    process_endtime = x->bm_start_time;
	    BW_TIMEVALADD(&process_endtime, &x->bm_threshold.b_time);
	    if (BW_TIMEVALCMP(&process_endtime, &now, >)) {
		/* Not yet: reschedule, but don't reset */
		int time_hash;

		BW_METER_TIMEHASH(x, time_hash);
		if (time_hash == i && process_endtime.tv_sec == now.tv_sec) {
		    /*
		     * XXX: somehow the bin processing is a bit ahead of time.
		     * Put the entry in the next bin.
		     */
		    if (++time_hash >= BW_METER_BUCKETS)
			time_hash = 0;
		}
		/*
		 * Relink by hand; schedule_bw_meter() would reset the
		 * measured counters, which must be kept here.
		 */
		x->bm_time_next = bw_meter_timers[time_hash];
		bw_meter_timers[time_hash] = x;
		x->bm_time_hash = time_hash;

		continue;
	    }

	    /*
	     * Test if we should deliver an upcall: for "<=" entries the
	     * upcall fires when the measured traffic stayed at or below
	     * the threshold over the whole interval.
	     */
	    if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
		 (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
		((x->bm_flags & BW_METER_UNIT_BYTES) &&
		 (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
		/* Prepare an upcall for delivery */
		bw_meter_prepare_upcall(x, &now);
	    }

	    /*
	     * Reschedule for next processing (also resets the counters
	     * and starts a new measurement interval)
	     */
	    schedule_bw_meter(x, &now);
	}
    }

    /* Send all upcalls that are pending delivery */
    bw_upcalls_send();

    splx(s);
}
2659
2660 /*
2661 * A periodic function for sending all upcalls that are pending delivery
2662 */
2663 static void
2664 expire_bw_upcalls_send(void *unused)
2665 {
2666 int s;
2667
2668 s = splsoftnet();
2669 bw_upcalls_send();
2670 splx(s);
2671
2672 callout_reset(&bw_upcalls_ch, BW_UPCALLS_PERIOD,
2673 expire_bw_upcalls_send, NULL);
2674 }
2675
2676 /*
2677 * A periodic function for periodic scanning of the multicast forwarding
2678 * table for processing all "<=" bw_meter entries.
2679 */
2680 static void
2681 expire_bw_meter_process(void *unused)
2682 {
2683 if (mrt_api_config & MRT_MFC_BW_UPCALL)
2684 bw_meter_process();
2685
2686 callout_reset(&bw_meter_ch, BW_METER_PERIOD,
2687 expire_bw_meter_process, NULL);
2688 }
2689
2690 /*
2691 * End of bandwidth monitoring code
2692 */
2693
2694 #ifdef PIM
2695 /*
2696 * Send the packet up to the user daemon, or eventually do kernel encapsulation
2697 */
2698 static int
2699 pim_register_send(struct ip *ip, struct vif *vifp, struct mbuf *m,
2700 struct mfc *rt)
2701 {
2702 struct mbuf *mb_copy, *mm;
2703
2704 if (mrtdebug & DEBUG_PIM)
2705 log(LOG_DEBUG, "pim_register_send: \n");
2706
2707 mb_copy = pim_register_prepare(ip, m);
2708 if (mb_copy == NULL)
2709 return ENOBUFS;
2710
2711 /*
2712 * Send all the fragments. Note that the mbuf for each fragment
2713 * is freed by the sending machinery.
2714 */
2715 for (mm = mb_copy; mm; mm = mb_copy) {
2716 mb_copy = mm->m_nextpkt;
2717 mm->m_nextpkt = NULL;
2718 mm = m_pullup(mm, sizeof(struct ip));
2719 if (mm != NULL) {
2720 ip = mtod(mm, struct ip *);
2721 if ((mrt_api_config & MRT_MFC_RP) &&
2722 !in_nullhost(rt->mfc_rp)) {
2723 pim_register_send_rp(ip, vifp, mm, rt);
2724 } else {
2725 pim_register_send_upcall(ip, vifp, mm, rt);
2726 }
2727 }
2728 }
2729
2730 return 0;
2731 }
2732
2733 /*
2734 * Return a copy of the data packet that is ready for PIM Register
2735 * encapsulation.
2736 * XXX: Note that in the returned copy the IP header is a valid one.
2737 */
2738 static struct mbuf *
2739 pim_register_prepare(struct ip *ip, struct mbuf *m)
2740 {
2741 struct mbuf *mb_copy = NULL;
2742 int mtu;
2743
2744 /* Take care of delayed checksums */
2745 if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
2746 in_delayed_cksum(m);
2747 m->m_pkthdr.csum_flags &= ~(M_CSUM_TCPv4|M_CSUM_UDPv4);
2748 }
2749
2750 /*
2751 * Copy the old packet & pullup its IP header into the
2752 * new mbuf so we can modify it.
2753 */
2754 mb_copy = m_copypacket(m, M_DONTWAIT);
2755 if (mb_copy == NULL)
2756 return NULL;
2757 mb_copy = m_pullup(mb_copy, ip->ip_hl << 2);
2758 if (mb_copy == NULL)
2759 return NULL;
2760
2761 /* take care of the TTL */
2762 ip = mtod(mb_copy, struct ip *);
2763 --ip->ip_ttl;
2764
2765 /* Compute the MTU after the PIM Register encapsulation */
2766 mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr);
2767
2768 if (ntohs(ip->ip_len) <= mtu) {
2769 /* Turn the IP header into a valid one */
2770 ip->ip_sum = 0;
2771 ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
2772 } else {
2773 /* Fragment the packet */
2774 if (ip_fragment(mb_copy, NULL, mtu) != 0) {
2775 /* XXX: mb_copy was freed by ip_fragment() */
2776 return NULL;
2777 }
2778 }
2779 return mb_copy;
2780 }
2781
2782 /*
2783 * Send an upcall with the data packet to the user-level process.
2784 */
2785 static int
2786 pim_register_send_upcall(struct ip *ip, struct vif *vifp,
2787 struct mbuf *mb_copy, struct mfc *rt)
2788 {
2789 struct mbuf *mb_first;
2790 int len = ntohs(ip->ip_len);
2791 struct igmpmsg *im;
2792 struct sockaddr_in k_igmpsrc = {
2793 .sin_len = sizeof(k_igmpsrc),
2794 .sin_family = AF_INET,
2795 };
2796
2797 /*
2798 * Add a new mbuf with an upcall header
2799 */
2800 MGETHDR(mb_first, M_DONTWAIT, MT_HEADER);
2801 if (mb_first == NULL) {
2802 m_freem(mb_copy);
2803 return ENOBUFS;
2804 }
2805 mb_first->m_data += max_linkhdr;
2806 mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg);
2807 mb_first->m_len = sizeof(struct igmpmsg);
2808 mb_first->m_next = mb_copy;
2809
2810 /* Send message to routing daemon */
2811 im = mtod(mb_first, struct igmpmsg *);
2812 im->im_msgtype = IGMPMSG_WHOLEPKT;
2813 im->im_mbz = 0;
2814 im->im_vif = vifp - viftable;
2815 im->im_src = ip->ip_src;
2816 im->im_dst = ip->ip_dst;
2817
2818 k_igmpsrc.sin_addr = ip->ip_src;
2819
2820 mrtstat.mrts_upcalls++;
2821
2822 if (socket_send(ip_mrouter, mb_first, &k_igmpsrc) < 0) {
2823 if (mrtdebug & DEBUG_PIM)
2824 log(LOG_WARNING,
2825 "mcast: pim_register_send_upcall: ip_mrouter socket queue full\n");
2826 ++mrtstat.mrts_upq_sockfull;
2827 return ENOBUFS;
2828 }
2829
2830 /* Keep statistics */
2831 pimstat.pims_snd_registers_msgs++;
2832 pimstat.pims_snd_registers_bytes += len;
2833
2834 return 0;
2835 }
2836
/*
 * Encapsulate the data packet in PIM Register message and send it to the RP.
 *
 * Prepends an outer IP header plus a PIM Register header to "mb_copy"
 * and hands the result to the output path of the incoming vif.
 * Consumes "mb_copy" on every path; returns 0 on success,
 * EADDRNOTAVAIL if the incoming vif is invalid, or ENOBUFS.
 */
static int
pim_register_send_rp(struct ip *ip, struct vif *vifp,
    struct mbuf *mb_copy, struct mfc *rt)
{
    struct mbuf *mb_first;
    struct ip *ip_outer;
    struct pim_encap_pimhdr *pimhdr;
    int len = ntohs(ip->ip_len);	/* length of the inner data packet */
    vifi_t vifi = rt->mfc_parent;

    if ((vifi >= numvifs) || in_nullhost(viftable[vifi].v_lcl_addr)) {
	m_freem(mb_copy);
	return EADDRNOTAVAIL;		/* The iif vif is invalid */
    }

    /*
     * Add a new mbuf with the encapsulating header
     */
    MGETHDR(mb_first, M_DONTWAIT, MT_HEADER);
    if (mb_first == NULL) {
	m_freem(mb_copy);
	return ENOBUFS;
    }
    mb_first->m_data += max_linkhdr;
    mb_first->m_len = sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
    mb_first->m_next = mb_copy;

    mb_first->m_pkthdr.len = len + mb_first->m_len;

    /*
     * Fill in the encapsulating IP and PIM header
     */
    ip_outer = mtod(mb_first, struct ip *);
    *ip_outer = pim_encap_iphdr;
    /* Packets below the fragmentable size need no real IP ID */
    if (mb_first->m_pkthdr.len < IP_MINFRAGSIZE)
	ip_outer->ip_id = 0;
    else
	ip_outer->ip_id = ip_newid(NULL);
    ip_outer->ip_len = htons(len + sizeof(pim_encap_iphdr) +
	sizeof(pim_encap_pimhdr));
    /* Source is the incoming vif's local address; destination is the RP */
    ip_outer->ip_src = viftable[vifi].v_lcl_addr;
    ip_outer->ip_dst = rt->mfc_rp;
    /*
     * Copy the inner header TOS to the outer header, and take care of the
     * IP_DF bit.
     */
    ip_outer->ip_tos = ip->ip_tos;
    if (ntohs(ip->ip_off) & IP_DF)
	ip_outer->ip_off |= htons(IP_DF);
    /* The PIM Register header immediately follows the outer IP header */
    pimhdr = (struct pim_encap_pimhdr *)((char *)ip_outer
	+ sizeof(pim_encap_iphdr));
    *pimhdr = pim_encap_pimhdr;
    /* If the iif crosses a border, set the Border-bit */
    if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & mrt_api_config)
	pimhdr->flags |= htonl(PIM_BORDER_REGISTER);

    /*
     * Compute the PIM checksum: temporarily advance m_data past the
     * outer IP header so in_cksum() covers only the PIM Register
     * header, then restore it.
     */
    mb_first->m_data += sizeof(pim_encap_iphdr);
    pimhdr->pim.pim_cksum = in_cksum(mb_first, sizeof(pim_encap_pimhdr));
    mb_first->m_data -= sizeof(pim_encap_iphdr);

    /* Send directly, or via the token bucket filter when rate-limited */
    if (vifp->v_rate_limit == 0)
	tbf_send_packet(vifp, mb_first);
    else
	tbf_control(vifp, mb_first, ip, ntohs(ip_outer->ip_len));

    /* Keep statistics */
    pimstat.pims_snd_registers_msgs++;
    pimstat.pims_snd_registers_bytes += len;

    return 0;
}
2911
/*
 * PIM-SMv2 and PIM-DM messages processing.
 * Receives and verifies the PIM control messages, and passes them
 * up to the listening socket, using rip_input().
 * The only message with special processing is the PIM_REGISTER message
 * (used by PIM-SM): the PIM header is stripped off, and the inner packet
 * is passed to if_simloop().
 */
void
pim_input(struct mbuf *m, ...)
{
    struct ip *ip = mtod(m, struct ip *);
    struct pim *pim;
    int minlen;
    int datalen;
    int ip_tos;
    int proto;
    int iphlen;
    va_list ap;

    /* The variadic arguments are the IP header length and the protocol */
    va_start(ap, m);
    iphlen = va_arg(ap, int);
    proto = va_arg(ap, int);
    va_end(ap);

    /* Length of the PIM message, excluding the outer IP header */
    datalen = ntohs(ip->ip_len) - iphlen;

    /* Keep statistics */
    pimstat.pims_rcv_total_msgs++;
    pimstat.pims_rcv_total_bytes += datalen;

    /*
     * Validate lengths
     */
    if (datalen < PIM_MINLEN) {
	pimstat.pims_rcv_tooshort++;
	log(LOG_ERR, "pim_input: packet size too small %d from %lx\n",
	    datalen, (u_long)ip->ip_src.s_addr);
	m_freem(m);
	return;
    }

    /*
     * If the packet is at least as big as a REGISTER, go ahead
     * and grab the PIM REGISTER header size, to avoid another
     * possible m_pullup() later.
     *
     * PIM_MINLEN == pimhdr + u_int32_t == 4 + 4 = 8
     * PIM_REG_MINLEN == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28
     */
    minlen = iphlen + (datalen >= PIM_REG_MINLEN ? PIM_REG_MINLEN : PIM_MINLEN);

    /*
     * Get the IP and PIM headers in contiguous memory, and
     * possibly the PIM REGISTER header.
     */
    if ((m->m_flags & M_EXT || m->m_len < minlen) &&
	(m = m_pullup(m, minlen)) == NULL) {
	log(LOG_ERR, "pim_input: m_pullup failure\n");
	return;
    }
    ip = mtod(m, struct ip *);
    /* Remember the outer TOS; it may need to be copied to the inner IP */
    ip_tos = ip->ip_tos;

    /* adjust mbuf to point to the PIM header */
    m->m_data += iphlen;
    m->m_len -= iphlen;
    pim = mtod(m, struct pim *);

    /*
     * Validate checksum. If PIM REGISTER, exclude the data packet.
     *
     * XXX: some older PIMv2 implementations don't make this distinction,
     * so for compatibility reason perform the checksum over part of the
     * message, and if error, then over the whole message.
     */
    if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER && in_cksum(m, PIM_MINLEN) == 0) {
	/* do nothing, checksum okay */
    } else if (in_cksum(m, datalen)) {
	pimstat.pims_rcv_badsum++;
	if (mrtdebug & DEBUG_PIM)
	    log(LOG_DEBUG, "pim_input: invalid checksum\n");
	m_freem(m);
	return;
    }

    /* PIM version check */
    if (PIM_VT_V(pim->pim_vt) < PIM_VERSION) {
	pimstat.pims_rcv_badversion++;
	log(LOG_ERR, "pim_input: incorrect version %d, expecting %d\n",
	    PIM_VT_V(pim->pim_vt), PIM_VERSION);
	m_freem(m);
	return;
    }

    /* restore mbuf back to the outer IP */
    m->m_data -= iphlen;
    m->m_len += iphlen;

    if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) {
	/*
	 * Since this is a REGISTER, we'll make a copy of the register
	 * headers ip + pim + u_int32 + encap_ip, to be passed up to the
	 * routing daemon.
	 */
	int s;
	struct sockaddr_in dst = {
	    .sin_len = sizeof(dst),
	    .sin_family = AF_INET,
	};
	struct mbuf *mcp;
	struct ip *encap_ip;
	u_int32_t *reghdr;
	struct ifnet *vifp;

	/* Look up the register vif under splsoftnet */
	s = splsoftnet();
	if ((reg_vif_num >= numvifs) || (reg_vif_num == VIFI_INVALID)) {
	    splx(s);
	    if (mrtdebug & DEBUG_PIM)
		log(LOG_DEBUG,
		    "pim_input: register vif not set: %d\n", reg_vif_num);
	    m_freem(m);
	    return;
	}
	/* XXX need refcnt? */
	vifp = viftable[reg_vif_num].v_ifp;
	splx(s);

	/*
	 * Validate length
	 */
	if (datalen < PIM_REG_MINLEN) {
	    pimstat.pims_rcv_tooshort++;
	    pimstat.pims_rcv_badregisters++;
	    log(LOG_ERR,
		"pim_input: register packet size too small %d from %lx\n",
		datalen, (u_long)ip->ip_src.s_addr);
	    m_freem(m);
	    return;
	}

	/* Register header follows the PIM header; inner IP follows that */
	reghdr = (u_int32_t *)(pim + 1);
	encap_ip = (struct ip *)(reghdr + 1);

	if (mrtdebug & DEBUG_PIM) {
	    log(LOG_DEBUG,
		"pim_input[register], encap_ip: %lx -> %lx, encap_ip len %d\n",
		(u_long)ntohl(encap_ip->ip_src.s_addr),
		(u_long)ntohl(encap_ip->ip_dst.s_addr),
		ntohs(encap_ip->ip_len));
	}

	/* verify the version number of the inner packet */
	if (encap_ip->ip_v != IPVERSION) {
	    pimstat.pims_rcv_badregisters++;
	    if (mrtdebug & DEBUG_PIM) {
		log(LOG_DEBUG, "pim_input: invalid IP version (%d) "
		    "of the inner packet\n", encap_ip->ip_v);
	    }
	    m_freem(m);
	    return;
	}

	/* verify the inner packet doesn't have options */
	if (encap_ip->ip_hl != (sizeof(struct ip) >> 2)) {
	    pimstat.pims_rcv_badregisters++;
	    m_freem(m);
	    return;
	}

	/* verify the inner packet is destined to a mcast group */
	if (!IN_MULTICAST(encap_ip->ip_dst.s_addr)) {
	    pimstat.pims_rcv_badregisters++;
	    if (mrtdebug & DEBUG_PIM)
		log(LOG_DEBUG,
		    "pim_input: inner packet of register is not "
		    "multicast %lx\n",
		    (u_long)ntohl(encap_ip->ip_dst.s_addr));
	    m_freem(m);
	    return;
	}

	/* If a NULL_REGISTER, pass it to the daemon */
	if ((ntohl(*reghdr) & PIM_NULL_REGISTER))
	    goto pim_input_to_daemon;

	/*
	 * Copy the TOS from the outer IP header to the inner IP header.
	 */
	if (encap_ip->ip_tos != ip_tos) {
	    /* Outer TOS -> inner TOS */
	    encap_ip->ip_tos = ip_tos;
	    /* Recompute the inner header checksum. Sigh... */

	    /* adjust mbuf to point to the inner IP header */
	    m->m_data += (iphlen + PIM_MINLEN);
	    m->m_len -= (iphlen + PIM_MINLEN);

	    encap_ip->ip_sum = 0;
	    encap_ip->ip_sum = in_cksum(m, encap_ip->ip_hl << 2);

	    /* restore mbuf to point back to the outer IP header */
	    m->m_data -= (iphlen + PIM_MINLEN);
	    m->m_len += (iphlen + PIM_MINLEN);
	}

	/*
	 * Decapsulate the inner IP packet and loopback to forward it
	 * as a normal multicast packet. Also, make a copy of the
	 *     outer_iphdr + pimhdr + reghdr + encap_iphdr
	 * to pass to the daemon later, so it can take the appropriate
	 * actions (e.g., send back PIM_REGISTER_STOP).
	 * XXX: here m->m_data points to the outer IP header.
	 */
	mcp = m_copym(m, 0, iphlen + PIM_REG_MINLEN, M_DONTWAIT);
	if (mcp == NULL) {
	    log(LOG_ERR,
		"pim_input: pim register: could not copy register head\n");
	    m_freem(m);
	    return;
	}

	/* Keep statistics */
	/* XXX: registers_bytes include only the encap. mcast pkt */
	pimstat.pims_rcv_registers_msgs++;
	pimstat.pims_rcv_registers_bytes += ntohs(encap_ip->ip_len);

	/*
	 * forward the inner ip packet; point m_data at the inner ip.
	 */
	m_adj(m, iphlen + PIM_MINLEN);

	if (mrtdebug & DEBUG_PIM) {
	    log(LOG_DEBUG,
		"pim_input: forwarding decapsulated register: "
		"src %lx, dst %lx, vif %d\n",
		(u_long)ntohl(encap_ip->ip_src.s_addr),
		(u_long)ntohl(encap_ip->ip_dst.s_addr),
		reg_vif_num);
	}
	/* NB: vifp was collected above; can it change on us? */
	looutput(vifp, m, (struct sockaddr *)&dst, NULL);

	/* prepare the register head to send to the mrouting daemon */
	m = mcp;
    }

pim_input_to_daemon:
    /*
     * Pass the PIM message up to the daemon; if it is a Register message,
     * pass the 'head' only up to the daemon. This includes the
     * outer IP header, PIM header, PIM-Register header and the
     * inner IP header.
     * XXX: the outer IP header pkt size of a Register is not adjust to
     * reflect the fact that the inner multicast data is truncated.
     */
    /*
     * Currently, pim_input() is always called holding softnet_lock
     * by ipintr()(!NET_MPSAFE) or PR_INPUT_WRAP()(NET_MPSAFE).
     */
    KASSERT(mutex_owned(softnet_lock));
    rip_input(m, iphlen, proto);

    return;
}
3177 #endif /* PIM */
3178