/*	$NetBSD: ip_mroute.c,v 1.68 2003/05/14 16:52:53 itojun Exp $	*/

/*
 * Copyright (c) 1989 Stephen Deering
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_mroute.c	8.2 (Berkeley) 11/15/93
 */

/*
 * IP multicast forwarding procedures
 *
 * Written by David Waitzman, BBN Labs, August 1988.
 * Modified by Steve Deering, Stanford, February 1989.
 * Modified by Mark J. Steiglitz, Stanford, May, 1991
 * Modified by Van Jacobson, LBL, January 1993
 * Modified by Ajit Thyagarajan, PARC, August 1993
 * Modified by Bill Fenner, PARC, April 1994
 * Modified by Charles M. Hannum, NetBSD, May 1995.
 *
 * MROUTING Revision: 1.2
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_mroute.c,v 1.68 2003/05/14 16:52:53 itojun Exp $");

#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>
#include <net/if.h>
#include <net/route.h>
#include <net/raw_cb.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/udp.h>
#include <netinet/igmp.h>
#include <netinet/igmp_var.h>
#include <netinet/ip_mroute.h>
#include <netinet/ip_encap.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#endif

#include <machine/stdarg.h>

#define IP_MULTICASTOPTS 0
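/*
 * Make sure the first "len" bytes of a packet are contiguous in the
 * first mbuf: if the mbuf is a cluster or is shorter than "len",
 * re-pull it with m_pullup(), which may set the pointer to NULL on
 * failure.
 */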
#define M_PULLUP(m, len)						\
	do {								\
		if ((m) && ((m)->m_flags & M_EXT || (m)->m_len < (len))) \
			(m) = m_pullup((m), (len));			\
	} while (/*CONSTCOND*/ 0)

/*
 * Globals. All but ip_mrouter and ip_mrtproto could be static,
 * except for netstat or debugging purposes.
 */
struct socket  *ip_mrouter  = 0;
int		ip_mrtproto = IGMP_DVMRP;	/* for netstat only */

#define NO_RTE_FOUND	0x1
#define RTE_FOUND	0x2

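/*
 * Hash an (origin, group) address pair into an mfchashtbl bucket index
 * by folding both addresses down to the table mask.
 */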
#define MFCHASH(a, g)							\
	((((a).s_addr >> 20) ^ ((a).s_addr >> 10) ^ (a).s_addr ^	\
	  ((g).s_addr >> 20) ^ ((g).s_addr >> 10) ^ (g).s_addr) & mfchash)
LIST_HEAD(mfchashhdr, mfc) *mfchashtbl;
u_long	mfchash;

u_char		nexpire[MFCTBLSIZ];
struct vif	viftable[MAXVIFS];
struct mrtstat	mrtstat;
u_int		mrtdebug = 0;	  /* debug level */
#define		DEBUG_MFC	0x02
#define		DEBUG_FORWARD	0x04
#define		DEBUG_EXPIRE	0x08
#define		DEBUG_XMIT	0x10
u_int		tbfdebug = 0;	  /* tbf debug level */
#ifdef RSVP_ISI
u_int		rsvpdebug = 0;	  /* rsvp debug level */
extern struct socket *ip_rsvpd;
extern int rsvp_on;
#endif /* RSVP_ISI */

/* vif attachment using sys/netinet/ip_encap.c */
extern struct domain inetdomain;
static void vif_input __P((struct mbuf *, ...));
static int vif_encapcheck __P((const struct mbuf *, int, int, void *));
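/*
 * Protocol switch handed to encap_attach_func() for tunnel vifs, so
 * that decapsulated datagrams are delivered to vif_input() below.
 */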
static struct protosw vif_protosw =
{ SOCK_RAW,	&inetdomain,	IPPROTO_IPV4,	PR_ATOMIC|PR_ADDR,
  vif_input,	rip_output,	0,		rip_ctloutput,
  rip_usrreq,
  0,		0,		0,		0,
};

#define		EXPIRE_TIMEOUT	(hz / 4)	/* 4x / second */
#define		UPCALL_EXPIRE	6		/* number of timeouts */
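/*
 * Entries stalled on an upcall are scanned every EXPIRE_TIMEOUT ticks
 * and discarded after UPCALL_EXPIRE scans (about 1.5 seconds) if the
 * routing daemon has not resolved them.
 */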

/*
 * Define the token bucket filter structures
 */

#define TBF_REPROCESS	(hz / 100)	/* 100x / second */

static int get_sg_cnt __P((struct sioc_sg_req *));
static int get_vif_cnt __P((struct sioc_vif_req *));
static int ip_mrouter_init __P((struct socket *, struct mbuf *));
static int get_version __P((struct mbuf *));
static int set_assert __P((struct mbuf *));
static int get_assert __P((struct mbuf *));
static int add_vif __P((struct mbuf *));
static int del_vif __P((struct mbuf *));
static void update_mfc __P((struct mfcctl *, struct mfc *));
static void expire_mfc __P((struct mfc *));
static int add_mfc __P((struct mbuf *));
#ifdef UPCALL_TIMING
static void collate __P((struct timeval *));
#endif
static int del_mfc __P((struct mbuf *));
static int socket_send __P((struct socket *, struct mbuf *,
			    struct sockaddr_in *));
static void expire_upcalls __P((void *));
#ifdef RSVP_ISI
static int ip_mdq __P((struct mbuf *, struct ifnet *, struct mfc *, vifi_t));
#else
static int ip_mdq __P((struct mbuf *, struct ifnet *, struct mfc *));
#endif
static void phyint_send __P((struct ip *, struct vif *, struct mbuf *));
static void encap_send __P((struct ip *, struct vif *, struct mbuf *));
static void tbf_control __P((struct vif *, struct mbuf *, struct ip *,
			     u_int32_t));
static void tbf_queue __P((struct vif *, struct mbuf *));
static void tbf_process_q __P((struct vif *));
static void tbf_reprocess_q __P((void *));
static int tbf_dq_sel __P((struct vif *, struct ip *));
static void tbf_send_packet __P((struct vif *, struct mbuf *));
static void tbf_update_tokens __P((struct vif *));
static int priority __P((struct vif *, struct ip *));

/*
 * 'Interfaces' associated with decapsulator (so we can tell
 * packets that went through it from ones that get reflected
 * by a broken gateway).  These interfaces are never linked into
 * the system ifnet list & no routes point to them.  I.e., packets
 * can't be sent this way.  They only exist as a placeholder for
 * multicast source verification.
 */
#if 0
struct ifnet multicast_decap_if[MAXVIFS];
#endif

#define ENCAP_TTL	64
#define ENCAP_PROTO	IPPROTO_IPIP	/* 4 */

/* prototype IP hdr for encapsulated packets */
struct ip multicast_encap_iphdr = {
#if BYTE_ORDER == LITTLE_ENDIAN
	sizeof(struct ip) >> 2, IPVERSION,
#else
	IPVERSION, sizeof(struct ip) >> 2,
#endif
	0,				/* tos */
	sizeof(struct ip),		/* total length */
	0,				/* id */
	0,				/* frag offset */
	ENCAP_TTL, ENCAP_PROTO,
	0,				/* checksum */
};

/*
 * Private variables.
 */
static vifi_t numvifs = 0;

static struct callout expire_upcalls_ch;

/*
 * one-back cache used by vif_encapcheck to locate a tunnel's vif
 * given a datagram's src ip address.
 */
static struct in_addr last_encap_src;
static struct vif *last_encap_vif;

/*
 * whether or not special PIM assert processing is enabled.
 */
static int pim_assert;
/*
 * Rate limit for assert notification messages, in usec
 */
#define ASSERT_MSG_TIME		3000000

/*
 * Find a route for a given origin IP address and Multicast group address
 * Type of service parameter to be added in the future!!!
 */

#define MFCFIND(o, g, rt) {						\
	struct mfc *_rt;						\
	(rt) = 0;							\
	++mrtstat.mrts_mfc_lookups;					\
	LIST_FOREACH(_rt, &mfchashtbl[MFCHASH(o, g)], mfc_hash) {	\
		if (in_hosteq(_rt->mfc_origin, (o)) &&			\
		    in_hosteq(_rt->mfc_mcastgrp, (g)) &&		\
		    _rt->mfc_stall == 0) {				\
			(rt) = _rt;					\
			break;						\
		}							\
	}								\
	if ((rt) == 0)							\
		++mrtstat.mrts_mfc_misses;				\
}
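/*
 * Typical use, as in get_sg_cnt() and ip_mforward() below:
 *
 *	struct mfc *rt;
 *	MFCFIND(ip->ip_src, ip->ip_dst, rt);
 *	if (rt == 0)
 *		... no cache entry for this (source, group) ...
 */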

/*
 * Macros to compute elapsed time efficiently
 * Borrowed from Van Jacobson's scheduling code
 */
#define TV_DELTA(a, b, delta) {						\
	int xxs;							\
	delta = (a).tv_usec - (b).tv_usec;				\
	xxs = (a).tv_sec - (b).tv_sec;					\
	switch (xxs) {							\
	case 2:								\
		delta += 1000000;					\
		/* fall through */					\
	case 1:								\
		delta += 1000000;					\
		/* fall through */					\
	case 0:								\
		break;							\
	default:							\
		delta += (1000000 * xxs);				\
		break;							\
	}								\
}
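/*
 * The result is in microseconds; the switch avoids a multiply when the
 * two timestamps differ by no more than two seconds in tv_sec, the
 * common case for upcall timing and token bucket updates.
 */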

#ifdef UPCALL_TIMING
u_int32_t upcall_data[51];
#endif /* UPCALL_TIMING */

/*
 * Handle MRT setsockopt commands to modify the multicast routing tables.
 */
int
ip_mrouter_set(so, optname, m)
	struct socket *so;
	int optname;
	struct mbuf **m;
{
	int error;

	if (optname != MRT_INIT && so != ip_mrouter)
		error = ENOPROTOOPT;
	else
		switch (optname) {
		case MRT_INIT:
			error = ip_mrouter_init(so, *m);
			break;
		case MRT_DONE:
			error = ip_mrouter_done();
			break;
		case MRT_ADD_VIF:
			error = add_vif(*m);
			break;
		case MRT_DEL_VIF:
			error = del_vif(*m);
			break;
		case MRT_ADD_MFC:
			error = add_mfc(*m);
			break;
		case MRT_DEL_MFC:
			error = del_mfc(*m);
			break;
		case MRT_ASSERT:
			error = set_assert(*m);
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}

	if (*m)
		m_free(*m);
	return (error);
}
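/*
 * Illustrative only (userland, not part of the kernel): a routing
 * daemon would enable multicast forwarding roughly as follows, using a
 * raw IGMP socket and the MRT_INIT option handled above, which
 * ip_mrouter_init() requires to carry the value 1.
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	if (setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0)
 *		err(1, "MRT_INIT");
 */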

/*
 * Handle MRT getsockopt commands
 */
int
ip_mrouter_get(so, optname, m)
	struct socket *so;
	int optname;
	struct mbuf **m;
{
	int error;

	if (so != ip_mrouter)
		error = ENOPROTOOPT;
	else {
		*m = m_get(M_WAIT, MT_SOOPTS);
		MCLAIM(*m, so->so_mowner);

		switch (optname) {
		case MRT_VERSION:
			error = get_version(*m);
			break;
		case MRT_ASSERT:
			error = get_assert(*m);
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}

		if (error)
			m_free(*m);
	}

	return (error);
}

/*
 * Handle ioctl commands to obtain information from the cache
 */
int
mrt_ioctl(so, cmd, data)
	struct socket *so;
	u_long cmd;
	caddr_t data;
{
	int error;

	if (so != ip_mrouter)
		error = EINVAL;
	else
		switch (cmd) {
		case SIOCGETVIFCNT:
			error = get_vif_cnt((struct sioc_vif_req *)data);
			break;
		case SIOCGETSGCNT:
			error = get_sg_cnt((struct sioc_sg_req *)data);
			break;
		default:
			error = EINVAL;
			break;
		}

	return (error);
}
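/*
 * Illustrative only (userland): the per-(source, group) counters kept
 * below can be read with the SIOCGETSGCNT ioctl on the mrouter socket,
 * e.g.
 *
 *	struct sioc_sg_req req;
 *	req.src = source;
 *	req.grp = group;
 *	if (ioctl(s, SIOCGETSGCNT, &req) == 0)
 *		printf("%lu packets forwarded\n", (u_long)req.pktcnt);
 */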

/*
 * returns the packet, byte, rpf-failure count for the source group provided
 */
static int
get_sg_cnt(req)
	struct sioc_sg_req *req;
{
	struct mfc *rt;
	int s;

	s = splsoftnet();
	MFCFIND(req->src, req->grp, rt);
	splx(s);
	if (rt != 0) {
		req->pktcnt = rt->mfc_pkt_cnt;
		req->bytecnt = rt->mfc_byte_cnt;
		req->wrong_if = rt->mfc_wrong_if;
	} else
		req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff;

	return (0);
}

/*
 * returns the input and output packet and byte counts on the vif provided
 */
static int
get_vif_cnt(req)
	struct sioc_vif_req *req;
{
	vifi_t vifi = req->vifi;

	if (vifi >= numvifs)
		return (EINVAL);

	req->icount = viftable[vifi].v_pkt_in;
	req->ocount = viftable[vifi].v_pkt_out;
	req->ibytes = viftable[vifi].v_bytes_in;
	req->obytes = viftable[vifi].v_bytes_out;

	return (0);
}

/*
 * Enable multicast routing
 */
static int
ip_mrouter_init(so, m)
	struct socket *so;
	struct mbuf *m;
{
	int *v;

	if (mrtdebug)
		log(LOG_DEBUG,
		    "ip_mrouter_init: so_type = %d, pr_protocol = %d\n",
		    so->so_type, so->so_proto->pr_protocol);

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_IGMP)
		return (EOPNOTSUPP);

	if (m == 0 || m->m_len < sizeof(int))
		return (EINVAL);

	v = mtod(m, int *);
	if (*v != 1)
		return (EINVAL);

	if (ip_mrouter != 0)
		return (EADDRINUSE);

	ip_mrouter = so;

	mfchashtbl =
	    hashinit(MFCTBLSIZ, HASH_LIST, M_MRTABLE, M_WAITOK, &mfchash);
	bzero((caddr_t)nexpire, sizeof(nexpire));

	pim_assert = 0;

	callout_init(&expire_upcalls_ch);
	callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT,
	    expire_upcalls, NULL);

	if (mrtdebug)
		log(LOG_DEBUG, "ip_mrouter_init\n");

	return (0);
}

/*
 * Disable multicast routing
 */
int
ip_mrouter_done()
{
	vifi_t vifi;
	struct vif *vifp;
	int i;
	int s;

	s = splsoftnet();

	/* Clear out all the vifs currently in use. */
	for (vifi = 0; vifi < numvifs; vifi++) {
		vifp = &viftable[vifi];
		if (!in_nullhost(vifp->v_lcl_addr))
			reset_vif(vifp);
	}

	numvifs = 0;
	pim_assert = 0;

	callout_stop(&expire_upcalls_ch);

	/*
	 * Free all multicast forwarding cache entries.
	 */
	for (i = 0; i < MFCTBLSIZ; i++) {
		struct mfc *rt, *nrt;

		for (rt = LIST_FIRST(&mfchashtbl[i]); rt; rt = nrt) {
			nrt = LIST_NEXT(rt, mfc_hash);

			expire_mfc(rt);
		}
	}

	free(mfchashtbl, M_MRTABLE);
	mfchashtbl = 0;

	/* Reset de-encapsulation cache. */

	ip_mrouter = 0;

	splx(s);

	if (mrtdebug)
		log(LOG_DEBUG, "ip_mrouter_done\n");

	return (0);
}

static int
get_version(m)
	struct mbuf *m;
{
	int *v = mtod(m, int *);

	*v = 0x0305;	/* XXX !!!! */
	m->m_len = sizeof(int);
	return (0);
}

/*
 * Set PIM assert processing global
 */
static int
set_assert(m)
	struct mbuf *m;
{
	int *i;

	if (m == 0 || m->m_len < sizeof(int))
		return (EINVAL);

	i = mtod(m, int *);
	pim_assert = !!*i;
	return (0);
}

/*
 * Get PIM assert processing global
 */
static int
get_assert(m)
	struct mbuf *m;
{
	int *i = mtod(m, int *);

	*i = pim_assert;
	m->m_len = sizeof(int);
	return (0);
}

static struct sockaddr_in sin = { sizeof(sin), AF_INET };

/*
 * Add a vif to the vif table
 */
static int
add_vif(m)
	struct mbuf *m;
{
	struct vifctl *vifcp;
	struct vif *vifp;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	struct ifreq ifr;
	int error, s;

	if (m == 0 || m->m_len < sizeof(struct vifctl))
		return (EINVAL);

	vifcp = mtod(m, struct vifctl *);
	if (vifcp->vifc_vifi >= MAXVIFS)
		return (EINVAL);

	vifp = &viftable[vifcp->vifc_vifi];
	if (!in_nullhost(vifp->v_lcl_addr))
		return (EADDRINUSE);

	/* Find the interface with an address in AF_INET family. */
	sin.sin_addr = vifcp->vifc_lcl_addr;
	ifa = ifa_ifwithaddr(sintosa(&sin));
	if (ifa == 0)
		return (EADDRNOTAVAIL);

	if (vifcp->vifc_flags & VIFF_TUNNEL) {
		if (vifcp->vifc_flags & VIFF_SRCRT) {
			log(LOG_ERR, "Source routed tunnels not supported\n");
			return (EOPNOTSUPP);
		}

		/* attach this vif to decapsulator dispatch table */
		vifp->v_encap_cookie = encap_attach_func(AF_INET, IPPROTO_IPV4,
		    vif_encapcheck, &vif_protosw, vifp);
		if (!vifp->v_encap_cookie)
			return (EINVAL);

		/* Create a fake encapsulation interface. */
		ifp = (struct ifnet *)malloc(sizeof(*ifp), M_MRTABLE, M_WAITOK);
		bzero(ifp, sizeof(*ifp));
		sprintf(ifp->if_xname, "mdecap%d", vifcp->vifc_vifi);

		/* Prepare cached route entry. */
		bzero(&vifp->v_route, sizeof(vifp->v_route));
	} else {
		/* Use the physical interface associated with the address. */
		ifp = ifa->ifa_ifp;

		/* Make sure the interface supports multicast. */
		if ((ifp->if_flags & IFF_MULTICAST) == 0)
			return (EOPNOTSUPP);

		/* Enable promiscuous reception of all IP multicasts. */
		satosin(&ifr.ifr_addr)->sin_len = sizeof(struct sockaddr_in);
		satosin(&ifr.ifr_addr)->sin_family = AF_INET;
		satosin(&ifr.ifr_addr)->sin_addr = zeroin_addr;
		error = (*ifp->if_ioctl)(ifp, SIOCADDMULTI, (caddr_t)&ifr);
		if (error)
			return (error);
	}

	s = splsoftnet();

	/* Define parameters for the tbf structure. */
	vifp->tbf_q = 0;
	vifp->tbf_t = &vifp->tbf_q;
	microtime(&vifp->tbf_last_pkt_t);
	vifp->tbf_n_tok = 0;
	vifp->tbf_q_len = 0;
	vifp->tbf_max_q_len = MAXQSIZE;

	vifp->v_flags = vifcp->vifc_flags;
	vifp->v_threshold = vifcp->vifc_threshold;
	/* scaling up here allows division by 1024 in critical code */
	vifp->v_rate_limit = vifcp->vifc_rate_limit * 1024 / 1000;
	vifp->v_lcl_addr = vifcp->vifc_lcl_addr;
	vifp->v_rmt_addr = vifcp->vifc_rmt_addr;
	vifp->v_ifp = ifp;
	/* Initialize per vif pkt counters. */
	vifp->v_pkt_in = 0;
	vifp->v_pkt_out = 0;
	vifp->v_bytes_in = 0;
	vifp->v_bytes_out = 0;

	callout_init(&vifp->v_repq_ch);

#ifdef RSVP_ISI
	vifp->v_rsvp_on = 0;
	vifp->v_rsvpd = 0;
#endif /* RSVP_ISI */

	splx(s);

	/* Adjust numvifs up if the vifi is higher than numvifs. */
	if (numvifs <= vifcp->vifc_vifi)
		numvifs = vifcp->vifc_vifi + 1;

	if (mrtdebug)
		log(LOG_DEBUG, "add_vif #%d, lcladdr %x, %s %x, thresh %x, rate %d\n",
		    vifcp->vifc_vifi,
		    ntohl(vifcp->vifc_lcl_addr.s_addr),
		    (vifcp->vifc_flags & VIFF_TUNNEL) ? "rmtaddr" : "mask",
		    ntohl(vifcp->vifc_rmt_addr.s_addr),
		    vifcp->vifc_threshold,
		    vifcp->vifc_rate_limit);

	return (0);
}

void
reset_vif(vifp)
	struct vif *vifp;
{
	struct mbuf *m, *n;
	struct ifnet *ifp;
	struct ifreq ifr;

	callout_stop(&vifp->v_repq_ch);

	/* detach this vif from decapsulator dispatch table */
	encap_detach(vifp->v_encap_cookie);
	vifp->v_encap_cookie = NULL;

	for (m = vifp->tbf_q; m != 0; m = n) {
		n = m->m_nextpkt;
		m_freem(m);
	}

	if (vifp->v_flags & VIFF_TUNNEL) {
		free(vifp->v_ifp, M_MRTABLE);
		if (vifp == last_encap_vif) {
			last_encap_vif = 0;
			last_encap_src = zeroin_addr;
		}
	} else {
		satosin(&ifr.ifr_addr)->sin_len = sizeof(struct sockaddr_in);
		satosin(&ifr.ifr_addr)->sin_family = AF_INET;
		satosin(&ifr.ifr_addr)->sin_addr = zeroin_addr;
		ifp = vifp->v_ifp;
		(*ifp->if_ioctl)(ifp, SIOCDELMULTI, (caddr_t)&ifr);
	}
	bzero((caddr_t)vifp, sizeof(*vifp));
}

/*
 * Delete a vif from the vif table
 */
static int
del_vif(m)
	struct mbuf *m;
{
	vifi_t *vifip;
	struct vif *vifp;
	vifi_t vifi;
	int s;

	if (m == 0 || m->m_len < sizeof(vifi_t))
		return (EINVAL);

	vifip = mtod(m, vifi_t *);
	if (*vifip >= numvifs)
		return (EINVAL);

	vifp = &viftable[*vifip];
	if (in_nullhost(vifp->v_lcl_addr))
		return (EADDRNOTAVAIL);

	s = splsoftnet();

	reset_vif(vifp);

	/* Adjust numvifs down */
	for (vifi = numvifs; vifi > 0; vifi--)
		if (!in_nullhost(viftable[vifi-1].v_lcl_addr))
			break;
	numvifs = vifi;

	splx(s);

	if (mrtdebug)
		log(LOG_DEBUG, "del_vif %d, numvifs %d\n", *vifip, numvifs);

	return (0);
}
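/*
 * Copy the parent vif and per-vif ttl thresholds from a user-supplied
 * mfcctl into an existing cache entry and clear its upcall state.
 */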
static void
update_mfc(mfccp, rt)
	struct mfcctl *mfccp;
	struct mfc *rt;
{
	vifi_t vifi;

	rt->mfc_parent = mfccp->mfcc_parent;
	for (vifi = 0; vifi < numvifs; vifi++)
		rt->mfc_ttls[vifi] = mfccp->mfcc_ttls[vifi];
	rt->mfc_expire = 0;
	rt->mfc_stall = 0;
}

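/*
 * Remove a cache entry from the hash table, freeing any packets still
 * queued on it awaiting a reply from the routing daemon.
 */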
static void
expire_mfc(rt)
	struct mfc *rt;
{
	struct rtdetq *rte, *nrte;

	for (rte = rt->mfc_stall; rte != 0; rte = nrte) {
		nrte = rte->next;
		m_freem(rte->m);
		free(rte, M_MRTABLE);
	}

	LIST_REMOVE(rt, mfc_hash);
	free(rt, M_MRTABLE);
}

/*
 * Add an mfc entry
 */
static int
add_mfc(m)
	struct mbuf *m;
{
	struct mfcctl *mfccp;
	struct mfc *rt;
	u_int32_t hash = 0;
	struct rtdetq *rte, *nrte;
	u_short nstl;
	int s;

	if (m == 0 || m->m_len < sizeof(struct mfcctl))
		return (EINVAL);

	mfccp = mtod(m, struct mfcctl *);

	s = splsoftnet();
	MFCFIND(mfccp->mfcc_origin, mfccp->mfcc_mcastgrp, rt);

	/* If an entry already exists, just update the fields */
	if (rt) {
		if (mrtdebug & DEBUG_MFC)
			log(LOG_DEBUG, "add_mfc update o %x g %x p %x\n",
			    ntohl(mfccp->mfcc_origin.s_addr),
			    ntohl(mfccp->mfcc_mcastgrp.s_addr),
			    mfccp->mfcc_parent);

		if (rt->mfc_expire)
			nexpire[hash]--;

		update_mfc(mfccp, rt);

		splx(s);
		return (0);
	}

	/*
	 * Find the entry for which the upcall was made and update
	 */
	nstl = 0;
	hash = MFCHASH(mfccp->mfcc_origin, mfccp->mfcc_mcastgrp);
	LIST_FOREACH(rt, &mfchashtbl[hash], mfc_hash) {
		if (in_hosteq(rt->mfc_origin, mfccp->mfcc_origin) &&
		    in_hosteq(rt->mfc_mcastgrp, mfccp->mfcc_mcastgrp) &&
		    rt->mfc_stall != 0) {
			if (nstl++)
				log(LOG_ERR, "add_mfc %s o %x g %x p %x dbx %p\n",
				    "multiple kernel entries",
				    ntohl(mfccp->mfcc_origin.s_addr),
				    ntohl(mfccp->mfcc_mcastgrp.s_addr),
				    mfccp->mfcc_parent, rt->mfc_stall);

			if (mrtdebug & DEBUG_MFC)
				log(LOG_DEBUG, "add_mfc o %x g %x p %x dbg %p\n",
				    ntohl(mfccp->mfcc_origin.s_addr),
				    ntohl(mfccp->mfcc_mcastgrp.s_addr),
				    mfccp->mfcc_parent, rt->mfc_stall);

			if (rt->mfc_expire)
				nexpire[hash]--;

			rte = rt->mfc_stall;
			update_mfc(mfccp, rt);

			/* free packets queued at the end of this entry */
			for (; rte != 0; rte = nrte) {
				nrte = rte->next;
#ifdef RSVP_ISI
				ip_mdq(rte->m, rte->ifp, rt, -1);
#else
				ip_mdq(rte->m, rte->ifp, rt);
#endif /* RSVP_ISI */
				m_freem(rte->m);
#ifdef UPCALL_TIMING
				collate(&rte->t);
#endif /* UPCALL_TIMING */
				free(rte, M_MRTABLE);
			}
		}
	}

	if (nstl == 0) {
		/*
		 * No mfc; make a new one
		 */
		if (mrtdebug & DEBUG_MFC)
			log(LOG_DEBUG, "add_mfc no upcall o %x g %x p %x\n",
			    ntohl(mfccp->mfcc_origin.s_addr),
			    ntohl(mfccp->mfcc_mcastgrp.s_addr),
			    mfccp->mfcc_parent);

		rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
		if (rt == 0) {
			splx(s);
			return (ENOBUFS);
		}

		rt->mfc_origin = mfccp->mfcc_origin;
		rt->mfc_mcastgrp = mfccp->mfcc_mcastgrp;
		/* initialize pkt counters per src-grp */
		rt->mfc_pkt_cnt = 0;
		rt->mfc_byte_cnt = 0;
		rt->mfc_wrong_if = 0;
		timerclear(&rt->mfc_last_assert);
		update_mfc(mfccp, rt);

		/* insert new entry at head of hash chain */
		LIST_INSERT_HEAD(&mfchashtbl[hash], rt, mfc_hash);
	}

	splx(s);
	return (0);
}

#ifdef UPCALL_TIMING
/*
 * collect delay statistics on the upcalls
 */
static void collate(t)
	struct timeval *t;
{
	u_int32_t d;
	struct timeval tp;
	u_int32_t delta;

	microtime(&tp);

	if (timercmp(t, &tp, <)) {
		TV_DELTA(tp, *t, delta);

		d = delta >> 10;
		if (d > 50)
			d = 50;

		++upcall_data[d];
	}
}
#endif /* UPCALL_TIMING */

/*
 * Delete an mfc entry
 */
static int
del_mfc(m)
	struct mbuf *m;
{
	struct mfcctl *mfccp;
	struct mfc *rt;
	int s;

	if (m == 0 || m->m_len < sizeof(struct mfcctl))
		return (EINVAL);

	mfccp = mtod(m, struct mfcctl *);

	if (mrtdebug & DEBUG_MFC)
		log(LOG_DEBUG, "del_mfc origin %x mcastgrp %x\n",
		    ntohl(mfccp->mfcc_origin.s_addr),
		    ntohl(mfccp->mfcc_mcastgrp.s_addr));

	s = splsoftnet();

	MFCFIND(mfccp->mfcc_origin, mfccp->mfcc_mcastgrp, rt);
	if (rt == 0) {
		splx(s);
		return (EADDRNOTAVAIL);
	}

	LIST_REMOVE(rt, mfc_hash);
	free(rt, M_MRTABLE);

	splx(s);
	return (0);
}

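/*
 * Deliver an upcall mbuf to the given socket's receive buffer.
 * Returns 0 on success; on failure the mbuf is freed and -1 is
 * returned so the caller can count the drop.
 */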
static int
socket_send(s, mm, src)
	struct socket *s;
	struct mbuf *mm;
	struct sockaddr_in *src;
{
	if (s) {
		if (sbappendaddr(&s->so_rcv, sintosa(src), mm,
		    (struct mbuf *)0) != 0) {
			sorwakeup(s);
			return (0);
		}
	}
	m_freem(mm);
	return (-1);
}

/*
 * IP multicast forwarding function. This function assumes that the packet
 * pointed to by "ip" has arrived on (or is about to be sent to) the interface
 * pointed to by "ifp", and the packet is to be relayed to other networks
 * that have members of the packet's destination IP multicast group.
 *
 * The packet is returned unscathed to the caller, unless it is
 * erroneous, in which case a non-zero return value tells the caller to
 * discard it.
 */

#define IP_HDR_LEN  20	/* # bytes of fixed IP header (excluding options) */
#define TUNNEL_LEN  12	/* # bytes of IP option for tunnel encapsulation  */

int
#ifdef RSVP_ISI
ip_mforward(m, ifp, imo)
#else
ip_mforward(m, ifp)
#endif /* RSVP_ISI */
	struct mbuf *m;
	struct ifnet *ifp;
#ifdef RSVP_ISI
	struct ip_moptions *imo;
#endif /* RSVP_ISI */
{
	struct ip *ip = mtod(m, struct ip *);
	struct mfc *rt;
	static int srctun = 0;
	struct mbuf *mm;
	int s;
#ifdef RSVP_ISI
	struct vif *vifp;
	vifi_t vifi;
#endif /* RSVP_ISI */

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	if (mrtdebug & DEBUG_FORWARD)
		log(LOG_DEBUG, "ip_mforward: src %x, dst %x, ifp %p\n",
		    ntohl(ip->ip_src.s_addr), ntohl(ip->ip_dst.s_addr), ifp);

	if (ip->ip_hl < (IP_HDR_LEN + TUNNEL_LEN) >> 2 ||
	    ((u_char *)(ip + 1))[1] != IPOPT_LSRR) {
		/*
		 * Packet arrived via a physical interface or
		 * an encapsulated tunnel.
		 */
	} else {
		/*
		 * Packet arrived through a source-route tunnel.
		 * Source-route tunnels are no longer supported.
		 */
		if ((srctun++ % 1000) == 0)
			log(LOG_ERR,
			    "ip_mforward: received source-routed packet from %x\n",
			    ntohl(ip->ip_src.s_addr));

		return (1);
	}

#ifdef RSVP_ISI
	if (imo && ((vifi = imo->imo_multicast_vif) < numvifs)) {
		if (ip->ip_ttl < 255)
			ip->ip_ttl++;	/* compensate for -1 in *_send routines */
		if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) {
			vifp = viftable + vifi;
			printf("Sending IPPROTO_RSVP from %x to %x on vif %d (%s%s)\n",
			    ntohl(ip->ip_src), ntohl(ip->ip_dst), vifi,
			    (vifp->v_flags & VIFF_TUNNEL) ? "tunnel on " : "",
			    vifp->v_ifp->if_xname);
		}
		return (ip_mdq(m, ifp, (struct mfc *)0, vifi));
	}
	if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) {
		printf("Warning: IPPROTO_RSVP from %x to %x without vif option\n",
		    ntohl(ip->ip_src), ntohl(ip->ip_dst));
	}
#endif /* RSVP_ISI */

	/*
	 * Don't forward a packet with time-to-live of zero or one,
	 * or a packet destined to a local-only group.
	 */
	if (ip->ip_ttl <= 1 || IN_LOCAL_GROUP(ip->ip_dst.s_addr))
		return (0);

	/*
	 * Determine forwarding vifs from the forwarding cache table
	 */
	s = splsoftnet();
	MFCFIND(ip->ip_src, ip->ip_dst, rt);

	/* Entry exists, so forward if necessary */
	if (rt != 0) {
		splx(s);
#ifdef RSVP_ISI
		return (ip_mdq(m, ifp, rt, -1));
#else
		return (ip_mdq(m, ifp, rt));
#endif /* RSVP_ISI */
	} else {
		/*
		 * If we don't have a route for packet's origin,
		 * Make a copy of the packet &
		 * send message to routing daemon
		 */

		struct mbuf *mb0;
		struct rtdetq *rte;
		u_int32_t hash;
		int hlen = ip->ip_hl << 2;
#ifdef UPCALL_TIMING
		struct timeval tp;

		microtime(&tp);
#endif /* UPCALL_TIMING */

		mrtstat.mrts_no_route++;
		if (mrtdebug & (DEBUG_FORWARD | DEBUG_MFC))
			log(LOG_DEBUG, "ip_mforward: no rte s %x g %x\n",
			    ntohl(ip->ip_src.s_addr),
			    ntohl(ip->ip_dst.s_addr));

		/*
		 * Allocate mbufs early so that we don't do extra work if we are
		 * just going to fail anyway.  Make sure to pullup the header so
		 * that other people can't step on it.
		 */
		rte = (struct rtdetq *)malloc(sizeof(*rte), M_MRTABLE,
		    M_NOWAIT);
		if (rte == 0) {
			splx(s);
			return (ENOBUFS);
		}
		mb0 = m_copy(m, 0, M_COPYALL);
		M_PULLUP(mb0, hlen);
		if (mb0 == 0) {
			free(rte, M_MRTABLE);
			splx(s);
			return (ENOBUFS);
		}

		/* is there an upcall waiting for this packet? */
		hash = MFCHASH(ip->ip_src, ip->ip_dst);
		LIST_FOREACH(rt, &mfchashtbl[hash], mfc_hash) {
			if (in_hosteq(ip->ip_src, rt->mfc_origin) &&
			    in_hosteq(ip->ip_dst, rt->mfc_mcastgrp) &&
			    rt->mfc_stall != 0)
				break;
		}

		if (rt == 0) {
			int i;
			struct igmpmsg *im;

			/* no upcall, so make a new entry */
			rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE,
			    M_NOWAIT);
			if (rt == 0) {
				free(rte, M_MRTABLE);
				m_freem(mb0);
				splx(s);
				return (ENOBUFS);
			}
			/*
			 * Make a copy of the header to send to the user level
			 * process
			 */
			mm = m_copy(m, 0, hlen);
			M_PULLUP(mm, hlen);
			if (mm == 0) {
				free(rte, M_MRTABLE);
				m_freem(mb0);
				free(rt, M_MRTABLE);
				splx(s);
				return (ENOBUFS);
			}

			/*
			 * Send message to routing daemon to install
			 * a route into the kernel table
			 */
			sin.sin_addr = ip->ip_src;

			im = mtod(mm, struct igmpmsg *);
			im->im_msgtype = IGMPMSG_NOCACHE;
			im->im_mbz = 0;

			mrtstat.mrts_upcalls++;

			if (socket_send(ip_mrouter, mm, &sin) < 0) {
				log(LOG_WARNING,
				    "ip_mforward: ip_mrouter socket queue full\n");
				++mrtstat.mrts_upq_sockfull;
				free(rte, M_MRTABLE);
				m_freem(mb0);
				free(rt, M_MRTABLE);
				splx(s);
				return (ENOBUFS);
			}

			/* insert new entry at head of hash chain */
			rt->mfc_origin = ip->ip_src;
			rt->mfc_mcastgrp = ip->ip_dst;
			rt->mfc_pkt_cnt = 0;
			rt->mfc_byte_cnt = 0;
			rt->mfc_wrong_if = 0;
			rt->mfc_expire = UPCALL_EXPIRE;
			nexpire[hash]++;
			for (i = 0; i < numvifs; i++)
				rt->mfc_ttls[i] = 0;
			rt->mfc_parent = -1;

			/* link into table */
			LIST_INSERT_HEAD(&mfchashtbl[hash], rt, mfc_hash);
			/* Add this entry to the end of the queue */
			rt->mfc_stall = rte;
		} else {
			/* determine if q has overflowed */
			struct rtdetq **p;
			int npkts = 0;

			for (p = &rt->mfc_stall; *p != 0; p = &(*p)->next)
				if (++npkts > MAX_UPQ) {
					mrtstat.mrts_upq_ovflw++;
					free(rte, M_MRTABLE);
					m_freem(mb0);
					splx(s);
					return (0);
				}

			/* Add this entry to the end of the queue */
			*p = rte;
		}

		rte->next = 0;
		rte->m = mb0;
		rte->ifp = ifp;
#ifdef UPCALL_TIMING
		rte->t = tp;
#endif /* UPCALL_TIMING */

		splx(s);

		return (0);
	}
}


/*ARGSUSED*/
static void
expire_upcalls(v)
	void *v;
{
	int i;
	int s;

	s = splsoftnet();

	for (i = 0; i < MFCTBLSIZ; i++) {
		struct mfc *rt, *nrt;

		if (nexpire[i] == 0)
			continue;

		for (rt = LIST_FIRST(&mfchashtbl[i]); rt; rt = nrt) {
			nrt = LIST_NEXT(rt, mfc_hash);

			if (rt->mfc_expire == 0 || --rt->mfc_expire > 0)
				continue;
			nexpire[i]--;

			++mrtstat.mrts_cache_cleanups;
			if (mrtdebug & DEBUG_EXPIRE)
				log(LOG_DEBUG,
				    "expire_upcalls: expiring (%x %x)\n",
				    ntohl(rt->mfc_origin.s_addr),
				    ntohl(rt->mfc_mcastgrp.s_addr));

			expire_mfc(rt);
		}
	}

	splx(s);
	callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT,
	    expire_upcalls, NULL);
}

/*
 * Packet forwarding routine once entry in the cache is made
 */
static int
#ifdef RSVP_ISI
ip_mdq(m, ifp, rt, xmt_vif)
#else
ip_mdq(m, ifp, rt)
#endif /* RSVP_ISI */
	struct mbuf *m;
	struct ifnet *ifp;
	struct mfc *rt;
#ifdef RSVP_ISI
	vifi_t xmt_vif;
#endif /* RSVP_ISI */
{
	struct ip *ip = mtod(m, struct ip *);
	vifi_t vifi;
	struct vif *vifp;
	int plen = ntohs(ip->ip_len);

/*
 * Macro to send packet on vif.  Since RSVP packets don't get counted on
 * input, they shouldn't get counted on output, so statistics keeping is
 * separate.
 */
#define MC_SEND(ip, vifp, m) do {					\
	if ((vifp)->v_flags & VIFF_TUNNEL)				\
		encap_send((ip), (vifp), (m));				\
	else								\
		phyint_send((ip), (vifp), (m));				\
} while (/*CONSTCOND*/ 0)

#ifdef RSVP_ISI
	/*
	 * If xmt_vif is not -1, send on only the requested vif.
	 *
	 * (Since vifi_t is u_short, -1 becomes MAXUSHORT, which is > numvifs.)
	 */
	if (xmt_vif < numvifs) {
		MC_SEND(ip, viftable + xmt_vif, m);
		return (1);
	}
#endif /* RSVP_ISI */

	/*
	 * Don't forward if it didn't arrive from the parent vif for its origin.
	 */
	vifi = rt->mfc_parent;
	if ((vifi >= numvifs) || (viftable[vifi].v_ifp != ifp)) {
		/* came in the wrong interface */
		if (mrtdebug & DEBUG_FORWARD)
			log(LOG_DEBUG, "wrong if: ifp %p vifi %d vififp %p\n",
			    ifp, vifi, viftable[vifi].v_ifp);
		++mrtstat.mrts_wrong_if;
		++rt->mfc_wrong_if;
		/*
		 * If we are doing PIM assert processing, and we are forwarding
		 * packets on this interface, and it is a broadcast medium
		 * interface (and not a tunnel), send a message to the routing
		 * daemon.
		 */
		if (pim_assert && rt->mfc_ttls[vifi] &&
		    (ifp->if_flags & IFF_BROADCAST) &&
		    !(viftable[vifi].v_flags & VIFF_TUNNEL)) {
			struct mbuf *mm;
			struct igmpmsg *im;
			int hlen = ip->ip_hl << 2;
			struct timeval now;
			u_int32_t delta;

			microtime(&now);

			TV_DELTA(rt->mfc_last_assert, now, delta);

			if (delta > ASSERT_MSG_TIME) {
				mm = m_copy(m, 0, hlen);
				M_PULLUP(mm, hlen);
				if (mm == 0) {
					return (ENOBUFS);
				}

				rt->mfc_last_assert = now;

				im = mtod(mm, struct igmpmsg *);
				im->im_msgtype = IGMPMSG_WRONGVIF;
				im->im_mbz = 0;
				im->im_vif = vifi;

				sin.sin_addr = im->im_src;

				socket_send(ip_mrouter, mm, &sin);
			}
		}
		return (0);
	}

	/* If I sourced this packet, it counts as output, else it was input. */
	if (in_hosteq(ip->ip_src, viftable[vifi].v_lcl_addr)) {
		viftable[vifi].v_pkt_out++;
		viftable[vifi].v_bytes_out += plen;
	} else {
		viftable[vifi].v_pkt_in++;
		viftable[vifi].v_bytes_in += plen;
	}
	rt->mfc_pkt_cnt++;
	rt->mfc_byte_cnt += plen;

	/*
	 * For each vif, decide if a copy of the packet should be forwarded.
	 * Forward if:
	 *	- the ttl exceeds the vif's threshold
	 *	- there are group members downstream on interface
	 */
	for (vifp = viftable, vifi = 0; vifi < numvifs; vifp++, vifi++)
		if ((rt->mfc_ttls[vifi] > 0) &&
		    (ip->ip_ttl > rt->mfc_ttls[vifi])) {
			vifp->v_pkt_out++;
			vifp->v_bytes_out += plen;
			MC_SEND(ip, vifp, m);
		}

	return (0);
}

#ifdef RSVP_ISI
/*
 * check if a vif number is legal/ok.  This is used by ip_output, to export
 * numvifs there.
 */
int
legal_vif_num(vif)
	int vif;
{
	if (vif >= 0 && vif < numvifs)
		return (1);
	else
		return (0);
}
#endif /* RSVP_ISI */

static void
phyint_send(ip, vifp, m)
	struct ip *ip;
	struct vif *vifp;
	struct mbuf *m;
{
	struct mbuf *mb_copy;
	int hlen = ip->ip_hl << 2;

	/*
	 * Make a new reference to the packet; make sure that
	 * the IP header is actually copied, not just referenced,
	 * so that ip_output() only scribbles on the copy.
	 */
	mb_copy = m_copy(m, 0, M_COPYALL);
	M_PULLUP(mb_copy, hlen);
	if (mb_copy == 0)
		return;

	if (vifp->v_rate_limit <= 0)
		tbf_send_packet(vifp, mb_copy);
	else
		tbf_control(vifp, mb_copy, mtod(mb_copy, struct ip *),
		    ntohs(ip->ip_len));
}

static void
encap_send(ip, vifp, m)
	struct ip *ip;
	struct vif *vifp;
	struct mbuf *m;
{
	struct mbuf *mb_copy;
	struct ip *ip_copy;
	int i, len = ntohs(ip->ip_len) + sizeof(multicast_encap_iphdr);

	/*
	 * copy the old packet & pull up its IP header into the
	 * new mbuf so we can modify it.  Try to fill the new
	 * mbuf since if we don't the ethernet driver will.
	 */
	MGETHDR(mb_copy, M_DONTWAIT, MT_DATA);
	if (mb_copy == 0)
		return;
	mb_copy->m_data += max_linkhdr;
	mb_copy->m_pkthdr.len = len;
	mb_copy->m_len = sizeof(multicast_encap_iphdr);

	if ((mb_copy->m_next = m_copy(m, 0, M_COPYALL)) == 0) {
		m_freem(mb_copy);
		return;
	}
	i = MHLEN - max_linkhdr;
	if (i > len)
		i = len;
	mb_copy = m_pullup(mb_copy, i);
	if (mb_copy == 0)
		return;

	/*
	 * fill in the encapsulating IP header.
	 */
	ip_copy = mtod(mb_copy, struct ip *);
	*ip_copy = multicast_encap_iphdr;
	ip_copy->ip_id = htons(ip_id++);
	ip_copy->ip_len = htons(len);
	ip_copy->ip_src = vifp->v_lcl_addr;
	ip_copy->ip_dst = vifp->v_rmt_addr;

	/*
	 * turn the encapsulated IP header back into a valid one.
	 */
	ip = (struct ip *)((caddr_t)ip_copy + sizeof(multicast_encap_iphdr));
	--ip->ip_ttl;
	ip->ip_sum = 0;
	mb_copy->m_data += sizeof(multicast_encap_iphdr);
	ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
	mb_copy->m_data -= sizeof(multicast_encap_iphdr);

	if (vifp->v_rate_limit <= 0)
		tbf_send_packet(vifp, mb_copy);
	else
		tbf_control(vifp, mb_copy, ip, ntohs(ip_copy->ip_len));
}

/*
 * De-encapsulate a packet and feed it back through ip input.
 */
static void
#if __STDC__
vif_input(struct mbuf *m, ...)
#else
vif_input(m, va_alist)
	struct mbuf *m;
	va_dcl
#endif
{
	int off, proto;
	va_list ap;
	struct vif *vifp;
	int s;
	struct ifqueue *ifq;

	va_start(ap, m);
	off = va_arg(ap, int);
	proto = va_arg(ap, int);
	va_end(ap);

	vifp = (struct vif *)encap_getarg(m);
	if (!vifp || proto != AF_INET) {
		m_freem(m);
		mrtstat.mrts_bad_tunnel++;
		return;
	}

	m_adj(m, off);
	m->m_pkthdr.rcvif = vifp->v_ifp;
	ifq = &ipintrq;
	s = splnet();
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		m_freem(m);
	} else {
		IF_ENQUEUE(ifq, m);
		/*
		 * normally we would need a "schednetisr(NETISR_IP)"
		 * here but we were called by ip_input and it is going
		 * to loop back & try to dequeue the packet we just
		 * queued as soon as we return so we avoid the
		 * unnecessary software interrupt.
		 */
	}
	splx(s);
}

/*
 * Check if the packet should be grabbed by us.
 */
static int
vif_encapcheck(m, off, proto, arg)
	const struct mbuf *m;
	int off;
	int proto;
	void *arg;
{
	struct vif *vifp;
	struct ip ip;

#ifdef DIAGNOSTIC
	if (!arg || proto != IPPROTO_IPV4)
		panic("unexpected arg in vif_encapcheck");
#endif

	/*
	 * do not grab the packet if it's not to a multicast destination or if
	 * we don't have an encapsulating tunnel with the source.
	 * Note:  This code assumes that the remote site IP address
	 * uniquely identifies the tunnel (i.e., that this site has
	 * at most one tunnel with the remote site).
	 */

	/* LINTED const cast */
	m_copydata((struct mbuf *)m, off, sizeof(ip), (caddr_t)&ip);
	if (!IN_MULTICAST(ip.ip_dst.s_addr))
		return 0;

	/* LINTED const cast */
	m_copydata((struct mbuf *)m, 0, sizeof(ip), (caddr_t)&ip);
	if (!in_hosteq(ip.ip_src, last_encap_src)) {
		vifp = (struct vif *)arg;
		if (vifp->v_flags & VIFF_TUNNEL &&
		    in_hosteq(vifp->v_rmt_addr, ip.ip_src))
			;
		else
			return 0;
		last_encap_vif = vifp;
		last_encap_src = ip.ip_src;
	} else
		vifp = last_encap_vif;

	/* 32bit match, since we have checked ip_src only */
	return 32;
}

/*
 * Token bucket filter module
 */
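/*
 * tbf_control() sends a packet immediately if the queue is empty and
 * enough tokens (bytes) are available, and otherwise queues it, up to
 * tbf_max_q_len packets per vif.  Tokens are replenished in
 * tbf_update_tokens() from the elapsed time and v_rate_limit, and
 * queued packets are retried from tbf_reprocess_q().
 */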
static void
tbf_control(vifp, m, ip, len)
	struct vif *vifp;
	struct mbuf *m;
	struct ip *ip;
	u_int32_t len;
{

	if (len > MAX_BKT_SIZE) {
		/* drop if packet is too large */
		mrtstat.mrts_pkt2large++;
		m_freem(m);
		return;
	}

	tbf_update_tokens(vifp);

	/*
	 * If there are enough tokens, and the queue is empty, send this packet
	 * out immediately.  Otherwise, try to insert it on this vif's queue.
	 */
	if (vifp->tbf_q_len == 0) {
		if (len <= vifp->tbf_n_tok) {
			vifp->tbf_n_tok -= len;
			tbf_send_packet(vifp, m);
		} else {
			/* queue packet and timeout till later */
			tbf_queue(vifp, m);
			callout_reset(&vifp->v_repq_ch, TBF_REPROCESS,
			    tbf_reprocess_q, vifp);
		}
	} else {
		if (vifp->tbf_q_len >= vifp->tbf_max_q_len &&
		    !tbf_dq_sel(vifp, ip)) {
			/* queue length too much, and couldn't make room */
			mrtstat.mrts_q_overflow++;
			m_freem(m);
		} else {
			/* queue length low enough, or made room */
			tbf_queue(vifp, m);
			tbf_process_q(vifp);
		}
	}
}

/*
 * adds a packet to the queue at the interface
 */
static void
tbf_queue(vifp, m)
	struct vif *vifp;
	struct mbuf *m;
{
	int s = splsoftnet();

	/* insert at tail */
	*vifp->tbf_t = m;
	vifp->tbf_t = &m->m_nextpkt;
	vifp->tbf_q_len++;

	splx(s);
}


/*
 * processes the queue at the interface
 */
static void
tbf_process_q(vifp)
	struct vif *vifp;
{
	struct mbuf *m;
	int len;
	int s = splsoftnet();

	/*
	 * Loop through the queue at the interface and send as many packets
	 * as possible.
	 */
	for (m = vifp->tbf_q; m != 0; m = vifp->tbf_q) {
		len = ntohs(mtod(m, struct ip *)->ip_len);

		/* determine if the packet can be sent */
		if (len <= vifp->tbf_n_tok) {
			/* if so,
			 * reduce no of tokens, dequeue the packet,
			 * send the packet.
			 */
			if ((vifp->tbf_q = m->m_nextpkt) == 0)
				vifp->tbf_t = &vifp->tbf_q;
			--vifp->tbf_q_len;

			m->m_nextpkt = 0;
			vifp->tbf_n_tok -= len;
			tbf_send_packet(vifp, m);
		} else
			break;
	}
	splx(s);
}

static void
tbf_reprocess_q(arg)
	void *arg;
{
	struct vif *vifp = arg;

	if (ip_mrouter == 0)
		return;

	tbf_update_tokens(vifp);
	tbf_process_q(vifp);

	if (vifp->tbf_q_len != 0)
		callout_reset(&vifp->v_repq_ch, TBF_REPROCESS,
		    tbf_reprocess_q, vifp);
}

/* function that will selectively discard a member of the queue
 * based on the precedence value and the priority
 */
static int
tbf_dq_sel(vifp, ip)
	struct vif *vifp;
	struct ip *ip;
{
	u_int p;
	struct mbuf **mp, *m;
	int s = splsoftnet();

	p = priority(vifp, ip);

	for (mp = &vifp->tbf_q, m = *mp;
	    m != 0;
	    mp = &m->m_nextpkt, m = *mp) {
		if (p > priority(vifp, mtod(m, struct ip *))) {
			if ((*mp = m->m_nextpkt) == 0)
				vifp->tbf_t = mp;
			--vifp->tbf_q_len;

			m_freem(m);
			mrtstat.mrts_drop_sel++;
			splx(s);
			return (1);
		}
	}
	splx(s);
	return (0);
}

static void
tbf_send_packet(vifp, m)
	struct vif *vifp;
	struct mbuf *m;
{
	int error;
	int s = splsoftnet();

	if (vifp->v_flags & VIFF_TUNNEL) {
		/* If tunnel options */
#ifdef IPSEC
		/* Don't look up the socket in the forwarding case */
		(void)ipsec_setsocket(m, NULL);
#endif
		ip_output(m, (struct mbuf *)0, &vifp->v_route,
		    IP_FORWARDING, (struct ip_moptions *)0);
	} else {
		/* if physical interface option, extract the options and then send */
		struct ip_moptions imo;

		imo.imo_multicast_ifp = vifp->v_ifp;
		imo.imo_multicast_ttl = mtod(m, struct ip *)->ip_ttl - 1;
		imo.imo_multicast_loop = 1;
#ifdef RSVP_ISI
		imo.imo_multicast_vif = -1;
#endif

#ifdef IPSEC
		/* Don't look up the socket in the forwarding case */
		(void)ipsec_setsocket(m, NULL);
#endif
		error = ip_output(m, (struct mbuf *)0, (struct route *)0,
		    IP_FORWARDING|IP_MULTICASTOPTS, &imo);

		if (mrtdebug & DEBUG_XMIT)
			log(LOG_DEBUG, "phyint_send on vif %ld err %d\n",
			    (long)(vifp - viftable), error);
	}
	splx(s);
}

/* determine the current time and then
 * the elapsed time (between the last time and time now)
 * in microseconds & update the no. of tokens in the bucket
 */
static void
tbf_update_tokens(vifp)
	struct vif *vifp;
{
	struct timeval tp;
	u_int32_t tm;
	int s = splsoftnet();

	microtime(&tp);

	TV_DELTA(tp, vifp->tbf_last_pkt_t, tm);

	/*
	 * This formula is actually
	 * "time in seconds" * "bytes/second".
	 *
	 * (tm / 1000000) * (v_rate_limit * 1000 * (1000/1024) / 8)
	 *
	 * The (1000/1024) was introduced in add_vif to optimize
	 * this divide into a shift.
	 */
	vifp->tbf_n_tok += tm * vifp->v_rate_limit / 8192;
	vifp->tbf_last_pkt_t = tp;

	if (vifp->tbf_n_tok > MAX_BKT_SIZE)
		vifp->tbf_n_tok = MAX_BKT_SIZE;

	splx(s);
}

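/*
 * Map a packet to a drop priority for tbf_dq_sel(); non-UDP traffic
 * and unclassified UDP ports get the default priority of 50.
 */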
static int
priority(vifp, ip)
	struct vif *vifp;
	struct ip *ip;
{
	int prio;

	/* temporary hack; may add general packet classifier some day */

	/*
	 * The UDP port space is divided up into four priority ranges:
	 * [0, 16384)     : unclassified - lowest priority
	 * [16384, 32768) : audio - highest priority
	 * [32768, 49152) : whiteboard - medium priority
	 * [49152, 65536) : video - low priority
	 */
	if (ip->ip_p == IPPROTO_UDP) {
		struct udphdr *udp = (struct udphdr *)(((char *)ip) + (ip->ip_hl << 2));

		switch (ntohs(udp->uh_dport) & 0xc000) {
		case 0x4000:
			prio = 70;
			break;
		case 0x8000:
			prio = 60;
			break;
		case 0xc000:
			prio = 55;
			break;
		default:
			prio = 50;
			break;
		}

		if (tbfdebug > 1)
			log(LOG_DEBUG, "port %x prio %d\n",
			    ntohs(udp->uh_dport), prio);
	} else
		prio = 50;

	return (prio);
}

/*
 * End of token bucket filter modifications
 */
#ifdef RSVP_ISI
int
ip_rsvp_vif_init(so, m)
	struct socket *so;
	struct mbuf *m;
{
	int i;
	int s;

	if (rsvpdebug)
		printf("ip_rsvp_vif_init: so_type = %d, pr_protocol = %d\n",
		    so->so_type, so->so_proto->pr_protocol);

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return (EOPNOTSUPP);

	/* Check mbuf. */
	if (m == 0 || m->m_len != sizeof(int)) {
		return (EINVAL);
	}
	i = *(mtod(m, int *));

	if (rsvpdebug)
		printf("ip_rsvp_vif_init: vif = %d rsvp_on = %d\n", i, rsvp_on);

	s = splsoftnet();

	/* Check vif. */
	if (!legal_vif_num(i)) {
		splx(s);
		return (EADDRNOTAVAIL);
	}

	/* Check if socket is available. */
	if (viftable[i].v_rsvpd != 0) {
		splx(s);
		return (EADDRINUSE);
	}

	viftable[i].v_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!viftable[i].v_rsvp_on) {
		viftable[i].v_rsvp_on = 1;
		rsvp_on++;
	}

	splx(s);
	return (0);
}

int
ip_rsvp_vif_done(so, m)
	struct socket *so;
	struct mbuf *m;
{
	int i;
	int s;

	if (rsvpdebug)
		printf("ip_rsvp_vif_done: so_type = %d, pr_protocol = %d\n",
		    so->so_type, so->so_proto->pr_protocol);

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return (EOPNOTSUPP);

	/* Check mbuf. */
	if (m == 0 || m->m_len != sizeof(int)) {
		return (EINVAL);
	}
	i = *(mtod(m, int *));

	s = splsoftnet();

	/* Check vif. */
	if (!legal_vif_num(i)) {
		splx(s);
		return (EADDRNOTAVAIL);
	}

	if (rsvpdebug)
		printf("ip_rsvp_vif_done: v_rsvpd = %x so = %x\n",
		    viftable[i].v_rsvpd, so);

	viftable[i].v_rsvpd = 0;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (viftable[i].v_rsvp_on) {
		viftable[i].v_rsvp_on = 0;
		rsvp_on--;
	}

	splx(s);
	return (0);
}

void
ip_rsvp_force_done(so)
	struct socket *so;
{
	int vifi;
	int s;

	/* Don't bother if it is not the right type of socket. */
	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return;

	s = splsoftnet();

	/*
	 * The socket may be attached to more than one vif...this
	 * is perfectly legal.
	 */
	for (vifi = 0; vifi < numvifs; vifi++) {
		if (viftable[vifi].v_rsvpd == so) {
			viftable[vifi].v_rsvpd = 0;
			/*
			 * This may seem silly, but we need to be sure we don't
			 * over-decrement the RSVP counter, in case something
			 * slips up.
			 */
			if (viftable[vifi].v_rsvp_on) {
				viftable[vifi].v_rsvp_on = 0;
				rsvp_on--;
			}
		}
	}

	splx(s);
	return;
}

void
rsvp_input(m, ifp)
	struct mbuf *m;
	struct ifnet *ifp;
{
	int vifi;
	struct ip *ip = mtod(m, struct ip *);
	static struct sockaddr_in rsvp_src = { sizeof(sin), AF_INET };
	int s;

	if (rsvpdebug)
		printf("rsvp_input: rsvp_on %d\n", rsvp_on);

	/*
	 * Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */
	if (!rsvp_on) {
		m_freem(m);
		return;
	}

	/*
	 * If the old-style non-vif-associated socket is set, then use
	 * it and ignore the new ones.
	 */
	if (ip_rsvpd != 0) {
		if (rsvpdebug)
			printf("rsvp_input: "
			    "Sending packet up old-style socket\n");
		rip_input(m);	/*XXX*/
		return;
	}

	s = splsoftnet();

	if (rsvpdebug)
		printf("rsvp_input: check vifs\n");

	/* Find which vif the packet arrived on. */
	for (vifi = 0; vifi < numvifs; vifi++) {
		if (viftable[vifi].v_ifp == ifp)
			break;
	}

	if (vifi == numvifs) {
		/* Can't find vif packet arrived on.  Drop packet. */
		if (rsvpdebug)
			printf("rsvp_input: "
			    "Can't find vif for packet...dropping it.\n");
		m_freem(m);
		splx(s);
		return;
	}

	if (rsvpdebug)
		printf("rsvp_input: check socket\n");

	if (viftable[vifi].v_rsvpd == 0) {
		/*
		 * drop packet, since there is no specific socket for this
		 * interface
		 */
		if (rsvpdebug)
			printf("rsvp_input: No socket defined for vif %d\n",
			    vifi);
		m_freem(m);
		splx(s);
		return;
	}

	rsvp_src.sin_addr = ip->ip_src;

	if (rsvpdebug && m)
		printf("rsvp_input: m->m_len = %d, sbspace() = %d\n",
		    m->m_len, sbspace(&viftable[vifi].v_rsvpd->so_rcv));

	if (socket_send(viftable[vifi].v_rsvpd, m, &rsvp_src) < 0)
		if (rsvpdebug)
			printf("rsvp_input: Failed to append to socket\n");
	else
		if (rsvpdebug)
			printf("rsvp_input: send packet up\n");

	splx(s);
}
#endif /* RSVP_ISI */