/* $NetBSD: prune.c,v 1.6.2.1 2000/10/19 18:52:10 he Exp $ */
2
3 /*
4 * The mrouted program is covered by the license in the accompanying file
5 * named "LICENSE". Use of the mrouted program represents acceptance of
6 * the terms and conditions listed in that file.
7 *
8 * The mrouted program is COPYRIGHT 1989 by The Board of Trustees of
9 * Leland Stanford Junior University.
10 */
11
12
13 #include "defs.h"
14
15 extern int cache_lifetime;
16 extern int max_prune_lifetime;
17 extern struct rtentry *routing_table;
18
19 extern int phys_vif;
20
21 /*
22 * dither cache lifetime to obtain a value between x and 2*x
23 */
24 #ifdef SYSV
25 #define CACHE_LIFETIME(x) ((x) + (lrand48() % (x)))
26 #else
27 #define CACHE_LIFETIME(x) ((x) + (random() % (x)))
28 #endif
29
30 #define CHK_GS(x, y) { \
31 switch(x) { \
32 case 2: \
33 case 4: \
34 case 8: \
35 case 16: \
36 case 32: \
37 case 64: \
38 case 128: \
39 case 256: y = 1; \
40 break; \
41 default: y = 0; \
42 } \
43 }
44
45 struct gtable *kernel_table; /* ptr to list of kernel grp entries*/
46 static struct gtable *kernel_no_route; /* list of grp entries w/o routes */
47 struct gtable *gtp; /* pointer for kernel rt entries */
48 unsigned int kroutes; /* current number of cache entries */
49
50 /****************************************************************************
51 Functions that are local to prune.c
52 ****************************************************************************/
53 static void prun_add_ttls __P((struct gtable *gt));
54 static int pruning_neighbor __P((vifi_t vifi, u_int32_t addr));
55 static int can_mtrace __P((vifi_t vifi, u_int32_t addr));
56 static struct ptable * find_prune_entry __P((u_int32_t vr, struct ptable *pt));
57 static void expire_prune __P((vifi_t vifi, struct gtable *gt));
58 static void send_prune __P((struct gtable *gt));
59 static void send_graft __P((struct gtable *gt));
60 static void send_graft_ack __P((u_int32_t src, u_int32_t dst,
61 u_int32_t origin, u_int32_t grp));
62 static void update_kernel __P((struct gtable *g));
63 static char * scaletime __P((u_long t));
64
65 /*
66 * Updates the ttl values for each vif.
67 */
68 static void
69 prun_add_ttls(gt)
70 struct gtable *gt;
71 {
72 struct uvif *v;
73 vifi_t vifi;
74
75 for (vifi = 0, v = uvifs; vifi < numvifs; ++vifi, ++v) {
76 if (VIFM_ISSET(vifi, gt->gt_grpmems))
77 gt->gt_ttls[vifi] = v->uv_threshold;
78 else
79 gt->gt_ttls[vifi] = 0;
80 }
81 }
82
83 /*
84 * checks for scoped multicast addresses
85 */
86 #define GET_SCOPE(gt) { \
87 register vifi_t _i; \
88 if ((ntohl((gt)->gt_mcastgrp) & 0xff000000) == 0xef000000) \
89 for (_i = 0; _i < numvifs; _i++) \
90 if (scoped_addr(_i, (gt)->gt_mcastgrp)) \
91 VIFM_SET(_i, (gt)->gt_scope); \
92 }
93
94 int
95 scoped_addr(vifi, addr)
96 vifi_t vifi;
97 u_int32_t addr;
98 {
99 struct vif_acl *acl;
100
101 for (acl = uvifs[vifi].uv_acl; acl; acl = acl->acl_next)
102 if ((addr & acl->acl_mask) == acl->acl_addr)
103 return 1;
104
105 return 0;
106 }
107
108 /*
109 * Determine if mcastgrp has a listener on vifi
110 */
111 int
112 grplst_mem(vifi, mcastgrp)
113 vifi_t vifi;
114 u_int32_t mcastgrp;
115 {
116 register struct listaddr *g;
117 register struct uvif *v;
118
119 v = &uvifs[vifi];
120
121 for (g = v->uv_groups; g != NULL; g = g->al_next)
122 if (mcastgrp == g->al_addr)
123 return 1;
124
125 return 0;
126 }
127
128 /*
129 * Finds the group entry with the specified source and netmask.
130 * If netmask is 0, it uses the route's netmask.
131 *
132 * Returns TRUE if found a match, and the global variable gtp is left
133 * pointing to entry before the found entry.
134 * Returns FALSE if no exact match found, gtp is left pointing to before
135 * the entry in question belongs, or is NULL if the it belongs at the
136 * head of the list.
137 */
138 int
139 find_src_grp(src, mask, grp)
140 u_int32_t src;
141 u_int32_t mask;
142 u_int32_t grp;
143 {
144 struct gtable *gt;
145
/* gtp is a file global: it trails one node behind gt so that callers
 * can insert a new entry after gtp (or at the head when gtp is NULL). */
146 gtp = NULL;
147 gt = kernel_table;
148 while (gt != NULL) {
/* Exact match: with a nonzero mask require (origin, mask) equality;
 * with mask==0 accept any entry whose route subnet contains src. */
149 if (grp == gt->gt_mcastgrp &&
150 (mask ? (gt->gt_route->rt_origin == src &&
151 gt->gt_route->rt_originmask == mask) :
152 ((src & gt->gt_route->rt_originmask) ==
153 gt->gt_route->rt_origin)))
154 return TRUE;
/* The spec still sorts after this entry (list appears ordered by
 * group, then mask, then origin in host byte order) — keep walking. */
155 if (ntohl(grp) > ntohl(gt->gt_mcastgrp) ||
156 (grp == gt->gt_mcastgrp &&
157 (ntohl(mask) < ntohl(gt->gt_route->rt_originmask) ||
158 (mask == gt->gt_route->rt_originmask &&
159 (ntohl(src) > ntohl(gt->gt_route->rt_origin)))))) {
160 gtp = gt;
161 gt = gt->gt_gnext;
162 }
/* Walked past the insertion point: no exact match exists. */
163 else break;
164 }
165 return FALSE;
166 }
167
168 /*
169 * Check if the neighbor supports pruning
170 */
171 static int
172 pruning_neighbor(vifi, addr)
173 vifi_t vifi;
174 u_int32_t addr;
175 {
176 struct listaddr *n = neighbor_info(vifi, addr);
177 int vers;
178
179 if (n == NULL)
180 return 0;
181
182 if (n->al_flags & NF_PRUNE)
183 return 1;
184
185 /*
186 * Versions from 3.0 to 3.4 relied on the version number to identify
187 * that they could handle pruning.
188 */
189 vers = NBR_VERS(n);
190 return (vers >= 0x0300 && vers <= 0x0304);
191 }
192
193 /*
194 * Can the neighbor in question handle multicast traceroute?
195 */
196 static int
197 can_mtrace(vifi, addr)
198 vifi_t vifi;
199 u_int32_t addr;
200 {
201 struct listaddr *n = neighbor_info(vifi, addr);
202 int vers;
203
204 if (n == NULL)
205 return 0;
206
207 if (n->al_flags & NF_MTRACE)
208 return 1;
209
210 /*
211 * Versions 3.3 and 3.4 relied on the version number to identify
212 * that they could handle traceroute.
213 */
214 vers = NBR_VERS(n);
215 return (vers >= 0x0303 && vers <= 0x0304);
216 }
217
218 /*
219 * Returns the prune entry of the router, or NULL if none exists
220 */
221 static struct ptable *
222 find_prune_entry(vr, pt)
223 u_int32_t vr;
224 struct ptable *pt;
225 {
226 while (pt) {
227 if (pt->pt_router == vr)
228 return pt;
229 pt = pt->pt_next;
230 }
231
232 return NULL;
233 }
234
235 /*
236 * Send a prune message to the dominant router for
237 * this source.
238 *
239 * Record an entry that a prune was sent for this group
240 */
241 static void
242 send_prune(gt)
243 struct gtable *gt;
244 {
245 struct ptable *pt;
246 char *p;
247 int i;
248 int datalen;
249 u_int32_t src;
250 u_int32_t dst;
251 u_int32_t tmp;
252
253 /* Don't process any prunes if router is not pruning */
254 if (pruning == 0)
255 return;
256
257 /* Can't process a prune if we don't have an associated route */
258 if (gt->gt_route == NULL)
259 return;
260
261 /* Don't send a prune to a non-pruning router */
262 if (!pruning_neighbor(gt->gt_route->rt_parent, gt->gt_route->rt_gateway))
263 return;
264
265 /*
266 * sends a prune message to the router upstream.
267 */
/* Source the packet from our address on the upstream (parent) vif. */
268 src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
269 dst = gt->gt_route->rt_gateway;
270
/* Build the DVMRP payload just past the IP + IGMP headers. */
271 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
272 datalen = 0;
273
274 /*
275 * determine prune lifetime
276 */
/* Lifetime is the entry's own timer capped by the shortest prune
 * we hold from any downstream router, so it can't outlive them. */
277 gt->gt_prsent_timer = gt->gt_timer;
278 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next)
279 if (pt->pt_timer < gt->gt_prsent_timer)
280 gt->gt_prsent_timer = pt->pt_timer;
281
282 /*
283 * If we have a graft pending, cancel graft retransmission
284 */
285 gt->gt_grftsnt = 0;
286
/* Payload: origin (4), group (4), lifetime (4) — 12 bytes total,
 * lifetime converted to network byte order. */
287 for (i = 0; i < 4; i++)
288 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
289 for (i = 0; i < 4; i++)
290 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
291 tmp = htonl(gt->gt_prsent_timer);
292 for (i = 0; i < 4; i++)
293 *p++ = ((char *)&(tmp))[i];
294 datalen += 12;
295
296 send_igmp(src, dst, IGMP_DVMRP, DVMRP_PRUNE,
297 htonl(MROUTED_LEVEL), datalen);
298
299 log(LOG_DEBUG, 0, "sent prune for (%s %s)/%d on vif %d to %s",
300 inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
301 inet_fmt(gt->gt_mcastgrp, s2),
302 gt->gt_prsent_timer, gt->gt_route->rt_parent,
303 inet_fmt(gt->gt_route->rt_gateway, s3));
304 }
305
306 /*
307 * a prune was sent upstream
308 * so, a graft has to be sent to annul the prune
309 * set up a graft timer so that if an ack is not
310 * heard within that time, another graft request
311 * is sent out.
312 */
313 static void
314 send_graft(gt)
315 struct gtable *gt;
316 {
317 register char *p;
318 register int i;
319 int datalen;
320 u_int32_t src;
321 u_int32_t dst;
322
323 /* Can't send a graft without an associated route */
324 if (gt->gt_route == NULL)
325 return;
326
/* Graft goes upstream: from our address on the parent vif to the
 * route's gateway. */
327 src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
328 dst = gt->gt_route->rt_gateway;
329
330 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
331 datalen = 0;
332
/* Payload: origin (4) + group (4) = 8 bytes. */
333 for (i = 0; i < 4; i++)
334 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
335 for (i = 0; i < 4; i++)
336 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
337 datalen += 8;
338
/* NOTE(review): datalen is always 8 here, so this guard is vestigial. */
339 if (datalen != 0) {
340 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT,
341 htonl(MROUTED_LEVEL), datalen);
342 }
343 log(LOG_DEBUG, 0, "sent graft for (%s %s) to %s on vif %d",
344 inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
345 inet_fmt(gt->gt_mcastgrp, s2),
346 inet_fmt(gt->gt_route->rt_gateway, s3), gt->gt_route->rt_parent);
347 }
348
349 /*
350 * Send an ack that a graft was received
351 */
352 static void
353 send_graft_ack(src, dst, origin, grp)
354 u_int32_t src;
355 u_int32_t dst;
356 u_int32_t origin;
357 u_int32_t grp;
358 {
359 register char *p;
360 register int i;
361 int datalen;
362
363 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
364 datalen = 0;
365
366 for (i = 0; i < 4; i++)
367 *p++ = ((char *)&(origin))[i];
368 for (i = 0; i < 4; i++)
369 *p++ = ((char *)&(grp))[i];
370 datalen += 8;
371
372 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT_ACK,
373 htonl(MROUTED_LEVEL), datalen);
374
375 log(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s",
376 inet_fmt(origin, s1), inet_fmt(grp, s2), inet_fmt(dst, s3));
377 }
378
379 /*
380 * Update the kernel cache with all the routes hanging off the group entry
381 */
382 static void
383 update_kernel(g)
384 struct gtable *g;
385 {
386 struct stable *st;
387
388 for (st = g->gt_srctbl; st; st = st->st_next)
389 k_add_rg(st->st_origin, g);
390 }
391
392 /****************************************************************************
393 Functions that are used externally
394 ****************************************************************************/
395
396 #ifdef SNMP
397 #include <sys/types.h>
398 #include "snmp.h"
399
400 /*
401 * Find a specific group entry in the group table
402 */
403 struct gtable *
404 find_grp(grp)
405 u_long grp;
406 {
407 struct gtable *gt;
408
409 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
410 if (ntohl(grp) < ntohl(gt->gt_mcastgrp))
411 break;
412 if (gt->gt_mcastgrp == grp)
413 return gt;
414 }
415 return NULL;
416 }
417
418 /*
419 * Given a group entry and source, find the corresponding source table
420 * entry
421 */
422 struct stable *
423 find_grp_src(gt, src)
424 struct gtable *gt;
425 u_long src;
426 {
427 struct stable *st;
428 u_long grp = gt->gt_mcastgrp;
429 struct gtable *gtcurr;
430
431 for (gtcurr = gt; gtcurr->gt_mcastgrp == grp; gtcurr = gtcurr->gt_gnext) {
432 for (st = gtcurr->gt_srctbl; st; st = st->st_next)
433 if (st->st_origin == src)
434 return st;
435 }
436 return NULL;
437 }
438
439 /*
440 * Find next entry > specification
441 */
442 int
443 next_grp_src_mask(gtpp, stpp, grp, src, mask)
444 struct gtable **gtpp; /* ordered by group */
445 struct stable **stpp; /* ordered by source */
446 u_long grp;
447 u_long src;
448 u_long mask;
449 {
450 struct gtable *gt, *gbest = NULL;
451 struct stable *st, *sbest = NULL;
452
453 /* Find first group entry >= grp spec */
454 (*gtpp) = kernel_table;
455 while ((*gtpp) && ntohl((*gtpp)->gt_mcastgrp) < ntohl(grp))
456 (*gtpp)=(*gtpp)->gt_gnext;
457 if (!(*gtpp))
458 return 0; /* no more groups */
459
460 for (gt = kernel_table; gt; gt=gt->gt_gnext) {
461 /* Since grps are ordered, we can stop when group changes from gbest */
462 if (gbest && gbest->gt_mcastgrp != gt->gt_mcastgrp)
463 break;
464 for (st = gt->gt_srctbl; st; st=st->st_next) {
465
466 /* Among those entries > spec, find "lowest" one */
467 if (((ntohl(gt->gt_mcastgrp)> ntohl(grp))
468 || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
469 && ntohl(st->st_origin)> ntohl(src))
470 || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
471 && ntohl(st->st_origin)==src && 0xFFFFFFFF>ntohl(mask)))
472 && (!gbest
473 || (ntohl(gt->gt_mcastgrp)< ntohl(gbest->gt_mcastgrp))
474 || (ntohl(gt->gt_mcastgrp)==ntohl(gbest->gt_mcastgrp)
475 && ntohl(st->st_origin)< ntohl(sbest->st_origin)))) {
476 gbest = gt;
477 sbest = st;
478 }
479 }
480 }
481 (*gtpp) = gbest;
482 (*stpp) = sbest;
483 return (*gtpp)!=0;
484 }
485
486 /*
487 * Ensure that sg contains current information for the given group,source.
488 * This is fetched from the kernel as a unit so that counts for the entry
489 * are consistent, i.e. packet and byte counts for the same entry are
490 * read at the same time.
491 */
492 void
493 refresh_sg(sg, gt, st)
494 struct sioc_sg_req *sg;
495 struct gtable *gt;
496 struct stable *st;
497 {
/* lastq caches the timer quantum of the previous fetch, so the kernel
 * is queried at most once per quantum for the same (src, grp). */
498 static int lastq = -1;
499
/* Re-fetch when the quantum has advanced or the request is for a
 * different (src, grp) than the one cached in *sg. */
500 if (quantum != lastq || sg->src.s_addr!=st->st_origin
501 || sg->grp.s_addr!=gt->gt_mcastgrp) {
502 lastq = quantum;
503 sg->src.s_addr = st->st_origin;
504 sg->grp.s_addr = gt->gt_mcastgrp;
/* NOTE(review): ioctl return value ignored; on failure *sg keeps the
 * stamped addresses but stale/garbage counters — consider checking. */
505 ioctl(igmp_socket, SIOCGETSGCNT, (char *)sg);
506 }
507 }
508
509 /*
510 * Return pointer to a specific route entry. This must be a separate
511 * function from find_route() which modifies rtp.
512 */
513 struct rtentry *
514 snmp_find_route(src, mask)
515 register u_long src, mask;
516 {
517 register struct rtentry *rt;
518
519 for (rt = routing_table; rt; rt = rt->rt_next) {
520 if (src == rt->rt_origin && mask == rt->rt_originmask)
521 return rt;
522 }
523 return NULL;
524 }
525
526 /*
527 * Find next route entry > specification
528 */
529 int
530 next_route(rtpp, src, mask)
531 struct rtentry **rtpp;
532 u_long src;
533 u_long mask;
534 {
535 struct rtentry *rt, *rbest = NULL;
536
537 /* Among all entries > spec, find "lowest" one in order */
/* Full linear scan: the first clause keeps only entries strictly
 * greater than (src, mask) in host-order (origin, mask) order; the
 * second keeps the smallest such candidate seen so far in rbest. */
538 for (rt = routing_table; rt; rt=rt->rt_next) {
539 if ((ntohl(rt->rt_origin) > ntohl(src)
540 || (ntohl(rt->rt_origin) == ntohl(src)
541 && ntohl(rt->rt_originmask) > ntohl(mask)))
542 && (!rbest || (ntohl(rt->rt_origin) < ntohl(rbest->rt_origin))
543 || (ntohl(rt->rt_origin) == ntohl(rbest->rt_origin)
544 && ntohl(rt->rt_originmask) < ntohl(rbest->rt_originmask))))
545 rbest = rt;
546 }
/* Returns 1 with *rtpp set when a successor exists, else 0 and NULL. */
547 (*rtpp) = rbest;
548 return (*rtpp)!=0;
549 }
550
551 /*
552 * Given a routing table entry, and a vifi, find the next vifi/entry
553 */
554 int
555 next_route_child(rtpp, src, mask, vifi)
556 struct rtentry **rtpp;
557 u_long src;
558 u_long mask;
559 vifi_t *vifi; /* vif at which to start looking */
560 {
/* NOTE(review): rt is declared but never used in this function. */
561 struct rtentry *rt;
562
563 /* Get (S,M) entry */
/* Exact (src, mask) route if it exists, else the next one in order;
 * give up when neither is found. */
564 if (!((*rtpp) = snmp_find_route(src,mask)))
565 if (!next_route(rtpp, src, mask))
566 return 0;
567
568 /* Continue until we get one with a valid next vif */
/* Scan vifs from *vifi upward for a child vif of this route; on
 * exhaustion restart at vif 0 with the next route in order. */
569 do {
570 for (; (*rtpp)->rt_children && *vifi<numvifs; (*vifi)++)
571 if (VIFM_ISSET(*vifi, (*rtpp)->rt_children))
572 return 1;
573 *vifi = 0;
574 } while( next_route(rtpp, (*rtpp)->rt_origin, (*rtpp)->rt_originmask) );
575
576 return 0;
577 }
578
579 /*
580 * Given a routing table entry, and a vifi, find the next entry
581 * equal to or greater than those
582 */
583 int
584 next_child(gtpp, stpp, grp, src, mask, vifi)
585 struct gtable **gtpp;
586 struct stable **stpp;
587 u_long grp;
588 u_long src;
589 u_long mask;
590 vifi_t *vifi; /* vif at which to start looking */
591 {
/* NOTE(review): st is declared but never used in this function. */
592 struct stable *st;
593
594 /* Get (G,S,M) entry */
/* An exact lookup is only meaningful for a host mask; otherwise (or on
 * lookup failure) fall through to the next entry in SNMP order. */
595 if (mask!=0xFFFFFFFF
596 || !((*gtpp) = find_grp(grp))
597 || !((*stpp) = find_grp_src((*gtpp),src)))
598 if (!next_grp_src_mask(gtpp, stpp, grp, src, mask))
599 return 0;
600
601 /* Continue until we get one with a valid next vif */
/* Scan vifs from *vifi upward for a child vif of the entry's route;
 * on exhaustion restart at vif 0 with the next (group, source). */
602 do {
603 for (; (*gtpp)->gt_route->rt_children && *vifi<numvifs; (*vifi)++)
604 if (VIFM_ISSET(*vifi, (*gtpp)->gt_route->rt_children))
605 return 1;
606 *vifi = 0;
607 } while (next_grp_src_mask(gtpp, stpp, (*gtpp)->gt_mcastgrp,
608 (*stpp)->st_origin, 0xFFFFFFFF) );
609
610 return 0;
611 }
612 #endif /* SNMP */
613
614 /*
615 * Initialize the kernel table structure
616 */
617 void
618 init_ktable()
619 {
620 kernel_table = NULL;
621 kernel_no_route = NULL;
622 kroutes = 0;
623 }
624
625 /*
626 * Add a new table entry for (origin, mcastgrp)
627 */
628 void
629 add_table_entry(origin, mcastgrp)
630 u_int32_t origin;
631 u_int32_t mcastgrp;
632 {
633 struct rtentry *r;
634 struct gtable *gt,**gtnp,*prev_gt;
635 struct stable *st,**stnp;
636 vifi_t i;
637
638 #ifdef DEBUG_MFC
639 md_log(MD_MISS, origin, mcastgrp);
640 #endif
641
642 r = determine_route(origin);
643 prev_gt = NULL;
644 if (r == NULL) {
645 /*
646 * Look for it on the no_route table; if it is found then
647 * it will be detected as a duplicate below.
648 */
649 for (gt = kernel_no_route; gt; gt = gt->gt_next)
650 if (mcastgrp == gt->gt_mcastgrp &&
651 gt->gt_srctbl && gt->gt_srctbl->st_origin == origin)
652 break;
653 gtnp = &kernel_no_route;
654 } else {
655 gtnp = &r->rt_groups;
656 while ((gt = *gtnp) != NULL) {
657 if (gt->gt_mcastgrp >= mcastgrp)
658 break;
659 gtnp = >->gt_next;
660 prev_gt = gt;
661 }
662 }
663
664 if (gt == NULL || gt->gt_mcastgrp != mcastgrp) {
665 gt = (struct gtable *)malloc(sizeof(struct gtable));
666 if (gt == NULL)
667 log(LOG_ERR, 0, "ran out of memory");
668
669 gt->gt_mcastgrp = mcastgrp;
670 gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
671 time(>->gt_ctime);
672 gt->gt_grpmems = 0;
673 gt->gt_scope = 0;
674 gt->gt_prsent_timer = 0;
675 gt->gt_grftsnt = 0;
676 gt->gt_srctbl = NULL;
677 gt->gt_pruntbl = NULL;
678 gt->gt_route = r;
679 #ifdef RSRR
680 gt->gt_rsrr_cache = NULL;
681 #endif
682
683 if (r != NULL) {
684 /* obtain the multicast group membership list */
685 for (i = 0; i < numvifs; i++) {
686 if (VIFM_ISSET(i, r->rt_children) &&
687 !(VIFM_ISSET(i, r->rt_leaves)))
688 VIFM_SET(i, gt->gt_grpmems);
689
690 if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, mcastgrp))
691 VIFM_SET(i, gt->gt_grpmems);
692 }
693 GET_SCOPE(gt);
694 if (VIFM_ISSET(r->rt_parent, gt->gt_scope))
695 gt->gt_scope = -1;
696 gt->gt_grpmems &= ~gt->gt_scope;
697 } else {
698 gt->gt_scope = -1;
699 gt->gt_grpmems = 0;
700 }
701
702 /* update ttls */
703 prun_add_ttls(gt);
704
705 gt->gt_next = *gtnp;
706 *gtnp = gt;
707 if (gt->gt_next)
708 gt->gt_next->gt_prev = gt;
709 gt->gt_prev = prev_gt;
710
711 if (r) {
712 if (find_src_grp(r->rt_origin, r->rt_originmask, gt->gt_mcastgrp)) {
713 struct gtable *g;
714
715 g = gtp ? gtp->gt_gnext : kernel_table;
716 log(LOG_WARNING, 0, "Entry for (%s %s) (rt:%p) exists (rt:%p)",
717 inet_fmts(r->rt_origin, r->rt_originmask, s1),
718 inet_fmt(g->gt_mcastgrp, s2),
719 r, g->gt_route);
720 } else {
721 if (gtp) {
722 gt->gt_gnext = gtp->gt_gnext;
723 gt->gt_gprev = gtp;
724 gtp->gt_gnext = gt;
725 } else {
726 gt->gt_gnext = kernel_table;
727 gt->gt_gprev = NULL;
728 kernel_table = gt;
729 }
730 if (gt->gt_gnext)
731 gt->gt_gnext->gt_gprev = gt;
732 }
733 } else {
734 gt->gt_gnext = gt->gt_gprev = NULL;
735 }
736 }
737
738 stnp = >->gt_srctbl;
739 while ((st = *stnp) != NULL) {
740 if (ntohl(st->st_origin) >= ntohl(origin))
741 break;
742 stnp = &st->st_next;
743 }
744
745 if (st == NULL || st->st_origin != origin) {
746 st = (struct stable *)malloc(sizeof(struct stable));
747 if (st == NULL)
748 log(LOG_ERR, 0, "ran out of memory");
749
750 st->st_origin = origin;
751 st->st_pktcnt = 0;
752 st->st_next = *stnp;
753 *stnp = st;
754 } else {
755 #ifdef DEBUG_MFC
756 md_log(MD_DUPE, origin, mcastgrp);
757 #endif
758 log(LOG_WARNING, 0, "kernel entry already exists for (%s %s)",
759 inet_fmt(origin, s1), inet_fmt(mcastgrp, s2));
760 /* XXX Doing this should cause no harm, and may ensure
761 * kernel<>mrouted synchronization */
762 k_add_rg(origin, gt);
763 return;
764 }
765
766 kroutes++;
767 k_add_rg(origin, gt);
768
769 log(LOG_DEBUG, 0, "add cache entry (%s %s) gm:%x, parent-vif:%d",
770 inet_fmt(origin, s1),
771 inet_fmt(mcastgrp, s2),
772 gt->gt_grpmems, r ? r->rt_parent : -1);
773
774 /* If there are no leaf vifs
775 * which have this group, then
776 * mark this src-grp as a prune candidate.
777 */
778 if (!gt->gt_prsent_timer && !gt->gt_grpmems && r && r->rt_gateway)
779 send_prune(gt);
780 }
781
782 /*
783 * An mrouter has gone down and come up on an interface
784 * Forward on that interface immediately
785 */
786 void
787 reset_neighbor_state(vifi, addr)
788 vifi_t vifi;
789 u_int32_t addr;
790 {
791 struct rtentry *r;
792 struct gtable *g;
793 struct ptable *pt, **ptnp;
794 struct stable *st;
795
/* Walk every group in the kernel table and repair any state that
 * involved the restarted neighbor (vifi, addr). */
796 for (g = kernel_table; g; g = g->gt_gnext) {
797 r = g->gt_route;
798
799 /*
800 * If neighbor was the parent, remove the prune sent state
801 * and all of the source cache info so that prunes get
802 * regenerated.
803 */
804 if (vifi == r->rt_parent) {
805 if (addr == r->rt_gateway) {
806 log(LOG_DEBUG, 0, "reset_neighbor_state parent reset (%s %s)",
807 inet_fmts(r->rt_origin, r->rt_originmask, s1),
808 inet_fmt(g->gt_mcastgrp, s2));
809
810 g->gt_prsent_timer = 0;
811 g->gt_grftsnt = 0;
/* Drop every kernel cache entry for this group so fresh traffic
 * re-triggers cache misses (and hence fresh prunes). */
812 while ((st = g->gt_srctbl) != NULL) {
813 g->gt_srctbl = st->st_next;
814 k_del_rg(st->st_origin, g);
815 kroutes--;
816 free(st);
817 }
818 }
819 } else {
820 /*
821 * Neighbor was not the parent, send grafts to join the groups
822 */
823 if (g->gt_prsent_timer) {
824 g->gt_grftsnt = 1;
825 send_graft(g);
826 g->gt_prsent_timer = 0;
827 }
828
829 /*
830 * Remove any prunes that this router has sent us.
831 */
/* Unlink-in-place via the pointer-to-pointer ptnp. */
832 ptnp = &g->gt_pruntbl;
833 while ((pt = *ptnp) != NULL) {
834 if (pt->pt_vifi == vifi && pt->pt_router == addr) {
835 *ptnp = pt->pt_next;
836 free(pt);
837 } else
838 ptnp = &pt->pt_next;
839 }
840
841 /*
842 * And see if we want to forward again.
843 */
844 if (!VIFM_ISSET(vifi, g->gt_grpmems)) {
/* Re-admit the vif: non-leaf children always forward; leaf vifs
 * forward only if a local member of the group is present. */
845 if (VIFM_ISSET(vifi, r->rt_children) &&
846 !(VIFM_ISSET(vifi, r->rt_leaves)))
847 VIFM_SET(vifi, g->gt_grpmems);
848
849 if (VIFM_ISSET(vifi, r->rt_leaves) &&
850 grplst_mem(vifi, g->gt_mcastgrp))
851 VIFM_SET(vifi, g->gt_grpmems);
852
853 g->gt_grpmems &= ~g->gt_scope;
854 prun_add_ttls(g);
855
856 /* Update kernel state */
857 update_kernel(g);
858 #ifdef RSRR
859 /* Send route change notification to reservation protocol. */
860 rsrr_cache_send(g,1);
861 #endif /* RSRR */
862
863 log(LOG_DEBUG, 0, "reset member state (%s %s) gm:%x",
864 inet_fmts(r->rt_origin, r->rt_originmask, s1),
865 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
866 }
867 }
868 }
869 }
870
871 /*
872 * Delete table entry from the kernel
873 * del_flag determines how many entries to delete
874 */
875 void
876 del_table_entry(r, mcastgrp, del_flag)
877 struct rtentry *r;
878 u_int32_t mcastgrp;
879 u_int del_flag;
880 {
881 struct gtable *g, *prev_g;
882 struct stable *st, *prev_st;
883 struct ptable *pt, *prev_pt;
884
885 if (del_flag == DEL_ALL_ROUTES) {
886 g = r->rt_groups;
887 while (g) {
888 log(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
889 inet_fmts(r->rt_origin, r->rt_originmask, s1),
890 inet_fmt(g->gt_mcastgrp, s2));
891 st = g->gt_srctbl;
892 while (st) {
893 if (k_del_rg(st->st_origin, g) < 0) {
894 log(LOG_WARNING, errno,
895 "del_table_entry trying to delete (%s, %s)",
896 inet_fmt(st->st_origin, s1),
897 inet_fmt(g->gt_mcastgrp, s2));
898 }
899 kroutes--;
900 prev_st = st;
901 st = st->st_next;
902 free(prev_st);
903 }
904 g->gt_srctbl = NULL;
905
906 pt = g->gt_pruntbl;
907 while (pt) {
908 prev_pt = pt;
909 pt = pt->pt_next;
910 free(prev_pt);
911 }
912 g->gt_pruntbl = NULL;
913
914 if (g->gt_gnext)
915 g->gt_gnext->gt_gprev = g->gt_gprev;
916 if (g->gt_gprev)
917 g->gt_gprev->gt_gnext = g->gt_gnext;
918 else
919 kernel_table = g->gt_gnext;
920
921 #ifdef RSRR
922 /* Send route change notification to reservation protocol. */
923 rsrr_cache_send(g,0);
924 rsrr_cache_clean(g);
925 #endif /* RSRR */
926 prev_g = g;
927 g = g->gt_next;
928 free(prev_g);
929 }
930 r->rt_groups = NULL;
931 }
932
933 /*
934 * Dummy routine - someday this may be needed, so it is just there
935 */
936 if (del_flag == DEL_RTE_GROUP) {
937 prev_g = (struct gtable *)&r->rt_groups;
938 for (g = r->rt_groups; g; g = g->gt_next) {
939 if (g->gt_mcastgrp == mcastgrp) {
940 log(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
941 inet_fmts(r->rt_origin, r->rt_originmask, s1),
942 inet_fmt(g->gt_mcastgrp, s2));
943 st = g->gt_srctbl;
944 while (st) {
945 if (k_del_rg(st->st_origin, g) < 0) {
946 log(LOG_WARNING, errno,
947 "del_table_entry trying to delete (%s, %s)",
948 inet_fmt(st->st_origin, s1),
949 inet_fmt(g->gt_mcastgrp, s2));
950 }
951 kroutes--;
952 prev_st = st;
953 st = st->st_next;
954 free(prev_st);
955 }
956 g->gt_srctbl = NULL;
957
958 pt = g->gt_pruntbl;
959 while (pt) {
960 prev_pt = pt;
961 pt = pt->pt_next;
962 free(prev_pt);
963 }
964 g->gt_pruntbl = NULL;
965
966 if (g->gt_gnext)
967 g->gt_gnext->gt_gprev = g->gt_gprev;
968 if (g->gt_gprev)
969 g->gt_gprev->gt_gnext = g->gt_gnext;
970 else
971 kernel_table = g->gt_gnext;
972
973 if (prev_g != (struct gtable *)&r->rt_groups)
974 g->gt_next->gt_prev = prev_g;
975 else
976 g->gt_next->gt_prev = NULL;
977 prev_g->gt_next = g->gt_next;
978
979 #ifdef RSRR
980 /* Send route change notification to reservation protocol. */
981 rsrr_cache_send(g,0);
982 rsrr_cache_clean(g);
983 #endif /* RSRR */
984 free(g);
985 g = prev_g;
986 } else {
987 prev_g = g;
988 }
989 }
990 }
991 }
992
993 /*
994 * update kernel table entry when a route entry changes
995 */
996 void
997 update_table_entry(r)
998 struct rtentry *r;
999 {
1000 struct gtable *g;
1001 struct ptable *pt, *prev_pt;
1002 vifi_t i;
1003
/* Recompute membership and prune state for every group on route r. */
1004 for (g = r->rt_groups; g; g = g->gt_next) {
/* The route changed, so downstream prunes are no longer valid:
 * free the whole prune list. */
1005 pt = g->gt_pruntbl;
1006 while (pt) {
1007 prev_pt = pt->pt_next;
1008 free(pt);
1009 pt = prev_pt;
1010 }
1011 g->gt_pruntbl = NULL;
1012
1013 g->gt_grpmems = 0;
1014
1015 /* obtain the multicast group membership list */
1016 for (i = 0; i < numvifs; i++) {
1017 if (VIFM_ISSET(i, r->rt_children) &&
1018 !(VIFM_ISSET(i, r->rt_leaves)))
1019 VIFM_SET(i, g->gt_grpmems);
1020
1021 if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, g->gt_mcastgrp))
1022 VIFM_SET(i, g->gt_grpmems);
1023 }
/* Scoped on the new parent vif: suppress forwarding everywhere. */
1024 if (VIFM_ISSET(r->rt_parent, g->gt_scope))
1025 g->gt_scope = -1;
1026 g->gt_grpmems &= ~g->gt_scope;
1027
1028 log(LOG_DEBUG, 0, "updating cache entries (%s %s) gm:%x",
1029 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1030 inet_fmt(g->gt_mcastgrp, s2),
1031 g->gt_grpmems);
1032
/* Members reappeared while a prune is outstanding: graft upstream. */
1033 if (g->gt_grpmems && g->gt_prsent_timer) {
1034 g->gt_grftsnt = 1;
1035 send_graft(g);
1036 g->gt_prsent_timer = 0;
1037 }
1038
1039 /* update ttls and add entry into kernel */
1040 prun_add_ttls(g);
1041 update_kernel(g);
1042 #ifdef RSRR
1043 /* Send route change notification to reservation protocol. */
1044 rsrr_cache_send(g,1);
1045 #endif /* RSRR */
1046
1047 /* Check if we want to prune this group */
1048 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
1049 g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1050 send_prune(g);
1051 }
1052 }
1053 }
1054
1055 /*
1056 * set the forwarding flag for all mcastgrps on this vifi
1057 */
1058 void
1059 update_lclgrp(vifi, mcastgrp)
1060 vifi_t vifi;
1061 u_int32_t mcastgrp;
1062 {
1063 struct rtentry *r;
1064 struct gtable *g;
1065
1066 log(LOG_DEBUG, 0, "group %s joined on vif %d",
1067 inet_fmt(mcastgrp, s1), vifi);
1068
1069 for (g = kernel_table; g; g = g->gt_gnext) {
/* kernel_table is ordered by group: stop once we pass mcastgrp. */
1070 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1071 break;
1072
1073 r = g->gt_route;
1074 if (g->gt_mcastgrp == mcastgrp &&
1075 VIFM_ISSET(vifi, r->rt_children)) {
1076
/* Turn forwarding on for this vif, unless the group is scoped here. */
1077 VIFM_SET(vifi, g->gt_grpmems);
1078 g->gt_grpmems &= ~g->gt_scope;
1079 if (g->gt_grpmems == 0)
1080 continue;
1081
1082 prun_add_ttls(g);
1083 log(LOG_DEBUG, 0, "update lclgrp (%s %s) gm:%x",
1084 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1085 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1086
1087 update_kernel(g);
1088 #ifdef RSRR
1089 /* Send route change notification to reservation protocol. */
1090 rsrr_cache_send(g,1);
1091 #endif /* RSRR */
1092 }
1093 }
1094 }
1095
1096 /*
1097 * reset forwarding flag for all mcastgrps on this vifi
1098 */
1099 void
1100 delete_lclgrp(vifi, mcastgrp)
1101 vifi_t vifi;
1102 u_int32_t mcastgrp;
1103 {
1104 struct rtentry *r;
1105 struct gtable *g;
1106
1107 log(LOG_DEBUG, 0, "group %s left on vif %d",
1108 inet_fmt(mcastgrp, s1), vifi);
1109
1110 for (g = kernel_table; g; g = g->gt_gnext) {
/* kernel_table is ordered by group: stop once we pass mcastgrp. */
1111 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1112 break;
1113
1114 if (g->gt_mcastgrp == mcastgrp) {
1115 int stop_sending = 1;
1116
1117 r = g->gt_route;
1118 /*
1119 * If this is not a leaf, then we have router neighbors on this
1120 * vif. Only turn off forwarding if they have all pruned.
1121 */
1122 if (!VIFM_ISSET(vifi, r->rt_leaves)) {
1123 struct listaddr *vr;
1124
/* Any neighbor without a recorded prune still wants traffic. */
1125 for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
1126 if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
1127 stop_sending = 0;
1128 break;
1129 }
1130 }
1131
1132 if (stop_sending) {
1133 VIFM_CLR(vifi, g->gt_grpmems);
1134 log(LOG_DEBUG, 0, "delete lclgrp (%s %s) gm:%x",
1135 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1136 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1137
1138 prun_add_ttls(g);
1139 update_kernel(g);
1140 #ifdef RSRR
1141 /* Send route change notification to reservation protocol. */
1142 rsrr_cache_send(g,1);
1143 #endif /* RSRR */
1144
1145 /*
1146 * If there are no more members of this particular group,
1147 * send prune upstream
1148 */
1149 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway)
1150 send_prune(g);
1151 }
1152 }
1153 }
1154 }
1155
1156 /*
1157 * Takes the prune message received and then strips it to
1158 * determine the (src, grp) pair to be pruned.
1159 *
1160 * Adds the router to the (src, grp) entry then.
1161 *
1162 * Determines if further packets have to be sent down that vif
1163 *
1164 * Determines if a corresponding prune message has to be generated
1165 */
1166 void
1167 accept_prune(src, dst, p, datalen)
1168 u_int32_t src;
1169 u_int32_t dst;
1170 char *p;
1171 int datalen;
1172 {
1173 u_int32_t prun_src;
1174 u_int32_t prun_grp;
1175 u_int32_t prun_tmr;
1176 vifi_t vifi;
1177 int i;
1178 int stop_sending;
1179 struct rtentry *r;
1180 struct gtable *g;
1181 struct ptable *pt;
1182 struct listaddr *vr;
1183
1184 /* Don't process any prunes if router is not pruning */
1185 if (pruning == 0)
1186 return;
1187
1188 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1189 log(LOG_INFO, 0,
1190 "ignoring prune report from non-neighbor %s",
1191 inet_fmt(src, s1));
1192 return;
1193 }
1194
1195 /* Check if enough data is present */
1196 if (datalen < 12)
1197 {
1198 log(LOG_WARNING, 0,
1199 "non-decipherable prune from %s",
1200 inet_fmt(src, s1));
1201 return;
1202 }
1203
1204 for (i = 0; i< 4; i++)
1205 ((char *)&prun_src)[i] = *p++;
1206 for (i = 0; i< 4; i++)
1207 ((char *)&prun_grp)[i] = *p++;
1208 for (i = 0; i< 4; i++)
1209 ((char *)&prun_tmr)[i] = *p++;
1210 prun_tmr = ntohl(prun_tmr);
1211
1212 log(LOG_DEBUG, 0, "%s on vif %d prunes (%s %s)/%d",
1213 inet_fmt(src, s1), vifi,
1214 inet_fmt(prun_src, s2), inet_fmt(prun_grp, s3), prun_tmr);
1215
1216 /*
1217 * Find the subnet for the prune
1218 */
1219 if (find_src_grp(prun_src, 0, prun_grp)) {
1220 g = gtp ? gtp->gt_gnext : kernel_table;
1221 r = g->gt_route;
1222
1223 if (!VIFM_ISSET(vifi, r->rt_children)) {
1224 log(LOG_WARNING, 0, "prune received from non-child %s for (%s %s)",
1225 inet_fmt(src, s1), inet_fmt(prun_src, s2),
1226 inet_fmt(prun_grp, s3));
1227 return;
1228 }
1229 if (VIFM_ISSET(vifi, g->gt_scope)) {
1230 log(LOG_WARNING, 0, "prune received from %s on scoped grp (%s %s)",
1231 inet_fmt(src, s1), inet_fmt(prun_src, s2),
1232 inet_fmt(prun_grp, s3));
1233 return;
1234 }
1235 if ((pt = find_prune_entry(src, g->gt_pruntbl)) != NULL) {
1236 /*
1237 * If it's about to expire, then it's only still around because
1238 * of timer granularity, so don't warn about it.
1239 */
1240 if (pt->pt_timer > 10) {
1241 log(LOG_WARNING, 0, "%s %d from %s for (%s %s)/%d %s %d %s %x",
1242 "duplicate prune received on vif",
1243 vifi, inet_fmt(src, s1), inet_fmt(prun_src, s2),
1244 inet_fmt(prun_grp, s3), prun_tmr,
1245 "old timer:", pt->pt_timer, "cur gm:", g->gt_grpmems);
1246 }
1247 pt->pt_timer = prun_tmr;
1248 } else {
1249 /* allocate space for the prune structure */
1250 pt = (struct ptable *)(malloc(sizeof(struct ptable)));
1251 if (pt == NULL)
1252 log(LOG_ERR, 0, "pt: ran out of memory");
1253
1254 pt->pt_vifi = vifi;
1255 pt->pt_router = src;
1256 pt->pt_timer = prun_tmr;
1257
1258 pt->pt_next = g->gt_pruntbl;
1259 g->gt_pruntbl = pt;
1260 }
1261
1262 /* Refresh the group's lifetime */
1263 g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1264 if (g->gt_timer < prun_tmr)
1265 g->gt_timer = prun_tmr;
1266
1267 /*
1268 * check if any more packets need to be sent on the
1269 * vif which sent this message
1270 */
1271 stop_sending = 1;
1272 for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
1273 if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
1274 stop_sending = 0;
1275 break;
1276 }
1277
1278 if (stop_sending && !grplst_mem(vifi, prun_grp)) {
1279 VIFM_CLR(vifi, g->gt_grpmems);
1280 log(LOG_DEBUG, 0, "prune (%s %s), stop sending on vif %d, gm:%x",
1281 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1282 inet_fmt(g->gt_mcastgrp, s2), vifi, g->gt_grpmems);
1283
1284 prun_add_ttls(g);
1285 update_kernel(g);
1286 #ifdef RSRR
1287 /* Send route change notification to reservation protocol. */
1288 rsrr_cache_send(g,1);
1289 #endif /* RSRR */
1290 }
1291
1292 /*
1293 * check if all the child routers have expressed no interest
1294 * in this group and if this group does not exist in the
1295 * interface
1296 * Send a prune message then upstream
1297 */
1298 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
1299 send_prune(g);
1300 }
1301 } else {
1302 /*
1303 * There is no kernel entry for this group. Therefore, we can
1304 * simply ignore the prune, as we are not forwarding this traffic
1305 * downstream.
1306 */
1307 log(LOG_DEBUG, 0, "%s (%s %s)/%d from %s",
1308 "prune message received with no kernel entry for",
1309 inet_fmt(prun_src, s1), inet_fmt(prun_grp, s2),
1310 prun_tmr, inet_fmt(src, s3));
1311 return;
1312 }
1313 }
1314
1315 /*
1316 * Checks if this mcastgrp is present in the kernel table
1317 * If so and if a prune was sent, it sends a graft upwards
1318 */
1319 void
1320 chkgrp_graft(vifi, mcastgrp)
1321 vifi_t vifi;
1322 u_int32_t mcastgrp;
1323 {
1324 struct rtentry *r;
1325 struct gtable *g;
1326
1327 for (g = kernel_table; g; g = g->gt_gnext) {
1328 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1329 break;
1330
1331 r = g->gt_route;
1332 if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, r->rt_children))
1333 if (g->gt_prsent_timer) {
1334 VIFM_SET(vifi, g->gt_grpmems);
1335
1336 /*
1337 * If the vif that was joined was a scoped vif,
1338 * ignore it ; don't graft back
1339 */
1340 g->gt_grpmems &= ~g->gt_scope;
1341 if (g->gt_grpmems == 0)
1342 continue;
1343
1344 /* set the flag for graft retransmission */
1345 g->gt_grftsnt = 1;
1346
1347 /* send graft upwards */
1348 send_graft(g);
1349
1350 /* reset the prune timer and update cache timer*/
1351 g->gt_prsent_timer = 0;
1352 g->gt_timer = max_prune_lifetime;
1353
1354 log(LOG_DEBUG, 0, "chkgrp graft (%s %s) gm:%x",
1355 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1356 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1357
1358 prun_add_ttls(g);
1359 update_kernel(g);
1360 #ifdef RSRR
1361 /* Send route change notification to reservation protocol. */
1362 rsrr_cache_send(g,1);
1363 #endif /* RSRR */
1364 }
1365 }
1366 }
1367
/* Process an incoming graft message.
 *
 * Determine the multicast group and source it refers to, and check
 * whether an entry exists for them; if it does, then determine if a
 * prune was sent upstream.
 * If a prune was sent upstream, send a graft upstream and send an
 * ack downstream.
 *
 * If no prune was sent upstream, set the forwarding bit
 * for this interface and send an ack downstream.
 *
 * If no entry exists for this group, just send an ack downstream.
 */
1380 void
1381 accept_graft(src, dst, p, datalen)
1382 u_int32_t src;
1383 u_int32_t dst;
1384 char *p;
1385 int datalen;
1386 {
1387 vifi_t vifi;
1388 u_int32_t graft_src;
1389 u_int32_t graft_grp;
1390 int i;
1391 struct rtentry *r;
1392 struct gtable *g;
1393 struct ptable *pt, **ptnp;
1394
1395 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1396 log(LOG_INFO, 0,
1397 "ignoring graft from non-neighbor %s",
1398 inet_fmt(src, s1));
1399 return;
1400 }
1401
1402 if (datalen < 8) {
1403 log(LOG_WARNING, 0,
1404 "received non-decipherable graft from %s",
1405 inet_fmt(src, s1));
1406 return;
1407 }
1408
1409 for (i = 0; i< 4; i++)
1410 ((char *)&graft_src)[i] = *p++;
1411 for (i = 0; i< 4; i++)
1412 ((char *)&graft_grp)[i] = *p++;
1413
1414 log(LOG_DEBUG, 0, "%s on vif %d grafts (%s %s)",
1415 inet_fmt(src, s1), vifi,
1416 inet_fmt(graft_src, s2), inet_fmt(graft_grp, s3));
1417
1418 /*
1419 * Find the subnet for the graft
1420 */
1421 if (find_src_grp(graft_src, 0, graft_grp)) {
1422 g = gtp ? gtp->gt_gnext : kernel_table;
1423 r = g->gt_route;
1424
1425 if (VIFM_ISSET(vifi, g->gt_scope)) {
1426 log(LOG_WARNING, 0, "graft received from %s on scoped grp (%s %s)",
1427 inet_fmt(src, s1), inet_fmt(graft_src, s2),
1428 inet_fmt(graft_grp, s3));
1429 return;
1430 }
1431
1432 ptnp = &g->gt_pruntbl;
1433 while ((pt = *ptnp) != NULL) {
1434 if ((pt->pt_vifi == vifi) && (pt->pt_router == src)) {
1435 *ptnp = pt->pt_next;
1436 free(pt);
1437
1438 VIFM_SET(vifi, g->gt_grpmems);
1439 log(LOG_DEBUG, 0, "accept graft (%s %s) gm:%x",
1440 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1441 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1442
1443 prun_add_ttls(g);
1444 update_kernel(g);
1445 #ifdef RSRR
1446 /* Send route change notification to reservation protocol. */
1447 rsrr_cache_send(g,1);
1448 #endif /* RSRR */
1449 break;
1450 } else {
1451 ptnp = &pt->pt_next;
1452 }
1453 }
1454
1455 /* send ack downstream */
1456 send_graft_ack(dst, src, graft_src, graft_grp);
1457 g->gt_timer = max_prune_lifetime;
1458
1459 if (g->gt_prsent_timer) {
1460 /* set the flag for graft retransmission */
1461 g->gt_grftsnt = 1;
1462
1463 /* send graft upwards */
1464 send_graft(g);
1465
1466 /* reset the prune sent timer */
1467 g->gt_prsent_timer = 0;
1468 }
1469 } else {
1470 /*
1471 * We have no state for the source and group in question.
1472 * We can simply acknowledge the graft, since we know
1473 * that we have no prune state, and grafts are requests
1474 * to remove prune state.
1475 */
1476 send_graft_ack(dst, src, graft_src, graft_grp);
1477 log(LOG_DEBUG, 0, "%s (%s %s) from %s",
1478 "graft received with no kernel entry for",
1479 inet_fmt(graft_src, s1), inet_fmt(graft_grp, s2),
1480 inet_fmt(src, s3));
1481 return;
1482 }
1483 }
1484
/*
 * Process a graft acknowledgement.
 * First find out which group is involved, then determine whether a
 * graft was sent.
 * If no graft was sent, ignore the message.
 * If a graft was sent and the ack is from the right source, clear
 * the graft-sent flag so that we don't have to send the graft again.
 */
1493 void
1494 accept_g_ack(src, dst, p, datalen)
1495 u_int32_t src;
1496 u_int32_t dst;
1497 char *p;
1498 int datalen;
1499 {
1500 struct gtable *g;
1501 vifi_t vifi;
1502 u_int32_t grft_src;
1503 u_int32_t grft_grp;
1504 int i;
1505
1506 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1507 log(LOG_INFO, 0,
1508 "ignoring graft ack from non-neighbor %s",
1509 inet_fmt(src, s1));
1510 return;
1511 }
1512
1513 if (datalen < 0 || datalen > 8) {
1514 log(LOG_WARNING, 0,
1515 "received non-decipherable graft ack from %s",
1516 inet_fmt(src, s1));
1517 return;
1518 }
1519
1520 for (i = 0; i< 4; i++)
1521 ((char *)&grft_src)[i] = *p++;
1522 for (i = 0; i< 4; i++)
1523 ((char *)&grft_grp)[i] = *p++;
1524
1525 log(LOG_DEBUG, 0, "%s on vif %d acks graft (%s, %s)",
1526 inet_fmt(src, s1), vifi,
1527 inet_fmt(grft_src, s2), inet_fmt(grft_grp, s3));
1528
1529 /*
1530 * Find the subnet for the graft ack
1531 */
1532 if (find_src_grp(grft_src, 0, grft_grp)) {
1533 g = gtp ? gtp->gt_gnext : kernel_table;
1534 g->gt_grftsnt = 0;
1535 } else {
1536 log(LOG_WARNING, 0, "%s (%s, %s) from %s",
1537 "rcvd graft ack with no kernel entry for",
1538 inet_fmt(grft_src, s1), inet_fmt(grft_grp, s2),
1539 inet_fmt(src, s3));
1540 return;
1541 }
1542 }
1543
1544
1545 /*
1546 * free all prune entries and kernel routes
1547 * normally, this should inform the kernel that all of its routes
1548 * are going away, but this is only called by restart(), which is
1549 * about to call MRT_DONE which does that anyway.
1550 */
1551 void
1552 free_all_prunes()
1553 {
1554 register struct rtentry *r;
1555 register struct gtable *g, *prev_g;
1556 register struct stable *s, *prev_s;
1557 register struct ptable *p, *prev_p;
1558
1559 for (r = routing_table; r; r = r->rt_next) {
1560 g = r->rt_groups;
1561 while (g) {
1562 s = g->gt_srctbl;
1563 while (s) {
1564 prev_s = s;
1565 s = s->st_next;
1566 free(prev_s);
1567 }
1568
1569 p = g->gt_pruntbl;
1570 while (p) {
1571 prev_p = p;
1572 p = p->pt_next;
1573 free(prev_p);
1574 }
1575
1576 prev_g = g;
1577 g = g->gt_next;
1578 free(prev_g);
1579 }
1580 r->rt_groups = NULL;
1581 }
1582 kernel_table = NULL;
1583
1584 g = kernel_no_route;
1585 while (g) {
1586 if (g->gt_srctbl)
1587 free(g->gt_srctbl);
1588
1589 prev_g = g;
1590 g = g->gt_next;
1591 free(prev_g);
1592 }
1593 kernel_no_route = NULL;
1594 }
1595
1596 /*
1597 * When a new route is created, search
1598 * a) The less-specific part of the routing table
1599 * b) The route-less kernel table
1600 * for sources that the new route might want to handle.
1601 *
1602 * "Inheriting" these sources might be cleanest, but simply deleting
1603 * them is easier, and letting the kernel re-request them.
1604 */
1605 void
1606 steal_sources(rt)
1607 struct rtentry *rt;
1608 {
1609 register struct rtentry *rp;
1610 register struct gtable *gt, **gtnp;
1611 register struct stable *st, **stnp;
1612
1613 for (rp = rt->rt_next; rp; rp = rp->rt_next) {
1614 if ((rt->rt_origin & rp->rt_originmask) == rp->rt_origin) {
1615 log(LOG_DEBUG, 0, "Route for %s stealing sources from %s",
1616 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1617 inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
1618 for (gt = rp->rt_groups; gt; gt = gt->gt_next) {
1619 stnp = >->gt_srctbl;
1620 while ((st = *stnp) != NULL) {
1621 if ((st->st_origin & rt->rt_originmask) == rt->rt_origin) {
1622 log(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1623 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1624 inet_fmt(st->st_origin, s3),
1625 inet_fmt(gt->gt_mcastgrp, s4),
1626 inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
1627 if (k_del_rg(st->st_origin, gt) < 0) {
1628 log(LOG_WARNING, errno, "%s (%s, %s)",
1629 "steal_sources trying to delete",
1630 inet_fmt(st->st_origin, s1),
1631 inet_fmt(gt->gt_mcastgrp, s2));
1632 }
1633 *stnp = st->st_next;
1634 kroutes--;
1635 free(st);
1636 } else {
1637 stnp = &st->st_next;
1638 }
1639 }
1640 }
1641 }
1642 }
1643
1644 gtnp = &kernel_no_route;
1645 while ((gt = *gtnp) != NULL) {
1646 if (gt->gt_srctbl && ((gt->gt_srctbl->st_origin & rt->rt_originmask)
1647 == rt->rt_origin)) {
1648 log(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1649 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1650 inet_fmt(gt->gt_srctbl->st_origin, s3),
1651 inet_fmt(gt->gt_mcastgrp, s4),
1652 "no_route table");
1653 if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1654 log(LOG_WARNING, errno, "%s (%s %s)",
1655 "steal_sources trying to delete",
1656 inet_fmt(gt->gt_srctbl->st_origin, s1),
1657 inet_fmt(gt->gt_mcastgrp, s2));
1658 }
1659 kroutes--;
1660 free(gt->gt_srctbl);
1661 *gtnp = gt->gt_next;
1662 if (gt->gt_next)
1663 gt->gt_next->gt_prev = gt->gt_prev;
1664 free(gt);
1665 } else {
1666 gtnp = >->gt_next;
1667 }
1668 }
1669 }
1670
1671 /*
1672 * Advance the timers on all the cache entries.
1673 * If there are any entries whose timers have expired,
1674 * remove these entries from the kernel cache.
1675 */
1676 void
1677 age_table_entry()
1678 {
1679 struct rtentry *r;
1680 struct gtable *gt, **gtnptr;
1681 struct stable *st, **stnp;
1682 struct ptable *pt, **ptnp;
1683 struct sioc_sg_req sg_req;
1684
1685 log(LOG_DEBUG, 0, "ageing entries");
1686
1687 gtnptr = &kernel_table;
1688 while ((gt = *gtnptr) != NULL) {
1689 r = gt->gt_route;
1690
1691 /* advance the timer for the kernel entry */
1692 gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;
1693
1694 /* decrement prune timer if need be */
1695 if (gt->gt_prsent_timer > 0) {
1696 gt->gt_prsent_timer -= ROUTE_MAX_REPORT_DELAY;
1697 if (gt->gt_prsent_timer <= 0) {
1698 log(LOG_DEBUG, 0, "upstream prune tmo (%s %s)",
1699 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1700 inet_fmt(gt->gt_mcastgrp, s2));
1701 gt->gt_prsent_timer = -1;
1702 }
1703 }
1704
1705 /* retransmit graft if graft sent flag is still set */
1706 if (gt->gt_grftsnt) {
1707 register int y;
1708 CHK_GS(gt->gt_grftsnt++, y);
1709 if (y)
1710 send_graft(gt);
1711 }
1712
1713 /*
1714 * Age prunes
1715 *
1716 * If a prune expires, forward again on that vif.
1717 */
1718 ptnp = >->gt_pruntbl;
1719 while ((pt = *ptnp) != NULL) {
1720 if ((pt->pt_timer -= ROUTE_MAX_REPORT_DELAY) <= 0) {
1721 log(LOG_DEBUG, 0, "expire prune (%s %s) from %s on vif %d",
1722 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1723 inet_fmt(gt->gt_mcastgrp, s2),
1724 inet_fmt(pt->pt_router, s3),
1725 pt->pt_vifi);
1726
1727 expire_prune(pt->pt_vifi, gt);
1728
1729 /* remove the router's prune entry and await new one */
1730 *ptnp = pt->pt_next;
1731 free(pt);
1732 } else {
1733 ptnp = &pt->pt_next;
1734 }
1735 }
1736
1737 /*
1738 * If the cache entry has expired, delete source table entries for
1739 * silent sources. If there are no source entries left, and there
1740 * are no downstream prunes, then the entry is deleted.
1741 * Otherwise, the cache entry's timer is refreshed.
1742 */
1743 if (gt->gt_timer <= 0) {
1744 /* Check for traffic before deleting source entries */
1745 sg_req.grp.s_addr = gt->gt_mcastgrp;
1746 stnp = >->gt_srctbl;
1747 while ((st = *stnp) != NULL) {
1748 sg_req.src.s_addr = st->st_origin;
1749 if (ioctl(igmp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
1750 log(LOG_WARNING, errno, "%s (%s %s)",
1751 "age_table_entry: SIOCGETSGCNT failing for",
1752 inet_fmt(st->st_origin, s1),
1753 inet_fmt(gt->gt_mcastgrp, s2));
1754 /* Make sure it gets deleted below */
1755 sg_req.pktcnt = st->st_pktcnt;
1756 }
1757 if (sg_req.pktcnt == st->st_pktcnt) {
1758 *stnp = st->st_next;
1759 log(LOG_DEBUG, 0, "age_table_entry deleting (%s %s)",
1760 inet_fmt(st->st_origin, s1),
1761 inet_fmt(gt->gt_mcastgrp, s2));
1762 if (k_del_rg(st->st_origin, gt) < 0) {
1763 log(LOG_WARNING, errno,
1764 "age_table_entry trying to delete (%s %s)",
1765 inet_fmt(st->st_origin, s1),
1766 inet_fmt(gt->gt_mcastgrp, s2));
1767 }
1768 kroutes--;
1769 free(st);
1770 } else {
1771 st->st_pktcnt = sg_req.pktcnt;
1772 stnp = &st->st_next;
1773 }
1774 }
1775
1776 /*
1777 * Retain the group entry if we have downstream prunes or if
1778 * there is at least one source in the list that still has
1779 * traffic, or if our upstream prune timer is running.
1780 */
1781 if (gt->gt_pruntbl != NULL || gt->gt_srctbl != NULL ||
1782 gt->gt_prsent_timer > 0) {
1783 gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
1784 if (gt->gt_prsent_timer == -1) {
1785 if (gt->gt_grpmems == 0)
1786 send_prune(gt);
1787 else
1788 gt->gt_prsent_timer = 0;
1789 }
1790 gtnptr = >->gt_gnext;
1791 continue;
1792 }
1793
1794 log(LOG_DEBUG, 0, "timeout cache entry (%s, %s)",
1795 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1796 inet_fmt(gt->gt_mcastgrp, s2));
1797
1798 if (gt->gt_prev)
1799 gt->gt_prev->gt_next = gt->gt_next;
1800 else
1801 gt->gt_route->rt_groups = gt->gt_next;
1802 if (gt->gt_next)
1803 gt->gt_next->gt_prev = gt->gt_prev;
1804
1805 if (gt->gt_gprev) {
1806 gt->gt_gprev->gt_gnext = gt->gt_gnext;
1807 gtnptr = >->gt_gprev->gt_gnext;
1808 } else {
1809 kernel_table = gt->gt_gnext;
1810 gtnptr = &kernel_table;
1811 }
1812 if (gt->gt_gnext)
1813 gt->gt_gnext->gt_gprev = gt->gt_gprev;
1814
1815 #ifdef RSRR
1816 /* Send route change notification to reservation protocol. */
1817 rsrr_cache_send(gt,0);
1818 rsrr_cache_clean(gt);
1819 #endif /* RSRR */
1820 free((char *)gt);
1821 } else {
1822 if (gt->gt_prsent_timer == -1) {
1823 if (gt->gt_grpmems == 0)
1824 send_prune(gt);
1825 else
1826 gt->gt_prsent_timer = 0;
1827 }
1828 gtnptr = >->gt_gnext;
1829 }
1830 }
1831
1832 /*
1833 * When traversing the no_route table, the decision is much easier.
1834 * Just delete it if it has timed out.
1835 */
1836 gtnptr = &kernel_no_route;
1837 while ((gt = *gtnptr) != NULL) {
1838 /* advance the timer for the kernel entry */
1839 gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;
1840
1841 if (gt->gt_timer < 0) {
1842 if (gt->gt_srctbl) {
1843 if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1844 log(LOG_WARNING, errno, "%s (%s %s)",
1845 "age_table_entry trying to delete no-route",
1846 inet_fmt(gt->gt_srctbl->st_origin, s1),
1847 inet_fmt(gt->gt_mcastgrp, s2));
1848 }
1849 free(gt->gt_srctbl);
1850 }
1851 *gtnptr = gt->gt_next;
1852 if (gt->gt_next)
1853 gt->gt_next->gt_prev = gt->gt_prev;
1854
1855 free((char *)gt);
1856 } else {
1857 gtnptr = >->gt_next;
1858 }
1859 }
1860 }
1861
1862 /*
1863 * Modify the kernel to forward packets when one or multiple prunes that
1864 * were received on the vif given by vifi, for the group given by gt,
1865 * have expired.
1866 */
1867 static void
1868 expire_prune(vifi, gt)
1869 vifi_t vifi;
1870 struct gtable *gt;
1871 {
1872 /*
1873 * No need to send a graft, any prunes that we sent
1874 * will expire before any prunes that we have received.
1875 */
1876 if (gt->gt_prsent_timer > 0) {
1877 log(LOG_DEBUG, 0, "prune expired with %d left on %s",
1878 gt->gt_prsent_timer, "prsent_timer");
1879 gt->gt_prsent_timer = 0;
1880 }
1881
1882 /* modify the kernel entry to forward packets */
1883 if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
1884 struct rtentry *rt = gt->gt_route;
1885 VIFM_SET(vifi, gt->gt_grpmems);
1886 log(LOG_DEBUG, 0, "forw again (%s %s) gm:%x vif:%d",
1887 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1888 inet_fmt(gt->gt_mcastgrp, s2), gt->gt_grpmems, vifi);
1889
1890 prun_add_ttls(gt);
1891 update_kernel(gt);
1892 #ifdef RSRR
1893 /* Send route change notification to reservation protocol. */
1894 rsrr_cache_send(gt,1);
1895 #endif /* RSRR */
1896 }
1897 }
1898
1899
/*
 * Format an elapsed time in seconds as a compact 4-character string:
 * seconds (< 2 min), minutes (< 1 hour), hours (< 1 day), days
 * (< 10 days) or weeks.  Returns "*** " if the scaled value does not
 * fit in three digits.  Alternates between two static buffers so two
 * calls may appear in the same printf argument list; a third call
 * overwrites the first result.
 */
static char *
scaletime(t)
    unsigned long t;	/* same type as u_long */
{
    static char buf1[5];
    static char buf2[5];
    static char *buf=buf1;
    char s;
    char *p;

    /* pick the buffer not returned by the previous call */
    p = buf;
    if (buf == buf1)
	buf = buf2;
    else
	buf = buf1;

    if (t < 120) {
	s = 's';
    } else if (t < 3600) {
	t /= 60;
	s = 'm';
    } else if (t < 86400) {
	t /= 3600;
	s = 'h';
    } else if (t < 864000) {
	t /= 86400;
	s = 'd';
    } else {
	t /= 604800;
	s = 'w';
    }
    if (t > 999)
	return "*** ";

    /* snprintf rather than sprintf: bounds the write to the
     * 5-byte static buffers */
    snprintf(p, sizeof(buf1), "%3d%c", (int)t, s);

    return p;
}
1938
1939 /*
1940 * Print the contents of the cache table on file 'fp2'.
1941 */
1942 void
1943 dump_cache(fp2)
1944 FILE *fp2;
1945 {
1946 register struct rtentry *r;
1947 register struct gtable *gt;
1948 register struct stable *st;
1949 register vifi_t i;
1950 register time_t thyme = time(0);
1951
1952 fprintf(fp2,
1953 "Multicast Routing Cache Table (%d entries)\n%s", kroutes,
1954 " Origin Mcast-group CTmr Age Ptmr IVif Forwvifs\n");
1955
1956 for (gt = kernel_no_route; gt; gt = gt->gt_next) {
1957 if (gt->gt_srctbl) {
1958 fprintf(fp2, " %-18s %-15s %-4s %-4s - -1\n",
1959 inet_fmts(gt->gt_srctbl->st_origin, 0xffffffff, s1),
1960 inet_fmt(gt->gt_mcastgrp, s2), scaletime(gt->gt_timer),
1961 scaletime(thyme - gt->gt_ctime));
1962 fprintf(fp2, ">%s\n", inet_fmt(gt->gt_srctbl->st_origin, s1));
1963 }
1964 }
1965
1966 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
1967 r = gt->gt_route;
1968 fprintf(fp2, " %-18s %-15s",
1969 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1970 inet_fmt(gt->gt_mcastgrp, s2));
1971
1972 fprintf(fp2, " %-4s", scaletime(gt->gt_timer));
1973
1974 fprintf(fp2, " %-4s %-4s ", scaletime(thyme - gt->gt_ctime),
1975 gt->gt_prsent_timer ? scaletime(gt->gt_prsent_timer) :
1976 " -");
1977
1978 fprintf(fp2, "%2u%c%c ", r->rt_parent,
1979 gt->gt_prsent_timer ? 'P' : ' ',
1980 VIFM_ISSET(r->rt_parent, gt->gt_scope) ? 'B' : ' ');
1981
1982 for (i = 0; i < numvifs; ++i) {
1983 if (VIFM_ISSET(i, gt->gt_grpmems))
1984 fprintf(fp2, " %u ", i);
1985 else if (VIFM_ISSET(i, r->rt_children) &&
1986 !VIFM_ISSET(i, r->rt_leaves))
1987 fprintf(fp2, " %u%c", i,
1988 VIFM_ISSET(i, gt->gt_scope) ? 'b' : 'p');
1989 }
1990 fprintf(fp2, "\n");
1991 for (st = gt->gt_srctbl; st; st = st->st_next) {
1992 fprintf(fp2, ">%s\n", inet_fmt(st->st_origin, s1));
1993 }
1994 #ifdef DEBUG_PRUNES
1995 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next) {
1996 fprintf(fp2, "<r:%s v:%d t:%d\n", inet_fmt(pt->pt_router, s1),
1997 pt->pt_vifi, pt->pt_timer);
1998 }
1999 #endif
2000 }
2001 }
2002
2003 /*
2004 * Traceroute function which returns traceroute replies to the requesting
2005 * router. Also forwards the request to downstream routers.
2006 */
2007 void
2008 accept_mtrace(src, dst, group, data, no, datalen)
2009 u_int32_t src;
2010 u_int32_t dst;
2011 u_int32_t group;
2012 char *data;
2013 u_int no; /* promoted u_char */
2014 int datalen;
2015 {
2016 u_char type;
2017 struct rtentry *rt;
2018 struct gtable *gt;
2019 struct tr_query *qry;
2020 struct tr_resp *resp;
2021 int vifi;
2022 char *p;
2023 int rcount;
2024 int errcode = TR_NO_ERR;
2025 int resptype;
2026 struct timeval tp;
2027 struct sioc_vif_req v_req;
2028 struct sioc_sg_req sg_req;
2029
2030 /* Remember qid across invocations */
2031 static u_int32_t oqid = 0;
2032
2033 /* timestamp the request/response */
2034 gettimeofday(&tp, 0);
2035
2036 /*
2037 * Check if it is a query or a response
2038 */
2039 if (datalen == QLEN) {
2040 type = QUERY;
2041 log(LOG_DEBUG, 0, "Initial traceroute query rcvd from %s to %s",
2042 inet_fmt(src, s1), inet_fmt(dst, s2));
2043 }
2044 else if ((datalen - QLEN) % RLEN == 0) {
2045 type = RESP;
2046 log(LOG_DEBUG, 0, "In-transit traceroute query rcvd from %s to %s",
2047 inet_fmt(src, s1), inet_fmt(dst, s2));
2048 if (IN_MULTICAST(ntohl(dst))) {
2049 log(LOG_DEBUG, 0, "Dropping multicast response");
2050 return;
2051 }
2052 }
2053 else {
2054 log(LOG_WARNING, 0, "%s from %s to %s",
2055 "Non decipherable traceroute request recieved",
2056 inet_fmt(src, s1), inet_fmt(dst, s2));
2057 return;
2058 }
2059
2060 qry = (struct tr_query *)data;
2061
2062 /*
2063 * if it is a packet with all reports filled, drop it
2064 */
2065 if ((rcount = (datalen - QLEN)/RLEN) == no) {
2066 log(LOG_DEBUG, 0, "packet with all reports filled in");
2067 return;
2068 }
2069
2070 log(LOG_DEBUG, 0, "s: %s g: %s d: %s ", inet_fmt(qry->tr_src, s1),
2071 inet_fmt(group, s2), inet_fmt(qry->tr_dst, s3));
2072 log(LOG_DEBUG, 0, "rttl: %d rd: %s", qry->tr_rttl,
2073 inet_fmt(qry->tr_raddr, s1));
2074 log(LOG_DEBUG, 0, "rcount:%d, qid:%06x", rcount, qry->tr_qid);
2075
2076 /* determine the routing table entry for this traceroute */
2077 rt = determine_route(qry->tr_src);
2078 if (rt) {
2079 log(LOG_DEBUG, 0, "rt parent vif: %d rtr: %s metric: %d",
2080 rt->rt_parent, inet_fmt(rt->rt_gateway, s1), rt->rt_metric);
2081 log(LOG_DEBUG, 0, "rt origin %s",
2082 inet_fmts(rt->rt_origin, rt->rt_originmask, s1));
2083 } else
2084 log(LOG_DEBUG, 0, "...no route");
2085
2086 /*
2087 * Query type packet - check if rte exists
2088 * Check if the query destination is a vif connected to me.
2089 * and if so, whether I should start response back
2090 */
2091 if (type == QUERY) {
2092 if (oqid == qry->tr_qid) {
2093 /*
2094 * If the multicast router is a member of the group being
2095 * queried, and the query is multicasted, then the router can
2096 * recieve multiple copies of the same query. If we have already
2097 * replied to this traceroute, just ignore it this time.
2098 *
2099 * This is not a total solution, but since if this fails you
2100 * only get N copies, N <= the number of interfaces on the router,
2101 * it is not fatal.
2102 */
2103 log(LOG_DEBUG, 0, "ignoring duplicate traceroute packet");
2104 return;
2105 }
2106
2107 if (rt == NULL) {
2108 log(LOG_DEBUG, 0, "Mcast traceroute: no route entry %s",
2109 inet_fmt(qry->tr_src, s1));
2110 if (IN_MULTICAST(ntohl(dst)))
2111 return;
2112 }
2113 vifi = find_vif(qry->tr_dst, 0);
2114
2115 if (vifi == NO_VIF) {
2116 /* The traceroute destination is not on one of my subnet vifs. */
2117 log(LOG_DEBUG, 0, "Destination %s not an interface",
2118 inet_fmt(qry->tr_dst, s1));
2119 if (IN_MULTICAST(ntohl(dst)))
2120 return;
2121 errcode = TR_WRONG_IF;
2122 } else if (rt != NULL && !VIFM_ISSET(vifi, rt->rt_children)) {
2123 log(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
2124 inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
2125 if (IN_MULTICAST(ntohl(dst)))
2126 return;
2127 errcode = TR_WRONG_IF;
2128 }
2129 }
2130 else {
2131 /*
2132 * determine which interface the packet came in on
2133 * RESP packets travel hop-by-hop so this either traversed
2134 * a tunnel or came from a directly attached mrouter.
2135 */
2136 if ((vifi = find_vif(src, dst)) == NO_VIF) {
2137 log(LOG_DEBUG, 0, "Wrong interface for packet");
2138 errcode = TR_WRONG_IF;
2139 }
2140 }
2141
2142 /* Now that we've decided to send a response, save the qid */
2143 oqid = qry->tr_qid;
2144
2145 log(LOG_DEBUG, 0, "Sending traceroute response");
2146
2147 /* copy the packet to the sending buffer */
2148 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
2149
2150 bcopy(data, p, datalen);
2151
2152 p += datalen;
2153
2154 /*
2155 * If there is no room to insert our reply, coopt the previous hop
2156 * error indication to relay this fact.
2157 */
2158 if (p + sizeof(struct tr_resp) > send_buf + RECV_BUF_SIZE) {
2159 resp = (struct tr_resp *)p - 1;
2160 resp->tr_rflags = TR_NO_SPACE;
2161 rt = NULL;
2162 goto sendit;
2163 }
2164
2165 /*
2166 * fill in initial response fields
2167 */
2168 resp = (struct tr_resp *)p;
2169 bzero(resp, sizeof(struct tr_resp));
2170 datalen += RLEN;
2171
2172 resp->tr_qarr = htonl((tp.tv_sec + JAN_1970) << 16) +
2173 ((tp.tv_usec >> 4) & 0xffff);
2174
2175 resp->tr_rproto = PROTO_DVMRP;
2176 if (errcode != TR_NO_ERR) {
2177 resp->tr_rflags = errcode;
2178 rt = NULL; /* hack to enforce send straight to requestor */
2179 goto sendit;
2180 }
2181 resp->tr_outaddr = uvifs[vifi].uv_lcl_addr;
2182 resp->tr_fttl = uvifs[vifi].uv_threshold;
2183 resp->tr_rflags = TR_NO_ERR;
2184
2185 /*
2186 * obtain # of packets out on interface
2187 */
2188 v_req.vifi = vifi;
2189 if (ioctl(igmp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
2190 resp->tr_vifout = htonl(v_req.ocount);
2191
2192 /*
2193 * fill in scoping & pruning information
2194 */
2195 if (rt)
2196 for (gt = rt->rt_groups; gt; gt = gt->gt_next) {
2197 if (gt->gt_mcastgrp >= group)
2198 break;
2199 }
2200 else
2201 gt = NULL;
2202
2203 if (gt && gt->gt_mcastgrp == group) {
2204 sg_req.src.s_addr = qry->tr_src;
2205 sg_req.grp.s_addr = group;
2206 if (ioctl(igmp_socket, SIOCGETSGCNT, (char *)&sg_req) >= 0)
2207 resp->tr_pktcnt = htonl(sg_req.pktcnt);
2208
2209 if (VIFM_ISSET(vifi, gt->gt_scope))
2210 resp->tr_rflags = TR_SCOPED;
2211 else if (gt->gt_prsent_timer)
2212 resp->tr_rflags = TR_PRUNED;
2213 else if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
2214 if (VIFM_ISSET(vifi, rt->rt_children) &&
2215 !VIFM_ISSET(vifi, rt->rt_leaves))
2216 resp->tr_rflags = TR_OPRUNED;
2217 else
2218 resp->tr_rflags = TR_NO_FWD;
2219 }
2220 } else {
2221 if (scoped_addr(vifi, group))
2222 resp->tr_rflags = TR_SCOPED;
2223 else if (rt && !VIFM_ISSET(vifi, rt->rt_children))
2224 resp->tr_rflags = TR_NO_FWD;
2225 }
2226
2227 /*
2228 * if no rte exists, set NO_RTE error
2229 */
2230 if (rt == NULL) {
2231 src = dst; /* the dst address of resp. pkt */
2232 resp->tr_inaddr = 0;
2233 resp->tr_rflags = TR_NO_RTE;
2234 resp->tr_rmtaddr = 0;
2235 } else {
2236 /* get # of packets in on interface */
2237 v_req.vifi = rt->rt_parent;
2238 if (ioctl(igmp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
2239 resp->tr_vifin = htonl(v_req.icount);
2240
2241 MASK_TO_VAL(rt->rt_originmask, resp->tr_smask);
2242 src = uvifs[rt->rt_parent].uv_lcl_addr;
2243 resp->tr_inaddr = src;
2244 resp->tr_rmtaddr = rt->rt_gateway;
2245 if (!VIFM_ISSET(vifi, rt->rt_children)) {
2246 log(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
2247 inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
2248 resp->tr_rflags = TR_WRONG_IF;
2249 }
2250 if (rt->rt_metric >= UNREACHABLE) {
2251 resp->tr_rflags = TR_NO_RTE;
2252 /* Hack to send reply directly */
2253 rt = NULL;
2254 }
2255 }
2256
2257 sendit:
2258 /*
2259 * if metric is 1 or no. of reports is 1, send response to requestor
2260 * else send to upstream router. If the upstream router can't handle
2261 * mtrace, set an error code and send to requestor anyway.
2262 */
2263 log(LOG_DEBUG, 0, "rcount:%d, no:%d", rcount, no);
2264
2265 if ((rcount + 1 == no) || (rt == NULL) || (rt->rt_metric == 1)) {
2266 resptype = IGMP_MTRACE_REPLY;
2267 dst = qry->tr_raddr;
2268 } else
2269 if (!can_mtrace(rt->rt_parent, rt->rt_gateway)) {
2270 dst = qry->tr_raddr;
2271 resp->tr_rflags = TR_OLD_ROUTER;
2272 resptype = IGMP_MTRACE_REPLY;
2273 } else {
2274 dst = rt->rt_gateway;
2275 resptype = IGMP_MTRACE_QUERY;
2276 }
2277
2278 if (IN_MULTICAST(ntohl(dst))) {
2279 /*
2280 * Send the reply on a known multicast capable vif.
2281 * If we don't have one, we can't source any multicasts anyway.
2282 */
2283 if (phys_vif != -1) {
2284 log(LOG_DEBUG, 0, "Sending reply to %s from %s",
2285 inet_fmt(dst, s1), inet_fmt(uvifs[phys_vif].uv_lcl_addr, s2));
2286 k_set_ttl(qry->tr_rttl);
2287 send_igmp(uvifs[phys_vif].uv_lcl_addr, dst,
2288 resptype, no, group,
2289 datalen);
2290 k_set_ttl(1);
2291 } else
2292 log(LOG_INFO, 0, "No enabled phyints -- %s",
2293 "dropping traceroute reply");
2294 } else {
2295 log(LOG_DEBUG, 0, "Sending %s to %s from %s",
2296 resptype == IGMP_MTRACE_REPLY ? "reply" : "request on",
2297 inet_fmt(dst, s1), inet_fmt(src, s2));
2298
2299 send_igmp(src, dst,
2300 resptype, no, group,
2301 datalen);
2302 }
2303 return;
2304 }
2305