/* $NetBSD: prune.c,v 1.16 2006/05/12 01:27:27 mrg Exp $ */
2
3 /*
4 * The mrouted program is covered by the license in the accompanying file
5 * named "LICENSE". Use of the mrouted program represents acceptance of
6 * the terms and conditions listed in that file.
7 *
8 * The mrouted program is COPYRIGHT 1989 by The Board of Trustees of
9 * Leland Stanford Junior University.
10 */
11
12
13 #include "defs.h"
14
15 extern int cache_lifetime;
16 extern int max_prune_lifetime;
17 extern struct rtentry *routing_table;
18
19 extern int phys_vif;
20
21 /*
22 * dither cache lifetime to obtain a value between x and 2*x
23 */
24 #define CACHE_LIFETIME(x) ((x) + (arc4random() % (x)))
25
/*
 * CHK_GS(x, y) - set y to 1 if x is a power of two in the range 2..128
 * (a legal grain size), otherwise set y to 0.
 */
#define CHK_GS(x, y) { \
	switch(x) { \
	    case 2: \
	    case 4: \
	    case 8: \
	    case 16: \
	    case 32: \
	    case 64: \
	    case 128: \
	    /*case 256:*/ y = 1; \
	    break; \
	    default: y = 0; \
	} \
    }
40
41 struct gtable *kernel_table; /* ptr to list of kernel grp entries*/
42 static struct gtable *kernel_no_route; /* list of grp entries w/o routes */
43 struct gtable *gtp; /* pointer for kernel rt entries */
44 unsigned int kroutes; /* current number of cache entries */
45
46 /****************************************************************************
47 Functions that are local to prune.c
48 ****************************************************************************/
49 static void prun_add_ttls(struct gtable *gt);
50 static int pruning_neighbor(vifi_t vifi, u_int32_t addr);
51 static int can_mtrace(vifi_t vifi, u_int32_t addr);
52 static struct ptable * find_prune_entry(u_int32_t vr, struct ptable *pt);
53 static void expire_prune(vifi_t vifi, struct gtable *gt);
54 static void send_prune(struct gtable *gt);
55 static void send_graft(struct gtable *gt);
56 static void send_graft_ack(u_int32_t src, u_int32_t dst,
57 u_int32_t origin, u_int32_t grp);
58 static void update_kernel(struct gtable *g);
59 static char * scaletime(u_long t);
60
61 /*
62 * Updates the ttl values for each vif.
63 */
64 static void
65 prun_add_ttls(struct gtable *gt)
66 {
67 struct uvif *v;
68 vifi_t vifi;
69
70 for (vifi = 0, v = uvifs; vifi < numvifs; ++vifi, ++v) {
71 if (VIFM_ISSET(vifi, gt->gt_grpmems))
72 gt->gt_ttls[vifi] = v->uv_threshold;
73 else
74 gt->gt_ttls[vifi] = 0;
75 }
76 }
77
/*
 * GET_SCOPE(gt) - compute the set of vifs on which this group is
 * administratively scoped.  Only groups inside 239.0.0.0/8 (the
 * administratively scoped range, 0xef000000) are checked; for those,
 * every vif whose boundary ACL matches the group address has its bit
 * set in gt->gt_scope.
 */
#define GET_SCOPE(gt) { \
	vifi_t _i; \
	if ((ntohl((gt)->gt_mcastgrp) & 0xff000000) == 0xef000000) \
	    for (_i = 0; _i < numvifs; _i++) \
		if (scoped_addr(_i, (gt)->gt_mcastgrp)) \
		    VIFM_SET(_i, (gt)->gt_scope); \
    }
88
89 int
90 scoped_addr(vifi_t vifi, u_int32_t addr)
91 {
92 struct vif_acl *acl;
93
94 for (acl = uvifs[vifi].uv_acl; acl; acl = acl->acl_next)
95 if ((addr & acl->acl_mask) == acl->acl_addr)
96 return 1;
97
98 return 0;
99 }
100
101 /*
102 * Determine if mcastgrp has a listener on vifi
103 */
104 int
105 grplst_mem(vifi_t vifi, u_int32_t mcastgrp)
106 {
107 struct listaddr *g;
108 struct uvif *v;
109
110 v = &uvifs[vifi];
111
112 for (g = v->uv_groups; g != NULL; g = g->al_next)
113 if (mcastgrp == g->al_addr)
114 return 1;
115
116 return 0;
117 }
118
/*
 * Finds the group entry with the specified source and netmask.
 * If netmask is 0, it uses the route's netmask.
 *
 * Returns TRUE if found a match, and the global variable gtp is left
 * pointing to entry before the found entry.
 * Returns FALSE if no exact match found, gtp is left pointing to before
 * the entry in question belongs, or is NULL if the it belongs at the
 * head of the list.
 */
int
find_src_grp(u_int32_t src, u_int32_t mask, u_int32_t grp)
{
    struct gtable *gt;

    gtp = NULL;
    gt = kernel_table;
    while (gt != NULL) {
	/*
	 * Exact hit: same group and either an exact (origin, mask)
	 * match or - when no mask was supplied - src falls inside the
	 * entry's origin subnet.
	 */
	if (grp == gt->gt_mcastgrp &&
	    (mask ? (gt->gt_route->rt_origin == src &&
		     gt->gt_route->rt_originmask == mask) :
		    ((src & gt->gt_route->rt_originmask) ==
		     gt->gt_route->rt_origin)))
	    return TRUE;
	/*
	 * The list is kept ordered by ascending group, then descending
	 * origin mask, then ascending origin (host byte order); keep
	 * walking while the sought key sorts after this entry,
	 * remembering the predecessor in gtp.
	 */
	if (ntohl(grp) > ntohl(gt->gt_mcastgrp) ||
	    (grp == gt->gt_mcastgrp &&
	     (ntohl(mask) < ntohl(gt->gt_route->rt_originmask) ||
	      (mask == gt->gt_route->rt_originmask &&
	       (ntohl(src) > ntohl(gt->gt_route->rt_origin)))))) {
	    gtp = gt;
	    gt = gt->gt_gnext;
	}
	else break;
    }
    return FALSE;
}
155
156 /*
157 * Check if the neighbor supports pruning
158 */
159 static int
160 pruning_neighbor(vifi_t vifi, u_int32_t addr)
161 {
162 struct listaddr *n = neighbor_info(vifi, addr);
163 int vers;
164
165 if (n == NULL)
166 return 0;
167
168 if (n->al_flags & NF_PRUNE)
169 return 1;
170
171 /*
172 * Versions from 3.0 to 3.4 relied on the version number to identify
173 * that they could handle pruning.
174 */
175 vers = NBR_VERS(n);
176 return (vers >= 0x0300 && vers <= 0x0304);
177 }
178
179 /*
180 * Can the neighbor in question handle multicast traceroute?
181 */
182 static int
183 can_mtrace(vifi_t vifi, u_int32_t addr)
184 {
185 struct listaddr *n = neighbor_info(vifi, addr);
186 int vers;
187
188 if (n == NULL)
189 return 0;
190
191 if (n->al_flags & NF_MTRACE)
192 return 1;
193
194 /*
195 * Versions 3.3 and 3.4 relied on the version number to identify
196 * that they could handle traceroute.
197 */
198 vers = NBR_VERS(n);
199 return (vers >= 0x0303 && vers <= 0x0304);
200 }
201
202 /*
203 * Returns the prune entry of the router, or NULL if none exists
204 */
205 static struct ptable *
206 find_prune_entry(u_int32_t vr, struct ptable *pt)
207 {
208 while (pt) {
209 if (pt->pt_router == vr)
210 return pt;
211 pt = pt->pt_next;
212 }
213
214 return NULL;
215 }
216
217 /*
218 * Send a prune message to the dominant router for
219 * this source.
220 *
221 * Record an entry that a prune was sent for this group
222 */
223 static void
224 send_prune(struct gtable *gt)
225 {
226 struct ptable *pt;
227 char *p;
228 int i;
229 int datalen;
230 u_int32_t src;
231 u_int32_t dst;
232 u_int32_t tmp;
233
234 /* Don't process any prunes if router is not pruning */
235 if (pruning == 0)
236 return;
237
238 /* Can't process a prune if we don't have an associated route */
239 if (gt->gt_route == NULL)
240 return;
241
242 /* Don't send a prune to a non-pruning router */
243 if (!pruning_neighbor(gt->gt_route->rt_parent, gt->gt_route->rt_gateway))
244 return;
245
246 /*
247 * sends a prune message to the router upstream.
248 */
249 src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
250 dst = gt->gt_route->rt_gateway;
251
252 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
253 datalen = 0;
254
255 /*
256 * determine prune lifetime
257 */
258 gt->gt_prsent_timer = gt->gt_timer;
259 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next)
260 if (pt->pt_timer < gt->gt_prsent_timer)
261 gt->gt_prsent_timer = pt->pt_timer;
262
263 /*
264 * If we have a graft pending, cancel graft retransmission
265 */
266 gt->gt_grftsnt = 0;
267
268 for (i = 0; i < 4; i++)
269 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
270 for (i = 0; i < 4; i++)
271 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
272 tmp = htonl(gt->gt_prsent_timer);
273 for (i = 0; i < 4; i++)
274 *p++ = ((char *)&(tmp))[i];
275 datalen += 12;
276
277 send_igmp(src, dst, IGMP_DVMRP, DVMRP_PRUNE,
278 htonl(MROUTED_LEVEL), datalen);
279
280 logit(LOG_DEBUG, 0, "sent prune for (%s %s)/%d on vif %d to %s",
281 inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask),
282 inet_fmt(gt->gt_mcastgrp),
283 gt->gt_prsent_timer, gt->gt_route->rt_parent,
284 inet_fmt(gt->gt_route->rt_gateway));
285 }
286
287 /*
288 * a prune was sent upstream
289 * so, a graft has to be sent to annul the prune
290 * set up a graft timer so that if an ack is not
291 * heard within that time, another graft request
292 * is sent out.
293 */
294 static void
295 send_graft(struct gtable *gt)
296 {
297 char *p;
298 int i;
299 int datalen;
300 u_int32_t src;
301 u_int32_t dst;
302
303 /* Can't send a graft without an associated route */
304 if (gt->gt_route == NULL)
305 return;
306
307 src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
308 dst = gt->gt_route->rt_gateway;
309
310 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
311 datalen = 0;
312
313 for (i = 0; i < 4; i++)
314 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
315 for (i = 0; i < 4; i++)
316 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
317 datalen += 8;
318
319 if (datalen != 0) {
320 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT,
321 htonl(MROUTED_LEVEL), datalen);
322 }
323 logit(LOG_DEBUG, 0, "sent graft for (%s %s) to %s on vif %d",
324 inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask),
325 inet_fmt(gt->gt_mcastgrp),
326 inet_fmt(gt->gt_route->rt_gateway),
327 gt->gt_route->rt_parent);
328 }
329
330 /*
331 * Send an ack that a graft was received
332 */
333 static void
334 send_graft_ack(u_int32_t src, u_int32_t dst, u_int32_t origin, u_int32_t grp)
335 {
336 char *p;
337 int i;
338 int datalen;
339
340 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
341 datalen = 0;
342
343 for (i = 0; i < 4; i++)
344 *p++ = ((char *)&(origin))[i];
345 for (i = 0; i < 4; i++)
346 *p++ = ((char *)&(grp))[i];
347 datalen += 8;
348
349 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT_ACK,
350 htonl(MROUTED_LEVEL), datalen);
351
352 logit(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s",
353 inet_fmt(origin), inet_fmt(grp),
354 inet_fmt(dst));
355 }
356
357 /*
358 * Update the kernel cache with all the routes hanging off the group entry
359 */
360 static void
361 update_kernel(struct gtable *g)
362 {
363 struct stable *st;
364
365 for (st = g->gt_srctbl; st; st = st->st_next)
366 k_add_rg(st->st_origin, g);
367 }
368
369 /****************************************************************************
370 Functions that are used externally
371 ****************************************************************************/
372
373 #ifdef SNMP
374 #include <sys/types.h>
375 #include "snmp.h"
376
377 /*
378 * Find a specific group entry in the group table
379 */
380 struct gtable *
381 find_grp(u_long grp)
382 {
383 struct gtable *gt;
384
385 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
386 if (ntohl(grp) < ntohl(gt->gt_mcastgrp))
387 break;
388 if (gt->gt_mcastgrp == grp)
389 return gt;
390 }
391 return NULL;
392 }
393
394 /*
395 * Given a group entry and source, find the corresponding source table
396 * entry
397 */
398 struct stable *
399 find_grp_src(struct gtable *gt, u_long src)
400 {
401 struct stable *st;
402 u_long grp = gt->gt_mcastgrp;
403 struct gtable *gtcurr;
404
405 for (gtcurr = gt; gtcurr->gt_mcastgrp == grp; gtcurr = gtcurr->gt_gnext) {
406 for (st = gtcurr->gt_srctbl; st; st = st->st_next)
407 if (st->st_origin == src)
408 return st;
409 }
410 return NULL;
411 }
412
413 /*
414 * Find next entry > specification
415 *
416 * gtpp: ordered by group
417 * stpp: ordered by source
418 */
419 int
420 next_grp_src_mask(struct gtable **gtpp, struct stable **stpp, u_long grp,
421 u_long src, u_long mask)
422 {
423 struct gtable *gt, *gbest = NULL;
424 struct stable *st, *sbest = NULL;
425
426 /* Find first group entry >= grp spec */
427 (*gtpp) = kernel_table;
428 while ((*gtpp) && ntohl((*gtpp)->gt_mcastgrp) < ntohl(grp))
429 (*gtpp)=(*gtpp)->gt_gnext;
430 if (!(*gtpp))
431 return 0; /* no more groups */
432
433 for (gt = kernel_table; gt; gt=gt->gt_gnext) {
434 /* Since grps are ordered, we can stop when group changes from gbest */
435 if (gbest && gbest->gt_mcastgrp != gt->gt_mcastgrp)
436 break;
437 for (st = gt->gt_srctbl; st; st=st->st_next) {
438
439 /* Among those entries > spec, find "lowest" one */
440 if (((ntohl(gt->gt_mcastgrp)> ntohl(grp))
441 || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
442 && ntohl(st->st_origin)> ntohl(src))
443 || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
444 && ntohl(st->st_origin)==src && 0xFFFFFFFF>ntohl(mask)))
445 && (!gbest
446 || (ntohl(gt->gt_mcastgrp)< ntohl(gbest->gt_mcastgrp))
447 || (ntohl(gt->gt_mcastgrp)==ntohl(gbest->gt_mcastgrp)
448 && ntohl(st->st_origin)< ntohl(sbest->st_origin)))) {
449 gbest = gt;
450 sbest = st;
451 }
452 }
453 }
454 (*gtpp) = gbest;
455 (*stpp) = sbest;
456 return (*gtpp)!=0;
457 }
458
/*
 * Ensure that sg contains current information for the given group,source.
 * This is fetched from the kernel as a unit so that counts for the entry
 * are consistent, i.e. packet and byte counts for the same entry are
 * read at the same time.
 */
void
refresh_sg(struct sioc_sg_req *sg, struct gtable *gt, struct stable *st)
{
    /* quantum at the last fetch; re-fetch only when time has advanced
     * or the caller asks about a different (source, group) pair. */
    static int lastq = -1;

    if (quantum != lastq || sg->src.s_addr != st->st_origin
	|| sg->grp.s_addr != gt->gt_mcastgrp) {
	lastq = quantum;
	sg->src.s_addr = st->st_origin;
	sg->grp.s_addr = gt->gt_mcastgrp;
	/* NOTE(review): ioctl result is ignored - on failure sg keeps
	 * whatever the kernel left in it; confirm that is acceptable. */
	ioctl(igmp_socket, SIOCGETSGCNT, (char *)sg);
    }
}
478
479 /*
480 * Return pointer to a specific route entry. This must be a separate
481 * function from find_route() which modifies rtp.
482 */
483 struct rtentry *
484 snmp_find_route(u_long src, u_long mask)
485 {
486 struct rtentry *rt;
487
488 for (rt = routing_table; rt; rt = rt->rt_next) {
489 if (src == rt->rt_origin && mask == rt->rt_originmask)
490 return rt;
491 }
492 return NULL;
493 }
494
495 /*
496 * Find next route entry > specification
497 */
498 int
499 next_route(struct rtentry **rtpp, u_long src, u_long mask)
500 {
501 struct rtentry *rt, *rbest = NULL;
502
503 /* Among all entries > spec, find "lowest" one in order */
504 for (rt = routing_table; rt; rt=rt->rt_next) {
505 if ((ntohl(rt->rt_origin) > ntohl(src)
506 || (ntohl(rt->rt_origin) == ntohl(src)
507 && ntohl(rt->rt_originmask) > ntohl(mask)))
508 && (!rbest || (ntohl(rt->rt_origin) < ntohl(rbest->rt_origin))
509 || (ntohl(rt->rt_origin) == ntohl(rbest->rt_origin)
510 && ntohl(rt->rt_originmask) < ntohl(rbest->rt_originmask))))
511 rbest = rt;
512 }
513 (*rtpp) = rbest;
514 return (*rtpp)!=0;
515 }
516
/*
 * Given a routing table entry, and a vifi, find the next vifi/entry
 * with a child vif set.
 *
 * vifi: vifi at which to start looking
 *
 * Returns 1 with *rtpp and *vifi set on success, 0 when exhausted.
 */
int
next_route_child(struct rtentry **rtpp, u_long src, u_long mask, vifi_t *vifi)
{
    struct rtentry *rt;

    /* Get (S,M) entry: exact match first, else the next one in order */
    if (!((*rtpp) = snmp_find_route(src, mask)))
	if (!next_route(rtpp, src, mask))
	    return 0;	/* no route entries at all past the spec */

    /* Continue until we get one with a valid next vif */
    do {
	for (; (*rtpp)->rt_children && *vifi < numvifs; (*vifi)++)
	    if (VIFM_ISSET(*vifi, (*rtpp)->rt_children))
		return 1;	/* found a child vif on this route */
	*vifi = 0;	/* restart the vif scan for the next route */
    } while (next_route(rtpp, (*rtpp)->rt_origin, (*rtpp)->rt_originmask));

    return 0;
}
542
/*
 * Given a routing table entry, and a vifi, find the next entry
 * equal to or greater than those.
 *
 * vifi: vifi at which to start looking
 *
 * Returns 1 with *gtpp, *stpp and *vifi set on success, 0 otherwise.
 */
int
next_child(struct gtable **gtpp, struct stable **stpp, u_long grp, u_long src,
	u_long mask, vifi_t *vifi)
{
    struct stable *st;

    /* Get (G,S,M) entry: an exact lookup only applies for a full host
     * mask; otherwise fall through to the "next greater" search. */
    if (mask != 0xFFFFFFFF
	|| !((*gtpp) = find_grp(grp))
	|| !((*stpp) = find_grp_src((*gtpp), src)))
	if (!next_grp_src_mask(gtpp, stpp, grp, src, mask))
	    return 0;	/* no more (G,S,M) entries */

    /* Continue until we get one with a valid next vif */
    do {
	for (; (*gtpp)->gt_route->rt_children && *vifi < numvifs; (*vifi)++)
	    if (VIFM_ISSET(*vifi, (*gtpp)->gt_route->rt_children))
		return 1;	/* found a child vif for this entry */
	*vifi = 0;	/* restart the vif scan for the next entry */
    } while (next_grp_src_mask(gtpp, stpp, (*gtpp)->gt_mcastgrp,
	     (*stpp)->st_origin, 0xFFFFFFFF));

    return 0;
}
573 #endif /* SNMP */
574
575 /*
576 * Initialize the kernel table structure
577 */
578 void
579 init_ktable(void)
580 {
581 kernel_table = NULL;
582 kernel_no_route = NULL;
583 kroutes = 0;
584 }
585
/*
 * Add a new table entry for (origin, mcastgrp): called on a kernel
 * cache miss.  Creates the group entry if needed (linking it into the
 * per-route list and the global kernel_table, or into kernel_no_route
 * when no route exists), adds the source, installs the kernel cache
 * entry, and sends a prune upstream if nothing wants the traffic.
 */
void
add_table_entry(u_int32_t origin, u_int32_t mcastgrp)
{
    struct rtentry *r;
    struct gtable *gt, **gtnp, *prev_gt;
    struct stable *st, **stnp;
    vifi_t i;

#ifdef DEBUG_MFC
    md_log(MD_MISS, origin, mcastgrp);
#endif

    r = determine_route(origin);
    prev_gt = NULL;
    if (r == NULL) {
	/*
	 * Look for it on the no_route table; if it is found then
	 * it will be detected as a duplicate below.
	 */
	for (gt = kernel_no_route; gt; gt = gt->gt_next)
	    if (mcastgrp == gt->gt_mcastgrp &&
		gt->gt_srctbl && gt->gt_srctbl->st_origin == origin)
		break;
	gtnp = &kernel_no_route;
    } else {
	/* Find the insertion point in the route's group list, kept
	 * sorted by group address. */
	gtnp = &r->rt_groups;
	while ((gt = *gtnp) != NULL) {
	    if (gt->gt_mcastgrp >= mcastgrp)
		break;
	    gtnp = &gt->gt_next;
	    prev_gt = gt;
	}
    }

    if (gt == NULL || gt->gt_mcastgrp != mcastgrp) {
	/* No entry yet for this group on this route - create one */
	gt = (struct gtable *)malloc(sizeof(struct gtable));
	if (gt == NULL)
	    /* NOTE(review): assumes logit(LOG_ERR, ...) does not
	     * return, else gt is dereferenced NULL below - confirm. */
	    logit(LOG_ERR, 0, "ran out of memory");

	gt->gt_mcastgrp = mcastgrp;
	gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
	time(&gt->gt_ctime);
	gt->gt_grpmems = 0;
	gt->gt_scope = 0;
	gt->gt_prsent_timer = 0;
	gt->gt_grftsnt = 0;
	gt->gt_srctbl = NULL;
	gt->gt_pruntbl = NULL;
	gt->gt_route = r;
#ifdef RSRR
	gt->gt_rsrr_cache = NULL;
#endif

	if (r != NULL) {
	    /* obtain the multicast group membership list */
	    for (i = 0; i < numvifs; i++) {
		/* forward on every non-leaf child vif ... */
		if (VIFM_ISSET(i, r->rt_children) &&
		    !(VIFM_ISSET(i, r->rt_leaves)))
		    VIFM_SET(i, gt->gt_grpmems);

		/* ... and on leaf vifs with a local group member */
		if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, mcastgrp))
		    VIFM_SET(i, gt->gt_grpmems);
	    }
	    GET_SCOPE(gt);
	    if (VIFM_ISSET(r->rt_parent, gt->gt_scope))
		gt->gt_scope = -1;	/* scoped on parent: block everywhere */
	    gt->gt_grpmems &= ~gt->gt_scope;
	} else {
	    /* No route: never forward */
	    gt->gt_scope = -1;
	    gt->gt_grpmems = 0;
	}

	/* update ttls */
	prun_add_ttls(gt);

	/* Link into the per-route (or no-route) doubly-linked list */
	gt->gt_next = *gtnp;
	*gtnp = gt;
	if (gt->gt_next)
	    gt->gt_next->gt_prev = gt;
	gt->gt_prev = prev_gt;

	if (r) {
	    /* Link into the sorted global kernel_table; find_src_grp
	     * leaves the predecessor in the global gtp. */
	    if (find_src_grp(r->rt_origin, r->rt_originmask, gt->gt_mcastgrp)) {
		struct gtable *g;

		g = gtp ? gtp->gt_gnext : kernel_table;
		logit(LOG_WARNING, 0, "Entry for (%s %s) (rt:%p) exists (rt:%p)",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(g->gt_mcastgrp),
		    r, g->gt_route);
	    } else {
		if (gtp) {
		    gt->gt_gnext = gtp->gt_gnext;
		    gt->gt_gprev = gtp;
		    gtp->gt_gnext = gt;
		} else {
		    gt->gt_gnext = kernel_table;
		    gt->gt_gprev = NULL;
		    kernel_table = gt;
		}
		if (gt->gt_gnext)
		    gt->gt_gnext->gt_gprev = gt;
	    }
	} else {
	    gt->gt_gnext = gt->gt_gprev = NULL;
	}
    }

    /* Find the insertion point in the entry's source list, sorted by
     * origin in host byte order. */
    stnp = &gt->gt_srctbl;
    while ((st = *stnp) != NULL) {
	if (ntohl(st->st_origin) >= ntohl(origin))
	    break;
	stnp = &st->st_next;
    }

    if (st == NULL || st->st_origin != origin) {
	/* New source for this group */
	st = (struct stable *)malloc(sizeof(struct stable));
	if (st == NULL)
	    logit(LOG_ERR, 0, "ran out of memory");

	st->st_origin = origin;
	st->st_pktcnt = 0;
	st->st_next = *stnp;
	*stnp = st;
    } else {
#ifdef DEBUG_MFC
	md_log(MD_DUPE, origin, mcastgrp);
#endif
	logit(LOG_WARNING, 0, "kernel entry already exists for (%s %s)",
	    inet_fmt(origin),
	    inet_fmt(mcastgrp));
	/* XXX Doing this should cause no harm, and may ensure
	 * kernel<>mrouted synchronization */
	k_add_rg(origin, gt);
	return;
    }

    kroutes++;
    k_add_rg(origin, gt);

    logit(LOG_DEBUG, 0, "add cache entry (%s %s) gm:%x, parent-vif:%d",
	inet_fmt(origin),
	inet_fmt(mcastgrp),
	gt->gt_grpmems, r ? r->rt_parent : -1);

    /* If there are no leaf vifs
     * which have this group, then
     * mark this src-grp as a prune candidate.
     */
    if (!gt->gt_prsent_timer && !gt->gt_grpmems && r && r->rt_gateway)
	send_prune(gt);
}
741
/*
 * An mrouter has gone down and come up on an interface.
 * Forward on that interface immediately: if it was our parent the
 * kernel source cache and prune-sent state are flushed so prunes get
 * regenerated; otherwise its prunes are discarded and grafts sent for
 * any groups we had pruned upstream.
 */
void
reset_neighbor_state(vifi_t vifi, u_int32_t addr)
{
    struct rtentry *r;
    struct gtable *g;
    struct ptable *pt, **ptnp;
    struct stable *st;

    for (g = kernel_table; g; g = g->gt_gnext) {
	r = g->gt_route;

	/*
	 * If neighbor was the parent, remove the prune sent state
	 * and all of the source cache info so that prunes get
	 * regenerated.
	 */
	if (vifi == r->rt_parent) {
	    if (addr == r->rt_gateway) {
		logit(LOG_DEBUG, 0, "reset_neighbor_state parent reset (%s %s)",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(g->gt_mcastgrp));

		g->gt_prsent_timer = 0;
		g->gt_grftsnt = 0;
		/* Drop every kernel cache entry for this group */
		while ((st = g->gt_srctbl) != NULL) {
		    g->gt_srctbl = st->st_next;
		    k_del_rg(st->st_origin, g);
		    kroutes--;
		    free(st);
		}
	    }
	} else {
	    /*
	     * Neighbor was not the parent, send grafts to join the groups
	     */
	    if (g->gt_prsent_timer) {
		g->gt_grftsnt = 1;	/* arm graft retransmission */
		send_graft(g);
		g->gt_prsent_timer = 0;
	    }

	    /*
	     * Remove any prunes that this router has sent us.
	     */
	    ptnp = &g->gt_pruntbl;
	    while ((pt = *ptnp) != NULL) {
		if (pt->pt_vifi == vifi && pt->pt_router == addr) {
		    *ptnp = pt->pt_next;
		    free(pt);
		} else
		    ptnp = &pt->pt_next;
	    }

	    /*
	     * And see if we want to forward again.
	     */
	    if (!VIFM_ISSET(vifi, g->gt_grpmems)) {
		/* forward if vifi is a non-leaf child ... */
		if (VIFM_ISSET(vifi, r->rt_children) &&
		    !(VIFM_ISSET(vifi, r->rt_leaves)))
		    VIFM_SET(vifi, g->gt_grpmems);

		/* ... or a leaf with a local group member */
		if (VIFM_ISSET(vifi, r->rt_leaves) &&
		    grplst_mem(vifi, g->gt_mcastgrp))
		    VIFM_SET(vifi, g->gt_grpmems);

		g->gt_grpmems &= ~g->gt_scope;
		prun_add_ttls(g);

		/* Update kernel state */
		update_kernel(g);
#ifdef RSRR
		/* Send route change notification to reservation protocol. */
		rsrr_cache_send(g,1);
#endif /* RSRR */

		logit(LOG_DEBUG, 0, "reset member state (%s %s) gm:%x",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(g->gt_mcastgrp), g->gt_grpmems);
	    }
	}
    }
}
828
829 /*
830 * Delete table entry from the kernel
831 * del_flag determines how many entries to delete
832 */
833 void
834 del_table_entry(struct rtentry *r, u_int32_t mcastgrp, u_int del_flag)
835 {
836 struct gtable *g, *prev_g;
837 struct stable *st, *prev_st;
838 struct ptable *pt, *prev_pt;
839
840 if (del_flag == DEL_ALL_ROUTES) {
841 g = r->rt_groups;
842 while (g) {
843 logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
844 inet_fmts(r->rt_origin, r->rt_originmask),
845 inet_fmt(g->gt_mcastgrp));
846 st = g->gt_srctbl;
847 while (st) {
848 if (k_del_rg(st->st_origin, g) < 0) {
849 logit(LOG_WARNING, errno,
850 "del_table_entry trying to delete (%s, %s)",
851 inet_fmt(st->st_origin),
852 inet_fmt(g->gt_mcastgrp));
853 }
854 kroutes--;
855 prev_st = st;
856 st = st->st_next;
857 free(prev_st);
858 }
859 g->gt_srctbl = NULL;
860
861 pt = g->gt_pruntbl;
862 while (pt) {
863 prev_pt = pt;
864 pt = pt->pt_next;
865 free(prev_pt);
866 }
867 g->gt_pruntbl = NULL;
868
869 if (g->gt_gnext)
870 g->gt_gnext->gt_gprev = g->gt_gprev;
871 if (g->gt_gprev)
872 g->gt_gprev->gt_gnext = g->gt_gnext;
873 else
874 kernel_table = g->gt_gnext;
875
876 #ifdef RSRR
877 /* Send route change notification to reservation protocol. */
878 rsrr_cache_send(g,0);
879 rsrr_cache_clean(g);
880 #endif /* RSRR */
881 prev_g = g;
882 g = g->gt_next;
883 free(prev_g);
884 }
885 r->rt_groups = NULL;
886 }
887
888 /*
889 * Dummy routine - someday this may be needed, so it is just there
890 */
891 if (del_flag == DEL_RTE_GROUP) {
892 prev_g = (struct gtable *)&r->rt_groups;
893 for (g = r->rt_groups; g; g = g->gt_next) {
894 if (g->gt_mcastgrp == mcastgrp) {
895 logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
896 inet_fmts(r->rt_origin, r->rt_originmask),
897 inet_fmt(g->gt_mcastgrp));
898 st = g->gt_srctbl;
899 while (st) {
900 if (k_del_rg(st->st_origin, g) < 0) {
901 logit(LOG_WARNING, errno,
902 "del_table_entry trying to delete (%s, %s)",
903 inet_fmt(st->st_origin),
904 inet_fmt(g->gt_mcastgrp));
905 }
906 kroutes--;
907 prev_st = st;
908 st = st->st_next;
909 free(prev_st);
910 }
911 g->gt_srctbl = NULL;
912
913 pt = g->gt_pruntbl;
914 while (pt) {
915 prev_pt = pt;
916 pt = pt->pt_next;
917 free(prev_pt);
918 }
919 g->gt_pruntbl = NULL;
920
921 if (g->gt_gnext)
922 g->gt_gnext->gt_gprev = g->gt_gprev;
923 if (g->gt_gprev)
924 g->gt_gprev->gt_gnext = g->gt_gnext;
925 else
926 kernel_table = g->gt_gnext;
927
928 if (prev_g != (struct gtable *)&r->rt_groups)
929 g->gt_next->gt_prev = prev_g;
930 else
931 g->gt_next->gt_prev = NULL;
932 prev_g->gt_next = g->gt_next;
933
934 #ifdef RSRR
935 /* Send route change notification to reservation protocol. */
936 rsrr_cache_send(g,0);
937 rsrr_cache_clean(g);
938 #endif /* RSRR */
939 free(g);
940 g = prev_g;
941 } else {
942 prev_g = g;
943 }
944 }
945 }
946 }
947
/*
 * Update kernel table entries when a route entry changes: for every
 * group on the route, discard received prunes, recompute the member
 * vif set, graft upstream if traffic is wanted again, push the new
 * state to the kernel, and prune upstream if nothing wants it.
 */
void
update_table_entry(struct rtentry *r)
{
    struct gtable *g;
    struct ptable *pt, *prev_pt;
    vifi_t i;

    for (g = r->rt_groups; g; g = g->gt_next) {
	/* The old prunes no longer apply to the new route - drop them */
	pt = g->gt_pruntbl;
	while (pt) {
	    prev_pt = pt->pt_next;
	    free(pt);
	    pt = prev_pt;
	}
	g->gt_pruntbl = NULL;

	g->gt_grpmems = 0;

	/* obtain the multicast group membership list */
	for (i = 0; i < numvifs; i++) {
	    /* forward on every non-leaf child vif ... */
	    if (VIFM_ISSET(i, r->rt_children) &&
		!(VIFM_ISSET(i, r->rt_leaves)))
		VIFM_SET(i, g->gt_grpmems);

	    /* ... and on leaf vifs with a local group member */
	    if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, g->gt_mcastgrp))
		VIFM_SET(i, g->gt_grpmems);
	}
	if (VIFM_ISSET(r->rt_parent, g->gt_scope))
	    g->gt_scope = -1;	/* scoped on parent: block everywhere */
	g->gt_grpmems &= ~g->gt_scope;

	logit(LOG_DEBUG, 0, "updating cache entries (%s %s) gm:%x",
	    inet_fmts(r->rt_origin, r->rt_originmask),
	    inet_fmt(g->gt_mcastgrp),
	    g->gt_grpmems);

	/* Traffic is wanted again but we pruned - graft upstream */
	if (g->gt_grpmems && g->gt_prsent_timer) {
	    g->gt_grftsnt = 1;
	    send_graft(g);
	    g->gt_prsent_timer = 0;
	}

	/* update ttls and add entry into kernel */
	prun_add_ttls(g);
	update_kernel(g);
#ifdef RSRR
	/* Send route change notification to reservation protocol. */
	rsrr_cache_send(g,1);
#endif /* RSRR */

	/* Check if we want to prune this group */
	if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
	    g->gt_timer = CACHE_LIFETIME(cache_lifetime);
	    send_prune(g);
	}
    }
}
1008
1009 /*
1010 * set the forwarding flag for all mcastgrps on this vifi
1011 */
1012 void
1013 update_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
1014 {
1015 struct rtentry *r;
1016 struct gtable *g;
1017
1018 logit(LOG_DEBUG, 0, "group %s joined on vif %d",
1019 inet_fmt(mcastgrp), vifi);
1020
1021 for (g = kernel_table; g; g = g->gt_gnext) {
1022 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1023 break;
1024
1025 r = g->gt_route;
1026 if (g->gt_mcastgrp == mcastgrp &&
1027 VIFM_ISSET(vifi, r->rt_children)) {
1028
1029 VIFM_SET(vifi, g->gt_grpmems);
1030 g->gt_grpmems &= ~g->gt_scope;
1031 if (g->gt_grpmems == 0)
1032 continue;
1033
1034 prun_add_ttls(g);
1035 logit(LOG_DEBUG, 0, "update lclgrp (%s %s) gm:%x",
1036 inet_fmts(r->rt_origin, r->rt_originmask),
1037 inet_fmt(g->gt_mcastgrp), g->gt_grpmems);
1038
1039 update_kernel(g);
1040 #ifdef RSRR
1041 /* Send route change notification to reservation protocol. */
1042 rsrr_cache_send(g,1);
1043 #endif /* RSRR */
1044 }
1045 }
1046 }
1047
/*
 * Reset the forwarding flag for all cache entries of mcastgrp after
 * the last local member left on vif vifi.  On a non-leaf vif,
 * forwarding only stops if every router neighbor has also pruned;
 * if the whole entry goes idle, a prune is sent upstream.
 */
void
delete_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
{
    struct rtentry *r;
    struct gtable *g;

    logit(LOG_DEBUG, 0, "group %s left on vif %d",
	inet_fmt(mcastgrp), vifi);

    for (g = kernel_table; g; g = g->gt_gnext) {
	/* Table is sorted by group; stop once we have passed it */
	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
	    break;

	if (g->gt_mcastgrp == mcastgrp) {
	    int stop_sending = 1;

	    r = g->gt_route;
	    /*
	     * If this is not a leaf, then we have router neighbors on this
	     * vif.  Only turn off forwarding if they have all pruned.
	     */
	    if (!VIFM_ISSET(vifi, r->rt_leaves)) {
		struct listaddr *vr;

		for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
		    if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
			stop_sending = 0;
			break;
		    }
	    }

	    if (stop_sending) {
		VIFM_CLR(vifi, g->gt_grpmems);
		logit(LOG_DEBUG, 0, "delete lclgrp (%s %s) gm:%x",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(g->gt_mcastgrp), g->gt_grpmems);

		prun_add_ttls(g);
		update_kernel(g);
#ifdef RSRR
		/* Send route change notification to reservation protocol. */
		rsrr_cache_send(g,1);
#endif /* RSRR */

		/*
		 * If there are no more members of this particular group,
		 * send prune upstream
		 */
		if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway)
		    send_prune(g);
	    }
	}
    }
}
1105
/*
 * Takes the prune message received and then strips it to
 * determine the (src, grp) pair to be pruned.
 *
 * Adds the router to the (src, grp) entry then.
 *
 * Determines if further packets have to be sent down that vif
 *
 * Determines if a corresponding prune message has to be generated
 *
 * src, dst: IP source/destination of the received IGMP packet
 * p:        payload (12 bytes: origin, group, lifetime)
 * datalen:  payload length in bytes
 */
void
accept_prune(u_int32_t src, u_int32_t dst, char *p, int datalen)
{
    u_int32_t prun_src;
    u_int32_t prun_grp;
    u_int32_t prun_tmr;
    vifi_t vifi;
    int i;
    int stop_sending;
    struct rtentry *r;
    struct gtable *g;
    struct ptable *pt;
    struct listaddr *vr;

    /* Don't process any prunes if router is not pruning */
    if (pruning == 0)
	return;

    if ((vifi = find_vif(src, dst)) == NO_VIF) {
	logit(LOG_INFO, 0,
	    "ignoring prune report from non-neighbor %s",
	    inet_fmt(src));
	return;
    }

    /* Check if enough data is present */
    if (datalen < 12)
    {
	logit(LOG_WARNING, 0,
	    "non-decipherable prune from %s",
	    inet_fmt(src));
	return;
    }

    /* Unmarshal origin, group and timer (bytewise: p may be unaligned) */
    for (i = 0; i < 4; i++)
	((char *)&prun_src)[i] = *p++;
    for (i = 0; i < 4; i++)
	((char *)&prun_grp)[i] = *p++;
    for (i = 0; i < 4; i++)
	((char *)&prun_tmr)[i] = *p++;
    prun_tmr = ntohl(prun_tmr);

    logit(LOG_DEBUG, 0, "%s on vif %d prunes (%s %s)/%d",
	inet_fmt(src), vifi,
	inet_fmt(prun_src), inet_fmt(prun_grp), prun_tmr);

    /*
     * Find the subnet for the prune
     */
    if (find_src_grp(prun_src, 0, prun_grp)) {
	/* find_src_grp left the predecessor in the global gtp */
	g = gtp ? gtp->gt_gnext : kernel_table;
	r = g->gt_route;

	if (!VIFM_ISSET(vifi, r->rt_children)) {
	    logit(LOG_WARNING, 0, "prune received from non-child %s for (%s %s)",
		inet_fmt(src), inet_fmt(prun_src),
		inet_fmt(prun_grp));
	    return;
	}
	if (VIFM_ISSET(vifi, g->gt_scope)) {
	    logit(LOG_WARNING, 0, "prune received from %s on scoped grp (%s %s)",
		inet_fmt(src), inet_fmt(prun_src),
		inet_fmt(prun_grp));
	    return;
	}
	if ((pt = find_prune_entry(src, g->gt_pruntbl)) != NULL) {
	    /*
	     * If it's about to expire, then it's only still around because
	     * of timer granularity, so don't warn about it.
	     */
	    if (pt->pt_timer > 10) {
		logit(LOG_WARNING, 0, "%s %d from %s for (%s %s)/%d %s %d %s %x",
		    "duplicate prune received on vif",
		    vifi, inet_fmt(src), inet_fmt(prun_src),
		    inet_fmt(prun_grp), prun_tmr,
		    "old timer:", pt->pt_timer, "cur gm:", g->gt_grpmems);
	    }
	    pt->pt_timer = prun_tmr;
	} else {
	    /* allocate space for the prune structure */
	    pt = (struct ptable *)(malloc(sizeof(struct ptable)));
	    if (pt == NULL)
		logit(LOG_ERR, 0, "pt: ran out of memory");

	    pt->pt_vifi = vifi;
	    pt->pt_router = src;
	    pt->pt_timer = prun_tmr;

	    pt->pt_next = g->gt_pruntbl;
	    g->gt_pruntbl = pt;
	}

	/* Refresh the group's lifetime */
	g->gt_timer = CACHE_LIFETIME(cache_lifetime);
	if (g->gt_timer < prun_tmr)
	    g->gt_timer = prun_tmr;

	/*
	 * check if any more packets need to be sent on the
	 * vif which sent this message
	 */
	stop_sending = 1;
	for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
	    if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
		stop_sending = 0;
		break;
	    }

	if (stop_sending && !grplst_mem(vifi, prun_grp)) {
	    VIFM_CLR(vifi, g->gt_grpmems);
	    logit(LOG_DEBUG, 0, "prune (%s %s), stop sending on vif %d, gm:%x",
		inet_fmts(r->rt_origin, r->rt_originmask),
		inet_fmt(g->gt_mcastgrp), vifi, g->gt_grpmems);

	    prun_add_ttls(g);
	    update_kernel(g);
#ifdef RSRR
	    /* Send route change notification to reservation protocol. */
	    rsrr_cache_send(g,1);
#endif /* RSRR */
	}

	/*
	 * check if all the child routers have expressed no interest
	 * in this group and if this group does not exist in the
	 * interface
	 * Send a prune message then upstream
	 */
	if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
	    send_prune(g);
	}
    } else {
	/*
	 * There is no kernel entry for this group.  Therefore, we can
	 * simply ignore the prune, as we are not forwarding this traffic
	 * downstream.
	 */
	logit(LOG_DEBUG, 0, "%s (%s %s)/%d from %s",
	    "prune message received with no kernel entry for",
	    inet_fmt(prun_src), inet_fmt(prun_grp),
	    prun_tmr, inet_fmt(src));
	return;
    }
}
1260
1261 /*
1262 * Checks if this mcastgrp is present in the kernel table
1263 * If so and if a prune was sent, it sends a graft upwards
1264 */
1265 void
1266 chkgrp_graft(vifi_t vifi, u_int32_t mcastgrp)
1267 {
1268 struct rtentry *r;
1269 struct gtable *g;
1270
1271 for (g = kernel_table; g; g = g->gt_gnext) {
1272 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1273 break;
1274
1275 r = g->gt_route;
1276 if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, r->rt_children))
1277 if (g->gt_prsent_timer) {
1278 VIFM_SET(vifi, g->gt_grpmems);
1279
1280 /*
1281 * If the vif that was joined was a scoped vif,
1282 * ignore it ; don't graft back
1283 */
1284 g->gt_grpmems &= ~g->gt_scope;
1285 if (g->gt_grpmems == 0)
1286 continue;
1287
1288 /* set the flag for graft retransmission */
1289 g->gt_grftsnt = 1;
1290
1291 /* send graft upwards */
1292 send_graft(g);
1293
1294 /* reset the prune timer and update cache timer*/
1295 g->gt_prsent_timer = 0;
1296 g->gt_timer = max_prune_lifetime;
1297
1298 logit(LOG_DEBUG, 0, "chkgrp graft (%s %s) gm:%x",
1299 inet_fmts(r->rt_origin, r->rt_originmask),
1300 inet_fmt(g->gt_mcastgrp), g->gt_grpmems);
1301
1302 prun_add_ttls(g);
1303 update_kernel(g);
1304 #ifdef RSRR
1305 /* Send route change notification to reservation protocol. */
1306 rsrr_cache_send(g,1);
1307 #endif /* RSRR */
1308 }
1309 }
1310 }
1311
/* Process a received graft message: determine the multicast group and src.
 *
 * If an entry exists for the group, determine whether a prune was sent
 * upstream.
 * If a prune was sent upstream, send a graft upstream and send an
 * ack downstream.
 *
 * If no prune was sent upstream, change the forwarding bit
 * for this interface and send an ack downstream.
 *
 * If no entry exists for this group, send an ack downstream.
 */
1324 void
1325 accept_graft(u_int32_t src, u_int32_t dst, char *p, int datalen)
1326 {
1327 vifi_t vifi;
1328 u_int32_t graft_src;
1329 u_int32_t graft_grp;
1330 int i;
1331 struct rtentry *r;
1332 struct gtable *g;
1333 struct ptable *pt, **ptnp;
1334
1335 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1336 logit(LOG_INFO, 0,
1337 "ignoring graft from non-neighbor %s",
1338 inet_fmt(src));
1339 return;
1340 }
1341
1342 if (datalen < 8) {
1343 logit(LOG_WARNING, 0,
1344 "received non-decipherable graft from %s",
1345 inet_fmt(src));
1346 return;
1347 }
1348
1349 for (i = 0; i< 4; i++)
1350 ((char *)&graft_src)[i] = *p++;
1351 for (i = 0; i< 4; i++)
1352 ((char *)&graft_grp)[i] = *p++;
1353
1354 logit(LOG_DEBUG, 0, "%s on vif %d grafts (%s %s)",
1355 inet_fmt(src), vifi,
1356 inet_fmt(graft_src), inet_fmt(graft_grp));
1357
1358 /*
1359 * Find the subnet for the graft
1360 */
1361 if (find_src_grp(graft_src, 0, graft_grp)) {
1362 g = gtp ? gtp->gt_gnext : kernel_table;
1363 r = g->gt_route;
1364
1365 if (VIFM_ISSET(vifi, g->gt_scope)) {
1366 logit(LOG_WARNING, 0, "graft received from %s on scoped grp (%s %s)",
1367 inet_fmt(src), inet_fmt(graft_src),
1368 inet_fmt(graft_grp));
1369 return;
1370 }
1371
1372 ptnp = &g->gt_pruntbl;
1373 while ((pt = *ptnp) != NULL) {
1374 if ((pt->pt_vifi == vifi) && (pt->pt_router == src)) {
1375 *ptnp = pt->pt_next;
1376 free(pt);
1377
1378 VIFM_SET(vifi, g->gt_grpmems);
1379 logit(LOG_DEBUG, 0, "accept graft (%s %s) gm:%x",
1380 inet_fmts(r->rt_origin, r->rt_originmask),
1381 inet_fmt(g->gt_mcastgrp), g->gt_grpmems);
1382
1383 prun_add_ttls(g);
1384 update_kernel(g);
1385 #ifdef RSRR
1386 /* Send route change notification to reservation protocol. */
1387 rsrr_cache_send(g,1);
1388 #endif /* RSRR */
1389 break;
1390 } else {
1391 ptnp = &pt->pt_next;
1392 }
1393 }
1394
1395 /* send ack downstream */
1396 send_graft_ack(dst, src, graft_src, graft_grp);
1397 g->gt_timer = max_prune_lifetime;
1398
1399 if (g->gt_prsent_timer) {
1400 /* set the flag for graft retransmission */
1401 g->gt_grftsnt = 1;
1402
1403 /* send graft upwards */
1404 send_graft(g);
1405
1406 /* reset the prune sent timer */
1407 g->gt_prsent_timer = 0;
1408 }
1409 } else {
1410 /*
1411 * We have no state for the source and group in question.
1412 * We can simply acknowledge the graft, since we know
1413 * that we have no prune state, and grafts are requests
1414 * to remove prune state.
1415 */
1416 send_graft_ack(dst, src, graft_src, graft_grp);
1417 logit(LOG_DEBUG, 0, "%s (%s %s) from %s",
1418 "graft received with no kernel entry for",
1419 inet_fmt(graft_src), inet_fmt(graft_grp),
1420 inet_fmt(src));
1421 return;
1422 }
1423 }
1424
/*
 * Find out which group is involved first of all,
 * then determine if a graft was sent.
 * If no graft was sent, ignore the message.
 * If a graft was sent and the ack is from the right
 * source, remove the graft timer so that we don't
 * have to send a graft again.
 */
1433 void
1434 accept_g_ack(u_int32_t src, u_int32_t dst, char *p, int datalen)
1435 {
1436 struct gtable *g;
1437 vifi_t vifi;
1438 u_int32_t grft_src;
1439 u_int32_t grft_grp;
1440 int i;
1441
1442 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1443 logit(LOG_INFO, 0,
1444 "ignoring graft ack from non-neighbor %s",
1445 inet_fmt(src));
1446 return;
1447 }
1448
1449 if (datalen < 0 || datalen > 8) {
1450 logit(LOG_WARNING, 0,
1451 "received non-decipherable graft ack from %s",
1452 inet_fmt(src));
1453 return;
1454 }
1455
1456 for (i = 0; i< 4; i++)
1457 ((char *)&grft_src)[i] = *p++;
1458 for (i = 0; i< 4; i++)
1459 ((char *)&grft_grp)[i] = *p++;
1460
1461 logit(LOG_DEBUG, 0, "%s on vif %d acks graft (%s, %s)",
1462 inet_fmt(src), vifi,
1463 inet_fmt(grft_src), inet_fmt(grft_grp));
1464
1465 /*
1466 * Find the subnet for the graft ack
1467 */
1468 if (find_src_grp(grft_src, 0, grft_grp)) {
1469 g = gtp ? gtp->gt_gnext : kernel_table;
1470 g->gt_grftsnt = 0;
1471 } else {
1472 logit(LOG_WARNING, 0, "%s (%s, %s) from %s",
1473 "rcvd graft ack with no kernel entry for",
1474 inet_fmt(grft_src), inet_fmt(grft_grp),
1475 inet_fmt(src));
1476 return;
1477 }
1478 }
1479
1480
1481 /*
1482 * free all prune entries and kernel routes
1483 * normally, this should inform the kernel that all of its routes
1484 * are going away, but this is only called by restart(), which is
1485 * about to call MRT_DONE which does that anyway.
1486 */
1487 void
1488 free_all_prunes(void)
1489 {
1490 struct rtentry *r;
1491 struct gtable *g, *prev_g;
1492 struct stable *s, *prev_s;
1493 struct ptable *p, *prev_p;
1494
1495 for (r = routing_table; r; r = r->rt_next) {
1496 g = r->rt_groups;
1497 while (g) {
1498 s = g->gt_srctbl;
1499 while (s) {
1500 prev_s = s;
1501 s = s->st_next;
1502 free(prev_s);
1503 }
1504
1505 p = g->gt_pruntbl;
1506 while (p) {
1507 prev_p = p;
1508 p = p->pt_next;
1509 free(prev_p);
1510 }
1511
1512 prev_g = g;
1513 g = g->gt_next;
1514 free(prev_g);
1515 }
1516 r->rt_groups = NULL;
1517 }
1518 kernel_table = NULL;
1519
1520 g = kernel_no_route;
1521 while (g) {
1522 if (g->gt_srctbl)
1523 free(g->gt_srctbl);
1524
1525 prev_g = g;
1526 g = g->gt_next;
1527 free(prev_g);
1528 }
1529 kernel_no_route = NULL;
1530 }
1531
1532 /*
1533 * When a new route is created, search
1534 * a) The less-specific part of the routing table
1535 * b) The route-less kernel table
1536 * for sources that the new route might want to handle.
1537 *
1538 * "Inheriting" these sources might be cleanest, but simply deleting
1539 * them is easier, and letting the kernel re-request them.
1540 */
void
steal_sources(struct rtentry *rt)
{
	struct rtentry *rp;
	struct gtable *gt, **gtnp;
	struct stable *st, **stnp;

	/*
	 * Pass 1: walk the routes after rt (the less-specific part of the
	 * table) and, for any route whose prefix contains rt's origin,
	 * delete kernel source entries that rt should now cover.  The
	 * kernel will re-request them under the new route.
	 */
	for (rp = rt->rt_next; rp; rp = rp->rt_next) {
		if ((rt->rt_origin & rp->rt_originmask) == rp->rt_origin) {
			logit(LOG_DEBUG, 0, "Route for %s stealing sources from %s",
			    inet_fmts(rt->rt_origin, rt->rt_originmask),
			    inet_fmts(rp->rt_origin, rp->rt_originmask));
			for (gt = rp->rt_groups; gt; gt = gt->gt_next) {
				/* unlink-while-iterating via pointer-to-pointer */
				stnp = &gt->gt_srctbl;
				while ((st = *stnp) != NULL) {
					/* does this source fall inside rt's prefix? */
					if ((st->st_origin & rt->rt_originmask) == rt->rt_origin) {
						logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
						    inet_fmts(rt->rt_origin, rt->rt_originmask),
						    inet_fmt(st->st_origin),
						    inet_fmt(gt->gt_mcastgrp),
						    inet_fmts(rp->rt_origin, rp->rt_originmask));
						/* kernel delete failure is only logged;
						 * our record is dropped regardless */
						if (k_del_rg(st->st_origin, gt) < 0) {
							logit(LOG_WARNING, errno, "%s (%s, %s)",
							    "steal_sources trying to delete",
							    inet_fmt(st->st_origin),
							    inet_fmt(gt->gt_mcastgrp));
						}
						*stnp = st->st_next;
						kroutes--;
						free(st);
					} else {
						stnp = &st->st_next;
					}
				}
			}
		}
	}

	/*
	 * Pass 2: the no_route table holds kernel entries that arrived
	 * before any matching route existed; check each against the new
	 * route's prefix and delete matches the same way.
	 */
	gtnp = &kernel_no_route;
	while ((gt = *gtnp) != NULL) {
		/* only the head source is examined -- no_route entries
		 * apparently hold a single source; TODO confirm */
		if (gt->gt_srctbl && ((gt->gt_srctbl->st_origin & rt->rt_originmask)
				   == rt->rt_origin)) {
			logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
			    inet_fmts(rt->rt_origin, rt->rt_originmask),
			    inet_fmt(gt->gt_srctbl->st_origin),
			    inet_fmt(gt->gt_mcastgrp),
			    "no_route table");
			if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
				logit(LOG_WARNING, errno, "%s (%s %s)",
				    "steal_sources trying to delete",
				    inet_fmt(gt->gt_srctbl->st_origin),
				    inet_fmt(gt->gt_mcastgrp));
			}
			kroutes--;
			free(gt->gt_srctbl);
			/* unlink the whole no_route entry */
			*gtnp = gt->gt_next;
			if (gt->gt_next)
				gt->gt_next->gt_prev = gt->gt_prev;
			free(gt);
		} else {
			gtnp = &gt->gt_next;
		}
	}
}
1605
1606 /*
1607 * Advance the timers on all the cache entries.
1608 * If there are any entries whose timers have expired,
1609 * remove these entries from the kernel cache.
1610 */
void
age_table_entry(void)
{
	struct rtentry *r;
	struct gtable *gt, **gtnptr;
	struct stable *st, **stnp;
	struct ptable *pt, **ptnp;
	struct sioc_sg_req sg_req;

	logit(LOG_DEBUG, 0, "ageing entries");

	gtnptr = &kernel_table;
	while ((gt = *gtnptr) != NULL) {
		r = gt->gt_route;

		/* advance the timer for the kernel entry */
		gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;

		/* decrement prune timer if need be */
		if (gt->gt_prsent_timer > 0) {
			gt->gt_prsent_timer -= ROUTE_MAX_REPORT_DELAY;
			if (gt->gt_prsent_timer <= 0) {
				logit(LOG_DEBUG, 0, "upstream prune tmo (%s %s)",
				    inet_fmts(r->rt_origin, r->rt_originmask),
				    inet_fmt(gt->gt_mcastgrp));
				/* -1 flags "expired this pass"; handled below */
				gt->gt_prsent_timer = -1;
			}
		}

		/* retransmit graft if graft sent flag is still set */
		if (gt->gt_grftsnt) {
			int y;
			/*
			 * gt_grftsnt doubles as a tick counter; CHK_GS sets y
			 * at 2,4,8,...,128 ticks, giving exponential backoff
			 * between graft retransmissions.
			 */
			CHK_GS(gt->gt_grftsnt++, y);
			if (y)
				send_graft(gt);
		}

		/*
		 * Age prunes
		 *
		 * If a prune expires, forward again on that vif.
		 */
		ptnp = &gt->gt_pruntbl;
		while ((pt = *ptnp) != NULL) {
			if ((pt->pt_timer -= ROUTE_MAX_REPORT_DELAY) <= 0) {
				logit(LOG_DEBUG, 0, "expire prune (%s %s) from %s on vif %d",
				    inet_fmts(r->rt_origin, r->rt_originmask),
				    inet_fmt(gt->gt_mcastgrp),
				    inet_fmt(pt->pt_router),
				    pt->pt_vifi);

				expire_prune(pt->pt_vifi, gt);

				/* remove the router's prune entry and await new one */
				*ptnp = pt->pt_next;
				free(pt);
			} else {
				ptnp = &pt->pt_next;
			}
		}

		/*
		 * If the cache entry has expired, delete source table entries for
		 * silent sources.  If there are no source entries left, and there
		 * are no downstream prunes, then the entry is deleted.
		 * Otherwise, the cache entry's timer is refreshed.
		 */
		if (gt->gt_timer <= 0) {
			/* Check for traffic before deleting source entries */
			sg_req.grp.s_addr = gt->gt_mcastgrp;
			stnp = &gt->gt_srctbl;
			while ((st = *stnp) != NULL) {
				sg_req.src.s_addr = st->st_origin;
				if (ioctl(igmp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
					logit(LOG_WARNING, errno, "%s (%s %s)",
					    "age_table_entry: SIOCGETSGCNT failing for",
					    inet_fmt(st->st_origin),
					    inet_fmt(gt->gt_mcastgrp));
					/* Make sure it gets deleted below */
					sg_req.pktcnt = st->st_pktcnt;
				}
				/* unchanged packet count == silent source: drop it */
				if (sg_req.pktcnt == st->st_pktcnt) {
					*stnp = st->st_next;
					logit(LOG_DEBUG, 0, "age_table_entry deleting (%s %s)",
					    inet_fmt(st->st_origin),
					    inet_fmt(gt->gt_mcastgrp));
					if (k_del_rg(st->st_origin, gt) < 0) {
						logit(LOG_WARNING, errno,
						    "age_table_entry trying to delete (%s %s)",
						    inet_fmt(st->st_origin),
						    inet_fmt(gt->gt_mcastgrp));
					}
					kroutes--;
					free(st);
				} else {
					/* remember the count for the next aging pass */
					st->st_pktcnt = sg_req.pktcnt;
					stnp = &st->st_next;
				}
			}

			/*
			 * Retain the group entry if we have downstream prunes or if
			 * there is at least one source in the list that still has
			 * traffic, or if our upstream prune timer is running.
			 */
			if (gt->gt_pruntbl != NULL || gt->gt_srctbl != NULL ||
			    gt->gt_prsent_timer > 0) {
				gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
				if (gt->gt_prsent_timer == -1) {
					/* upstream prune just expired: renew or clear it */
					if (gt->gt_grpmems == 0)
						send_prune(gt);
					else
						gt->gt_prsent_timer = 0;
				}
				gtnptr = &gt->gt_gnext;
				continue;
			}

			logit(LOG_DEBUG, 0, "timeout cache entry (%s, %s)",
			    inet_fmts(r->rt_origin, r->rt_originmask),
			    inet_fmt(gt->gt_mcastgrp));

			/* unlink from the per-route doubly-linked list... */
			if (gt->gt_prev)
				gt->gt_prev->gt_next = gt->gt_next;
			else
				gt->gt_route->rt_groups = gt->gt_next;
			if (gt->gt_next)
				gt->gt_next->gt_prev = gt->gt_prev;

			/* ...and from the global kernel-table list */
			if (gt->gt_gprev) {
				gt->gt_gprev->gt_gnext = gt->gt_gnext;
				gtnptr = &gt->gt_gprev->gt_gnext;
			} else {
				kernel_table = gt->gt_gnext;
				gtnptr = &kernel_table;
			}
			if (gt->gt_gnext)
				gt->gt_gnext->gt_gprev = gt->gt_gprev;

#ifdef RSRR
			/* Send route change notification to reservation protocol. */
			rsrr_cache_send(gt,0);
			rsrr_cache_clean(gt);
#endif /* RSRR */
			free((char *)gt);
		} else {
			if (gt->gt_prsent_timer == -1) {
				/* upstream prune just expired: renew or clear it */
				if (gt->gt_grpmems == 0)
					send_prune(gt);
				else
					gt->gt_prsent_timer = 0;
			}
			gtnptr = &gt->gt_gnext;
		}
	}

	/*
	 * When traversing the no_route table, the decision is much easier.
	 * Just delete it if it has timed out.
	 */
	gtnptr = &kernel_no_route;
	while ((gt = *gtnptr) != NULL) {
		/* advance the timer for the kernel entry */
		gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;

		if (gt->gt_timer < 0) {
			if (gt->gt_srctbl) {
				if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
					logit(LOG_WARNING, errno, "%s (%s %s)",
					    "age_table_entry trying to delete no-route",
					    inet_fmt(gt->gt_srctbl->st_origin),
					    inet_fmt(gt->gt_mcastgrp));
				}
				/* NOTE(review): kroutes is not decremented here,
				 * unlike the other k_del_rg call sites -- confirm
				 * this is intentional. */
				free(gt->gt_srctbl);
			}
			*gtnptr = gt->gt_next;
			if (gt->gt_next)
				gt->gt_next->gt_prev = gt->gt_prev;

			free((char *)gt);
		} else {
			gtnptr = &gt->gt_next;
		}
	}
}
1796
1797 /*
1798 * Modify the kernel to forward packets when one or multiple prunes that
1799 * were received on the vif given by vifi, for the group given by gt,
1800 * have expired.
1801 */
1802 static void
1803 expire_prune(vifi_t vifi, struct gtable *gt)
1804 {
1805 /*
1806 * No need to send a graft, any prunes that we sent
1807 * will expire before any prunes that we have received.
1808 */
1809 if (gt->gt_prsent_timer > 0) {
1810 logit(LOG_DEBUG, 0, "prune expired with %d left on %s",
1811 gt->gt_prsent_timer, "prsent_timer");
1812 gt->gt_prsent_timer = 0;
1813 }
1814
1815 /* modify the kernel entry to forward packets */
1816 if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
1817 struct rtentry *rt = gt->gt_route;
1818 VIFM_SET(vifi, gt->gt_grpmems);
1819 logit(LOG_DEBUG, 0, "forw again (%s %s) gm:%x vif:%d",
1820 inet_fmts(rt->rt_origin, rt->rt_originmask),
1821 inet_fmt(gt->gt_mcastgrp), gt->gt_grpmems, vifi);
1822
1823 prun_add_ttls(gt);
1824 update_kernel(gt);
1825 #ifdef RSRR
1826 /* Send route change notification to reservation protocol. */
1827 rsrr_cache_send(gt,1);
1828 #endif /* RSRR */
1829 }
1830 }
1831
1832
1833 static char *
1834 scaletime(u_long t)
1835 {
1836 static char buf1[5];
1837 static char buf2[5];
1838 static char *buf=buf1;
1839 char s;
1840 char *p;
1841
1842 p = buf;
1843 if (buf == buf1)
1844 buf = buf2;
1845 else
1846 buf = buf1;
1847
1848 if (t < 120) {
1849 s = 's';
1850 } else if (t < 3600) {
1851 t /= 60;
1852 s = 'm';
1853 } else if (t < 86400) {
1854 t /= 3600;
1855 s = 'h';
1856 } else if (t < 864000) {
1857 t /= 86400;
1858 s = 'd';
1859 } else {
1860 t /= 604800;
1861 s = 'w';
1862 }
1863 if (t > 999)
1864 return "*** ";
1865
1866 snprintf(p, 5, "%3d%c", (int)t, s);
1867
1868 return p;
1869 }
1870
1871 /*
1872 * Print the contents of the cache table on file 'fp2'.
1873 */
1874 void
1875 dump_cache(FILE *fp2)
1876 {
1877 struct rtentry *r;
1878 struct gtable *gt;
1879 struct stable *st;
1880 vifi_t i;
1881 time_t thyme = time(0);
1882
1883 fprintf(fp2,
1884 "Multicast Routing Cache Table (%d entries)\n%s", kroutes,
1885 " Origin Mcast-group CTmr Age Ptmr IVif Forwvifs\n");
1886
1887 for (gt = kernel_no_route; gt; gt = gt->gt_next) {
1888 if (gt->gt_srctbl) {
1889 fprintf(fp2, " %-18s %-15s %-4s %-4s - -1\n",
1890 inet_fmts(gt->gt_srctbl->st_origin, 0xffffffff),
1891 inet_fmt(gt->gt_mcastgrp), scaletime(gt->gt_timer),
1892 scaletime(thyme - gt->gt_ctime));
1893 fprintf(fp2, ">%s\n", inet_fmt(gt->gt_srctbl->st_origin));
1894 }
1895 }
1896
1897 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
1898 r = gt->gt_route;
1899 fprintf(fp2, " %-18s %-15s",
1900 inet_fmts(r->rt_origin, r->rt_originmask),
1901 inet_fmt(gt->gt_mcastgrp));
1902
1903 fprintf(fp2, " %-4s", scaletime(gt->gt_timer));
1904
1905 fprintf(fp2, " %-4s %-4s ", scaletime(thyme - gt->gt_ctime),
1906 gt->gt_prsent_timer ? scaletime(gt->gt_prsent_timer) :
1907 " -");
1908
1909 fprintf(fp2, "%2u%c%c ", r->rt_parent,
1910 gt->gt_prsent_timer ? 'P' : ' ',
1911 VIFM_ISSET(r->rt_parent, gt->gt_scope) ? 'B' : ' ');
1912
1913 for (i = 0; i < numvifs; ++i) {
1914 if (VIFM_ISSET(i, gt->gt_grpmems))
1915 fprintf(fp2, " %u ", i);
1916 else if (VIFM_ISSET(i, r->rt_children) &&
1917 !VIFM_ISSET(i, r->rt_leaves))
1918 fprintf(fp2, " %u%c", i,
1919 VIFM_ISSET(i, gt->gt_scope) ? 'b' : 'p');
1920 }
1921 fprintf(fp2, "\n");
1922 for (st = gt->gt_srctbl; st; st = st->st_next) {
1923 fprintf(fp2, ">%s\n", inet_fmt(st->st_origin));
1924 }
1925 #ifdef DEBUG_PRUNES
1926 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next) {
1927 fprintf(fp2, "<r:%s v:%d t:%d\n", inet_fmt(pt->pt_router),
1928 pt->pt_vifi, pt->pt_timer);
1929 }
1930 #endif
1931 }
1932 }
1933
1934 /*
1935 * Traceroute function which returns traceroute replies to the requesting
1936 * router. Also forwards the request to downstream routers.
1937 *
1938 * no: promoted u_char
1939 */
void
accept_mtrace(u_int32_t src, u_int32_t dst, u_int32_t group, char *data,
    u_int no, int datalen)
{
	/*
	 * src/dst: addresses from the IP header of the received packet;
	 * group: the group being traced; data/datalen: the mtrace payload;
	 * 'no' is the IGMP code byte -- it bounds the number of response
	 * blocks (see the "all reports filled" check below).
	 */
	u_char type;
	struct rtentry *rt;
	struct gtable *gt;
	struct tr_query *qry;
	struct tr_resp *resp;
	int vifi;
	char *p;
	int rcount;
	int errcode = TR_NO_ERR;
	int resptype;
	struct timeval tp;
	struct sioc_vif_req v_req;
	struct sioc_sg_req sg_req;

	/* Remember qid across invocations (used to squash duplicates) */
	static u_int32_t oqid = 0;

	/* timestamp the request/response */
	gettimeofday(&tp, 0);

	/*
	 * Check if it is a query or a response: a bare query is QLEN bytes;
	 * an in-transit packet is QLEN plus a whole number of RLEN blocks.
	 */
	if (datalen == QLEN) {
		type = QUERY;
		logit(LOG_DEBUG, 0, "Initial traceroute query rcvd from %s to %s",
		    inet_fmt(src), inet_fmt(dst));
	}
	else if ((datalen - QLEN) % RLEN == 0) {
		type = RESP;
		logit(LOG_DEBUG, 0, "In-transit traceroute query rcvd from %s to %s",
		    inet_fmt(src), inet_fmt(dst));
		if (IN_MULTICAST(ntohl(dst))) {
			logit(LOG_DEBUG, 0, "Dropping multicast response");
			return;
		}
	}
	else {
		logit(LOG_WARNING, 0, "%s from %s to %s",
		    "Non decipherable traceroute request received",
		    inet_fmt(src), inet_fmt(dst));
		return;
	}

	qry = (struct tr_query *)data;

	/*
	 * if it is a packet with all reports filled, drop it
	 */
	if ((rcount = (datalen - QLEN)/RLEN) == no) {
		logit(LOG_DEBUG, 0, "packet with all reports filled in");
		return;
	}

	logit(LOG_DEBUG, 0, "s: %s g: %s d: %s ",
	    inet_fmt(qry->tr_src),
	    inet_fmt(group),
	    inet_fmt(qry->tr_dst));
	logit(LOG_DEBUG, 0, "rttl: %d rd: %s", qry->tr_rttl,
	    inet_fmt(qry->tr_raddr));
	logit(LOG_DEBUG, 0, "rcount:%d, qid:%06x", rcount, qry->tr_qid);

	/* determine the routing table entry for this traceroute */
	rt = determine_route(qry->tr_src);
	if (rt) {
		logit(LOG_DEBUG, 0, "rt parent vif: %d rtr: %s metric: %d",
		    rt->rt_parent, inet_fmt(rt->rt_gateway),
		    rt->rt_metric);
		logit(LOG_DEBUG, 0, "rt origin %s",
		    inet_fmts(rt->rt_origin, rt->rt_originmask));
	} else
		logit(LOG_DEBUG, 0, "...no route");

	/*
	 * Query type packet - check if rte exists
	 * Check if the query destination is a vif connected to me,
	 * and if so, whether I should start the response back.
	 */
	if (type == QUERY) {
		if (oqid == qry->tr_qid) {
			/*
			 * If the multicast router is a member of the group being
			 * queried, and the query is multicasted, then the router can
			 * receive multiple copies of the same query.  If we have already
			 * replied to this traceroute, just ignore it this time.
			 *
			 * This is not a total solution, but since if this fails you
			 * only get N copies, N <= the number of interfaces on the router,
			 * it is not fatal.
			 */
			logit(LOG_DEBUG, 0, "ignoring duplicate traceroute packet");
			return;
		}

		if (rt == NULL) {
			logit(LOG_DEBUG, 0, "Mcast traceroute: no route entry %s",
			    inet_fmt(qry->tr_src));
			/* multicast queries with no route are dropped silently;
			 * unicast ones still get an error response */
			if (IN_MULTICAST(ntohl(dst)))
				return;
		}
		vifi = find_vif(qry->tr_dst, 0);

		if (vifi == NO_VIF) {
			/* The traceroute destination is not on one of my subnet vifs. */
			logit(LOG_DEBUG, 0, "Destination %s not an interface",
			    inet_fmt(qry->tr_dst));
			if (IN_MULTICAST(ntohl(dst)))
				return;
			errcode = TR_WRONG_IF;
		} else if (rt != NULL && !VIFM_ISSET(vifi, rt->rt_children)) {
			logit(LOG_DEBUG, 0,
			    "Destination %s not on forwarding tree for src %s",
			    inet_fmt(qry->tr_dst),
			    inet_fmt(qry->tr_src));
			if (IN_MULTICAST(ntohl(dst)))
				return;
			errcode = TR_WRONG_IF;
		}
	}
	else {
		/*
		 * determine which interface the packet came in on
		 * RESP packets travel hop-by-hop so this either traversed
		 * a tunnel or came from a directly attached mrouter.
		 */
		if ((vifi = find_vif(src, dst)) == NO_VIF) {
			logit(LOG_DEBUG, 0, "Wrong interface for packet");
			errcode = TR_WRONG_IF;
		}
	}

	/* Now that we've decided to send a response, save the qid */
	oqid = qry->tr_qid;

	logit(LOG_DEBUG, 0, "Sending traceroute response");

	/* copy the packet to the sending buffer */
	p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;

	bcopy(data, p, datalen);

	p += datalen;

	/*
	 * If there is no room to insert our reply, coopt the previous hop
	 * error indication to relay this fact.
	 */
	if (p + sizeof(struct tr_resp) > send_buf + RECV_BUF_SIZE) {
		resp = (struct tr_resp *)p - 1;
		resp->tr_rflags = TR_NO_SPACE;
		rt = NULL;
		goto sendit;
	}

	/*
	 * fill in initial response fields
	 */
	resp = (struct tr_resp *)p;
	bzero(resp, sizeof(struct tr_resp));
	datalen += RLEN;

	/* query arrival time, NTP-style: upper 16 bits seconds, lower 16
	 * bits fraction.
	 * NOTE(review): htonl covers only the seconds term; the usec part
	 * is added in host byte order -- looks wrong on little-endian
	 * hosts, confirm against the mtrace specification. */
	resp->tr_qarr = htonl((tp.tv_sec + JAN_1970) << 16) +
	    ((tp.tv_usec >> 4) & 0xffff);

	resp->tr_rproto = PROTO_DVMRP;
	if (errcode != TR_NO_ERR) {
		resp->tr_rflags = errcode;
		rt = NULL; /* hack to enforce send straight to requestor */
		goto sendit;
	}
	resp->tr_outaddr = uvifs[vifi].uv_lcl_addr;
	resp->tr_fttl = uvifs[vifi].uv_threshold;
	resp->tr_rflags = TR_NO_ERR;

	/*
	 * obtain # of packets out on interface
	 */
	v_req.vifi = vifi;
	if (ioctl(igmp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
		resp->tr_vifout = htonl(v_req.ocount);

	/*
	 * fill in scoping & pruning information
	 * (the per-route group list is sorted, so stop at the first
	 * entry >= group)
	 */
	if (rt)
		for (gt = rt->rt_groups; gt; gt = gt->gt_next) {
			if (gt->gt_mcastgrp >= group)
				break;
		}
	else
		gt = NULL;

	if (gt && gt->gt_mcastgrp == group) {
		sg_req.src.s_addr = qry->tr_src;
		sg_req.grp.s_addr = group;
		if (ioctl(igmp_socket, SIOCGETSGCNT, (char *)&sg_req) >= 0)
			resp->tr_pktcnt = htonl(sg_req.pktcnt);

		if (VIFM_ISSET(vifi, gt->gt_scope))
			resp->tr_rflags = TR_SCOPED;
		else if (gt->gt_prsent_timer)
			resp->tr_rflags = TR_PRUNED;
		else if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
			/* not forwarding: pruned by others vs. not a child */
			if (VIFM_ISSET(vifi, rt->rt_children) &&
			    !VIFM_ISSET(vifi, rt->rt_leaves))
				resp->tr_rflags = TR_OPRUNED;
			else
				resp->tr_rflags = TR_NO_FWD;
		}
	} else {
		if (scoped_addr(vifi, group))
			resp->tr_rflags = TR_SCOPED;
		else if (rt && !VIFM_ISSET(vifi, rt->rt_children))
			resp->tr_rflags = TR_NO_FWD;
	}

	/*
	 * if no rte exists, set NO_RTE error
	 */
	if (rt == NULL) {
		src = dst; /* the dst address of resp. pkt */
		resp->tr_inaddr = 0;
		resp->tr_rflags = TR_NO_RTE;
		resp->tr_rmtaddr = 0;
	} else {
		/* get # of packets in on interface */
		v_req.vifi = rt->rt_parent;
		if (ioctl(igmp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
			resp->tr_vifin = htonl(v_req.icount);

		MASK_TO_VAL(rt->rt_originmask, resp->tr_smask);
		src = uvifs[rt->rt_parent].uv_lcl_addr;
		resp->tr_inaddr = src;
		resp->tr_rmtaddr = rt->rt_gateway;
		if (!VIFM_ISSET(vifi, rt->rt_children)) {
			logit(LOG_DEBUG, 0,
			    "Destination %s not on forwarding tree for src %s",
			    inet_fmt(qry->tr_dst),
			    inet_fmt(qry->tr_src));
			resp->tr_rflags = TR_WRONG_IF;
		}
		if (rt->rt_metric >= UNREACHABLE) {
			resp->tr_rflags = TR_NO_RTE;
			/* Hack to send reply directly */
			rt = NULL;
		}
	}

sendit:
	/*
	 * if metric is 1 or no. of reports is 1, send response to requestor
	 * else send to upstream router.  If the upstream router can't handle
	 * mtrace, set an error code and send to requestor anyway.
	 */
	logit(LOG_DEBUG, 0, "rcount:%d, no:%d", rcount, no);

	if ((rcount + 1 == no) || (rt == NULL) || (rt->rt_metric == 1)) {
		resptype = IGMP_MTRACE_REPLY;
		dst = qry->tr_raddr;
	} else
		if (!can_mtrace(rt->rt_parent, rt->rt_gateway)) {
			dst = qry->tr_raddr;
			resp->tr_rflags = TR_OLD_ROUTER;
			resptype = IGMP_MTRACE_REPLY;
		} else {
			dst = rt->rt_gateway;
			resptype = IGMP_MTRACE_QUERY;
		}

	if (IN_MULTICAST(ntohl(dst))) {
		/*
		 * Send the reply on a known multicast capable vif.
		 * If we don't have one, we can't source any multicasts anyway.
		 */
		if (phys_vif != -1) {
			logit(LOG_DEBUG, 0, "Sending reply to %s from %s",
			    inet_fmt(dst), inet_fmt(uvifs[phys_vif].uv_lcl_addr));
			/* reply uses the requested TTL; restore TTL 1 after */
			k_set_ttl(qry->tr_rttl);
			send_igmp(uvifs[phys_vif].uv_lcl_addr, dst,
			    resptype, no, group,
			    datalen);
			k_set_ttl(1);
		} else
			logit(LOG_INFO, 0, "No enabled phyints -- %s",
			    "dropping traceroute reply");
	} else {
		logit(LOG_DEBUG, 0, "Sending %s to %s from %s",
		    resptype == IGMP_MTRACE_REPLY ? "reply" : "request on",
		    inet_fmt(dst), inet_fmt(src));

		send_igmp(src, dst,
		    resptype, no, group,
		    datalen);
	}
	return;
}
2240