/* $NetBSD: prune.c,v 1.12 2003/03/05 21:32:51 wiz Exp $ */

/*
 * The mrouted program is covered by the license in the accompanying file
 * named "LICENSE".  Use of the mrouted program represents acceptance of
 * the terms and conditions listed in that file.
 *
 * The mrouted program is COPYRIGHT 1989 by The Board of Trustees of
 * Leland Stanford Junior University.
 */

12
13 #include "defs.h"
14
15 extern int cache_lifetime;
16 extern int max_prune_lifetime;
17 extern struct rtentry *routing_table;
18
19 extern int phys_vif;
20
21 /*
22 * dither cache lifetime to obtain a value between x and 2*x
23 */
24 #ifdef SYSV
25 #define CACHE_LIFETIME(x) ((x) + (lrand48() % (x)))
26 #else
27 #define CACHE_LIFETIME(x) ((x) + (random() % (x)))
28 #endif
29
30 #define CHK_GS(x, y) { \
31 switch(x) { \
32 case 2: \
33 case 4: \
34 case 8: \
35 case 16: \
36 case 32: \
37 case 64: \
38 case 128: \
39 case 256: y = 1; \
40 break; \
41 default: y = 0; \
42 } \
43 }
44
45 struct gtable *kernel_table; /* ptr to list of kernel grp entries*/
46 static struct gtable *kernel_no_route; /* list of grp entries w/o routes */
47 struct gtable *gtp; /* pointer for kernel rt entries */
48 unsigned int kroutes; /* current number of cache entries */
49
50 /****************************************************************************
51 Functions that are local to prune.c
52 ****************************************************************************/
53 static void prun_add_ttls(struct gtable *gt);
54 static int pruning_neighbor(vifi_t vifi, u_int32_t addr);
55 static int can_mtrace(vifi_t vifi, u_int32_t addr);
56 static struct ptable * find_prune_entry(u_int32_t vr, struct ptable *pt);
57 static void expire_prune(vifi_t vifi, struct gtable *gt);
58 static void send_prune(struct gtable *gt);
59 static void send_graft(struct gtable *gt);
60 static void send_graft_ack(u_int32_t src, u_int32_t dst,
61 u_int32_t origin, u_int32_t grp);
62 static void update_kernel(struct gtable *g);
63 static char * scaletime(u_long t);
64
65 /*
66 * Updates the ttl values for each vif.
67 */
68 static void
69 prun_add_ttls(struct gtable *gt)
70 {
71 struct uvif *v;
72 vifi_t vifi;
73
74 for (vifi = 0, v = uvifs; vifi < numvifs; ++vifi, ++v) {
75 if (VIFM_ISSET(vifi, gt->gt_grpmems))
76 gt->gt_ttls[vifi] = v->uv_threshold;
77 else
78 gt->gt_ttls[vifi] = 0;
79 }
80 }
81
82 /*
83 * checks for scoped multicast addresses
84 */
85 #define GET_SCOPE(gt) { \
86 vifi_t _i; \
87 if ((ntohl((gt)->gt_mcastgrp) & 0xff000000) == 0xef000000) \
88 for (_i = 0; _i < numvifs; _i++) \
89 if (scoped_addr(_i, (gt)->gt_mcastgrp)) \
90 VIFM_SET(_i, (gt)->gt_scope); \
91 }
92
93 int
94 scoped_addr(vifi_t vifi, u_int32_t addr)
95 {
96 struct vif_acl *acl;
97
98 for (acl = uvifs[vifi].uv_acl; acl; acl = acl->acl_next)
99 if ((addr & acl->acl_mask) == acl->acl_addr)
100 return 1;
101
102 return 0;
103 }
104
105 /*
106 * Determine if mcastgrp has a listener on vifi
107 */
108 int
109 grplst_mem(vifi_t vifi, u_int32_t mcastgrp)
110 {
111 struct listaddr *g;
112 struct uvif *v;
113
114 v = &uvifs[vifi];
115
116 for (g = v->uv_groups; g != NULL; g = g->al_next)
117 if (mcastgrp == g->al_addr)
118 return 1;
119
120 return 0;
121 }
122
123 /*
124 * Finds the group entry with the specified source and netmask.
125 * If netmask is 0, it uses the route's netmask.
126 *
127 * Returns TRUE if found a match, and the global variable gtp is left
128 * pointing to entry before the found entry.
129 * Returns FALSE if no exact match found, gtp is left pointing to before
130 * the entry in question belongs, or is NULL if the it belongs at the
131 * head of the list.
132 */
133 int
134 find_src_grp(u_int32_t src, u_int32_t mask, u_int32_t grp)
135 {
136 struct gtable *gt;
137
138 gtp = NULL;
139 gt = kernel_table;
140 while (gt != NULL) {
141 if (grp == gt->gt_mcastgrp &&
142 (mask ? (gt->gt_route->rt_origin == src &&
143 gt->gt_route->rt_originmask == mask) :
144 ((src & gt->gt_route->rt_originmask) ==
145 gt->gt_route->rt_origin)))
146 return TRUE;
147 if (ntohl(grp) > ntohl(gt->gt_mcastgrp) ||
148 (grp == gt->gt_mcastgrp &&
149 (ntohl(mask) < ntohl(gt->gt_route->rt_originmask) ||
150 (mask == gt->gt_route->rt_originmask &&
151 (ntohl(src) > ntohl(gt->gt_route->rt_origin)))))) {
152 gtp = gt;
153 gt = gt->gt_gnext;
154 }
155 else break;
156 }
157 return FALSE;
158 }
159
160 /*
161 * Check if the neighbor supports pruning
162 */
163 static int
164 pruning_neighbor(vifi_t vifi, u_int32_t addr)
165 {
166 struct listaddr *n = neighbor_info(vifi, addr);
167 int vers;
168
169 if (n == NULL)
170 return 0;
171
172 if (n->al_flags & NF_PRUNE)
173 return 1;
174
175 /*
176 * Versions from 3.0 to 3.4 relied on the version number to identify
177 * that they could handle pruning.
178 */
179 vers = NBR_VERS(n);
180 return (vers >= 0x0300 && vers <= 0x0304);
181 }
182
183 /*
184 * Can the neighbor in question handle multicast traceroute?
185 */
186 static int
187 can_mtrace(vifi_t vifi, u_int32_t addr)
188 {
189 struct listaddr *n = neighbor_info(vifi, addr);
190 int vers;
191
192 if (n == NULL)
193 return 0;
194
195 if (n->al_flags & NF_MTRACE)
196 return 1;
197
198 /*
199 * Versions 3.3 and 3.4 relied on the version number to identify
200 * that they could handle traceroute.
201 */
202 vers = NBR_VERS(n);
203 return (vers >= 0x0303 && vers <= 0x0304);
204 }
205
206 /*
207 * Returns the prune entry of the router, or NULL if none exists
208 */
209 static struct ptable *
210 find_prune_entry(u_int32_t vr, struct ptable *pt)
211 {
212 while (pt) {
213 if (pt->pt_router == vr)
214 return pt;
215 pt = pt->pt_next;
216 }
217
218 return NULL;
219 }
220
221 /*
222 * Send a prune message to the dominant router for
223 * this source.
224 *
225 * Record an entry that a prune was sent for this group
226 */
227 static void
228 send_prune(struct gtable *gt)
229 {
230 struct ptable *pt;
231 char *p;
232 int i;
233 int datalen;
234 u_int32_t src;
235 u_int32_t dst;
236 u_int32_t tmp;
237
238 /* Don't process any prunes if router is not pruning */
239 if (pruning == 0)
240 return;
241
242 /* Can't process a prune if we don't have an associated route */
243 if (gt->gt_route == NULL)
244 return;
245
246 /* Don't send a prune to a non-pruning router */
247 if (!pruning_neighbor(gt->gt_route->rt_parent, gt->gt_route->rt_gateway))
248 return;
249
250 /*
251 * sends a prune message to the router upstream.
252 */
253 src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
254 dst = gt->gt_route->rt_gateway;
255
256 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
257 datalen = 0;
258
259 /*
260 * determine prune lifetime
261 */
262 gt->gt_prsent_timer = gt->gt_timer;
263 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next)
264 if (pt->pt_timer < gt->gt_prsent_timer)
265 gt->gt_prsent_timer = pt->pt_timer;
266
267 /*
268 * If we have a graft pending, cancel graft retransmission
269 */
270 gt->gt_grftsnt = 0;
271
272 for (i = 0; i < 4; i++)
273 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
274 for (i = 0; i < 4; i++)
275 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
276 tmp = htonl(gt->gt_prsent_timer);
277 for (i = 0; i < 4; i++)
278 *p++ = ((char *)&(tmp))[i];
279 datalen += 12;
280
281 send_igmp(src, dst, IGMP_DVMRP, DVMRP_PRUNE,
282 htonl(MROUTED_LEVEL), datalen);
283
284 logit(LOG_DEBUG, 0, "sent prune for (%s %s)/%d on vif %d to %s",
285 inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
286 inet_fmt(gt->gt_mcastgrp, s2),
287 gt->gt_prsent_timer, gt->gt_route->rt_parent,
288 inet_fmt(gt->gt_route->rt_gateway, s3));
289 }
290
291 /*
292 * a prune was sent upstream
293 * so, a graft has to be sent to annul the prune
294 * set up a graft timer so that if an ack is not
295 * heard within that time, another graft request
296 * is sent out.
297 */
298 static void
299 send_graft(struct gtable *gt)
300 {
301 char *p;
302 int i;
303 int datalen;
304 u_int32_t src;
305 u_int32_t dst;
306
307 /* Can't send a graft without an associated route */
308 if (gt->gt_route == NULL)
309 return;
310
311 src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
312 dst = gt->gt_route->rt_gateway;
313
314 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
315 datalen = 0;
316
317 for (i = 0; i < 4; i++)
318 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
319 for (i = 0; i < 4; i++)
320 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
321 datalen += 8;
322
323 if (datalen != 0) {
324 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT,
325 htonl(MROUTED_LEVEL), datalen);
326 }
327 logit(LOG_DEBUG, 0, "sent graft for (%s %s) to %s on vif %d",
328 inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
329 inet_fmt(gt->gt_mcastgrp, s2),
330 inet_fmt(gt->gt_route->rt_gateway, s3), gt->gt_route->rt_parent);
331 }
332
333 /*
334 * Send an ack that a graft was received
335 */
336 static void
337 send_graft_ack(u_int32_t src, u_int32_t dst, u_int32_t origin, u_int32_t grp)
338 {
339 char *p;
340 int i;
341 int datalen;
342
343 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
344 datalen = 0;
345
346 for (i = 0; i < 4; i++)
347 *p++ = ((char *)&(origin))[i];
348 for (i = 0; i < 4; i++)
349 *p++ = ((char *)&(grp))[i];
350 datalen += 8;
351
352 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT_ACK,
353 htonl(MROUTED_LEVEL), datalen);
354
355 logit(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s",
356 inet_fmt(origin, s1), inet_fmt(grp, s2), inet_fmt(dst, s3));
357 }
358
359 /*
360 * Update the kernel cache with all the routes hanging off the group entry
361 */
362 static void
363 update_kernel(struct gtable *g)
364 {
365 struct stable *st;
366
367 for (st = g->gt_srctbl; st; st = st->st_next)
368 k_add_rg(st->st_origin, g);
369 }
370
371 /****************************************************************************
372 Functions that are used externally
373 ****************************************************************************/
374
375 #ifdef SNMP
376 #include <sys/types.h>
377 #include "snmp.h"
378
379 /*
380 * Find a specific group entry in the group table
381 */
382 struct gtable *
383 find_grp(u_long grp)
384 {
385 struct gtable *gt;
386
387 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
388 if (ntohl(grp) < ntohl(gt->gt_mcastgrp))
389 break;
390 if (gt->gt_mcastgrp == grp)
391 return gt;
392 }
393 return NULL;
394 }
395
396 /*
397 * Given a group entry and source, find the corresponding source table
398 * entry
399 */
400 struct stable *
401 find_grp_src(struct gtable *gt, u_long src)
402 {
403 struct stable *st;
404 u_long grp = gt->gt_mcastgrp;
405 struct gtable *gtcurr;
406
407 for (gtcurr = gt; gtcurr->gt_mcastgrp == grp; gtcurr = gtcurr->gt_gnext) {
408 for (st = gtcurr->gt_srctbl; st; st = st->st_next)
409 if (st->st_origin == src)
410 return st;
411 }
412 return NULL;
413 }
414
415 /*
416 * Find next entry > specification
417 *
418 * gtpp: ordered by group
419 * stpp: ordered by source
420 */
421 int
422 next_grp_src_mask(struct gtable **gtpp, struct stable **stpp, u_long grp,
423 u_long src, u_long mask)
424 {
425 struct gtable *gt, *gbest = NULL;
426 struct stable *st, *sbest = NULL;
427
428 /* Find first group entry >= grp spec */
429 (*gtpp) = kernel_table;
430 while ((*gtpp) && ntohl((*gtpp)->gt_mcastgrp) < ntohl(grp))
431 (*gtpp)=(*gtpp)->gt_gnext;
432 if (!(*gtpp))
433 return 0; /* no more groups */
434
435 for (gt = kernel_table; gt; gt=gt->gt_gnext) {
436 /* Since grps are ordered, we can stop when group changes from gbest */
437 if (gbest && gbest->gt_mcastgrp != gt->gt_mcastgrp)
438 break;
439 for (st = gt->gt_srctbl; st; st=st->st_next) {
440
441 /* Among those entries > spec, find "lowest" one */
442 if (((ntohl(gt->gt_mcastgrp)> ntohl(grp))
443 || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
444 && ntohl(st->st_origin)> ntohl(src))
445 || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
446 && ntohl(st->st_origin)==src && 0xFFFFFFFF>ntohl(mask)))
447 && (!gbest
448 || (ntohl(gt->gt_mcastgrp)< ntohl(gbest->gt_mcastgrp))
449 || (ntohl(gt->gt_mcastgrp)==ntohl(gbest->gt_mcastgrp)
450 && ntohl(st->st_origin)< ntohl(sbest->st_origin)))) {
451 gbest = gt;
452 sbest = st;
453 }
454 }
455 }
456 (*gtpp) = gbest;
457 (*stpp) = sbest;
458 return (*gtpp)!=0;
459 }
460
461 /*
462 * Ensure that sg contains current information for the given group,source.
463 * This is fetched from the kernel as a unit so that counts for the entry
464 * are consistent, i.e. packet and byte counts for the same entry are
465 * read at the same time.
466 */
467 void
468 refresh_sg(struct sioc_sg_req *sg, struct gtable *gt, struct stable *st)
469 {
470 static int lastq = -1;
471
472 if (quantum != lastq || sg->src.s_addr!=st->st_origin
473 || sg->grp.s_addr!=gt->gt_mcastgrp) {
474 lastq = quantum;
475 sg->src.s_addr = st->st_origin;
476 sg->grp.s_addr = gt->gt_mcastgrp;
477 ioctl(igmp_socket, SIOCGETSGCNT, (char *)sg);
478 }
479 }
480
481 /*
482 * Return pointer to a specific route entry. This must be a separate
483 * function from find_route() which modifies rtp.
484 */
485 struct rtentry *
486 snmp_find_route(u_long src, u_long mask)
487 {
488 struct rtentry *rt;
489
490 for (rt = routing_table; rt; rt = rt->rt_next) {
491 if (src == rt->rt_origin && mask == rt->rt_originmask)
492 return rt;
493 }
494 return NULL;
495 }
496
497 /*
498 * Find next route entry > specification
499 */
500 int
501 next_route(struct rtentry **rtpp, u_long src, u_long mask)
502 {
503 struct rtentry *rt, *rbest = NULL;
504
505 /* Among all entries > spec, find "lowest" one in order */
506 for (rt = routing_table; rt; rt=rt->rt_next) {
507 if ((ntohl(rt->rt_origin) > ntohl(src)
508 || (ntohl(rt->rt_origin) == ntohl(src)
509 && ntohl(rt->rt_originmask) > ntohl(mask)))
510 && (!rbest || (ntohl(rt->rt_origin) < ntohl(rbest->rt_origin))
511 || (ntohl(rt->rt_origin) == ntohl(rbest->rt_origin)
512 && ntohl(rt->rt_originmask) < ntohl(rbest->rt_originmask))))
513 rbest = rt;
514 }
515 (*rtpp) = rbest;
516 return (*rtpp)!=0;
517 }
518
519 /*
520 * Given a routing table entry, and a vifi, find the next vifi/entry
521 *
522 * vifi: vifi at which to start looking
523 */
524 int
525 next_route_child(struct rtentry **rtpp, u_long src, u_long mask, vifi_t *vifi)
526 {
527 struct rtentry *rt;
528
529 /* Get (S,M) entry */
530 if (!((*rtpp) = snmp_find_route(src,mask)))
531 if (!next_route(rtpp, src, mask))
532 return 0;
533
534 /* Continue until we get one with a valid next vif */
535 do {
536 for (; (*rtpp)->rt_children && *vifi<numvifs; (*vifi)++)
537 if (VIFM_ISSET(*vifi, (*rtpp)->rt_children))
538 return 1;
539 *vifi = 0;
540 } while( next_route(rtpp, (*rtpp)->rt_origin, (*rtpp)->rt_originmask) );
541
542 return 0;
543 }
544
545 /*
546 * Given a routing table entry, and a vifi, find the next entry
547 * equal to or greater than those
548 *
549 * vifi: vifi at which to start looking
550 */
551 int
552 next_child(struct gtable **gtpp, struct stable **stpp, u_long grp, u_long src,
553 u_long mask, vifi_t *vifi)
554 {
555 struct stable *st;
556
557 /* Get (G,S,M) entry */
558 if (mask!=0xFFFFFFFF
559 || !((*gtpp) = find_grp(grp))
560 || !((*stpp) = find_grp_src((*gtpp),src)))
561 if (!next_grp_src_mask(gtpp, stpp, grp, src, mask))
562 return 0;
563
564 /* Continue until we get one with a valid next vif */
565 do {
566 for (; (*gtpp)->gt_route->rt_children && *vifi<numvifs; (*vifi)++)
567 if (VIFM_ISSET(*vifi, (*gtpp)->gt_route->rt_children))
568 return 1;
569 *vifi = 0;
570 } while (next_grp_src_mask(gtpp, stpp, (*gtpp)->gt_mcastgrp,
571 (*stpp)->st_origin, 0xFFFFFFFF) );
572
573 return 0;
574 }
575 #endif /* SNMP */
576
577 /*
578 * Initialize the kernel table structure
579 */
580 void
581 init_ktable(void)
582 {
583 kernel_table = NULL;
584 kernel_no_route = NULL;
585 kroutes = 0;
586 }
587
588 /*
589 * Add a new table entry for (origin, mcastgrp)
590 */
591 void
592 add_table_entry(u_int32_t origin, u_int32_t mcastgrp)
593 {
594 struct rtentry *r;
595 struct gtable *gt,**gtnp,*prev_gt;
596 struct stable *st,**stnp;
597 vifi_t i;
598
599 #ifdef DEBUG_MFC
600 md_log(MD_MISS, origin, mcastgrp);
601 #endif
602
603 r = determine_route(origin);
604 prev_gt = NULL;
605 if (r == NULL) {
606 /*
607 * Look for it on the no_route table; if it is found then
608 * it will be detected as a duplicate below.
609 */
610 for (gt = kernel_no_route; gt; gt = gt->gt_next)
611 if (mcastgrp == gt->gt_mcastgrp &&
612 gt->gt_srctbl && gt->gt_srctbl->st_origin == origin)
613 break;
614 gtnp = &kernel_no_route;
615 } else {
616 gtnp = &r->rt_groups;
617 while ((gt = *gtnp) != NULL) {
618 if (gt->gt_mcastgrp >= mcastgrp)
619 break;
620 gtnp = >->gt_next;
621 prev_gt = gt;
622 }
623 }
624
625 if (gt == NULL || gt->gt_mcastgrp != mcastgrp) {
626 gt = (struct gtable *)malloc(sizeof(struct gtable));
627 if (gt == NULL)
628 logit(LOG_ERR, 0, "ran out of memory");
629
630 gt->gt_mcastgrp = mcastgrp;
631 gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
632 time(>->gt_ctime);
633 gt->gt_grpmems = 0;
634 gt->gt_scope = 0;
635 gt->gt_prsent_timer = 0;
636 gt->gt_grftsnt = 0;
637 gt->gt_srctbl = NULL;
638 gt->gt_pruntbl = NULL;
639 gt->gt_route = r;
640 #ifdef RSRR
641 gt->gt_rsrr_cache = NULL;
642 #endif
643
644 if (r != NULL) {
645 /* obtain the multicast group membership list */
646 for (i = 0; i < numvifs; i++) {
647 if (VIFM_ISSET(i, r->rt_children) &&
648 !(VIFM_ISSET(i, r->rt_leaves)))
649 VIFM_SET(i, gt->gt_grpmems);
650
651 if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, mcastgrp))
652 VIFM_SET(i, gt->gt_grpmems);
653 }
654 GET_SCOPE(gt);
655 if (VIFM_ISSET(r->rt_parent, gt->gt_scope))
656 gt->gt_scope = -1;
657 gt->gt_grpmems &= ~gt->gt_scope;
658 } else {
659 gt->gt_scope = -1;
660 gt->gt_grpmems = 0;
661 }
662
663 /* update ttls */
664 prun_add_ttls(gt);
665
666 gt->gt_next = *gtnp;
667 *gtnp = gt;
668 if (gt->gt_next)
669 gt->gt_next->gt_prev = gt;
670 gt->gt_prev = prev_gt;
671
672 if (r) {
673 if (find_src_grp(r->rt_origin, r->rt_originmask, gt->gt_mcastgrp)) {
674 struct gtable *g;
675
676 g = gtp ? gtp->gt_gnext : kernel_table;
677 logit(LOG_WARNING, 0, "Entry for (%s %s) (rt:%p) exists (rt:%p)",
678 inet_fmts(r->rt_origin, r->rt_originmask, s1),
679 inet_fmt(g->gt_mcastgrp, s2),
680 r, g->gt_route);
681 } else {
682 if (gtp) {
683 gt->gt_gnext = gtp->gt_gnext;
684 gt->gt_gprev = gtp;
685 gtp->gt_gnext = gt;
686 } else {
687 gt->gt_gnext = kernel_table;
688 gt->gt_gprev = NULL;
689 kernel_table = gt;
690 }
691 if (gt->gt_gnext)
692 gt->gt_gnext->gt_gprev = gt;
693 }
694 } else {
695 gt->gt_gnext = gt->gt_gprev = NULL;
696 }
697 }
698
699 stnp = >->gt_srctbl;
700 while ((st = *stnp) != NULL) {
701 if (ntohl(st->st_origin) >= ntohl(origin))
702 break;
703 stnp = &st->st_next;
704 }
705
706 if (st == NULL || st->st_origin != origin) {
707 st = (struct stable *)malloc(sizeof(struct stable));
708 if (st == NULL)
709 logit(LOG_ERR, 0, "ran out of memory");
710
711 st->st_origin = origin;
712 st->st_pktcnt = 0;
713 st->st_next = *stnp;
714 *stnp = st;
715 } else {
716 #ifdef DEBUG_MFC
717 md_log(MD_DUPE, origin, mcastgrp);
718 #endif
719 logit(LOG_WARNING, 0, "kernel entry already exists for (%s %s)",
720 inet_fmt(origin, s1), inet_fmt(mcastgrp, s2));
721 /* XXX Doing this should cause no harm, and may ensure
722 * kernel<>mrouted synchronization */
723 k_add_rg(origin, gt);
724 return;
725 }
726
727 kroutes++;
728 k_add_rg(origin, gt);
729
730 logit(LOG_DEBUG, 0, "add cache entry (%s %s) gm:%x, parent-vif:%d",
731 inet_fmt(origin, s1),
732 inet_fmt(mcastgrp, s2),
733 gt->gt_grpmems, r ? r->rt_parent : -1);
734
735 /* If there are no leaf vifs
736 * which have this group, then
737 * mark this src-grp as a prune candidate.
738 */
739 if (!gt->gt_prsent_timer && !gt->gt_grpmems && r && r->rt_gateway)
740 send_prune(gt);
741 }
742
743 /*
744 * An mrouter has gone down and come up on an interface
745 * Forward on that interface immediately
746 */
747 void
748 reset_neighbor_state(vifi_t vifi, u_int32_t addr)
749 {
750 struct rtentry *r;
751 struct gtable *g;
752 struct ptable *pt, **ptnp;
753 struct stable *st;
754
755 for (g = kernel_table; g; g = g->gt_gnext) {
756 r = g->gt_route;
757
758 /*
759 * If neighbor was the parent, remove the prune sent state
760 * and all of the source cache info so that prunes get
761 * regenerated.
762 */
763 if (vifi == r->rt_parent) {
764 if (addr == r->rt_gateway) {
765 logit(LOG_DEBUG, 0, "reset_neighbor_state parent reset (%s %s)",
766 inet_fmts(r->rt_origin, r->rt_originmask, s1),
767 inet_fmt(g->gt_mcastgrp, s2));
768
769 g->gt_prsent_timer = 0;
770 g->gt_grftsnt = 0;
771 while ((st = g->gt_srctbl) != NULL) {
772 g->gt_srctbl = st->st_next;
773 k_del_rg(st->st_origin, g);
774 kroutes--;
775 free(st);
776 }
777 }
778 } else {
779 /*
780 * Neighbor was not the parent, send grafts to join the groups
781 */
782 if (g->gt_prsent_timer) {
783 g->gt_grftsnt = 1;
784 send_graft(g);
785 g->gt_prsent_timer = 0;
786 }
787
788 /*
789 * Remove any prunes that this router has sent us.
790 */
791 ptnp = &g->gt_pruntbl;
792 while ((pt = *ptnp) != NULL) {
793 if (pt->pt_vifi == vifi && pt->pt_router == addr) {
794 *ptnp = pt->pt_next;
795 free(pt);
796 } else
797 ptnp = &pt->pt_next;
798 }
799
800 /*
801 * And see if we want to forward again.
802 */
803 if (!VIFM_ISSET(vifi, g->gt_grpmems)) {
804 if (VIFM_ISSET(vifi, r->rt_children) &&
805 !(VIFM_ISSET(vifi, r->rt_leaves)))
806 VIFM_SET(vifi, g->gt_grpmems);
807
808 if (VIFM_ISSET(vifi, r->rt_leaves) &&
809 grplst_mem(vifi, g->gt_mcastgrp))
810 VIFM_SET(vifi, g->gt_grpmems);
811
812 g->gt_grpmems &= ~g->gt_scope;
813 prun_add_ttls(g);
814
815 /* Update kernel state */
816 update_kernel(g);
817 #ifdef RSRR
818 /* Send route change notification to reservation protocol. */
819 rsrr_cache_send(g,1);
820 #endif /* RSRR */
821
822 logit(LOG_DEBUG, 0, "reset member state (%s %s) gm:%x",
823 inet_fmts(r->rt_origin, r->rt_originmask, s1),
824 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
825 }
826 }
827 }
828 }
829
830 /*
831 * Delete table entry from the kernel
832 * del_flag determines how many entries to delete
833 */
834 void
835 del_table_entry(struct rtentry *r, u_int32_t mcastgrp, u_int del_flag)
836 {
837 struct gtable *g, *prev_g;
838 struct stable *st, *prev_st;
839 struct ptable *pt, *prev_pt;
840
841 if (del_flag == DEL_ALL_ROUTES) {
842 g = r->rt_groups;
843 while (g) {
844 logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
845 inet_fmts(r->rt_origin, r->rt_originmask, s1),
846 inet_fmt(g->gt_mcastgrp, s2));
847 st = g->gt_srctbl;
848 while (st) {
849 if (k_del_rg(st->st_origin, g) < 0) {
850 logit(LOG_WARNING, errno,
851 "del_table_entry trying to delete (%s, %s)",
852 inet_fmt(st->st_origin, s1),
853 inet_fmt(g->gt_mcastgrp, s2));
854 }
855 kroutes--;
856 prev_st = st;
857 st = st->st_next;
858 free(prev_st);
859 }
860 g->gt_srctbl = NULL;
861
862 pt = g->gt_pruntbl;
863 while (pt) {
864 prev_pt = pt;
865 pt = pt->pt_next;
866 free(prev_pt);
867 }
868 g->gt_pruntbl = NULL;
869
870 if (g->gt_gnext)
871 g->gt_gnext->gt_gprev = g->gt_gprev;
872 if (g->gt_gprev)
873 g->gt_gprev->gt_gnext = g->gt_gnext;
874 else
875 kernel_table = g->gt_gnext;
876
877 #ifdef RSRR
878 /* Send route change notification to reservation protocol. */
879 rsrr_cache_send(g,0);
880 rsrr_cache_clean(g);
881 #endif /* RSRR */
882 prev_g = g;
883 g = g->gt_next;
884 free(prev_g);
885 }
886 r->rt_groups = NULL;
887 }
888
889 /*
890 * Dummy routine - someday this may be needed, so it is just there
891 */
892 if (del_flag == DEL_RTE_GROUP) {
893 prev_g = (struct gtable *)&r->rt_groups;
894 for (g = r->rt_groups; g; g = g->gt_next) {
895 if (g->gt_mcastgrp == mcastgrp) {
896 logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
897 inet_fmts(r->rt_origin, r->rt_originmask, s1),
898 inet_fmt(g->gt_mcastgrp, s2));
899 st = g->gt_srctbl;
900 while (st) {
901 if (k_del_rg(st->st_origin, g) < 0) {
902 logit(LOG_WARNING, errno,
903 "del_table_entry trying to delete (%s, %s)",
904 inet_fmt(st->st_origin, s1),
905 inet_fmt(g->gt_mcastgrp, s2));
906 }
907 kroutes--;
908 prev_st = st;
909 st = st->st_next;
910 free(prev_st);
911 }
912 g->gt_srctbl = NULL;
913
914 pt = g->gt_pruntbl;
915 while (pt) {
916 prev_pt = pt;
917 pt = pt->pt_next;
918 free(prev_pt);
919 }
920 g->gt_pruntbl = NULL;
921
922 if (g->gt_gnext)
923 g->gt_gnext->gt_gprev = g->gt_gprev;
924 if (g->gt_gprev)
925 g->gt_gprev->gt_gnext = g->gt_gnext;
926 else
927 kernel_table = g->gt_gnext;
928
929 if (prev_g != (struct gtable *)&r->rt_groups)
930 g->gt_next->gt_prev = prev_g;
931 else
932 g->gt_next->gt_prev = NULL;
933 prev_g->gt_next = g->gt_next;
934
935 #ifdef RSRR
936 /* Send route change notification to reservation protocol. */
937 rsrr_cache_send(g,0);
938 rsrr_cache_clean(g);
939 #endif /* RSRR */
940 free(g);
941 g = prev_g;
942 } else {
943 prev_g = g;
944 }
945 }
946 }
947 }
948
949 /*
950 * update kernel table entry when a route entry changes
951 */
952 void
953 update_table_entry(struct rtentry *r)
954 {
955 struct gtable *g;
956 struct ptable *pt, *prev_pt;
957 vifi_t i;
958
959 for (g = r->rt_groups; g; g = g->gt_next) {
960 pt = g->gt_pruntbl;
961 while (pt) {
962 prev_pt = pt->pt_next;
963 free(pt);
964 pt = prev_pt;
965 }
966 g->gt_pruntbl = NULL;
967
968 g->gt_grpmems = 0;
969
970 /* obtain the multicast group membership list */
971 for (i = 0; i < numvifs; i++) {
972 if (VIFM_ISSET(i, r->rt_children) &&
973 !(VIFM_ISSET(i, r->rt_leaves)))
974 VIFM_SET(i, g->gt_grpmems);
975
976 if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, g->gt_mcastgrp))
977 VIFM_SET(i, g->gt_grpmems);
978 }
979 if (VIFM_ISSET(r->rt_parent, g->gt_scope))
980 g->gt_scope = -1;
981 g->gt_grpmems &= ~g->gt_scope;
982
983 logit(LOG_DEBUG, 0, "updating cache entries (%s %s) gm:%x",
984 inet_fmts(r->rt_origin, r->rt_originmask, s1),
985 inet_fmt(g->gt_mcastgrp, s2),
986 g->gt_grpmems);
987
988 if (g->gt_grpmems && g->gt_prsent_timer) {
989 g->gt_grftsnt = 1;
990 send_graft(g);
991 g->gt_prsent_timer = 0;
992 }
993
994 /* update ttls and add entry into kernel */
995 prun_add_ttls(g);
996 update_kernel(g);
997 #ifdef RSRR
998 /* Send route change notification to reservation protocol. */
999 rsrr_cache_send(g,1);
1000 #endif /* RSRR */
1001
1002 /* Check if we want to prune this group */
1003 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
1004 g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1005 send_prune(g);
1006 }
1007 }
1008 }
1009
1010 /*
1011 * set the forwarding flag for all mcastgrps on this vifi
1012 */
1013 void
1014 update_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
1015 {
1016 struct rtentry *r;
1017 struct gtable *g;
1018
1019 logit(LOG_DEBUG, 0, "group %s joined on vif %d",
1020 inet_fmt(mcastgrp, s1), vifi);
1021
1022 for (g = kernel_table; g; g = g->gt_gnext) {
1023 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1024 break;
1025
1026 r = g->gt_route;
1027 if (g->gt_mcastgrp == mcastgrp &&
1028 VIFM_ISSET(vifi, r->rt_children)) {
1029
1030 VIFM_SET(vifi, g->gt_grpmems);
1031 g->gt_grpmems &= ~g->gt_scope;
1032 if (g->gt_grpmems == 0)
1033 continue;
1034
1035 prun_add_ttls(g);
1036 logit(LOG_DEBUG, 0, "update lclgrp (%s %s) gm:%x",
1037 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1038 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1039
1040 update_kernel(g);
1041 #ifdef RSRR
1042 /* Send route change notification to reservation protocol. */
1043 rsrr_cache_send(g,1);
1044 #endif /* RSRR */
1045 }
1046 }
1047 }
1048
1049 /*
1050 * reset forwarding flag for all mcastgrps on this vifi
1051 */
1052 void
1053 delete_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
1054 {
1055 struct rtentry *r;
1056 struct gtable *g;
1057
1058 logit(LOG_DEBUG, 0, "group %s left on vif %d",
1059 inet_fmt(mcastgrp, s1), vifi);
1060
1061 for (g = kernel_table; g; g = g->gt_gnext) {
1062 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1063 break;
1064
1065 if (g->gt_mcastgrp == mcastgrp) {
1066 int stop_sending = 1;
1067
1068 r = g->gt_route;
1069 /*
1070 * If this is not a leaf, then we have router neighbors on this
1071 * vif. Only turn off forwarding if they have all pruned.
1072 */
1073 if (!VIFM_ISSET(vifi, r->rt_leaves)) {
1074 struct listaddr *vr;
1075
1076 for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
1077 if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
1078 stop_sending = 0;
1079 break;
1080 }
1081 }
1082
1083 if (stop_sending) {
1084 VIFM_CLR(vifi, g->gt_grpmems);
1085 logit(LOG_DEBUG, 0, "delete lclgrp (%s %s) gm:%x",
1086 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1087 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1088
1089 prun_add_ttls(g);
1090 update_kernel(g);
1091 #ifdef RSRR
1092 /* Send route change notification to reservation protocol. */
1093 rsrr_cache_send(g,1);
1094 #endif /* RSRR */
1095
1096 /*
1097 * If there are no more members of this particular group,
1098 * send prune upstream
1099 */
1100 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway)
1101 send_prune(g);
1102 }
1103 }
1104 }
1105 }
1106
1107 /*
1108 * Takes the prune message received and then strips it to
1109 * determine the (src, grp) pair to be pruned.
1110 *
1111 * Adds the router to the (src, grp) entry then.
1112 *
1113 * Determines if further packets have to be sent down that vif
1114 *
1115 * Determines if a corresponding prune message has to be generated
1116 */
1117 void
1118 accept_prune(u_int32_t src, u_int32_t dst, char *p, int datalen)
1119 {
1120 u_int32_t prun_src;
1121 u_int32_t prun_grp;
1122 u_int32_t prun_tmr;
1123 vifi_t vifi;
1124 int i;
1125 int stop_sending;
1126 struct rtentry *r;
1127 struct gtable *g;
1128 struct ptable *pt;
1129 struct listaddr *vr;
1130
1131 /* Don't process any prunes if router is not pruning */
1132 if (pruning == 0)
1133 return;
1134
1135 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1136 logit(LOG_INFO, 0,
1137 "ignoring prune report from non-neighbor %s",
1138 inet_fmt(src, s1));
1139 return;
1140 }
1141
1142 /* Check if enough data is present */
1143 if (datalen < 12)
1144 {
1145 logit(LOG_WARNING, 0,
1146 "non-decipherable prune from %s",
1147 inet_fmt(src, s1));
1148 return;
1149 }
1150
1151 for (i = 0; i< 4; i++)
1152 ((char *)&prun_src)[i] = *p++;
1153 for (i = 0; i< 4; i++)
1154 ((char *)&prun_grp)[i] = *p++;
1155 for (i = 0; i< 4; i++)
1156 ((char *)&prun_tmr)[i] = *p++;
1157 prun_tmr = ntohl(prun_tmr);
1158
1159 logit(LOG_DEBUG, 0, "%s on vif %d prunes (%s %s)/%d",
1160 inet_fmt(src, s1), vifi,
1161 inet_fmt(prun_src, s2), inet_fmt(prun_grp, s3), prun_tmr);
1162
1163 /*
1164 * Find the subnet for the prune
1165 */
1166 if (find_src_grp(prun_src, 0, prun_grp)) {
1167 g = gtp ? gtp->gt_gnext : kernel_table;
1168 r = g->gt_route;
1169
1170 if (!VIFM_ISSET(vifi, r->rt_children)) {
1171 logit(LOG_WARNING, 0, "prune received from non-child %s for (%s %s)",
1172 inet_fmt(src, s1), inet_fmt(prun_src, s2),
1173 inet_fmt(prun_grp, s3));
1174 return;
1175 }
1176 if (VIFM_ISSET(vifi, g->gt_scope)) {
1177 logit(LOG_WARNING, 0, "prune received from %s on scoped grp (%s %s)",
1178 inet_fmt(src, s1), inet_fmt(prun_src, s2),
1179 inet_fmt(prun_grp, s3));
1180 return;
1181 }
1182 if ((pt = find_prune_entry(src, g->gt_pruntbl)) != NULL) {
1183 /*
1184 * If it's about to expire, then it's only still around because
1185 * of timer granularity, so don't warn about it.
1186 */
1187 if (pt->pt_timer > 10) {
1188 logit(LOG_WARNING, 0, "%s %d from %s for (%s %s)/%d %s %d %s %x",
1189 "duplicate prune received on vif",
1190 vifi, inet_fmt(src, s1), inet_fmt(prun_src, s2),
1191 inet_fmt(prun_grp, s3), prun_tmr,
1192 "old timer:", pt->pt_timer, "cur gm:", g->gt_grpmems);
1193 }
1194 pt->pt_timer = prun_tmr;
1195 } else {
1196 /* allocate space for the prune structure */
1197 pt = (struct ptable *)(malloc(sizeof(struct ptable)));
1198 if (pt == NULL)
1199 logit(LOG_ERR, 0, "pt: ran out of memory");
1200
1201 pt->pt_vifi = vifi;
1202 pt->pt_router = src;
1203 pt->pt_timer = prun_tmr;
1204
1205 pt->pt_next = g->gt_pruntbl;
1206 g->gt_pruntbl = pt;
1207 }
1208
1209 /* Refresh the group's lifetime */
1210 g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1211 if (g->gt_timer < prun_tmr)
1212 g->gt_timer = prun_tmr;
1213
1214 /*
1215 * check if any more packets need to be sent on the
1216 * vif which sent this message
1217 */
1218 stop_sending = 1;
1219 for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
1220 if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
1221 stop_sending = 0;
1222 break;
1223 }
1224
1225 if (stop_sending && !grplst_mem(vifi, prun_grp)) {
1226 VIFM_CLR(vifi, g->gt_grpmems);
1227 logit(LOG_DEBUG, 0, "prune (%s %s), stop sending on vif %d, gm:%x",
1228 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1229 inet_fmt(g->gt_mcastgrp, s2), vifi, g->gt_grpmems);
1230
1231 prun_add_ttls(g);
1232 update_kernel(g);
1233 #ifdef RSRR
1234 /* Send route change notification to reservation protocol. */
1235 rsrr_cache_send(g,1);
1236 #endif /* RSRR */
1237 }
1238
1239 /*
1240 * check if all the child routers have expressed no interest
1241 * in this group and if this group does not exist in the
1242 * interface
1243 * Send a prune message then upstream
1244 */
1245 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
1246 send_prune(g);
1247 }
1248 } else {
1249 /*
1250 * There is no kernel entry for this group. Therefore, we can
1251 * simply ignore the prune, as we are not forwarding this traffic
1252 * downstream.
1253 */
1254 logit(LOG_DEBUG, 0, "%s (%s %s)/%d from %s",
1255 "prune message received with no kernel entry for",
1256 inet_fmt(prun_src, s1), inet_fmt(prun_grp, s2),
1257 prun_tmr, inet_fmt(src, s3));
1258 return;
1259 }
1260 }
1261
1262 /*
1263 * Checks if this mcastgrp is present in the kernel table
1264 * If so and if a prune was sent, it sends a graft upwards
1265 */
1266 void
1267 chkgrp_graft(vifi_t vifi, u_int32_t mcastgrp)
1268 {
1269 struct rtentry *r;
1270 struct gtable *g;
1271
1272 for (g = kernel_table; g; g = g->gt_gnext) {
1273 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1274 break;
1275
1276 r = g->gt_route;
1277 if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, r->rt_children))
1278 if (g->gt_prsent_timer) {
1279 VIFM_SET(vifi, g->gt_grpmems);
1280
1281 /*
1282 * If the vif that was joined was a scoped vif,
1283 * ignore it ; don't graft back
1284 */
1285 g->gt_grpmems &= ~g->gt_scope;
1286 if (g->gt_grpmems == 0)
1287 continue;
1288
1289 /* set the flag for graft retransmission */
1290 g->gt_grftsnt = 1;
1291
1292 /* send graft upwards */
1293 send_graft(g);
1294
1295 /* reset the prune timer and update cache timer*/
1296 g->gt_prsent_timer = 0;
1297 g->gt_timer = max_prune_lifetime;
1298
1299 logit(LOG_DEBUG, 0, "chkgrp graft (%s %s) gm:%x",
1300 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1301 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1302
1303 prun_add_ttls(g);
1304 update_kernel(g);
1305 #ifdef RSRR
1306 /* Send route change notification to reservation protocol. */
1307 rsrr_cache_send(g,1);
1308 #endif /* RSRR */
1309 }
1310 }
1311 }
1312
/* Process a received graft:
 * determine the multicast group and source it refers to.
 *
 * If an entry exists for that (source, group), determine whether
 * a prune was sent upstream.
 * If a prune was sent upstream, send a graft upstream and send an
 * ack downstream.
 *
 * If no prune was sent upstream, change the forwarding bit
 * for this interface and send an ack downstream.
 *
 * If no entry exists for this group, send an ack downstream.
 */
1325 void
1326 accept_graft(u_int32_t src, u_int32_t dst, char *p, int datalen)
1327 {
1328 vifi_t vifi;
1329 u_int32_t graft_src;
1330 u_int32_t graft_grp;
1331 int i;
1332 struct rtentry *r;
1333 struct gtable *g;
1334 struct ptable *pt, **ptnp;
1335
1336 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1337 logit(LOG_INFO, 0,
1338 "ignoring graft from non-neighbor %s",
1339 inet_fmt(src, s1));
1340 return;
1341 }
1342
1343 if (datalen < 8) {
1344 logit(LOG_WARNING, 0,
1345 "received non-decipherable graft from %s",
1346 inet_fmt(src, s1));
1347 return;
1348 }
1349
1350 for (i = 0; i< 4; i++)
1351 ((char *)&graft_src)[i] = *p++;
1352 for (i = 0; i< 4; i++)
1353 ((char *)&graft_grp)[i] = *p++;
1354
1355 logit(LOG_DEBUG, 0, "%s on vif %d grafts (%s %s)",
1356 inet_fmt(src, s1), vifi,
1357 inet_fmt(graft_src, s2), inet_fmt(graft_grp, s3));
1358
1359 /*
1360 * Find the subnet for the graft
1361 */
1362 if (find_src_grp(graft_src, 0, graft_grp)) {
1363 g = gtp ? gtp->gt_gnext : kernel_table;
1364 r = g->gt_route;
1365
1366 if (VIFM_ISSET(vifi, g->gt_scope)) {
1367 logit(LOG_WARNING, 0, "graft received from %s on scoped grp (%s %s)",
1368 inet_fmt(src, s1), inet_fmt(graft_src, s2),
1369 inet_fmt(graft_grp, s3));
1370 return;
1371 }
1372
1373 ptnp = &g->gt_pruntbl;
1374 while ((pt = *ptnp) != NULL) {
1375 if ((pt->pt_vifi == vifi) && (pt->pt_router == src)) {
1376 *ptnp = pt->pt_next;
1377 free(pt);
1378
1379 VIFM_SET(vifi, g->gt_grpmems);
1380 logit(LOG_DEBUG, 0, "accept graft (%s %s) gm:%x",
1381 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1382 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1383
1384 prun_add_ttls(g);
1385 update_kernel(g);
1386 #ifdef RSRR
1387 /* Send route change notification to reservation protocol. */
1388 rsrr_cache_send(g,1);
1389 #endif /* RSRR */
1390 break;
1391 } else {
1392 ptnp = &pt->pt_next;
1393 }
1394 }
1395
1396 /* send ack downstream */
1397 send_graft_ack(dst, src, graft_src, graft_grp);
1398 g->gt_timer = max_prune_lifetime;
1399
1400 if (g->gt_prsent_timer) {
1401 /* set the flag for graft retransmission */
1402 g->gt_grftsnt = 1;
1403
1404 /* send graft upwards */
1405 send_graft(g);
1406
1407 /* reset the prune sent timer */
1408 g->gt_prsent_timer = 0;
1409 }
1410 } else {
1411 /*
1412 * We have no state for the source and group in question.
1413 * We can simply acknowledge the graft, since we know
1414 * that we have no prune state, and grafts are requests
1415 * to remove prune state.
1416 */
1417 send_graft_ack(dst, src, graft_src, graft_grp);
1418 logit(LOG_DEBUG, 0, "%s (%s %s) from %s",
1419 "graft received with no kernel entry for",
1420 inet_fmt(graft_src, s1), inet_fmt(graft_grp, s2),
1421 inet_fmt(src, s3));
1422 return;
1423 }
1424 }
1425
1426 /*
1427 * find out which group is involved first of all
1428 * then determine if a graft was sent.
1429 * if no graft sent, ignore the message
1430 * if graft was sent and the ack is from the right
1431 * source, remove the graft timer so that we don't
1432 * have send a graft again
1433 */
1434 void
1435 accept_g_ack(u_int32_t src, u_int32_t dst, char *p, int datalen)
1436 {
1437 struct gtable *g;
1438 vifi_t vifi;
1439 u_int32_t grft_src;
1440 u_int32_t grft_grp;
1441 int i;
1442
1443 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1444 logit(LOG_INFO, 0,
1445 "ignoring graft ack from non-neighbor %s",
1446 inet_fmt(src, s1));
1447 return;
1448 }
1449
1450 if (datalen < 0 || datalen > 8) {
1451 logit(LOG_WARNING, 0,
1452 "received non-decipherable graft ack from %s",
1453 inet_fmt(src, s1));
1454 return;
1455 }
1456
1457 for (i = 0; i< 4; i++)
1458 ((char *)&grft_src)[i] = *p++;
1459 for (i = 0; i< 4; i++)
1460 ((char *)&grft_grp)[i] = *p++;
1461
1462 logit(LOG_DEBUG, 0, "%s on vif %d acks graft (%s, %s)",
1463 inet_fmt(src, s1), vifi,
1464 inet_fmt(grft_src, s2), inet_fmt(grft_grp, s3));
1465
1466 /*
1467 * Find the subnet for the graft ack
1468 */
1469 if (find_src_grp(grft_src, 0, grft_grp)) {
1470 g = gtp ? gtp->gt_gnext : kernel_table;
1471 g->gt_grftsnt = 0;
1472 } else {
1473 logit(LOG_WARNING, 0, "%s (%s, %s) from %s",
1474 "rcvd graft ack with no kernel entry for",
1475 inet_fmt(grft_src, s1), inet_fmt(grft_grp, s2),
1476 inet_fmt(src, s3));
1477 return;
1478 }
1479 }
1480
1481
1482 /*
1483 * free all prune entries and kernel routes
1484 * normally, this should inform the kernel that all of its routes
1485 * are going away, but this is only called by restart(), which is
1486 * about to call MRT_DONE which does that anyway.
1487 */
1488 void
1489 free_all_prunes(void)
1490 {
1491 struct rtentry *r;
1492 struct gtable *g, *prev_g;
1493 struct stable *s, *prev_s;
1494 struct ptable *p, *prev_p;
1495
1496 for (r = routing_table; r; r = r->rt_next) {
1497 g = r->rt_groups;
1498 while (g) {
1499 s = g->gt_srctbl;
1500 while (s) {
1501 prev_s = s;
1502 s = s->st_next;
1503 free(prev_s);
1504 }
1505
1506 p = g->gt_pruntbl;
1507 while (p) {
1508 prev_p = p;
1509 p = p->pt_next;
1510 free(prev_p);
1511 }
1512
1513 prev_g = g;
1514 g = g->gt_next;
1515 free(prev_g);
1516 }
1517 r->rt_groups = NULL;
1518 }
1519 kernel_table = NULL;
1520
1521 g = kernel_no_route;
1522 while (g) {
1523 if (g->gt_srctbl)
1524 free(g->gt_srctbl);
1525
1526 prev_g = g;
1527 g = g->gt_next;
1528 free(prev_g);
1529 }
1530 kernel_no_route = NULL;
1531 }
1532
1533 /*
1534 * When a new route is created, search
1535 * a) The less-specific part of the routing table
1536 * b) The route-less kernel table
1537 * for sources that the new route might want to handle.
1538 *
1539 * "Inheriting" these sources might be cleanest, but simply deleting
1540 * them is easier, and letting the kernel re-request them.
1541 */
void
steal_sources(struct rtentry *rt)
{
    struct rtentry *rp;
    struct gtable *gt, **gtnp;
    struct stable *st, **stnp;

    /*
     * Pass 1: scan routes whose (origin, mask) covers rt's origin,
     * i.e. less-specific routes, and delete from the kernel any of
     * their source entries that the new route rt now matches.  The
     * kernel will re-request those sources and they will then be
     * installed under rt.
     */
    for (rp = rt->rt_next; rp; rp = rp->rt_next) {
        if ((rt->rt_origin & rp->rt_originmask) == rp->rt_origin) {
            logit(LOG_DEBUG, 0, "Route for %s stealing sources from %s",
                inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
                inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
            for (gt = rp->rt_groups; gt; gt = gt->gt_next) {
                /* walk via pointer-to-link so entries can be unlinked
                 * in place while iterating */
                stnp = &gt->gt_srctbl;
                while ((st = *stnp) != NULL) {
                    /* does rt's (origin, mask) cover this source? */
                    if ((st->st_origin & rt->rt_originmask) == rt->rt_origin) {
                        logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
                            inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
                            inet_fmt(st->st_origin, s3),
                            inet_fmt(gt->gt_mcastgrp, s4),
                            inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
                        if (k_del_rg(st->st_origin, gt) < 0) {
                            logit(LOG_WARNING, errno, "%s (%s, %s)",
                                "steal_sources trying to delete",
                                inet_fmt(st->st_origin, s1),
                                inet_fmt(gt->gt_mcastgrp, s2));
                        }
                        /* unlink and free; stnp stays on the same link */
                        *stnp = st->st_next;
                        kroutes--;
                        free(st);
                    } else {
                        stnp = &st->st_next;
                    }
                }
            }
        }
    }

    /*
     * Pass 2: scan the route-less kernel table for entries whose
     * (single) source the new route now covers, and delete them
     * entirely -- once a route exists they no longer belong here.
     */
    gtnp = &kernel_no_route;
    while ((gt = *gtnp) != NULL) {
        if (gt->gt_srctbl && ((gt->gt_srctbl->st_origin & rt->rt_originmask)
                    == rt->rt_origin)) {
            logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
                inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
                inet_fmt(gt->gt_srctbl->st_origin, s3),
                inet_fmt(gt->gt_mcastgrp, s4),
                "no_route table");
            if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
                logit(LOG_WARNING, errno, "%s (%s %s)",
                    "steal_sources trying to delete",
                    inet_fmt(gt->gt_srctbl->st_origin, s1),
                    inet_fmt(gt->gt_mcastgrp, s2));
            }
            kroutes--;
            free(gt->gt_srctbl);
            /* unlink the group entry itself and fix the back pointer */
            *gtnp = gt->gt_next;
            if (gt->gt_next)
                gt->gt_next->gt_prev = gt->gt_prev;
            free(gt);
        } else {
            gtnp = &gt->gt_next;
        }
    }
}
1606
1607 /*
1608 * Advance the timers on all the cache entries.
1609 * If there are any entries whose timers have expired,
1610 * remove these entries from the kernel cache.
1611 */
void
age_table_entry(void)
{
    struct rtentry *r;
    struct gtable *gt, **gtnptr;
    struct stable *st, **stnp;
    struct ptable *pt, **ptnp;
    struct sioc_sg_req sg_req;

    logit(LOG_DEBUG, 0, "ageing entries");

    /*
     * Walk the kernel table via a pointer-to-link so entries can be
     * unlinked in place while iterating.
     */
    gtnptr = &kernel_table;
    while ((gt = *gtnptr) != NULL) {
        r = gt->gt_route;

        /* advance the timer for the kernel entry */
        gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;

        /* decrement prune timer if need be */
        if (gt->gt_prsent_timer > 0) {
            gt->gt_prsent_timer -= ROUTE_MAX_REPORT_DELAY;
            if (gt->gt_prsent_timer <= 0) {
                logit(LOG_DEBUG, 0, "upstream prune tmo (%s %s)",
                    inet_fmts(r->rt_origin, r->rt_originmask, s1),
                    inet_fmt(gt->gt_mcastgrp, s2));
                /* -1 is a sentinel: "our upstream prune just expired";
                 * it is examined below to decide whether to re-prune. */
                gt->gt_prsent_timer = -1;
            }
        }

        /* retransmit graft if graft sent flag is still set */
        if (gt->gt_grftsnt) {
            int y;
            /* CHK_GS sets y when the incremented count is a power of
             * two in 2..256, giving exponential retransmit backoff. */
            CHK_GS(gt->gt_grftsnt++, y);
            if (y)
                send_graft(gt);
        }

        /*
         * Age prunes
         *
         * If a prune expires, forward again on that vif.
         */
        ptnp = &gt->gt_pruntbl;
        while ((pt = *ptnp) != NULL) {
            if ((pt->pt_timer -= ROUTE_MAX_REPORT_DELAY) <= 0) {
                logit(LOG_DEBUG, 0, "expire prune (%s %s) from %s on vif %d",
                    inet_fmts(r->rt_origin, r->rt_originmask, s1),
                    inet_fmt(gt->gt_mcastgrp, s2),
                    inet_fmt(pt->pt_router, s3),
                    pt->pt_vifi);

                expire_prune(pt->pt_vifi, gt);

                /* remove the router's prune entry and await new one */
                *ptnp = pt->pt_next;
                free(pt);
            } else {
                ptnp = &pt->pt_next;
            }
        }

        /*
         * If the cache entry has expired, delete source table entries for
         * silent sources. If there are no source entries left, and there
         * are no downstream prunes, then the entry is deleted.
         * Otherwise, the cache entry's timer is refreshed.
         */
        if (gt->gt_timer <= 0) {
            /* Check for traffic before deleting source entries */
            sg_req.grp.s_addr = gt->gt_mcastgrp;
            stnp = &gt->gt_srctbl;
            while ((st = *stnp) != NULL) {
                sg_req.src.s_addr = st->st_origin;
                if (ioctl(igmp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
                    logit(LOG_WARNING, errno, "%s (%s %s)",
                        "age_table_entry: SIOCGETSGCNT failing for",
                        inet_fmt(st->st_origin, s1),
                        inet_fmt(gt->gt_mcastgrp, s2));
                    /* Make sure it gets deleted below */
                    sg_req.pktcnt = st->st_pktcnt;
                }
                /* unchanged kernel packet count == silent since last pass */
                if (sg_req.pktcnt == st->st_pktcnt) {
                    *stnp = st->st_next;
                    logit(LOG_DEBUG, 0, "age_table_entry deleting (%s %s)",
                        inet_fmt(st->st_origin, s1),
                        inet_fmt(gt->gt_mcastgrp, s2));
                    if (k_del_rg(st->st_origin, gt) < 0) {
                        logit(LOG_WARNING, errno,
                            "age_table_entry trying to delete (%s %s)",
                            inet_fmt(st->st_origin, s1),
                            inet_fmt(gt->gt_mcastgrp, s2));
                    }
                    kroutes--;
                    free(st);
                } else {
                    /* remember count so the next pass can detect silence */
                    st->st_pktcnt = sg_req.pktcnt;
                    stnp = &st->st_next;
                }
            }

            /*
             * Retain the group entry if we have downstream prunes or if
             * there is at least one source in the list that still has
             * traffic, or if our upstream prune timer is running.
             */
            if (gt->gt_pruntbl != NULL || gt->gt_srctbl != NULL ||
                gt->gt_prsent_timer > 0) {
                gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
                if (gt->gt_prsent_timer == -1) {
                    /* our upstream prune just expired: re-prune if we
                     * still have no members, else clear the state */
                    if (gt->gt_grpmems == 0)
                        send_prune(gt);
                    else
                        gt->gt_prsent_timer = 0;
                }
                gtnptr = &gt->gt_gnext;
                continue;
            }

            logit(LOG_DEBUG, 0, "timeout cache entry (%s, %s)",
                inet_fmts(r->rt_origin, r->rt_originmask, s1),
                inet_fmt(gt->gt_mcastgrp, s2));

            /* unlink from the per-route gt_next/gt_prev chain */
            if (gt->gt_prev)
                gt->gt_prev->gt_next = gt->gt_next;
            else
                gt->gt_route->rt_groups = gt->gt_next;
            if (gt->gt_next)
                gt->gt_next->gt_prev = gt->gt_prev;

            /* unlink from the global gt_gnext/gt_gprev chain, keeping
             * gtnptr pointing at the link we must examine next */
            if (gt->gt_gprev) {
                gt->gt_gprev->gt_gnext = gt->gt_gnext;
                gtnptr = &gt->gt_gprev->gt_gnext;
            } else {
                kernel_table = gt->gt_gnext;
                gtnptr = &kernel_table;
            }
            if (gt->gt_gnext)
                gt->gt_gnext->gt_gprev = gt->gt_gprev;

#ifdef RSRR
            /* Send route change notification to reservation protocol. */
            rsrr_cache_send(gt,0);
            rsrr_cache_clean(gt);
#endif /* RSRR */
            free((char *)gt);
        } else {
            if (gt->gt_prsent_timer == -1) {
                /* our upstream prune just expired: re-prune if we
                 * still have no members, else clear the state */
                if (gt->gt_grpmems == 0)
                    send_prune(gt);
                else
                    gt->gt_prsent_timer = 0;
            }
            gtnptr = &gt->gt_gnext;
        }
    }

    /*
     * When traversing the no_route table, the decision is much easier.
     * Just delete it if it has timed out.
     */
    gtnptr = &kernel_no_route;
    while ((gt = *gtnptr) != NULL) {
        /* advance the timer for the kernel entry */
        gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;

        if (gt->gt_timer < 0) {
            /* a no-route entry holds at most one source entry */
            if (gt->gt_srctbl) {
                if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
                    logit(LOG_WARNING, errno, "%s (%s %s)",
                        "age_table_entry trying to delete no-route",
                        inet_fmt(gt->gt_srctbl->st_origin, s1),
                        inet_fmt(gt->gt_mcastgrp, s2));
                }
                free(gt->gt_srctbl);
            }
            *gtnptr = gt->gt_next;
            if (gt->gt_next)
                gt->gt_next->gt_prev = gt->gt_prev;

            free((char *)gt);
        } else {
            gtnptr = &gt->gt_next;
        }
    }
}
1797
1798 /*
1799 * Modify the kernel to forward packets when one or multiple prunes that
1800 * were received on the vif given by vifi, for the group given by gt,
1801 * have expired.
1802 */
1803 static void
1804 expire_prune(vifi_t vifi, struct gtable *gt)
1805 {
1806 /*
1807 * No need to send a graft, any prunes that we sent
1808 * will expire before any prunes that we have received.
1809 */
1810 if (gt->gt_prsent_timer > 0) {
1811 logit(LOG_DEBUG, 0, "prune expired with %d left on %s",
1812 gt->gt_prsent_timer, "prsent_timer");
1813 gt->gt_prsent_timer = 0;
1814 }
1815
1816 /* modify the kernel entry to forward packets */
1817 if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
1818 struct rtentry *rt = gt->gt_route;
1819 VIFM_SET(vifi, gt->gt_grpmems);
1820 logit(LOG_DEBUG, 0, "forw again (%s %s) gm:%x vif:%d",
1821 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1822 inet_fmt(gt->gt_mcastgrp, s2), gt->gt_grpmems, vifi);
1823
1824 prun_add_ttls(gt);
1825 update_kernel(gt);
1826 #ifdef RSRR
1827 /* Send route change notification to reservation protocol. */
1828 rsrr_cache_send(gt,1);
1829 #endif /* RSRR */
1830 }
1831 }
1832
1833
/*
 * Format an elapsed time t (in seconds) into a fixed 4-character
 * field such as " 30s", " 15m", "  2h", "  3d" or "  1w".
 *
 * Alternates between two static buffers so that two calls may appear
 * in the same printf argument list; a third simultaneous use would
 * clobber the first result.  Returns "*** " if the scaled value does
 * not fit in three digits.
 */
static char *
scaletime(unsigned long t)
{
    static char buf1[5];
    static char buf2[5];
    static char *buf = buf1;
    char s;
    char *p;

    /* flip-flop between the two result buffers */
    p = buf;
    if (buf == buf1)
        buf = buf2;
    else
        buf = buf1;

    if (t < 120) {                      /* under 2 minutes: seconds */
        s = 's';
    } else if (t < 3600) {              /* under 1 hour: minutes */
        t /= 60;
        s = 'm';
    } else if (t < 86400) {             /* under 1 day: hours */
        t /= 3600;
        s = 'h';
    } else if (t < 864000) {            /* under 10 days: days */
        t /= 86400;
        s = 'd';
    } else {                            /* otherwise: weeks */
        t /= 604800;
        s = 'w';
    }
    if (t > 999)
        return "*** ";

    /*
     * snprintf rather than sprintf: "%3d%c" plus the terminator fills
     * the 5-byte buffer exactly, so bound the write defensively.
     */
    snprintf(p, sizeof(buf1), "%3d%c", (int)t, s);

    return p;
}
1871
1872 /*
1873 * Print the contents of the cache table on file 'fp2'.
1874 */
1875 void
1876 dump_cache(FILE *fp2)
1877 {
1878 struct rtentry *r;
1879 struct gtable *gt;
1880 struct stable *st;
1881 vifi_t i;
1882 time_t thyme = time(0);
1883
1884 fprintf(fp2,
1885 "Multicast Routing Cache Table (%d entries)\n%s", kroutes,
1886 " Origin Mcast-group CTmr Age Ptmr IVif Forwvifs\n");
1887
1888 for (gt = kernel_no_route; gt; gt = gt->gt_next) {
1889 if (gt->gt_srctbl) {
1890 fprintf(fp2, " %-18s %-15s %-4s %-4s - -1\n",
1891 inet_fmts(gt->gt_srctbl->st_origin, 0xffffffff, s1),
1892 inet_fmt(gt->gt_mcastgrp, s2), scaletime(gt->gt_timer),
1893 scaletime(thyme - gt->gt_ctime));
1894 fprintf(fp2, ">%s\n", inet_fmt(gt->gt_srctbl->st_origin, s1));
1895 }
1896 }
1897
1898 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
1899 r = gt->gt_route;
1900 fprintf(fp2, " %-18s %-15s",
1901 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1902 inet_fmt(gt->gt_mcastgrp, s2));
1903
1904 fprintf(fp2, " %-4s", scaletime(gt->gt_timer));
1905
1906 fprintf(fp2, " %-4s %-4s ", scaletime(thyme - gt->gt_ctime),
1907 gt->gt_prsent_timer ? scaletime(gt->gt_prsent_timer) :
1908 " -");
1909
1910 fprintf(fp2, "%2u%c%c ", r->rt_parent,
1911 gt->gt_prsent_timer ? 'P' : ' ',
1912 VIFM_ISSET(r->rt_parent, gt->gt_scope) ? 'B' : ' ');
1913
1914 for (i = 0; i < numvifs; ++i) {
1915 if (VIFM_ISSET(i, gt->gt_grpmems))
1916 fprintf(fp2, " %u ", i);
1917 else if (VIFM_ISSET(i, r->rt_children) &&
1918 !VIFM_ISSET(i, r->rt_leaves))
1919 fprintf(fp2, " %u%c", i,
1920 VIFM_ISSET(i, gt->gt_scope) ? 'b' : 'p');
1921 }
1922 fprintf(fp2, "\n");
1923 for (st = gt->gt_srctbl; st; st = st->st_next) {
1924 fprintf(fp2, ">%s\n", inet_fmt(st->st_origin, s1));
1925 }
1926 #ifdef DEBUG_PRUNES
1927 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next) {
1928 fprintf(fp2, "<r:%s v:%d t:%d\n", inet_fmt(pt->pt_router, s1),
1929 pt->pt_vifi, pt->pt_timer);
1930 }
1931 #endif
1932 }
1933 }
1934
1935 /*
1936 * Traceroute function which returns traceroute replies to the requesting
1937 * router. Also forwards the request to downstream routers.
1938 *
1939 * no: promoted u_char
1940 */
void
accept_mtrace(u_int32_t src, u_int32_t dst, u_int32_t group, char *data,
    u_int no, int datalen)
{
    u_char type;
    struct rtentry *rt;
    struct gtable *gt;
    struct tr_query *qry;
    struct tr_resp *resp;
    int vifi;
    char *p;
    int rcount;
    int errcode = TR_NO_ERR;
    int resptype;
    struct timeval tp;
    struct sioc_vif_req v_req;
    struct sioc_sg_req sg_req;

    /* Remember qid across invocations */
    static u_int32_t oqid = 0;

    /* timestamp the request/response */
    gettimeofday(&tp, 0);

    /*
     * Check if it is a query or a response: a bare query is QLEN
     * bytes; each hop appends one RLEN-sized response block.
     */
    if (datalen == QLEN) {
        type = QUERY;
        logit(LOG_DEBUG, 0, "Initial traceroute query rcvd from %s to %s",
            inet_fmt(src, s1), inet_fmt(dst, s2));
    }
    else if ((datalen - QLEN) % RLEN == 0) {
        type = RESP;
        logit(LOG_DEBUG, 0, "In-transit traceroute query rcvd from %s to %s",
            inet_fmt(src, s1), inet_fmt(dst, s2));
        if (IN_MULTICAST(ntohl(dst))) {
            logit(LOG_DEBUG, 0, "Dropping multicast response");
            return;
        }
    }
    else {
        logit(LOG_WARNING, 0, "%s from %s to %s",
            "Non decipherable traceroute request received",
            inet_fmt(src, s1), inet_fmt(dst, s2));
        return;
    }

    qry = (struct tr_query *)data;

    /*
     * if it is a packet with all reports filled, drop it
     * ('no' is the requested hop count, promoted from u_char)
     */
    if ((rcount = (datalen - QLEN)/RLEN) == no) {
        logit(LOG_DEBUG, 0, "packet with all reports filled in");
        return;
    }

    logit(LOG_DEBUG, 0, "s: %s g: %s d: %s ", inet_fmt(qry->tr_src, s1),
        inet_fmt(group, s2), inet_fmt(qry->tr_dst, s3));
    logit(LOG_DEBUG, 0, "rttl: %d rd: %s", qry->tr_rttl,
        inet_fmt(qry->tr_raddr, s1));
    logit(LOG_DEBUG, 0, "rcount:%d, qid:%06x", rcount, qry->tr_qid);

    /* determine the routing table entry for this traceroute */
    rt = determine_route(qry->tr_src);
    if (rt) {
        logit(LOG_DEBUG, 0, "rt parent vif: %d rtr: %s metric: %d",
            rt->rt_parent, inet_fmt(rt->rt_gateway, s1), rt->rt_metric);
        logit(LOG_DEBUG, 0, "rt origin %s",
            inet_fmts(rt->rt_origin, rt->rt_originmask, s1));
    } else
        logit(LOG_DEBUG, 0, "...no route");

    /*
     * Query type packet - check if rte exists
     * Check if the query destination is a vif connected to me.
     * and if so, whether I should start response back
     */
    if (type == QUERY) {
        if (oqid == qry->tr_qid) {
            /*
             * If the multicast router is a member of the group being
             * queried, and the query is multicasted, then the router can
             * receive multiple copies of the same query. If we have already
             * replied to this traceroute, just ignore it this time.
             *
             * This is not a total solution, but since if this fails you
             * only get N copies, N <= the number of interfaces on the router,
             * it is not fatal.
             */
            logit(LOG_DEBUG, 0, "ignoring duplicate traceroute packet");
            return;
        }

        if (rt == NULL) {
            logit(LOG_DEBUG, 0, "Mcast traceroute: no route entry %s",
                inet_fmt(qry->tr_src, s1));
            /* multicast queries are dropped; unicast ones still get
             * an error response further down */
            if (IN_MULTICAST(ntohl(dst)))
                return;
        }
        vifi = find_vif(qry->tr_dst, 0);

        if (vifi == NO_VIF) {
            /* The traceroute destination is not on one of my subnet vifs. */
            logit(LOG_DEBUG, 0, "Destination %s not an interface",
                inet_fmt(qry->tr_dst, s1));
            if (IN_MULTICAST(ntohl(dst)))
                return;
            errcode = TR_WRONG_IF;
        } else if (rt != NULL && !VIFM_ISSET(vifi, rt->rt_children)) {
            logit(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
                inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
            if (IN_MULTICAST(ntohl(dst)))
                return;
            errcode = TR_WRONG_IF;
        }
    }
    else {
        /*
         * determine which interface the packet came in on
         * RESP packets travel hop-by-hop so this either traversed
         * a tunnel or came from a directly attached mrouter.
         */
        if ((vifi = find_vif(src, dst)) == NO_VIF) {
            logit(LOG_DEBUG, 0, "Wrong interface for packet");
            errcode = TR_WRONG_IF;
        }
    }

    /* Now that we've decided to send a response, save the qid */
    oqid = qry->tr_qid;

    logit(LOG_DEBUG, 0, "Sending traceroute response");

    /* copy the packet to the sending buffer */
    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;

    bcopy(data, p, datalen);

    p += datalen;

    /*
     * If there is no room to insert our reply, coopt the previous hop
     * error indication to relay this fact.
     */
    if (p + sizeof(struct tr_resp) > send_buf + RECV_BUF_SIZE) {
        resp = (struct tr_resp *)p - 1;
        resp->tr_rflags = TR_NO_SPACE;
        rt = NULL;
        goto sendit;
    }

    /*
     * fill in initial response fields
     */
    resp = (struct tr_resp *)p;
    bzero(resp, sizeof(struct tr_resp));
    datalen += RLEN;

    /* NOTE(review): htonl() covers only the seconds word here; the
     * usec-derived bits are added after the byte swap, which looks
     * wrong on little-endian hosts -- confirm against the consumers
     * of tr_qarr before changing. */
    resp->tr_qarr = htonl((tp.tv_sec + JAN_1970) << 16) +
                ((tp.tv_usec >> 4) & 0xffff);

    resp->tr_rproto = PROTO_DVMRP;
    if (errcode != TR_NO_ERR) {
        resp->tr_rflags = errcode;
        rt = NULL;      /* hack to enforce send straight to requestor */
        goto sendit;
    }
    resp->tr_outaddr = uvifs[vifi].uv_lcl_addr;
    resp->tr_fttl = uvifs[vifi].uv_threshold;
    resp->tr_rflags = TR_NO_ERR;

    /*
     * obtain # of packets out on interface
     */
    v_req.vifi = vifi;
    if (ioctl(igmp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
        resp->tr_vifout = htonl(v_req.ocount);

    /*
     * fill in scoping & pruning information
     * (group list is ordered; stop at the first group >= ours)
     */
    if (rt)
        for (gt = rt->rt_groups; gt; gt = gt->gt_next) {
            if (gt->gt_mcastgrp >= group)
                break;
        }
    else
        gt = NULL;

    if (gt && gt->gt_mcastgrp == group) {
        /* we have forwarding state for this (source, group) */
        sg_req.src.s_addr = qry->tr_src;
        sg_req.grp.s_addr = group;
        if (ioctl(igmp_socket, SIOCGETSGCNT, (char *)&sg_req) >= 0)
            resp->tr_pktcnt = htonl(sg_req.pktcnt);

        if (VIFM_ISSET(vifi, gt->gt_scope))
            resp->tr_rflags = TR_SCOPED;
        else if (gt->gt_prsent_timer)
            resp->tr_rflags = TR_PRUNED;
        else if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
            if (VIFM_ISSET(vifi, rt->rt_children) &&
                !VIFM_ISSET(vifi, rt->rt_leaves))
                resp->tr_rflags = TR_OPRUNED;
            else
                resp->tr_rflags = TR_NO_FWD;
        }
    } else {
        if (scoped_addr(vifi, group))
            resp->tr_rflags = TR_SCOPED;
        else if (rt && !VIFM_ISSET(vifi, rt->rt_children))
            resp->tr_rflags = TR_NO_FWD;
    }

    /*
     * if no rte exists, set NO_RTE error
     */
    if (rt == NULL) {
        src = dst;      /* the dst address of resp. pkt */
        resp->tr_inaddr = 0;
        resp->tr_rflags = TR_NO_RTE;
        resp->tr_rmtaddr = 0;
    } else {
        /* get # of packets in on interface */
        v_req.vifi = rt->rt_parent;
        if (ioctl(igmp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
            resp->tr_vifin = htonl(v_req.icount);

        MASK_TO_VAL(rt->rt_originmask, resp->tr_smask);
        src = uvifs[rt->rt_parent].uv_lcl_addr;
        resp->tr_inaddr = src;
        resp->tr_rmtaddr = rt->rt_gateway;
        if (!VIFM_ISSET(vifi, rt->rt_children)) {
            logit(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
                inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
            resp->tr_rflags = TR_WRONG_IF;
        }
        if (rt->rt_metric >= UNREACHABLE) {
            resp->tr_rflags = TR_NO_RTE;
            /* Hack to send reply directly */
            rt = NULL;
        }
    }

sendit:
    /*
     * if metric is 1 or no. of reports is 1, send response to requestor
     * else send to upstream router. If the upstream router can't handle
     * mtrace, set an error code and send to requestor anyway.
     */
    logit(LOG_DEBUG, 0, "rcount:%d, no:%d", rcount, no);

    if ((rcount + 1 == no) || (rt == NULL) || (rt->rt_metric == 1)) {
        resptype = IGMP_MTRACE_REPLY;
        dst = qry->tr_raddr;
    } else
        if (!can_mtrace(rt->rt_parent, rt->rt_gateway)) {
            dst = qry->tr_raddr;
            resp->tr_rflags = TR_OLD_ROUTER;
            resptype = IGMP_MTRACE_REPLY;
        } else {
            dst = rt->rt_gateway;
            resptype = IGMP_MTRACE_QUERY;
        }

    if (IN_MULTICAST(ntohl(dst))) {
        /*
         * Send the reply on a known multicast capable vif.
         * If we don't have one, we can't source any multicasts anyway.
         */
        if (phys_vif != -1) {
            logit(LOG_DEBUG, 0, "Sending reply to %s from %s",
                inet_fmt(dst, s1), inet_fmt(uvifs[phys_vif].uv_lcl_addr, s2));
            k_set_ttl(qry->tr_rttl);
            send_igmp(uvifs[phys_vif].uv_lcl_addr, dst,
                resptype, no, group,
                datalen);
            k_set_ttl(1);
        } else
            logit(LOG_INFO, 0, "No enabled phyints -- %s",
                "dropping traceroute reply");
    } else {
        logit(LOG_DEBUG, 0, "Sending %s to %s from %s",
            resptype == IGMP_MTRACE_REPLY ? "reply" : "request on",
            inet_fmt(dst, s1), inet_fmt(src, s2));

        send_igmp(src, dst,
            resptype, no, group,
            datalen);
    }
    return;
}
2234