route.c revision 1.94 1 /* $NetBSD: route.c,v 1.94 2007/07/19 20:48:53 dyoung Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. Neither the name of the project nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 */
68
69 /*
70 * Copyright (c) 1980, 1986, 1991, 1993
71 * The Regents of the University of California. All rights reserved.
72 *
73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions
75 * are met:
76 * 1. Redistributions of source code must retain the above copyright
77 * notice, this list of conditions and the following disclaimer.
78 * 2. Redistributions in binary form must reproduce the above copyright
79 * notice, this list of conditions and the following disclaimer in the
80 * documentation and/or other materials provided with the distribution.
81 * 3. Neither the name of the University nor the names of its contributors
82 * may be used to endorse or promote products derived from this software
83 * without specific prior written permission.
84 *
85 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
86 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
89 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
90 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
91 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
92 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
93 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
94 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
95 * SUCH DAMAGE.
96 *
97 * @(#)route.c 8.3 (Berkeley) 1/9/95
98 */
99
100 #include "opt_route.h"
101
102 #include <sys/cdefs.h>
103 __KERNEL_RCSID(0, "$NetBSD: route.c,v 1.94 2007/07/19 20:48:53 dyoung Exp $");
104
105 #include <sys/param.h>
106 #include <sys/sysctl.h>
107 #include <sys/systm.h>
108 #include <sys/callout.h>
109 #include <sys/proc.h>
110 #include <sys/mbuf.h>
111 #include <sys/socket.h>
112 #include <sys/socketvar.h>
113 #include <sys/domain.h>
114 #include <sys/protosw.h>
115 #include <sys/kernel.h>
116 #include <sys/ioctl.h>
117 #include <sys/pool.h>
118
119 #include <net/if.h>
120 #include <net/route.h>
121 #include <net/raw_cb.h>
122
123 #include <netinet/in.h>
124 #include <netinet/in_var.h>
125
#ifdef RTFLUSH_DEBUG
/* True when route-cache debug output is enabled via net.rtcache.debug. */
#define rtcache_debug() __predict_false(_rtcache_debug)
#else /* RTFLUSH_DEBUG */
#define rtcache_debug() 0
#endif /* RTFLUSH_DEBUG */

struct route_cb route_cb;		/* routing-socket listener counts */
struct rtstat rtstat;			/* routing statistics */
struct radix_node_head *rt_tables[AF_MAX+1];	/* per-family radix trees */

int rttrash;		/* routes not in table but not freed */
struct sockaddr wildcard; /* zero valued cookie for wildcard searches */

/* Backing pools for rtentry and rttimer allocations (softnet IPL). */
POOL_INIT(rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl", NULL,
    IPL_SOFTNET);
POOL_INIT(rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl", NULL,
    IPL_SOFTNET);

struct callout rt_timer_ch; /* callout for rt_timer_timer() */

#ifdef RTFLUSH_DEBUG
static int _rtcache_debug = 0;	/* backs the net.rtcache.debug sysctl */
#endif /* RTFLUSH_DEBUG */

static int rtdeletemsg(struct rtentry *);
static int rtflushclone1(struct rtentry *, void *);
static void rtflushclone(sa_family_t family, struct rtentry *);
153
#ifdef RTFLUSH_DEBUG
/*
 * Create the net.rtcache.debug sysctl node so route-cache debugging
 * can be toggled at run time (it backs the rtcache_debug() test).
 */
SYSCTL_SETUP(sysctl_net_rtcache_setup, "sysctl net.rtcache.debug setup")
{
	const struct sysctlnode *rnode;

	/* XXX do not duplicate */
	if (sysctl_createv(clog, 0, NULL, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "net", NULL, NULL, 0, NULL, 0, CTL_NET, CTL_EOL) != 0)
		return;
	if (sysctl_createv(clog, 0, &rnode, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE,
	    "rtcache", SYSCTL_DESCR("Route cache related settings"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
	/* net.rtcache.debug: read-write int bound to _rtcache_debug. */
	if (sysctl_createv(clog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug route caches"),
	    NULL, 0, &_rtcache_debug, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
}
#endif /* RTFLUSH_DEBUG */
175
176 struct ifaddr *
177 rt_get_ifa(struct rtentry *rt)
178 {
179 struct ifaddr *ifa;
180
181 if ((ifa = rt->rt_ifa) == NULL)
182 return ifa;
183 else if (ifa->ifa_getifa == NULL)
184 return ifa;
185 #if 0
186 else if (ifa->ifa_seqno != NULL && *ifa->ifa_seqno == rt->rt_ifa_seqno)
187 return ifa;
188 #endif
189 else {
190 ifa = (*ifa->ifa_getifa)(ifa, rt_getkey(rt));
191 rt_replace_ifa(rt, ifa);
192 return ifa;
193 }
194 }
195
196 static void
197 rt_set_ifa1(struct rtentry *rt, struct ifaddr *ifa)
198 {
199 rt->rt_ifa = ifa;
200 if (ifa->ifa_seqno != NULL)
201 rt->rt_ifa_seqno = *ifa->ifa_seqno;
202 }
203
204 void
205 rt_replace_ifa(struct rtentry *rt, struct ifaddr *ifa)
206 {
207 IFAREF(ifa);
208 IFAFREE(rt->rt_ifa);
209 rt_set_ifa1(rt, ifa);
210 }
211
/*
 * Initial ifaddr binding for a freshly created route: take a
 * reference, nothing to release.
 */
static void
rt_set_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	IFAREF(ifa);
	rt_set_ifa1(rt, ifa);
}
218
219 void
220 rtable_init(void **table)
221 {
222 struct domain *dom;
223 DOMAIN_FOREACH(dom)
224 if (dom->dom_rtattach)
225 dom->dom_rtattach(&table[dom->dom_family],
226 dom->dom_rtoffset);
227 }
228
229 void
230 route_init(void)
231 {
232
233 rn_init(); /* initialize all zeroes, all ones, mask table */
234 rtable_init((void **)rt_tables);
235 }
236
237 void
238 rtflushall(int family)
239 {
240 int s;
241 struct domain *dom;
242 struct route *ro;
243
244 if (rtcache_debug())
245 printf("%s: enter\n", __func__);
246
247 if ((dom = pffinddomain(family)) == NULL)
248 return;
249
250 s = splnet();
251 while ((ro = LIST_FIRST(&dom->dom_rtcache)) != NULL) {
252 KASSERT(ro->ro_rt != NULL);
253 rtcache_clear(ro);
254 }
255 splx(s);
256 }
257
258 void
259 rtflush(struct route *ro)
260 {
261 KASSERT(ro->ro_rt != NULL);
262 KASSERT(rtcache_getdst(ro) != NULL);
263
264 RTFREE(ro->ro_rt);
265 ro->ro_rt = NULL;
266
267 LIST_REMOVE(ro, ro_rtcache_next);
268
269 #if 0
270 if (rtcache_debug()) {
271 printf("%s: flushing %s\n", __func__,
272 inet_ntoa((satocsin(rtcache_getdst(ro)))->sin_addr));
273 }
274 #endif
275 }
276
277 void
278 rtcache(struct route *ro)
279 {
280 struct domain *dom;
281
282 KASSERT(ro->ro_rt != NULL);
283 KASSERT(rtcache_getdst(ro) != NULL);
284
285 if ((dom = pffinddomain(rtcache_getdst(ro)->sa_family)) == NULL)
286 return;
287
288 LIST_INSERT_HEAD(&dom->dom_rtcache, ro, ro_rtcache_next);
289 }
290
291 /*
292 * Packet routing routines.
293 */
294 void
295 rtalloc(struct route *ro)
296 {
297 if (ro->ro_rt != NULL) {
298 if (ro->ro_rt->rt_ifp != NULL &&
299 (ro->ro_rt->rt_flags & RTF_UP) != 0)
300 return;
301 rtflush(ro);
302 }
303 if (rtcache_getdst(ro) == NULL ||
304 (ro->ro_rt = rtalloc1(rtcache_getdst(ro), 1)) == NULL)
305 return;
306 rtcache(ro);
307 }
308
/*
 * Look up the best-matching route to "dst" in its family's radix tree.
 * On success the returned rtentry carries an extra reference for the
 * caller.  If the match is a cloning route and "report" is set, a host
 * route is cloned via RTM_RESOLVE and routing-socket listeners are told
 * about it; lookup/clone failures generate RTM_MISS/RTM_RESOLVE
 * messages.  Returns NULL when no route exists.
 */
struct rtentry *
rtalloc1(const struct sockaddr *dst, int report)
{
	struct radix_node_head *rnh = rt_tables[dst->sa_family];
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt = NULL;
	struct rt_addrinfo info;
	int s = splsoftnet(), err = 0, msgtype = RTM_MISS;

	/* A hit on the tree root (RNF_ROOT) means "no real route". */
	if (rnh && (rn = rnh->rnh_matchaddr(dst, rnh)) &&
	    ((rn->rn_flags & RNF_ROOT) == 0)) {
		newrt = rt = (struct rtentry *)rn;
		if (report && (rt->rt_flags & RTF_CLONING)) {
			/* Clone a host route off this cloning route. */
			err = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
			    &newrt);
			if (err) {
				/* Clone failed: fall back to the parent. */
				newrt = rt;
				rt->rt_refcnt++;
				goto miss;
			}
			KASSERT(newrt != NULL);
			if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
				/* An external agent must finish resolution. */
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route */
			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = rt_getkey(rt);
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if (rt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] =
				    TAILQ_FIRST(&rt->rt_ifp->if_addrlist)->ifa_addr;
				info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, rt->rt_flags, 0);
		} else
			rt->rt_refcnt++;
	} else {
		rtstat.rts_unreach++;
	/* NB: reached both from the else branch and by goto from above. */
	miss:	if (report) {
			memset((void *)&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, err);
		}
	}
	splx(s);
	return (newrt);
}
359
/*
 * Release one reference on "rt".  When the last reference on a route
 * that has already been removed from its tree (RTF_UP clear) goes
 * away, tear the entry down and return it to the pool.
 */
void
rtfree(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if (rt == NULL)
		panic("rtfree");
	rt->rt_refcnt--;
	if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
		/* Still linked into the radix tree => refcounting bug. */
		if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic ("rtfree 2");
		rttrash--;
		if (rt->rt_refcnt < 0) {
			/* Over-released: complain and leak rather than
			 * risk a double free. */
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}
		/* Detach timers and drop the ifaddr reference, then free. */
		rt_timer_remove_all(rt, 0);
		ifa = rt->rt_ifa;
		rt->rt_ifa = NULL;
		IFAFREE(ifa);
		rt->rt_ifp = NULL;
		rt_destroy(rt);
		pool_put(&rtentry_pool, rt);
	}
}
385
/*
 * Final destructor for an interface address whose reference count has
 * reached zero: free the storage.  DIAGNOSTIC kernels verify the
 * argument and the refcount first.
 */
void
ifafree(struct ifaddr *ifa)
{

#ifdef DIAGNOSTIC
	if (ifa == NULL)
		panic("ifafree: null ifa");
	if (ifa->ifa_refcnt != 0)
		panic("ifafree: ifa_refcnt != 0 (%d)", ifa->ifa_refcnt);
#endif
#ifdef IFAREF_DEBUG
	printf("ifafree: freeing ifaddr %p\n", ifa);
#endif
	free(ifa, M_IFADDR);
}
401
/* Sockaddr equality: true iff sockaddr_cmp() orders the two the same. */
static inline int
equal(const struct sockaddr *sa1, const struct sockaddr *sa2)
{
	return (sockaddr_cmp(sa1, sa2) == 0);
}
407
408 /*
409 * Force a routing table entry to the specified
410 * destination to go through the given gateway.
411 * Normally called as a result of a routing redirect
412 * message from the network layer.
413 *
414 * N.B.: must be called at splsoftnet
415 */
416 void
417 rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
418 const struct sockaddr *netmask, int flags, const struct sockaddr *src,
419 struct rtentry **rtp)
420 {
421 struct rtentry *rt;
422 int error = 0;
423 u_quad_t *stat = NULL;
424 struct rt_addrinfo info;
425 struct ifaddr *ifa;
426
427 /* verify the gateway is directly reachable */
428 if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
429 error = ENETUNREACH;
430 goto out;
431 }
432 rt = rtalloc1(dst, 0);
433 /*
434 * If the redirect isn't from our current router for this dst,
435 * it's either old or wrong. If it redirects us to ourselves,
436 * we have a routing loop, perhaps as a result of an interface
437 * going down recently.
438 */
439 if (!(flags & RTF_DONE) && rt &&
440 (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
441 error = EINVAL;
442 else if (ifa_ifwithaddr(gateway))
443 error = EHOSTUNREACH;
444 if (error)
445 goto done;
446 /*
447 * Create a new entry if we just got back a wildcard entry
448 * or the lookup failed. This is necessary for hosts
449 * which use routing redirects generated by smart gateways
450 * to dynamically build the routing tables.
451 */
452 if ((rt == NULL) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
453 goto create;
454 /*
455 * Don't listen to the redirect if it's
456 * for a route to an interface.
457 */
458 if (rt->rt_flags & RTF_GATEWAY) {
459 if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
460 /*
461 * Changing from route to net => route to host.
462 * Create new route, rather than smashing route to net.
463 */
464 create:
465 if (rt)
466 rtfree(rt);
467 flags |= RTF_GATEWAY | RTF_DYNAMIC;
468 info.rti_info[RTAX_DST] = dst;
469 info.rti_info[RTAX_GATEWAY] = gateway;
470 info.rti_info[RTAX_NETMASK] = netmask;
471 info.rti_ifa = ifa;
472 info.rti_flags = flags;
473 rt = NULL;
474 error = rtrequest1(RTM_ADD, &info, &rt);
475 if (rt != NULL)
476 flags = rt->rt_flags;
477 stat = &rtstat.rts_dynamic;
478 } else {
479 /*
480 * Smash the current notion of the gateway to
481 * this destination. Should check about netmask!!!
482 */
483 rt->rt_flags |= RTF_MODIFIED;
484 flags |= RTF_MODIFIED;
485 stat = &rtstat.rts_newgateway;
486 rt_setgate(rt, gateway);
487 }
488 } else
489 error = EHOSTUNREACH;
490 done:
491 if (rt) {
492 if (rtp && !error)
493 *rtp = rt;
494 else
495 rtfree(rt);
496 }
497 out:
498 if (error)
499 rtstat.rts_badredirect++;
500 else if (stat != NULL)
501 (*stat)++;
502 memset((void *)&info, 0, sizeof(info));
503 info.rti_info[RTAX_DST] = dst;
504 info.rti_info[RTAX_GATEWAY] = gateway;
505 info.rti_info[RTAX_NETMASK] = netmask;
506 info.rti_info[RTAX_AUTHOR] = src;
507 rt_missmsg(RTM_REDIRECT, &info, flags, error);
508 }
509
/*
 * Delete a route and generate a message
 */
static int
rtdeletemsg(struct rtentry *rt)
{
	int error;
	struct rt_addrinfo info;

	/*
	 * Request the new route so that the entry is not actually
	 * deleted. That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset((void *)&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_getkey(rt);
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	/* rtrequest1() hands back the detached (unfreed) entry via rt. */
	error = rtrequest1(RTM_DELETE, &info, &rt);

	rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);

	/* Adjust the refcount */
	if (error == 0 && rt->rt_refcnt <= 0) {
		rt->rt_refcnt++;
		rtfree(rt);
	}
	return (error);
}
540
541 static int
542 rtflushclone1(struct rtentry *rt, void *arg)
543 {
544 struct rtentry *parent;
545
546 parent = (struct rtentry *)arg;
547 if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
548 rtdeletemsg(rt);
549 return 0;
550 }
551
552 static void
553 rtflushclone(sa_family_t family, struct rtentry *parent)
554 {
555
556 #ifdef DIAGNOSTIC
557 if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
558 panic("rtflushclone: called with a non-cloning route");
559 #endif
560 rt_walktree(family, rtflushclone1, (void *)parent);
561 }
562
563 /*
564 * Routing table ioctl interface.
565 */
566 int
567 rtioctl(u_long req, void *data, struct lwp *l)
568 {
569 return (EOPNOTSUPP);
570 }
571
572 struct ifaddr *
573 ifa_ifwithroute(int flags, const struct sockaddr *dst,
574 const struct sockaddr *gateway)
575 {
576 struct ifaddr *ifa;
577 if ((flags & RTF_GATEWAY) == 0) {
578 /*
579 * If we are adding a route to an interface,
580 * and the interface is a pt to pt link
581 * we should search for the destination
582 * as our clue to the interface. Otherwise
583 * we can use the local address.
584 */
585 ifa = NULL;
586 if (flags & RTF_HOST)
587 ifa = ifa_ifwithdstaddr(dst);
588 if (ifa == NULL)
589 ifa = ifa_ifwithaddr(gateway);
590 } else {
591 /*
592 * If we are adding a route to a remote net
593 * or host, the gateway may still be on the
594 * other end of a pt to pt link.
595 */
596 ifa = ifa_ifwithdstaddr(gateway);
597 }
598 if (ifa == NULL)
599 ifa = ifa_ifwithnet(gateway);
600 if (ifa == NULL) {
601 struct rtentry *rt = rtalloc1(dst, 0);
602 if (rt == NULL)
603 return NULL;
604 rt->rt_refcnt--;
605 if ((ifa = rt->rt_ifa) == NULL)
606 return NULL;
607 }
608 if (ifa->ifa_addr->sa_family != dst->sa_family) {
609 struct ifaddr *oifa = ifa;
610 ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
611 if (ifa == 0)
612 ifa = oifa;
613 }
614 return (ifa);
615 }
616
/*
 * Round a positive length up to the next multiple of sizeof(long);
 * zero (or negative) rounds to sizeof(long) so an empty sockaddr still
 * occupies one slot.  The argument is fully parenthesized so the macro
 * is safe with compound expressions (the original's bare "a>0" broke
 * for arguments like "x & y" due to operator precedence).
 */
#define ROUNDUP(a) ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
618
619 int
620 rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
621 const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
622 {
623 struct rt_addrinfo info;
624
625 memset(&info, 0, sizeof(info));
626 info.rti_flags = flags;
627 info.rti_info[RTAX_DST] = dst;
628 info.rti_info[RTAX_GATEWAY] = gateway;
629 info.rti_info[RTAX_NETMASK] = netmask;
630 return rtrequest1(req, &info, ret_nrt);
631 }
632
/*
 * Derive info->rti_ifa (and, when unset, rti_ifp) from the addresses
 * already present in "info".  Returns 0 on success or ENETUNREACH if
 * no suitable interface address can be found.
 */
int
rt_getifa(struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
	const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];
	int flags = info->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl when protocol address
	 * is ambiguous
	 */
	if (info->rti_ifp == NULL && ifpaddr != NULL
	    && ifpaddr->sa_family == AF_LINK &&
	    (ifa = ifa_ifwithnet((const struct sockaddr *)ifpaddr)) != NULL)
		info->rti_ifp = ifa->ifa_ifp;
	/* Exact local-address match first. */
	if (info->rti_ifa == NULL && ifaaddr != NULL)
		info->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (info->rti_ifa == NULL) {
		const struct sockaddr *sa;

		/* Fall back to deriving the ifaddr from dst/gateway. */
		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && info->rti_ifp != NULL)
			info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		else if (sa != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, sa, sa);
	}
	if ((ifa = info->rti_ifa) == NULL)
		return ENETUNREACH;
	/* Let the ifaddr's hook substitute a different ifaddr for dst. */
	if (ifa->ifa_getifa != NULL)
		info->rti_ifa = ifa = (*ifa->ifa_getifa)(ifa, dst);
	if (info->rti_ifp == NULL)
		info->rti_ifp = ifa->ifa_ifp;
	return 0;
}
673
/*
 * Carry out a routing-table request (RTM_ADD, RTM_DELETE, RTM_RESOLVE,
 * RTM_GET) described by "info".  When ret_nrt is non-NULL it receives
 * the affected rtentry: with an extra reference held for RTM_ADD,
 * RTM_RESOLVE and RTM_GET, or the detached entry itself for RTM_DELETE
 * (caller then owns it).  Returns 0 or an errno.
 */
int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
{
	int s = splsoftnet();
	int error = 0;
	struct rtentry *rt, *crt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr_storage maskeddst;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
	int flags = info->rti_flags;
#define senderr(x) { error = x ; goto bad; }

	if ((rnh = rt_tables[dst->sa_family]) == NULL)
		senderr(ESRCH);
	/* Host routes never carry a netmask. */
	if (flags & RTF_HOST)
		netmask = NULL;
	switch (req) {
	case RTM_DELETE:
		if (netmask) {
			/* Look up by the masked destination. */
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		if ((rn = rnh->rnh_lookup(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(dst->sa_family, rt);
		}
		if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic ("rtrequest delete");
		rt = (struct rtentry *)rn;
		/* Drop the cached gateway route and the parent link. */
		if (rt->rt_gwroute) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}
		if (rt->rt_parent) {
			rt->rt_parent->rt_refcnt--;
			rt->rt_parent = NULL;
		}
		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);
		rttrash++;
		/* Either hand the detached entry to the caller or free it. */
		if (ret_nrt)
			*ret_nrt = rt;
		else if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		/* Clone a host route from the cloning route in *ret_nrt. */
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			senderr(EINVAL);
		ifa = rt->rt_ifa;
		flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
		flags |= RTF_CLONED;
		gateway = rt->rt_gateway;
		flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if (info->rti_ifa == NULL && (error = rt_getifa(info)))
			senderr(error);
		ifa = info->rti_ifa;
	makeroute:
		/* Already at splsoftnet() so pool_get/pool_put are safe */
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		Bzero(rt, sizeof(*rt));
		rt->rt_flags = RTF_UP | flags;
		LIST_INIT(&rt->rt_timer);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__, __LINE__,
		    (void *)rt->_rt_key);
		if (rt_setkey(rt, dst, PR_NOWAIT) == NULL ||
		    rt_setgate(rt, gateway) != 0) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__, __LINE__,
		    (void *)rt->_rt_key);
		/* Key the tree entry on the masked destination. */
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			rt_setkey(rt, (struct sockaddr *)&maskeddst, PR_NOWAIT);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		} else {
			rt_setkey(rt, dst, PR_NOWAIT);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		}
		rt_set_ifa(rt, ifa);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rt->rt_ifp = ifa->ifa_ifp;
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_parent = *ret_nrt;
			rt->rt_parent->rt_refcnt++;
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rn = rnh->rnh_addaddr(rt_getkey(rt), netmask, rnh,
		    rt->rt_nodes);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		/*
		 * Insertion failed: if a cloned route occupies the slot,
		 * delete it and retry once.
		 */
		if (rn == NULL && (crt = rtalloc1(rt_getkey(rt), 0)) != NULL) {
			/* overwrite cloned route */
			if ((crt->rt_flags & RTF_CLONED) != 0) {
				rtdeletemsg(crt);
				rn = rnh->rnh_addaddr(rt_getkey(rt),
				    netmask, rnh, rt->rt_nodes);
			}
			RTFREE(crt);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		/* Still no slot: undo everything we built above. */
		if (rn == NULL) {
			IFAFREE(ifa);
			if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
				rtfree(rt->rt_parent);
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			rt_destroy(rt);
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(dst->sa_family, rt);
		}
		/* New route may supersede cached ones: invalidate caches. */
		rtflushall(dst->sa_family);
		break;
	case RTM_GET:
		if (netmask != NULL) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		rn = rnh->rnh_lookup(dst, netmask, rnh);
		if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
			senderr(ESRCH);
		if (ret_nrt != NULL) {
			rt = (struct rtentry *)rn;
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		break;
	}
bad:
	splx(s);
	return (error);
}
851
/*
 * Replace rt's gateway address with a private copy of "gate",
 * releasing any previously cached gateway route.  For RTF_GATEWAY
 * routes a route to the gateway itself is resolved and cached, and
 * its MTU may be inherited.  Returns 0 or ENOMEM.
 */
int
rt_setgate(struct rtentry *rt, const struct sockaddr *gate)
{
	KASSERT(rt != rt->rt_gwroute);

	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);

	if (rt->rt_gwroute) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	if (rt->rt_gateway != NULL)
		sockaddr_free(rt->rt_gateway);
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	if ((rt->rt_gateway = sockaddr_dup(gate, PR_NOWAIT)) == NULL)
		return ENOMEM;
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);

	if (rt->rt_flags & RTF_GATEWAY) {
		KASSERT(rt->_rt_key != NULL);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rt->rt_gwroute = rtalloc1(gate, 1);
		/*
		 * If we switched gateways, grab the MTU from the new
		 * gateway route if the current MTU is greater than the
		 * MTU of the gateway.
		 * Note that, if the MTU of gateway is 0, we will reset the
		 * MTU of the route to run PMTUD again from scratch. XXX
		 */
		KASSERT(rt->_rt_key != NULL);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (rt->rt_gwroute
		    && !(rt->rt_rmx.rmx_locks & RTV_MTU)
		    && rt->rt_rmx.rmx_mtu
		    && rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
			rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
		}
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	return 0;
}
906
907 void
908 rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
909 const struct sockaddr *netmask)
910 {
911 const char *netmaskp = &netmask->sa_data[0],
912 *srcp = &src->sa_data[0];
913 char *dstp = &dst->sa_data[0];
914 const char *maskend = dstp + MIN(netmask->sa_len, src->sa_len);
915 const char *srcend = dstp + src->sa_len;
916
917 dst->sa_len = src->sa_len;
918 dst->sa_family = src->sa_family;
919
920 while (dstp < maskend)
921 *dstp++ = *srcp++ & *netmaskp++;
922 if (dstp < srcend)
923 memset(dstp, 0, (size_t)(srcend - dstp));
924 }
925
/*
 * Set up or tear down a routing table entry, normally
 * for an interface.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct rtentry *rt;
	struct sockaddr *dst, *odst;
	struct sockaddr_storage maskeddst;
	struct rtentry *nrt = NULL;
	int error;
	struct rt_addrinfo info;

	/* Host routes target the peer address, others the local one. */
	dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
	if (cmd == RTM_DELETE) {
		if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
			/* Delete subnet route for this interface */
			odst = dst;
			dst = (struct sockaddr *)&maskeddst;
			rt_maskedcopy(odst, dst, ifa->ifa_netmask);
		}
		/* Refuse to delete a route owned by a different ifaddr. */
		if ((rt = rtalloc1(dst, 0)) != NULL) {
			rt->rt_refcnt--;
			if (rt->rt_ifa != ifa)
				return (flags & RTF_HOST) ? EHOSTUNREACH
							: ENETUNREACH;
		}
	}
	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags | ifa->ifa_flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	/*
	 * XXX here, it seems that we are assuming that ifa_netmask is NULL
	 * for RTF_HOST. bsdi4 passes NULL explicitly (via intermediate
	 * variable) when RTF_HOST is 1. still not sure if i can safely
	 * change it to meet bsdi4 behavior.
	 */
	info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
	error = rtrequest1(cmd, &info, &nrt);
	if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
		/* Announce the deletion, then free the detached entry. */
		rt_newaddrmsg(cmd, ifa, error, nrt);
		if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
	}
	if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
		rt->rt_refcnt--;
		if (rt->rt_ifa != ifa) {
			/* An existing route carried another ifaddr: rebind. */
			printf("rtinit: wrong ifa (%p) was (%p)\n", ifa,
			    rt->rt_ifa);
			if (rt->rt_ifa->ifa_rtrequest)
				rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
			rt_replace_ifa(rt, ifa);
			rt->rt_ifp = ifa->ifa_ifp;
			if (ifa->ifa_rtrequest)
				ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
		}
		rt_newaddrmsg(cmd, ifa, error, nrt);
	}
	return error;
}
991
/*
 * Route timer routines.  These routes allow functions to be called
 * for various routes at any time.  This is useful in supporting
 * path MTU discovery and redirect route deletion.
 *
 * This is similar to some BSDI internal functions, but it provides
 * for multiple queues for efficiency's sake...
 */

LIST_HEAD(, rttimer_queue) rttimer_queue_head;	/* all active timer queues */
static int rt_init_done = 0;	/* set once rt_timer_init() has run */

/*
 * Fire one expired timer: invoke its callback when it has one,
 * otherwise delete the route the timer is attached to.
 */
#define RTTIMER_CALLOUT(r) do { \
	if (r->rtt_func != NULL) { \
		(*r->rtt_func)(r->rtt_rt, r); \
	} else { \
		rtrequest((int) RTM_DELETE, \
		    rt_getkey(r->rtt_rt), \
		    0, 0, 0, 0); \
	} \
} while (/*CONSTCOND*/0)
1013
/*
 * Some subtle order problems with domain initialization mean that
 * we cannot count on this being run from rt_init before various
 * protocol initializations are done. Therefore, we make sure
 * that this is run when the first queue is added...
 */

void
rt_timer_init(void)
{
	assert(rt_init_done == 0);

	LIST_INIT(&rttimer_queue_head);
	/* A single callout, ticking once per second, drives all queues. */
	callout_init(&rt_timer_ch, 0);
	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
	rt_init_done = 1;
}
1031
1032 struct rttimer_queue *
1033 rt_timer_queue_create(u_int timeout)
1034 {
1035 struct rttimer_queue *rtq;
1036
1037 if (rt_init_done == 0)
1038 rt_timer_init();
1039
1040 R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
1041 if (rtq == NULL)
1042 return NULL;
1043 Bzero(rtq, sizeof *rtq);
1044
1045 rtq->rtq_timeout = timeout;
1046 rtq->rtq_count = 0;
1047 TAILQ_INIT(&rtq->rtq_head);
1048 LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);
1049
1050 return rtq;
1051 }
1052
/*
 * Change the expiry interval of an existing timer queue.  The new
 * timeout is applied the next time rt_timer_timer() scans the queue;
 * already-queued timers are simply re-evaluated against it.
 */
void
rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
{

	rtq->rtq_timeout = timeout;
}
1059
/*
 * Drain every timer from `rtq'.  If `destroy' is non-zero, each timer's
 * action is fired (via RTTIMER_CALLOUT) as it is removed; otherwise the
 * timers are silently discarded.  The queue itself remains valid and
 * empty afterwards.
 */
void
rt_timer_queue_remove_all(struct rttimer_queue *rtq, int destroy)
{
	struct rttimer *r;

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		/* Unlink from both the per-route list and this queue. */
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
		/* rtq_count should mirror the list length; warn on skew. */
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_remove_all: "
			    "rtq_count reached 0\n");
	}
}
1079
1080 void
1081 rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy)
1082 {
1083
1084 rt_timer_queue_remove_all(rtq, destroy);
1085
1086 LIST_REMOVE(rtq, rtq_link);
1087
1088 /*
1089 * Caller is responsible for freeing the rttimer_queue structure.
1090 */
1091 }
1092
/* Return the number of timers currently queued on `rtq'. */
unsigned long
rt_timer_count(struct rttimer_queue *rtq)
{
	return rtq->rtq_count;
}
1098
/*
 * Remove every timer attached to route `rt', regardless of which queue
 * each timer lives on.  If `destroy' is non-zero the timer actions are
 * fired as they are removed.
 */
void
rt_timer_remove_all(struct rtentry *rt, int destroy)
{
	struct rttimer *r;

	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		/* Unlink from the route's list and the owning queue. */
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		/* Keep the owning queue's count in sync; warn on skew. */
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
	}
}
1117
/*
 * Schedule `func' to run for route `rt' when `queue's timeout expires.
 * A NULL `func' means "delete the route" (see RTTIMER_CALLOUT).
 * Returns 0 on success or ENOBUFS if a new rttimer cannot be allocated.
 */
int
rt_timer_add(struct rtentry *rt,
	void (*func)(struct rtentry *, struct rttimer *),
	struct rttimer_queue *queue)
{
	struct rttimer *r;
	int s;

	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
		if (r->rtt_func == func)
			break;
	}
	if (r != NULL) {
		/* Recycle the existing entry: unlink it from its old queue. */
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_add: rtq_count reached 0\n");
	} else {
		s = splsoftnet();
		r = pool_get(&rttimer_pool, PR_NOWAIT);
		splx(s);
		if (r == NULL)
			return ENOBUFS;
	}

	memset(r, 0, sizeof(*r));

	/* (Re)initialise and append to the tail of the target queue,
	 * so each queue stays ordered by insertion time. */
	r->rtt_rt = rt;
	r->rtt_time = time_uptime;
	r->rtt_func = func;
	r->rtt_queue = queue;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	return (0);
}
1161
/*
 * Periodic workhorse, rearmed every hz ticks.  For each queue, fire and
 * free all timers at the head that have been queued for longer than the
 * queue's timeout.  Timers are appended at the tail (rt_timer_add), so
 * each queue is in insertion order and the scan may stop at the first
 * unexpired entry.
 */
/* ARGSUSED */
void
rt_timer_timer(void *arg)
{
	struct rttimer_queue *rtq;
	struct rttimer *r;
	int s;

	s = splsoftnet();
	LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < time_uptime) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			RTTIMER_CALLOUT(r);
			pool_put(&rttimer_pool, r);
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	splx(s);

	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
}
1188
#ifdef RTCACHE_DEBUG
#ifndef RTCACHE_DEBUG_SIZE
#define RTCACHE_DEBUG_SIZE (1024 * 1024)
#endif
/*
 * Debug bookkeeping: parallel arrays recording, for each currently
 * initialised route cache, the caller that initialised it.  Used to
 * trap double-init / stray manipulation in the rtcache_* routines.
 */
static const char *cache_caller[RTCACHE_DEBUG_SIZE];
static struct route *cache_entry[RTCACHE_DEBUG_SIZE];
size_t cache_cur;
#endif
1197
/*
 * Look up the cached destination of `ro' in the routing table and
 * attach the resulting route to the cache.  `flag' is handed to
 * rtalloc1() (non-zero permits route cloning).  If no destination is
 * set, the cache is left untouched.  Under RTCACHE_DEBUG the caller
 * is recorded and re-initialisation of a live cache panics.
 */
#ifdef RTCACHE_DEBUG
static void
_rtcache_init_debug(const char *caller, struct route *ro, int flag)
#else
static void
_rtcache_init(struct route *ro, int flag)
#endif
{
#ifdef RTCACHE_DEBUG
	size_t i;
	for (i = 0; i < cache_cur; ++i) {
		if (cache_entry[i] == ro)
			panic("Reinit of route %p, initialised from %s", ro, cache_caller[i]);
	}
#endif

	if (rtcache_getdst(ro) == NULL)
		return;
	ro->ro_rt = rtalloc1(rtcache_getdst(ro), flag);
	if (ro->ro_rt != NULL) {
#ifdef RTCACHE_DEBUG
		if (cache_cur == RTCACHE_DEBUG_SIZE)
			panic("Route cache debug overflow");
		cache_caller[cache_cur] = caller;
		cache_entry[cache_cur] = ro;
		++cache_cur;
#endif
		/* Hook the cache onto the rtentry's list of caches. */
		rtcache(ro);
	}
}
1228
#ifdef RTCACHE_DEBUG
/* Initialise `ro', allowing route cloning (debug variant records caller). */
void
rtcache_init_debug(const char *caller, struct route *ro)
{
	_rtcache_init_debug(caller, ro, 1);
}

/* Initialise `ro' without cloning (debug variant records caller). */
void
rtcache_init_noclone_debug(const char *caller, struct route *ro)
{
	_rtcache_init_debug(caller, ro, 0);
}

/* Drop any cached route and look it up again with the same destination. */
void
rtcache_update(struct route *ro, int clone)
{
	rtcache_clear(ro);
	_rtcache_init_debug(__func__, ro, clone);
}
#else
/* Initialise `ro', allowing route cloning. */
void
rtcache_init(struct route *ro)
{
	_rtcache_init(ro, 1);
}

/* Initialise `ro' without cloning. */
void
rtcache_init_noclone(struct route *ro)
{
	_rtcache_init(ro, 0);
}

/* Drop any cached route and look it up again with the same destination. */
void
rtcache_update(struct route *ro, int clone)
{
	rtcache_clear(ro);
	_rtcache_init(ro, clone);
}
#endif
1268
/*
 * Duplicate route cache `old_ro' into `new_ro': copy the destination,
 * share the same rtentry, and take an extra reference on it.  Gives up
 * silently if the source has no destination or the copy of it fails.
 * `new_ro' is assumed to be uninitialised (debug build panics if not).
 */
#ifdef RTCACHE_DEBUG
void
rtcache_copy_debug(const char *caller, struct route *new_ro, const struct route *old_ro)
#else
void
rtcache_copy(struct route *new_ro, const struct route *old_ro)
#endif
{
	/* XXX i doubt this DTRT any longer --dyoung */
#ifdef RTCACHE_DEBUG
	size_t i;

	for (i = 0; i < cache_cur; ++i) {
		if (cache_entry[i] == new_ro)
			panic("Copy to initalised route %p (before %s)", new_ro, cache_caller[i]);
	}
#endif

	if (rtcache_getdst(old_ro) == NULL ||
	    rtcache_setdst(new_ro, rtcache_getdst(old_ro)) != 0)
		return;
	new_ro->ro_rt = old_ro->ro_rt;
	if (new_ro->ro_rt != NULL) {
#ifdef RTCACHE_DEBUG
		if (cache_cur == RTCACHE_DEBUG_SIZE)
			panic("Route cache debug overflow");
		cache_caller[cache_cur] = caller;
		cache_entry[cache_cur] = new_ro;
		++cache_cur;
#endif
		rtcache(new_ro);
		/* The copy holds its own reference on the shared rtentry. */
		++new_ro->ro_rt->rt_refcnt;
	}
}
1303
1304 void
1305 rtcache_clear(struct route *ro)
1306 {
1307 #ifdef RTCACHE_DEBUG
1308 size_t j, i = cache_cur;
1309 for (i = j = 0; i < cache_cur; ++i, ++j) {
1310 if (cache_entry[i] == ro) {
1311 if (ro->ro_rt == NULL)
1312 panic("Route cache manipulated (allocated by %s)", cache_caller[i]);
1313 --j;
1314 } else {
1315 cache_caller[j] = cache_caller[i];
1316 cache_entry[j] = cache_entry[i];
1317 }
1318 }
1319 if (ro->ro_rt != NULL) {
1320 if (i != j + 1)
1321 panic("Wrong entries after rtcache_free: %zu (expected %zu)", j, i - 1);
1322 --cache_cur;
1323 }
1324 #endif
1325
1326 if (ro->ro_rt != NULL)
1327 rtflush(ro);
1328 ro->ro_rt = NULL;
1329 }
1330
1331 struct rtentry *
1332 rtcache_lookup2(struct route *ro, const struct sockaddr *dst, int clone,
1333 int *hitp)
1334 {
1335 const struct sockaddr *odst;
1336
1337 odst = rtcache_getdst(ro);
1338
1339 if (odst == NULL)
1340 ;
1341 else if (sockaddr_cmp(odst, dst) != 0)
1342 rtcache_free(ro);
1343 else if (rtcache_down(ro))
1344 rtcache_clear(ro);
1345
1346 if (ro->ro_rt == NULL) {
1347 *hitp = 0;
1348 rtcache_setdst(ro, dst);
1349 _rtcache_init(ro, clone);
1350 } else
1351 *hitp = 1;
1352
1353 return ro->ro_rt;
1354 }
1355
1356 void
1357 rtcache_free(struct route *ro)
1358 {
1359 rtcache_clear(ro);
1360 if (ro->ro_sa != NULL) {
1361 sockaddr_free(ro->ro_sa);
1362 ro->ro_sa = NULL;
1363 }
1364 }
1365
/*
 * Install `sa' as the cached destination of `ro', invalidating any
 * cached route.  If the existing ro_sa has the same address family it
 * is overwritten in place; otherwise it is freed and `sa' duplicated.
 * Returns 0 on success or ENOMEM if the duplication fails (ro_sa is
 * then NULL).
 */
int
rtcache_setdst(struct route *ro, const struct sockaddr *sa)
{
	KASSERT(sa != NULL);

	if (ro->ro_sa != NULL && ro->ro_sa->sa_family == sa->sa_family) {
		/* Same family: reuse the existing sockaddr storage. */
		rtcache_clear(ro);
		sockaddr_copy(ro->ro_sa, sa);
		return 0;
	} else if (ro->ro_sa != NULL)
		rtcache_free(ro); /* free ro_sa, wrong family */

	if ((ro->ro_sa = sockaddr_dup(sa, PR_NOWAIT)) == NULL)
		return ENOMEM;
	return 0;
}
1382
1383 static int
1384 rt_walktree_visitor(struct radix_node *rn, void *v)
1385 {
1386 struct rtwalk *rw = (struct rtwalk *)v;
1387
1388 return (*rw->rw_f)((struct rtentry *)rn, rw->rw_v);
1389 }
1390
1391 int
1392 rt_walktree(sa_family_t family, int (*f)(struct rtentry *, void *), void *v)
1393 {
1394 struct radix_node_head *rnh = rt_tables[family];
1395 struct rtwalk rw;
1396
1397 if (rnh == NULL)
1398 return 0;
1399
1400 rw.rw_f = f;
1401 rw.rw_v = v;
1402
1403 return rn_walktree(rnh, rt_walktree_visitor, &rw);
1404 }
1405