route.c revision 1.92 1 /* $NetBSD: route.c,v 1.92 2007/06/09 03:07:21 dyoung Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. Neither the name of the project nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 */
68
69 /*
70 * Copyright (c) 1980, 1986, 1991, 1993
71 * The Regents of the University of California. All rights reserved.
72 *
73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions
75 * are met:
76 * 1. Redistributions of source code must retain the above copyright
77 * notice, this list of conditions and the following disclaimer.
78 * 2. Redistributions in binary form must reproduce the above copyright
79 * notice, this list of conditions and the following disclaimer in the
80 * documentation and/or other materials provided with the distribution.
81 * 3. Neither the name of the University nor the names of its contributors
82 * may be used to endorse or promote products derived from this software
83 * without specific prior written permission.
84 *
85 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
86 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
89 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
90 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
91 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
92 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
93 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
94 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
95 * SUCH DAMAGE.
96 *
97 * @(#)route.c 8.3 (Berkeley) 1/9/95
98 */
99
100 #include "opt_route.h"
101
102 #include <sys/cdefs.h>
103 __KERNEL_RCSID(0, "$NetBSD: route.c,v 1.92 2007/06/09 03:07:21 dyoung Exp $");
104
105 #include <sys/param.h>
106 #include <sys/sysctl.h>
107 #include <sys/systm.h>
108 #include <sys/callout.h>
109 #include <sys/proc.h>
110 #include <sys/mbuf.h>
111 #include <sys/socket.h>
112 #include <sys/socketvar.h>
113 #include <sys/domain.h>
114 #include <sys/protosw.h>
115 #include <sys/kernel.h>
116 #include <sys/ioctl.h>
117 #include <sys/pool.h>
118
119 #include <net/if.h>
120 #include <net/route.h>
121 #include <net/raw_cb.h>
122
123 #include <netinet/in.h>
124 #include <netinet/in_var.h>
125
126 #ifdef RTFLUSH_DEBUG
127 #define rtcache_debug() __predict_false(_rtcache_debug)
128 #else /* RTFLUSH_DEBUG */
129 #define rtcache_debug() 0
130 #endif /* RTFLUSH_DEBUG */
131
132 struct route_cb route_cb;
133 struct rtstat rtstat;
134 struct radix_node_head *rt_tables[AF_MAX+1];
135
136 int rttrash; /* routes not in table but not freed */
137 struct sockaddr wildcard; /* zero valued cookie for wildcard searches */
138
139 POOL_INIT(rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl", NULL,
140 IPL_SOFTNET);
141 POOL_INIT(rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl", NULL,
142 IPL_SOFTNET);
143
144 struct callout rt_timer_ch; /* callout for rt_timer_timer() */
145
146 #ifdef RTFLUSH_DEBUG
147 static int _rtcache_debug = 0;
148 #endif /* RTFLUSH_DEBUG */
149
150 static int rtdeletemsg(struct rtentry *);
151 static int rtflushclone1(struct rtentry *, void *);
152 static void rtflushclone(sa_family_t family, struct rtentry *);
153
154 #ifdef RTFLUSH_DEBUG
/*
 * Create the net.rtcache.debug sysctl subtree, a run-time switch for
 * the RTFLUSH_DEBUG diagnostics (_rtcache_debug).
 */
155 SYSCTL_SETUP(sysctl_net_rtcache_setup, "sysctl net.rtcache.debug setup")
156 {
157 const struct sysctlnode *rnode;
158 
159 /* XXX do not duplicate */
160 if (sysctl_createv(clog, 0, NULL, &rnode, CTLFLAG_PERMANENT,
161 CTLTYPE_NODE, "net", NULL, NULL, 0, NULL, 0, CTL_NET, CTL_EOL) != 0)
162 return;
163 if (sysctl_createv(clog, 0, &rnode, &rnode, CTLFLAG_PERMANENT,
164 CTLTYPE_NODE,
165 "rtcache", SYSCTL_DESCR("Route cache related settings"),
166 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
167 return;
168 if (sysctl_createv(clog, 0, &rnode, &rnode,
169 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
170 "debug", SYSCTL_DESCR("Debug route caches"),
171 NULL, 0, &_rtcache_debug, 0, CTL_CREATE, CTL_EOL) != 0)
172 return;
173 }
174 #endif /* RTFLUSH_DEBUG */
175
176 struct ifaddr *
177 rt_get_ifa(struct rtentry *rt)
178 {
179 struct ifaddr *ifa;
180
181 if ((ifa = rt->rt_ifa) == NULL)
182 return ifa;
183 else if (ifa->ifa_getifa == NULL)
184 return ifa;
185 #if 0
186 else if (ifa->ifa_seqno != NULL && *ifa->ifa_seqno == rt->rt_ifa_seqno)
187 return ifa;
188 #endif
189 else {
190 ifa = (*ifa->ifa_getifa)(ifa, rt_key(rt));
191 rt_replace_ifa(rt, ifa);
192 return ifa;
193 }
194 }
195
/*
 * Point rt->rt_ifa at `ifa' without touching reference counts (the
 * callers rt_set_ifa()/rt_replace_ifa() handle IFAREF/IFAFREE), and
 * record the ifaddr's sequence number when it publishes one.
 */
196 static void
197 rt_set_ifa1(struct rtentry *rt, struct ifaddr *ifa)
198 {
199 rt->rt_ifa = ifa;
200 if (ifa->ifa_seqno != NULL)
201 rt->rt_ifa_seqno = *ifa->ifa_seqno;
202 }
203
/*
 * Replace the route's ifaddr with `ifa': reference the new one first,
 * then drop the old one, then install.  Referencing before freeing
 * keeps a self-replacement (ifa == rt->rt_ifa) safe.
 */
204 void
205 rt_replace_ifa(struct rtentry *rt, struct ifaddr *ifa)
206 {
207 IFAREF(ifa);
208 IFAFREE(rt->rt_ifa);
209 rt_set_ifa1(rt, ifa);
210 }
211
/*
 * Install an ifaddr on a route that has none yet: takes a reference on
 * `ifa' but, unlike rt_replace_ifa(), releases no previous one.
 */
212 static void
213 rt_set_ifa(struct rtentry *rt, struct ifaddr *ifa)
214 {
215 IFAREF(ifa);
216 rt_set_ifa1(rt, ifa);
217 }
218
219 void
220 rtable_init(void **table)
221 {
222 struct domain *dom;
223 DOMAIN_FOREACH(dom)
224 if (dom->dom_rtattach)
225 dom->dom_rtattach(&table[dom->dom_family],
226 dom->dom_rtoffset);
227 }
228
/*
 * One-time initialization of the routing subsystem: set up the radix
 * code's global masks, then attach each domain's routing table.
 */
229 void
230 route_init(void)
231 {
232 
233 rn_init(); /* initialize all zeroes, all ones, mask table */
234 rtable_init((void **)rt_tables);
235 }
236
/*
 * Invalidate every cached route (struct route) registered for address
 * family `family', forcing the next user to re-look it up.  The list
 * walk runs at splnet; rtcache_clear() unlinks each entry, so taking
 * LIST_FIRST each iteration terminates.
 */
237 void
238 rtflushall(int family)
239 {
240 int s;
241 struct domain *dom;
242 struct route *ro;
243 
244 if (rtcache_debug())
245 printf("%s: enter\n", __func__);
246 
247 if ((dom = pffinddomain(family)) == NULL)
248 return;
249 
250 s = splnet();
251 while ((ro = LIST_FIRST(&dom->dom_rtcache)) != NULL) {
252 KASSERT(ro->ro_rt != NULL);
253 rtcache_clear(ro);
254 }
255 splx(s);
256 }
257
/*
 * Invalidate cached route `ro': release its rtentry reference and
 * unlink it from its domain's route-cache list.  The caller must
 * guarantee a route and destination are present (asserted below).
 */
258 void
259 rtflush(struct route *ro)
260 {
261 KASSERT(ro->ro_rt != NULL);
262 KASSERT(rtcache_getdst(ro) != NULL);
263 
264 RTFREE(ro->ro_rt);
265 ro->ro_rt = NULL;
266 
267 LIST_REMOVE(ro, ro_rtcache_next);
268 
269 #if 0
270 if (rtcache_debug()) {
271 printf("%s: flushing %s\n", __func__,
272 inet_ntoa((satocsin(rtcache_getdst(ro)))->sin_addr));
273 }
274 #endif
275 }
276
/*
 * Register cached route `ro' (which must already hold a route and a
 * destination) on its domain's route-cache list so rtflushall() can
 * invalidate it later.  Unknown families are silently ignored.
 */
277 void
278 rtcache(struct route *ro)
279 {
280 struct domain *dom;
281 
282 KASSERT(ro->ro_rt != NULL);
283 KASSERT(rtcache_getdst(ro) != NULL);
284 
285 if ((dom = pffinddomain(rtcache_getdst(ro)->sa_family)) == NULL)
286 return;
287 
288 LIST_INSERT_HEAD(&dom->dom_rtcache, ro, ro_rtcache_next);
289 }
290
291 /*
292 * Packet routing routines.
293 */
/*
 * Ensure `ro' holds a usable route to its destination: keep a cached
 * route that is still up, otherwise flush it, do a fresh reporting
 * lookup with rtalloc1(), and cache any result.
 */
294 void
295 rtalloc(struct route *ro)
296 {
297 if (ro->ro_rt != NULL) {
/* Cached route is usable only while it has an ifp and RTF_UP. */
298 if (ro->ro_rt->rt_ifp != NULL &&
299 (ro->ro_rt->rt_flags & RTF_UP) != 0)
300 return;
301 rtflush(ro);
302 }
303 if (rtcache_getdst(ro) == NULL ||
304 (ro->ro_rt = rtalloc1(rtcache_getdst(ro), 1)) == NULL)
305 return;
306 rtcache(ro);
307 }
308
/*
 * Core route lookup: find the best match for `dst' in the per-family
 * radix tree and return it with an extra reference, or NULL on a miss.
 * When `report' is set, a matching RTF_CLONING route is cloned via
 * RTM_RESOLVE and misses/XRESOLVE cases are announced on the routing
 * socket through rt_missmsg().  Runs at splsoftnet internally.
 */
309 struct rtentry *
310 rtalloc1(const struct sockaddr *dst, int report)
311 {
312 struct radix_node_head *rnh = rt_tables[dst->sa_family];
313 struct rtentry *rt;
314 struct radix_node *rn;
315 struct rtentry *newrt = NULL;
316 struct rt_addrinfo info;
317 int s = splsoftnet(), err = 0, msgtype = RTM_MISS;
318 
319 if (rnh && (rn = rnh->rnh_matchaddr(dst, rnh)) &&
320 ((rn->rn_flags & RNF_ROOT) == 0)) {
321 newrt = rt = (struct rtentry *)rn;
322 if (report && (rt->rt_flags & RTF_CLONING)) {
323 err = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
324 &newrt);
325 if (err) {
/* Cloning failed: fall back to the cloning route itself. */
326 newrt = rt;
327 rt->rt_refcnt++;
328 goto miss;
329 }
330 KASSERT(newrt != NULL);
331 if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
332 msgtype = RTM_RESOLVE;
333 goto miss;
334 }
335 /* Inform listeners of the new route */
336 memset(&info, 0, sizeof(info));
337 info.rti_info[RTAX_DST] = rt_key(rt);
338 info.rti_info[RTAX_NETMASK] = rt_mask(rt);
339 info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
340 if (rt->rt_ifp != NULL) {
341 info.rti_info[RTAX_IFP] =
342 TAILQ_FIRST(&rt->rt_ifp->if_addrlist)->ifa_addr;
343 info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
344 }
345 rt_missmsg(RTM_ADD, &info, rt->rt_flags, 0);
346 } else
347 rt->rt_refcnt++;
348 } else {
349 rtstat.rts_unreach++;
350 miss: if (report) {
351 memset((void *)&info, 0, sizeof(info));
352 info.rti_info[RTAX_DST] = dst;
353 rt_missmsg(msgtype, &info, 0, err);
354 }
355 }
356 splx(s);
357 return (newrt);
358 }
359
/*
 * Drop one reference on `rt'.  Once the count reaches zero and the
 * route has left the table (RTF_UP clear), tear it down: cancel
 * timers, release the ifaddr, free the key storage and the entry.
 */
360 void
361 rtfree(struct rtentry *rt)
362 {
363 struct ifaddr *ifa;
364 
365 if (rt == NULL)
366 panic("rtfree");
367 rt->rt_refcnt--;
368 if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
369 if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
370 panic ("rtfree 2");
371 rttrash--;
372 if (rt->rt_refcnt < 0) {
/* NOTE(review): entry is deliberately leaked on a negative count. */
373 printf("rtfree: %p not freed (neg refs)\n", rt);
374 return;
375 }
376 rt_timer_remove_all(rt, 0);
377 ifa = rt->rt_ifa;
378 rt->rt_ifa = NULL;
379 IFAFREE(ifa);
380 rt->rt_ifp = NULL;
381 Free(rt_key(rt));
382 pool_put(&rtentry_pool, rt);
383 }
384 }
385
/*
 * Final free of an ifaddr whose reference count has already dropped to
 * zero; DIAGNOSTIC builds verify both preconditions before freeing.
 */
386 void
387 ifafree(struct ifaddr *ifa)
388 {
389 
390 #ifdef DIAGNOSTIC
391 if (ifa == NULL)
392 panic("ifafree: null ifa");
393 if (ifa->ifa_refcnt != 0)
394 panic("ifafree: ifa_refcnt != 0 (%d)", ifa->ifa_refcnt);
395 #endif
396 #ifdef IFAREF_DEBUG
397 printf("ifafree: freeing ifaddr %p\n", ifa);
398 #endif
399 free(ifa, M_IFADDR);
400 }
401
402 /*
403 * Force a routing table entry to the specified
404 * destination to go through the given gateway.
405 * Normally called as a result of a routing redirect
406 * message from the network layer.
407 *
408 * N.B.: must be called at splsoftnet
409 */
410 void
411 rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
412 const struct sockaddr *netmask, int flags, const struct sockaddr *src,
413 struct rtentry **rtp)
414 {
415 struct rtentry *rt;
416 int error = 0;
417 u_quad_t *stat = NULL;
418 struct rt_addrinfo info;
419 struct ifaddr *ifa;
420 
421 /* verify the gateway is directly reachable */
422 if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
423 error = ENETUNREACH;
424 goto out;
425 }
426 rt = rtalloc1(dst, 0);
427 /*
428 * If the redirect isn't from our current router for this dst,
429 * it's either old or wrong. If it redirects us to ourselves,
430 * we have a routing loop, perhaps as a result of an interface
431 * going down recently.
432 */
433 #define equal(a1, a2) \
434 ((a1)->sa_len == (a2)->sa_len && \
435 memcmp((a1), (a2), (a1)->sa_len) == 0)
436 if (!(flags & RTF_DONE) && rt &&
437 (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
438 error = EINVAL;
439 else if (ifa_ifwithaddr(gateway))
440 error = EHOSTUNREACH;
441 if (error)
442 goto done;
443 /*
444 * Create a new entry if we just got back a wildcard entry
445 * or the lookup failed. This is necessary for hosts
446 * which use routing redirects generated by smart gateways
447 * to dynamically build the routing tables.
448 */
449 if ((rt == NULL) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
450 goto create;
451 /*
452 * Don't listen to the redirect if it's
453 * for a route to an interface.
454 */
455 if (rt->rt_flags & RTF_GATEWAY) {
456 if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
457 /*
458 * Changing from route to net => route to host.
459 * Create new route, rather than smashing route to net.
460 */
461 create:
462 if (rt)
463 rtfree(rt);
464 flags |= RTF_GATEWAY | RTF_DYNAMIC;
/*
 * NOTE(review): `info' is only partially initialized here; this relies
 * on rtrequest1() reading nothing but the fields set below -- confirm
 * before extending rtrequest1()'s use of rti_info[].
 */
465 info.rti_info[RTAX_DST] = dst;
466 info.rti_info[RTAX_GATEWAY] = gateway;
467 info.rti_info[RTAX_NETMASK] = netmask;
468 info.rti_ifa = ifa;
469 info.rti_flags = flags;
470 rt = NULL;
471 error = rtrequest1(RTM_ADD, &info, &rt);
472 if (rt != NULL)
473 flags = rt->rt_flags;
474 stat = &rtstat.rts_dynamic;
475 } else {
476 /*
477 * Smash the current notion of the gateway to
478 * this destination. Should check about netmask!!!
479 */
480 rt->rt_flags |= RTF_MODIFIED;
481 flags |= RTF_MODIFIED;
482 stat = &rtstat.rts_newgateway;
483 rt_setgate(rt, rt_key(rt), gateway);
484 }
485 } else
486 error = EHOSTUNREACH;
487 done:
488 if (rt) {
489 if (rtp && !error)
490 *rtp = rt;
491 else
492 rtfree(rt);
493 }
494 out:
495 if (error)
496 rtstat.rts_badredirect++;
497 else if (stat != NULL)
498 (*stat)++;
499 memset((void *)&info, 0, sizeof(info));
500 info.rti_info[RTAX_DST] = dst;
501 info.rti_info[RTAX_GATEWAY] = gateway;
502 info.rti_info[RTAX_NETMASK] = netmask;
503 info.rti_info[RTAX_AUTHOR] = src;
504 rt_missmsg(RTM_REDIRECT, &info, flags, error);
505 }
506
507 /*
508 * Delete a route and generate a message
509 */
/*
 * Delete route `rt' and broadcast an RTM_DELETE routing-socket message
 * describing it.  Passing &rt to rtrequest1() keeps the entry alive
 * long enough to report it accurately; the refcount bump at the end
 * then actually releases it.  Returns the rtrequest1() error code.
 */
510 static int
511 rtdeletemsg(struct rtentry *rt)
512 {
513 int error;
514 struct rt_addrinfo info;
515 
516 /*
517 * Request the new route so that the entry is not actually
518 * deleted. That will allow the information being reported to
519 * be accurate (and consistent with route_output()).
520 */
521 memset((void *)&info, 0, sizeof(info));
522 info.rti_info[RTAX_DST] = rt_key(rt);
523 info.rti_info[RTAX_NETMASK] = rt_mask(rt);
524 info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
525 info.rti_flags = rt->rt_flags;
526 error = rtrequest1(RTM_DELETE, &info, &rt);
527 
528 rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);
529 
530 /* Adjust the refcount */
531 if (error == 0 && rt->rt_refcnt <= 0) {
532 rt->rt_refcnt++;
533 rtfree(rt);
534 }
535 return (error);
536 }
537
538 static int
539 rtflushclone1(struct rtentry *rt, void *arg)
540 {
541 struct rtentry *parent;
542
543 parent = (struct rtentry *)arg;
544 if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
545 rtdeletemsg(rt);
546 return 0;
547 }
548
/*
 * Delete every cloned child of cloning route `parent' by walking the
 * family's routing tree with rtflushclone1() as the callback.
 */
549 static void
550 rtflushclone(sa_family_t family, struct rtentry *parent)
551 {
552 
553 #ifdef DIAGNOSTIC
554 if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
555 panic("rtflushclone: called with a non-cloning route");
556 #endif
557 rt_walktree(family, rtflushclone1, (void *)parent);
558 }
559
560 /*
561 * Routing table ioctl interface.
562 */
563 int
564 rtioctl(u_long req, void *data, struct lwp *l)
565 {
566 return (EOPNOTSUPP);
567 }
568
569 struct ifaddr *
570 ifa_ifwithroute(int flags, const struct sockaddr *dst,
571 const struct sockaddr *gateway)
572 {
573 struct ifaddr *ifa;
574 if ((flags & RTF_GATEWAY) == 0) {
575 /*
576 * If we are adding a route to an interface,
577 * and the interface is a pt to pt link
578 * we should search for the destination
579 * as our clue to the interface. Otherwise
580 * we can use the local address.
581 */
582 ifa = NULL;
583 if (flags & RTF_HOST)
584 ifa = ifa_ifwithdstaddr(dst);
585 if (ifa == NULL)
586 ifa = ifa_ifwithaddr(gateway);
587 } else {
588 /*
589 * If we are adding a route to a remote net
590 * or host, the gateway may still be on the
591 * other end of a pt to pt link.
592 */
593 ifa = ifa_ifwithdstaddr(gateway);
594 }
595 if (ifa == NULL)
596 ifa = ifa_ifwithnet(gateway);
597 if (ifa == NULL) {
598 struct rtentry *rt = rtalloc1(dst, 0);
599 if (rt == NULL)
600 return NULL;
601 rt->rt_refcnt--;
602 if ((ifa = rt->rt_ifa) == NULL)
603 return NULL;
604 }
605 if (ifa->ifa_addr->sa_family != dst->sa_family) {
606 struct ifaddr *oifa = ifa;
607 ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
608 if (ifa == 0)
609 ifa = oifa;
610 }
611 return (ifa);
612 }
613
/*
 * Round a sockaddr length up to the next multiple of sizeof(long); a
 * length of zero rounds to sizeof(long) so empty addresses still get a
 * slot.  Fix: the original left the first use of `a' unparenthesized
 * (`a>0'), which mis-expands for some expression arguments; every use
 * is now parenthesized.
 */
#define ROUNDUP(a) ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
615
616 int
617 rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
618 const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
619 {
620 struct rt_addrinfo info;
621
622 memset(&info, 0, sizeof(info));
623 info.rti_flags = flags;
624 info.rti_info[RTAX_DST] = dst;
625 info.rti_info[RTAX_GATEWAY] = gateway;
626 info.rti_info[RTAX_NETMASK] = netmask;
627 return rtrequest1(req, &info, ret_nrt);
628 }
629
/*
 * Resolve info->rti_ifa (and rti_ifp) for a route request from the
 * addresses in rti_info[], trying in order: an explicit IFA address,
 * an address on the resolved ifp, the dst/gateway pair, and finally a
 * single address used as both.  Returns 0 on success or ENETUNREACH
 * when no ifaddr can be found.
 */
630 int
631 rt_getifa(struct rt_addrinfo *info)
632 {
633 struct ifaddr *ifa;
634 const struct sockaddr *dst = info->rti_info[RTAX_DST];
635 const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
636 const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
637 const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];
638 int flags = info->rti_flags;
639 
640 /*
641 * ifp may be specified by sockaddr_dl when protocol address
642 * is ambiguous
643 */
644 if (info->rti_ifp == NULL && ifpaddr != NULL
645 && ifpaddr->sa_family == AF_LINK &&
646 (ifa = ifa_ifwithnet((const struct sockaddr *)ifpaddr)) != NULL)
647 info->rti_ifp = ifa->ifa_ifp;
648 if (info->rti_ifa == NULL && ifaaddr != NULL)
649 info->rti_ifa = ifa_ifwithaddr(ifaaddr)
670
/*
 * Core routing-table request engine handling RTM_ADD, RTM_DELETE,
 * RTM_RESOLVE (clone a route from an RTF_CLONING parent) and RTM_GET,
 * all described by `info'.  On success, *ret_nrt (when non-NULL)
 * receives a referenced rtentry.  Runs at splsoftnet for its whole
 * duration.  Returns 0 or an errno.
 */
671 int
672 rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
673 {
674 int s = splsoftnet();
675 int error = 0;
676 struct rtentry *rt, *crt;
677 struct radix_node *rn;
678 struct radix_node_head *rnh;
679 struct ifaddr *ifa;
680 struct sockaddr_storage deldst;
681 const struct sockaddr *dst = info->rti_info[RTAX_DST];
682 const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
683 const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
684 int flags = info->rti_flags;
685 #define senderr(x) { error = x ; goto bad; }
686 
687 if ((rnh = rt_tables[dst->sa_family]) == NULL)
688 senderr(ESRCH);
689 if (flags & RTF_HOST)
690 netmask = NULL;
691 switch (req) {
692 case RTM_DELETE:
693 if (netmask) {
/* Mask the destination so it matches the stored (masked) key. */
694 rt_maskedcopy(dst, (struct sockaddr *)&deldst, netmask);
695 dst = (struct sockaddr *)&deldst;
696 }
697 if ((rn = rnh->rnh_lookup(dst, netmask, rnh)) == NULL)
698 senderr(ESRCH);
699 rt = (struct rtentry *)rn;
700 if ((rt->rt_flags & RTF_CLONING) != 0) {
701 /* clean up any cloned children */
702 rtflushclone(dst->sa_family, rt);
703 }
704 if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL)
705 senderr(ESRCH);
706 if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
707 panic ("rtrequest delete");
708 rt = (struct rtentry *)rn;
709 if (rt->rt_gwroute) {
710 RTFREE(rt->rt_gwroute);
711 rt->rt_gwroute = NULL;
712 }
713 if (rt->rt_parent) {
714 rt->rt_parent->rt_refcnt--;
715 rt->rt_parent = NULL;
716 }
717 rt->rt_flags &= ~RTF_UP;
718 if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
719 ifa->ifa_rtrequest(RTM_DELETE, rt, info);
720 rttrash++;
721 if (ret_nrt)
722 *ret_nrt = rt;
723 else if (rt->rt_refcnt <= 0) {
724 rt->rt_refcnt++;
725 rtfree(rt);
726 }
727 break;
728 
729 case RTM_RESOLVE:
730 if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
731 senderr(EINVAL);
732 if ((rt->rt_flags & RTF_CLONING) == 0)
733 senderr(EINVAL);
/* Inherit attributes from the parent (cloning) route. */
734 ifa = rt->rt_ifa;
735 flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
736 flags |= RTF_CLONED;
737 gateway = rt->rt_gateway;
738 if ((netmask = rt->rt_genmask) == NULL)
739 flags |= RTF_HOST;
740 goto makeroute;
741 
742 case RTM_ADD:
743 if (info->rti_ifa == NULL && (error = rt_getifa(info)))
744 senderr(error);
745 ifa = info->rti_ifa;
746 makeroute:
747 /* Already at splsoftnet() so pool_get/pool_put are safe */
748 rt = pool_get(&rtentry_pool, PR_NOWAIT);
749 if (rt == NULL)
750 senderr(ENOBUFS);
751 Bzero(rt, sizeof(*rt));
752 rt->rt_flags = RTF_UP | flags;
753 LIST_INIT(&rt->rt_timer);
754 if (rt_setgate(rt, dst, gateway)) {
755 pool_put(&rtentry_pool, rt);
756 senderr(ENOBUFS);
757 }
758 if (netmask) {
759 rt_maskedcopy(dst, rt_key(rt), netmask);
760 } else
761 Bcopy(dst, rt_key(rt), dst->sa_len);
762 rt_set_ifa(rt, ifa);
763 rt->rt_ifp = ifa->ifa_ifp;
764 if (req == RTM_RESOLVE) {
765 rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
766 rt->rt_parent = *ret_nrt;
767 rt->rt_parent->rt_refcnt++;
768 }
769 rn = rnh->rnh_addaddr(rt_key(rt), netmask, rnh, rt->rt_nodes);
770 if (rn == NULL && (crt = rtalloc1(rt_key(rt), 0)) != NULL) {
771 /* overwrite cloned route */
772 if ((crt->rt_flags & RTF_CLONED) != 0) {
773 rtdeletemsg(crt);
774 rn = rnh->rnh_addaddr(rt_key(rt),
775 netmask, rnh, rt->rt_nodes);
776 }
777 RTFREE(crt);
778 }
779 if (rn == NULL) {
/* Insertion failed (duplicate key): undo all setup above. */
780 IFAFREE(ifa);
781 if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
782 rtfree(rt->rt_parent);
783 if (rt->rt_gwroute)
784 rtfree(rt->rt_gwroute);
785 Free(rt_key(rt));
786 pool_put(&rtentry_pool, rt);
787 senderr(EEXIST);
788 }
789 if (ifa->ifa_rtrequest)
790 ifa->ifa_rtrequest(req, rt, info);
791 if (ret_nrt) {
792 *ret_nrt = rt;
793 rt->rt_refcnt++;
794 }
795 if ((rt->rt_flags & RTF_CLONING) != 0) {
796 /* clean up any cloned children */
797 rtflushclone(dst->sa_family, rt);
798 }
799 rtflushall(dst->sa_family);
800 break;
801 case RTM_GET:
802 rn = rnh->rnh_lookup(dst, netmask, rnh);
803 if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
804 senderr(ESRCH);
805 if (ret_nrt != NULL) {
806 rt = (struct rtentry *)rn;
807 *ret_nrt = rt;
808 rt->rt_refcnt++;
809 }
810 break;
811 }
812 bad:
813 splx(s);
814 return (error);
815 }
816
/*
 * Set the gateway of `rt0' to `gate', (re)allocating the combined
 * key+gateway buffer when the new gateway needs more room, and
 * refreshing rt_gwroute for RTF_GATEWAY routes.  Returns 0 on success,
 * 1 if allocation fails.
 */
817 int
818 rt_setgate( struct rtentry *rt0, const struct sockaddr *dst,
819 const struct sockaddr *gate)
820 {
821 char *new, *old;
822 u_int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
823 struct rtentry *rt = rt0;
824 
825 if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
/* New gateway does not fit: allocate key+gateway contiguously. */
826 old = (void *)rt_key(rt);
827 R_Malloc(new, void *, dlen + glen);
828 if (new == NULL)
829 return 1;
830 Bzero(new, dlen + glen);
831 rt->rt_nodes->rn_key = new;
832 } else {
/* Reuse the existing buffer; the new gateway fits in place. */
833 new = __UNCONST(rt->rt_nodes->rn_key); /*XXXUNCONST*/
834 old = NULL;
835 }
836 Bcopy(gate, (rt->rt_gateway = (struct sockaddr *)(new + dlen)), glen);
837 if (old) {
838 Bcopy(dst, new, dlen);
839 Free(old);
840 }
841 if (rt->rt_gwroute) {
842 RTFREE(rt->rt_gwroute);
843 rt->rt_gwroute = NULL;
844 }
845 if (rt->rt_flags & RTF_GATEWAY) {
846 rt->rt_gwroute = rtalloc1(gate, 1);
847 /*
848 * If we switched gateways, grab the MTU from the new
849 * gateway route if the current MTU is greater than the
850 * MTU of the gateway.
851 * Note that, if the MTU of gateway is 0, we will reset the
852 * MTU of the route to run PMTUD again from scratch. XXX
853 */
854 if (rt->rt_gwroute
855 && !(rt->rt_rmx.rmx_locks & RTV_MTU)
856 && rt->rt_rmx.rmx_mtu
857 && rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
858 rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
859 }
860 }
861 return 0;
862 }
863
/*
 * Copy `src' into `dst' masking each byte with `netmask': bytes past
 * the mask's length are zeroed, and the copy never exceeds src's
 * sa_len.  The first two bytes (sa_len/sa_family) copy unmasked.
 */
864 void
865 rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
866 const struct sockaddr *netmask)
867 {
868 const u_char *cp1 = (const u_char *)src;
869 u_char *cp2 = (u_char *)dst;
870 const u_char *cp3 = (const u_char *)netmask;
871 u_char *cplim = cp2 + *cp3;
872 u_char *cplim2 = cp2 + *cp1;
873 
874 *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
875 cp3 += 2;
876 if (cplim > cplim2)
877 cplim = cplim2;
878 while (cp2 < cplim)
879 *cp2++ = *cp1++ & *cp3++;
880 if (cp2 < cplim2)
881 memset(cp2, 0, (unsigned)(cplim2 - cp2));
882 }
883
884 /*
885 * Set up or tear down a routing table entry, normally
886 * for an interface.
887 */
/*
 * Add or delete (`cmd') the routing table entry for interface address
 * `ifa': a host route when RTF_HOST is set, otherwise the subnet route
 * implied by ifa_netmask.  Announces the change with rt_newaddrmsg()
 * and, on ADD, repairs rt_ifa if another ifaddr got attached.
 */
888 int
889 rtinit(struct ifaddr *ifa, int cmd, int flags)
890 {
891 struct rtentry *rt;
892 struct sockaddr *dst, *odst;
893 struct sockaddr_storage deldst;
894 struct rtentry *nrt = NULL;
895 int error;
896 struct rt_addrinfo info;
897 
898 dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
899 if (cmd == RTM_DELETE) {
900 if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
901 /* Delete subnet route for this interface */
902 odst = dst;
903 dst = (struct sockaddr *)&deldst;
904 rt_maskedcopy(odst, dst, ifa->ifa_netmask);
905 }
/* Refuse to delete a route that belongs to another ifaddr. */
906 if ((rt = rtalloc1(dst, 0)) != NULL) {
907 rt->rt_refcnt--;
908 if (rt->rt_ifa != ifa)
909 return (flags & RTF_HOST) ? EHOSTUNREACH
910 : ENETUNREACH;
911 }
912 }
913 memset(&info, 0, sizeof(info));
914 info.rti_ifa = ifa;
915 info.rti_flags = flags | ifa->ifa_flags;
916 info.rti_info[RTAX_DST] = dst;
917 info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
918 /*
919 * XXX here, it seems that we are assuming that ifa_netmask is NULL
920 * for RTF_HOST. bsdi4 passes NULL explicitly (via intermediate
921 * variable) when RTF_HOST is 1. still not sure if i can safely
922 * change it to meet bsdi4 behavior.
923 */
924 info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
925 error = rtrequest1(cmd, &info, &nrt);
926 if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
927 rt_newaddrmsg(cmd, ifa, error, nrt);
928 if (rt->rt_refcnt <= 0) {
929 rt->rt_refcnt++;
930 rtfree(rt);
931 }
932 }
933 if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
934 rt->rt_refcnt--;
935 if (rt->rt_ifa != ifa) {
/* Another ifaddr got attached during the add: rewire to ours. */
936 printf("rtinit: wrong ifa (%p) was (%p)\n", ifa,
937 rt->rt_ifa);
938 if (rt->rt_ifa->ifa_rtrequest)
939 rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
940 rt_replace_ifa(rt, ifa);
941 rt->rt_ifp = ifa->ifa_ifp;
942 if (ifa->ifa_rtrequest)
943 ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
944 }
945 rt_newaddrmsg(cmd, ifa, error, nrt);
946 }
947 return error;
948 }
949
950 /*
951 * Route timer routines. These routes allow functions to be called
952 * for various routes at any time. This is useful in supporting
953 * path MTU discovery and redirect route deletion.
954 *
955 * This is similar to some BSDI internal functions, but it provides
956 * for multiple queues for efficiency's sake...
957 */
958
/* All active rttimer queues, scanned by rt_timer_timer(). */
959 LIST_HEAD(, rttimer_queue) rttimer_queue_head;
960 static int rt_init_done = 0;
961 
/*
 * Fire a route timer: invoke its callback if it has one; the default
 * action (NULL callback) deletes the route the timer is attached to.
 */
962 #define RTTIMER_CALLOUT(r) do { \
963 if (r->rtt_func != NULL) { \
964 (*r->rtt_func)(r->rtt_rt, r); \
965 } else { \
966 rtrequest((int) RTM_DELETE, \
967 (struct sockaddr *)rt_key(r->rtt_rt), \
968 0, 0, 0, 0); \
969 } \
970 } while (/*CONSTCOND*/0)
971
972 /*
973 * Some subtle order problems with domain initialization mean that
974 * we cannot count on this being run from rt_init before various
975 * protocol initializations are done. Therefore, we make sure
976 * that this is run when the first queue is added...
977 */
978
/*
 * Lazily initialize the route-timer machinery (queue list and the
 * periodic callout).  Runs once, when the first queue is created --
 * see the ordering comment above; asserts against a second call.
 */
979 void
980 rt_timer_init(void)
981 {
982 assert(rt_init_done == 0);
983 
984 LIST_INIT(&rttimer_queue_head);
985 callout_init(&rt_timer_ch);
986 callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
987 rt_init_done = 1;
988 }
989
/*
 * Allocate a timer queue whose entries fire `timeout' seconds after
 * being added, initializing the whole timer subsystem on first use.
 * Returns the new queue, or NULL if allocation fails.
 */
990 struct rttimer_queue *
991 rt_timer_queue_create(u_int timeout)
992 {
993 struct rttimer_queue *rtq;
994 
995 if (rt_init_done == 0)
996 rt_timer_init();
997 
998 R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
999 if (rtq == NULL)
1000 return NULL;
1001 Bzero(rtq, sizeof *rtq);
1002 
1003 rtq->rtq_timeout = timeout;
1004 rtq->rtq_count = 0;
1005 TAILQ_INIT(&rtq->rtq_head);
1006 LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);
1007 
1008 return rtq;
1009 }
1010
/*
 * Change the timeout (seconds) of a timer queue; existing entries are
 * measured against the new timeout on the next rt_timer_timer() tick.
 */
1011 void
1012 rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
1013 {
1014 
1015 rtq->rtq_timeout = timeout;
1016 }
1017
/*
 * Remove and free every timer on `rtq'; when `destroy' is set each
 * timer's action is fired (RTTIMER_CALLOUT) before it is freed.
 */
1018 void
1019 rt_timer_queue_remove_all(struct rttimer_queue *rtq, int destroy)
1020 {
1021 struct rttimer *r;
1022 
1023 while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
1024 LIST_REMOVE(r, rtt_link);
1025 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
1026 if (destroy)
1027 RTTIMER_CALLOUT(r);
1028 /* we are already at splsoftnet */
1029 pool_put(&rttimer_pool, r);
1030 if (rtq->rtq_count > 0)
1031 rtq->rtq_count--;
1032 else
1033 printf("rt_timer_queue_remove_all: "
1034 "rtq_count reached 0\n");
1035 }
1036 }
1037
/*
 * Drain `rtq' (firing actions when `destroy' is set) and unlink it
 * from the global queue list.  The structure itself is not freed here.
 */
1038 void
1039 rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy)
1040 {
1041 
1042 rt_timer_queue_remove_all(rtq, destroy);
1043 
1044 LIST_REMOVE(rtq, rtq_link);
1045 
1046 /*
1047 * Caller is responsible for freeing the rttimer_queue structure.
1048 */
1049 }
1050
/* Return the number of timers currently queued on `rtq'. */
1051 unsigned long
1052 rt_timer_count(struct rttimer_queue *rtq)
1053 {
1054 return rtq->rtq_count;
1055 }
1056
/*
 * Detach and free every timer attached to route `rt'; when `destroy'
 * is set, fire each timer's action first.  Used by rtfree() when a
 * route is torn down.
 */
1057 void
1058 rt_timer_remove_all(struct rtentry *rt, int destroy)
1059 {
1060 struct rttimer *r;
1061 
1062 while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
1063 LIST_REMOVE(r, rtt_link);
1064 TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
1065 if (destroy)
1066 RTTIMER_CALLOUT(r);
1067 if (r->rtt_queue->rtq_count > 0)
1068 r->rtt_queue->rtq_count--;
1069 else
1070 printf("rt_timer_remove_all: rtq_count reached 0\n");
1071 /* we are already at splsoftnet */
1072 pool_put(&rttimer_pool, r);
1073 }
1074 }
1075
1076 int
1077 rt_timer_add(struct rtentry *rt,
1078 void (*func)(struct rtentry *, struct rttimer *),
1079 struct rttimer_queue *queue)
1080 {
1081 struct rttimer *r;
1082 int s;
1083
1084 /*
1085 * If there's already a timer with this action, destroy it before
1086 * we add a new one.
1087 */
1088 LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
1089 if (r->rtt_func == func)
1090 break;
1091 }
1092 if (r != NULL) {
1093 LIST_REMOVE(r, rtt_link);
1094 TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
1095 if (r->rtt_queue->rtq_count > 0)
1096 r->rtt_queue->rtq_count--;
1097 else
1098 printf("rt_timer_add: rtq_count reached 0\n");
1099 } else {
1100 s = splsoftnet();
1101 r = pool_get(&rttimer_pool, PR_NOWAIT);
1102 splx(s);
1103 if (r == NULL)
1104 return ENOBUFS;
1105 }
1106
1107 memset(r, 0, sizeof(*r));
1108
1109 r->rtt_rt = rt;
1110 r->rtt_time = time_uptime;
1111 r->rtt_func = func;
1112 r->rtt_queue = queue;
1113 LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
1114 TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
1115 r->rtt_queue->rtq_count++;
1116
1117 return (0);
1118 }
1119
1120 /* ARGSUSED */
1121 void
1122 rt_timer_timer(void *arg)
1123 {
1124 struct rttimer_queue *rtq;
1125 struct rttimer *r;
1126 int s;
1127
1128 s = splsoftnet();
1129 LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
1130 while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
1131 (r->rtt_time + rtq->rtq_timeout) < time_uptime) {
1132 LIST_REMOVE(r, rtt_link);
1133 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
1134 RTTIMER_CALLOUT(r);
1135 pool_put(&rttimer_pool, r);
1136 if (rtq->rtq_count > 0)
1137 rtq->rtq_count--;
1138 else
1139 printf("rt_timer_timer: rtq_count reached 0\n");
1140 }
1141 }
1142 splx(s);
1143
1144 callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
1145 }
1146
#ifdef RTCACHE_DEBUG
#ifndef RTCACHE_DEBUG_SIZE
#define RTCACHE_DEBUG_SIZE (1024 * 1024)
#endif
/*
 * Parallel arrays recording, for every registered cached route, the
 * initialising caller and the route itself; cache_cur is the number
 * of slots in use.  Used below to panic on re-initialisation and on
 * manipulated cache entries.
 */
static const char *cache_caller[RTCACHE_DEBUG_SIZE];
static struct route *cache_entry[RTCACHE_DEBUG_SIZE];
size_t cache_cur;
#endif
1155
/*
 * Resolve ro's destination via rtalloc1() ("flag" is passed straight
 * through; the rtcache_init* callers use 1, the *_noclone callers 0)
 * and register the result with rtcache().  A route with no destination
 * set is left untouched.  The RTCACHE_DEBUG variant also records the
 * caller and panics if ro is initialised twice.
 */
#ifdef RTCACHE_DEBUG
static void
_rtcache_init_debug(const char *caller, struct route *ro, int flag)
#else
static void
_rtcache_init(struct route *ro, int flag)
#endif
{
#ifdef RTCACHE_DEBUG
	size_t i;
	/* Reject double initialisation of the same struct route. */
	for (i = 0; i < cache_cur; ++i) {
		if (cache_entry[i] == ro)
			panic("Reinit of route %p, initialised from %s", ro, cache_caller[i]);
	}
#endif

	if (rtcache_getdst(ro) == NULL)
		return;
	ro->ro_rt = rtalloc1(rtcache_getdst(ro), flag);
	if (ro->ro_rt != NULL) {
#ifdef RTCACHE_DEBUG
		if (cache_cur == RTCACHE_DEBUG_SIZE)
			panic("Route cache debug overflow");
		cache_caller[cache_cur] = caller;
		cache_entry[cache_cur] = ro;
		++cache_cur;
#endif
		rtcache(ro);
	}
}
1186
#ifdef RTCACHE_DEBUG
/*
 * Debug wrappers: identical to the !RTCACHE_DEBUG versions but thread
 * the calling function's name through for the reinit/overflow checks.
 */
void
rtcache_init_debug(const char *caller, struct route *ro)
{
	_rtcache_init_debug(caller, ro, 1);
}

void
rtcache_init_noclone_debug(const char *caller, struct route *ro)
{
	_rtcache_init_debug(caller, ro, 0);
}

/* Drop any cached rtentry and re-resolve the (unchanged) destination. */
void
rtcache_update(struct route *ro, int clone)
{
	rtcache_clear(ro);
	_rtcache_init_debug(__func__, ro, clone);
}
#else
/* Resolve and cache ro's destination (rtalloc1 flag 1). */
void
rtcache_init(struct route *ro)
{
	_rtcache_init(ro, 1);
}

/* As rtcache_init(), but with rtalloc1 flag 0 (the "noclone" variant). */
void
rtcache_init_noclone(struct route *ro)
{
	_rtcache_init(ro, 0);
}

/* Drop any cached rtentry and re-resolve the (unchanged) destination. */
void
rtcache_update(struct route *ro, int clone)
{
	rtcache_clear(ro);
	_rtcache_init(ro, clone);
}
#endif
1226
/*
 * Duplicate old_ro into new_ro: copy the destination sockaddr and
 * share old_ro's rtentry, taking an extra reference on it.  Returns
 * silently if old_ro has no destination or the sockaddr copy fails
 * (new_ro is then left without a cached route).
 */
#ifdef RTCACHE_DEBUG
void
rtcache_copy_debug(const char *caller, struct route *new_ro, const struct route *old_ro)
#else
void
rtcache_copy(struct route *new_ro, const struct route *old_ro)
#endif
{
	/* XXX i doubt this DTRT any longer --dyoung */
#ifdef RTCACHE_DEBUG
	size_t i;

	/* The copy target must not already be registered. */
	for (i = 0; i < cache_cur; ++i) {
		if (cache_entry[i] == new_ro)
			panic("Copy to initalised route %p (before %s)", new_ro, cache_caller[i]);
	}
#endif

	if (rtcache_getdst(old_ro) == NULL ||
	    rtcache_setdst(new_ro, rtcache_getdst(old_ro)) != 0)
		return;
	new_ro->ro_rt = old_ro->ro_rt;
	if (new_ro->ro_rt != NULL) {
#ifdef RTCACHE_DEBUG
		if (cache_cur == RTCACHE_DEBUG_SIZE)
			panic("Route cache debug overflow");
		cache_caller[cache_cur] = caller;
		cache_entry[cache_cur] = new_ro;
		++cache_cur;
#endif
		rtcache(new_ro);
		/* The rtentry is now shared: bump its reference count. */
		++new_ro->ro_rt->rt_refcnt;
	}
}
1261
/*
 * Drop ro's cached rtentry (via rtflush()) and mark the cache empty.
 * The destination sockaddr is kept so a later lookup can reuse it;
 * rtcache_free() discards that too.  Under RTCACHE_DEBUG the entry is
 * also compacted out of the registration arrays.
 */
void
rtcache_clear(struct route *ro)
{
#ifdef RTCACHE_DEBUG
	/*
	 * Compact ro's slot out of the parallel debug arrays.
	 * NOTE(review): the "i = cache_cur" initialiser is dead — the
	 * for loop immediately overwrites i with 0.
	 */
	size_t j, i = cache_cur;
	for (i = j = 0; i < cache_cur; ++i, ++j) {
		if (cache_entry[i] == ro) {
			if (ro->ro_rt == NULL)
				panic("Route cache manipulated (allocated by %s)", cache_caller[i]);
			--j;
		} else {
			cache_caller[j] = cache_caller[i];
			cache_entry[j] = cache_entry[i];
		}
	}
	if (ro->ro_rt != NULL) {
		if (i != j + 1)
			panic("Wrong entries after rtcache_free: %zu (expected %zu)", j, i - 1);
		--cache_cur;
	}
#endif

	if (ro->ro_rt != NULL)
		rtflush(ro);
	ro->ro_rt = NULL;
}
1288
1289 struct rtentry *
1290 rtcache_lookup2(struct route *ro, const struct sockaddr *dst, int clone,
1291 int *hitp)
1292 {
1293 const struct sockaddr *odst;
1294
1295 odst = rtcache_getdst(ro);
1296
1297 if (odst == NULL)
1298 ;
1299 else if (sockaddr_cmp(odst, dst) != 0)
1300 rtcache_free(ro);
1301 else if (rtcache_down(ro))
1302 rtcache_clear(ro);
1303
1304 if (ro->ro_rt == NULL) {
1305 *hitp = 0;
1306 rtcache_setdst(ro, dst);
1307 _rtcache_init(ro, clone);
1308 } else
1309 *hitp = 1;
1310
1311 return ro->ro_rt;
1312 }
1313
1314 void
1315 rtcache_free(struct route *ro)
1316 {
1317 rtcache_clear(ro);
1318 if (ro->ro_sa != NULL) {
1319 sockaddr_free(ro->ro_sa);
1320 ro->ro_sa = NULL;
1321 }
1322 }
1323
1324 int
1325 rtcache_setdst(struct route *ro, const struct sockaddr *sa)
1326 {
1327 KASSERT(sa != NULL);
1328
1329 if (ro->ro_sa != NULL && ro->ro_sa->sa_family == sa->sa_family) {
1330 rtcache_clear(ro);
1331 sockaddr_copy(ro->ro_sa, sa);
1332 return 0;
1333 } else if (ro->ro_sa != NULL)
1334 rtcache_free(ro); /* free ro_sa, wrong family */
1335
1336 if ((ro->ro_sa = sockaddr_dup(sa, PR_NOWAIT)) == NULL)
1337 return ENOMEM;
1338 return 0;
1339 }
1340
1341 static int
1342 rt_walktree_visitor(struct radix_node *rn, void *v)
1343 {
1344 struct rtwalk *rw = (struct rtwalk *)v;
1345
1346 return (*rw->rw_f)((struct rtentry *)rn, rw->rw_v);
1347 }
1348
1349 int
1350 rt_walktree(sa_family_t family, int (*f)(struct rtentry *, void *), void *v)
1351 {
1352 struct radix_node_head *rnh = rt_tables[family];
1353 struct rtwalk rw;
1354
1355 if (rnh == NULL)
1356 return 0;
1357
1358 rw.rw_f = f;
1359 rw.rw_v = v;
1360
1361 return rn_walktree(rnh, rt_walktree_visitor, &rw);
1362 }
1363