route.c revision 1.97 1 /* $NetBSD: route.c,v 1.97 2007/08/30 02:22:29 dyoung Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. Neither the name of the project nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 */
68
69 /*
70 * Copyright (c) 1980, 1986, 1991, 1993
71 * The Regents of the University of California. All rights reserved.
72 *
73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions
75 * are met:
76 * 1. Redistributions of source code must retain the above copyright
77 * notice, this list of conditions and the following disclaimer.
78 * 2. Redistributions in binary form must reproduce the above copyright
79 * notice, this list of conditions and the following disclaimer in the
80 * documentation and/or other materials provided with the distribution.
81 * 3. Neither the name of the University nor the names of its contributors
82 * may be used to endorse or promote products derived from this software
83 * without specific prior written permission.
84 *
85 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
86 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
89 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
90 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
91 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
92 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
93 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
94 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
95 * SUCH DAMAGE.
96 *
97 * @(#)route.c 8.3 (Berkeley) 1/9/95
98 */
99
100 #include "opt_route.h"
101
102 #include <sys/cdefs.h>
103 __KERNEL_RCSID(0, "$NetBSD: route.c,v 1.97 2007/08/30 02:22:29 dyoung Exp $");
104
105 #include <sys/param.h>
106 #include <sys/sysctl.h>
107 #include <sys/systm.h>
108 #include <sys/callout.h>
109 #include <sys/proc.h>
110 #include <sys/mbuf.h>
111 #include <sys/socket.h>
112 #include <sys/socketvar.h>
113 #include <sys/domain.h>
114 #include <sys/protosw.h>
115 #include <sys/kernel.h>
116 #include <sys/ioctl.h>
117 #include <sys/pool.h>
118
119 #include <net/if.h>
120 #include <net/route.h>
121 #include <net/raw_cb.h>
122
123 #include <netinet/in.h>
124 #include <netinet/in_var.h>
125
/*
 * rtcache_debug() reads the runtime-settable _rtcache_debug flag when
 * the kernel is built with RTFLUSH_DEBUG; otherwise it is constant 0
 * so the debug printfs compile away.
 */
#ifdef RTFLUSH_DEBUG
#define rtcache_debug() __predict_false(_rtcache_debug)
#else /* RTFLUSH_DEBUG */
#define rtcache_debug() 0
#endif /* RTFLUSH_DEBUG */

struct route_cb route_cb;
struct rtstat rtstat;
/* One radix tree per address family, indexed by sa_family. */
struct radix_node_head *rt_tables[AF_MAX+1];

int rttrash; /* routes not in table but not freed */
struct sockaddr wildcard; /* zero valued cookie for wildcard searches */

/* Pools for rtentry and rttimer allocations; protected at IPL_SOFTNET. */
POOL_INIT(rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl", NULL,
    IPL_SOFTNET);
POOL_INIT(rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl", NULL,
    IPL_SOFTNET);

struct callout rt_timer_ch; /* callout for rt_timer_timer() */

#ifdef RTFLUSH_DEBUG
static int _rtcache_debug = 0;
#endif /* RTFLUSH_DEBUG */

static int rtdeletemsg(struct rtentry *);
static int rtflushclone1(struct rtentry *, void *);
static void rtflushclone(sa_family_t family, struct rtentry *);
153
#ifdef RTFLUSH_DEBUG
/*
 * Create the sysctl subtree net.rtcache.debug so that _rtcache_debug
 * can be toggled at runtime.
 */
SYSCTL_SETUP(sysctl_net_rtcache_setup, "sysctl net.rtcache.debug setup")
{
	const struct sysctlnode *rnode;

	/* XXX do not duplicate */
	if (sysctl_createv(clog, 0, NULL, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "net", NULL, NULL, 0, NULL, 0, CTL_NET, CTL_EOL) != 0)
		return;
	if (sysctl_createv(clog, 0, &rnode, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE,
	    "rtcache", SYSCTL_DESCR("Route cache related settings"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
	if (sysctl_createv(clog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug route caches"),
	    NULL, 0, &_rtcache_debug, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
}
#endif /* RTFLUSH_DEBUG */
175
/*
 * Return the ifaddr bound to rt.  If the address family installed an
 * ifa_getifa hook, consult it for a (possibly different) preferred
 * ifaddr for the route's key and install that one on the route via
 * rt_replace_ifa() before returning it.
 */
struct ifaddr *
rt_get_ifa(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if ((ifa = rt->rt_ifa) == NULL)
		return ifa;
	else if (ifa->ifa_getifa == NULL)
		return ifa;
#if 0
	else if (ifa->ifa_seqno != NULL && *ifa->ifa_seqno == rt->rt_ifa_seqno)
		return ifa;
#endif
	else {
		/* Ask the hook for the ifaddr to use for this destination. */
		ifa = (*ifa->ifa_getifa)(ifa, rt_getkey(rt));
		rt_replace_ifa(rt, ifa);
		return ifa;
	}
}
195
196 static void
197 rt_set_ifa1(struct rtentry *rt, struct ifaddr *ifa)
198 {
199 rt->rt_ifa = ifa;
200 if (ifa->ifa_seqno != NULL)
201 rt->rt_ifa_seqno = *ifa->ifa_seqno;
202 }
203
/*
 * Replace rt's interface address with ifa.  The new address is
 * referenced before the old one is released, which keeps the
 * reference count correct even if ifa is the address already
 * attached to the route.
 */
void
rt_replace_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	IFAREF(ifa);
	IFAFREE(rt->rt_ifa);
	rt_set_ifa1(rt, ifa);
}
211
/*
 * Set rt's interface address for the first time: take a reference
 * on ifa and install it.  Unlike rt_replace_ifa() no previous
 * address is released.
 */
static void
rt_set_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	IFAREF(ifa);
	rt_set_ifa1(rt, ifa);
}
218
219 void
220 rtable_init(void **table)
221 {
222 struct domain *dom;
223 DOMAIN_FOREACH(dom)
224 if (dom->dom_rtattach)
225 dom->dom_rtattach(&table[dom->dom_family],
226 dom->dom_rtoffset);
227 }
228
/*
 * One-time initialization of the routing subsystem: set up the
 * radix-trie machinery, then build the per-family routing tables.
 */
void
route_init(void)
{

	rn_init();	/* initialize all zeroes, all ones, mask table */
	rtable_init((void **)rt_tables);
}
236
/*
 * Invalidate every cached route (struct route) registered for the
 * given address family.  Called when the routing table changes so
 * cached lookups are redone.
 */
void
rtflushall(int family)
{
	int s;
	struct domain *dom;
	struct route *ro;

	if (rtcache_debug())
		printf("%s: enter\n", __func__);

	if ((dom = pffinddomain(family)) == NULL)
		return;

	s = splnet();
	/* rtcache_clear() unlinks ro from dom_rtcache, so this terminates. */
	while ((ro = LIST_FIRST(&dom->dom_rtcache)) != NULL) {
		KASSERT(ro->ro_rt != NULL);
		rtcache_clear(ro);
	}
	splx(s);
}
257
/*
 * Drop the rtentry held by a cached route and remove the cache entry
 * from its domain's rtcache list.  The route must currently hold a
 * rtentry and a destination.
 */
void
rtflush(struct route *ro)
{
	int s = splnet();
	KASSERT(ro->ro_rt != NULL);
	KASSERT(rtcache_getdst(ro) != NULL);

	RTFREE(ro->ro_rt);
	ro->ro_rt = NULL;

	LIST_REMOVE(ro, ro_rtcache_next);
	splx(s);

#if 0
	if (rtcache_debug()) {
		printf("%s: flushing %s\n", __func__,
		    inet_ntoa((satocsin(rtcache_getdst(ro)))->sin_addr));
	}
#endif
}
278
/*
 * Register a filled-in cached route on its domain's rtcache list so
 * rtflushall() can later invalidate it.  The route must already hold
 * a rtentry and a destination.
 */
void
rtcache(struct route *ro)
{
	int s;
	struct domain *dom;

	KASSERT(ro->ro_rt != NULL);
	KASSERT(rtcache_getdst(ro) != NULL);

	if ((dom = pffinddomain(rtcache_getdst(ro)->sa_family)) == NULL)
		return;

	s = splnet();
	LIST_INSERT_HEAD(&dom->dom_rtcache, ro, ro_rtcache_next);
	splx(s);
}
295
296 /*
297 * Packet routing routines.
298 */
/*
 * Ensure ro holds a usable route to its destination: keep a cached
 * rtentry that is still up, otherwise flush it, look the destination
 * up again via rtalloc1(), and re-register the cache entry.
 */
void
rtalloc(struct route *ro)
{
	if (ro->ro_rt != NULL) {
		if (ro->ro_rt->rt_ifp != NULL &&
		    (ro->ro_rt->rt_flags & RTF_UP) != 0)
			return;	/* cached route still valid */
		rtflush(ro);
	}
	if (rtcache_getdst(ro) == NULL ||
	    (ro->ro_rt = rtalloc1(rtcache_getdst(ro), 1)) == NULL)
		return;
	rtcache(ro);
}
313
/*
 * Look up dst in its family's routing table and return a referenced
 * rtentry, or NULL on a miss.  If the match is an RTF_CLONING route
 * and report is set, clone a host route from it via RTM_RESOLVE.
 * Misses and resolve failures generate an RTM_MISS/RTM_RESOLVE
 * routing-socket message when report is set.
 */
struct rtentry *
rtalloc1(const struct sockaddr *dst, int report)
{
	struct radix_node_head *rnh = rt_tables[dst->sa_family];
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt = NULL;
	struct rt_addrinfo info;
	int s = splsoftnet(), err = 0, msgtype = RTM_MISS;

	if (rnh && (rn = rnh->rnh_matchaddr(dst, rnh)) &&
	    ((rn->rn_flags & RNF_ROOT) == 0)) {
		newrt = rt = (struct rtentry *)rn;
		if (report && (rt->rt_flags & RTF_CLONING)) {
			/* Clone a host route from the cloning parent. */
			err = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
			    &newrt);
			if (err) {
				/* Clone failed: return the parent instead. */
				newrt = rt;
				rt->rt_refcnt++;
				goto miss;
			}
			KASSERT(newrt != NULL);
			if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
				/* External resolver must complete the route. */
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route */
			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = rt_getkey(rt);
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if (rt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] =
				    TAILQ_FIRST(&rt->rt_ifp->if_addrlist)->ifa_addr;
				info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, rt->rt_flags, 0);
		} else
			rt->rt_refcnt++;
	} else {
		rtstat.rts_unreach++;
	miss:	if (report) {
			memset((void *)&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, err);
		}
	}
	splx(s);
	return newrt;
}
364
/*
 * Drop a reference to rt.  When the reference count reaches zero and
 * the route is no longer in the table (!RTF_UP), release its timers,
 * its ifaddr reference, and its storage.
 */
void
rtfree(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if (rt == NULL)
		panic("rtfree");
	rt->rt_refcnt--;
	if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
		if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic ("rtfree 2");
		rttrash--;
		if (rt->rt_refcnt < 0) {
			/* Refcount bug somewhere; leak rather than free. */
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}
		rt_timer_remove_all(rt, 0);
		ifa = rt->rt_ifa;
		rt->rt_ifa = NULL;
		IFAFREE(ifa);
		rt->rt_ifp = NULL;
		rt_destroy(rt);
		pool_put(&rtentry_pool, rt);
	}
}
390
/*
 * Free an interface address whose reference count has dropped to
 * zero.  Called from IFAFREE(); with DIAGNOSTIC the preconditions
 * (non-NULL, refcount exactly 0) are asserted.
 */
void
ifafree(struct ifaddr *ifa)
{

#ifdef DIAGNOSTIC
	if (ifa == NULL)
		panic("ifafree: null ifa");
	if (ifa->ifa_refcnt != 0)
		panic("ifafree: ifa_refcnt != 0 (%d)", ifa->ifa_refcnt);
#endif
#ifdef IFAREF_DEBUG
	printf("ifafree: freeing ifaddr %p\n", ifa);
#endif
	free(ifa, M_IFADDR);
}
406
/* True (non-zero) when the two sockaddrs compare equal. */
static inline int
equal(const struct sockaddr *sa1, const struct sockaddr *sa2)
{
	return (sockaddr_cmp(sa1, sa2) == 0) ? 1 : 0;
}
412
413 /*
414 * Force a routing table entry to the specified
415 * destination to go through the given gateway.
416 * Normally called as a result of a routing redirect
417 * message from the network layer.
418 *
419 * N.B.: must be called at splsoftnet
420 */
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * On success, either creates a new RTF_DYNAMIC host route or rewrites
 * the gateway of the existing route (RTF_MODIFIED).  A copy of the
 * accepted/rejected route is handed back through *rtp when requested,
 * and an RTM_REDIRECT message is always emitted at the end.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, const struct sockaddr *src,
	struct rtentry **rtp)
{
	struct rtentry *rt;
	int error = 0;
	u_quad_t *stat = NULL;		/* statistic counter to bump on success */
	struct rt_addrinfo info;
	struct ifaddr *ifa;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	rt = rtalloc1(dst, 0);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
	if (!(flags & RTF_DONE) && rt &&
	    (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway))
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if (rt == NULL || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
		create:
			if (rt != NULL)
				rtfree(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, &rt);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			stat = &rtstat.rts_newgateway;
			rt_setgate(rt, gateway);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		/* Hand the route to the caller or drop our reference. */
		if (rtp != NULL && !error)
			*rtp = rt;
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	/* Always tell routing-socket listeners what happened. */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, error);
}
514
515 /*
516 * Delete a route and generate a message
517 */
/*
 * Delete a route and generate a message
 *
 * Returns the rtrequest1(RTM_DELETE) error code; on success the
 * entry is freed here once its reference count allows it.
 */
static int
rtdeletemsg(struct rtentry *rt)
{
	int error;
	struct rt_addrinfo info;

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_getkey(rt);
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	error = rtrequest1(RTM_DELETE, &info, &rt);

	rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);

	/* Adjust the refcount */
	if (error == 0 && rt->rt_refcnt <= 0) {
		/* Bump to 1 so rtfree() drops it back to 0 and frees. */
		rt->rt_refcnt++;
		rtfree(rt);
	}
	return error;
}
545
546 static int
547 rtflushclone1(struct rtentry *rt, void *arg)
548 {
549 struct rtentry *parent;
550
551 parent = (struct rtentry *)arg;
552 if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
553 rtdeletemsg(rt);
554 return 0;
555 }
556
/*
 * Delete every route that was cloned from parent, by walking the
 * family's whole tree with rtflushclone1().  parent must be an
 * RTF_CLONING route (asserted under DIAGNOSTIC).
 */
static void
rtflushclone(sa_family_t family, struct rtentry *parent)
{

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
#endif
	rt_walktree(family, rtflushclone1, (void *)parent);
}
567
568 /*
569 * Routing table ioctl interface.
570 */
/*
 * Routing table ioctl interface.
 * No ioctls are supported; routing changes go through the routing
 * socket instead.
 */
int
rtioctl(u_long req, void *data, struct lwp *l)
{
	return EOPNOTSUPP;
}
576
577 struct ifaddr *
578 ifa_ifwithroute(int flags, const struct sockaddr *dst,
579 const struct sockaddr *gateway)
580 {
581 struct ifaddr *ifa;
582 if ((flags & RTF_GATEWAY) == 0) {
583 /*
584 * If we are adding a route to an interface,
585 * and the interface is a pt to pt link
586 * we should search for the destination
587 * as our clue to the interface. Otherwise
588 * we can use the local address.
589 */
590 ifa = NULL;
591 if (flags & RTF_HOST)
592 ifa = ifa_ifwithdstaddr(dst);
593 if (ifa == NULL)
594 ifa = ifa_ifwithaddr(gateway);
595 } else {
596 /*
597 * If we are adding a route to a remote net
598 * or host, the gateway may still be on the
599 * other end of a pt to pt link.
600 */
601 ifa = ifa_ifwithdstaddr(gateway);
602 }
603 if (ifa == NULL)
604 ifa = ifa_ifwithnet(gateway);
605 if (ifa == NULL) {
606 struct rtentry *rt = rtalloc1(dst, 0);
607 if (rt == NULL)
608 return NULL;
609 rt->rt_refcnt--;
610 if ((ifa = rt->rt_ifa) == NULL)
611 return NULL;
612 }
613 if (ifa->ifa_addr->sa_family != dst->sa_family) {
614 struct ifaddr *oifa = ifa;
615 ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
616 if (ifa == 0)
617 ifa = oifa;
618 }
619 return ifa;
620 }
621
/*
 * Round a byte count up to the next multiple of sizeof(long); 0 rounds
 * up to sizeof(long).  The argument is fully parenthesized so that
 * expression arguments (e.g. ternaries) expand correctly.
 */
#define ROUNDUP(a) ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
623
/*
 * Convenience wrapper around rtrequest1(): build a rt_addrinfo from
 * the discrete dst/gateway/netmask/flags arguments and forward the
 * request.  Returns the rtrequest1() error code.
 */
int
rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
{
	struct rt_addrinfo info;

	memset(&info, 0, sizeof(info));
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	return rtrequest1(req, &info, ret_nrt);
}
637
/*
 * Fill in info->rti_ifa (and rti_ifp when missing) from the addresses
 * supplied in info.  Returns 0 on success or ENETUNREACH when no
 * interface address can be found.
 */
int
rt_getifa(struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
	const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];
	int flags = info->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl when protocol address
	 * is ambiguous
	 */
	if (info->rti_ifp == NULL && ifpaddr != NULL
	    && ifpaddr->sa_family == AF_LINK &&
	    (ifa = ifa_ifwithnet((const struct sockaddr *)ifpaddr)) != NULL)
		info->rti_ifp = ifa->ifa_ifp;
	if (info->rti_ifa == NULL && ifaaddr != NULL)
		info->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (info->rti_ifa == NULL) {
		const struct sockaddr *sa;

		/* Best available clue: IFA address, else gateway, else dst. */
		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && info->rti_ifp != NULL)
			info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		else if (sa != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, sa, sa);
	}
	if ((ifa = info->rti_ifa) == NULL)
		return ENETUNREACH;
	/* Let the family's hook substitute a preferred source address. */
	if (ifa->ifa_getifa != NULL)
		info->rti_ifa = ifa = (*ifa->ifa_getifa)(ifa, dst);
	if (info->rti_ifp == NULL)
		info->rti_ifp = ifa->ifa_ifp;
	return 0;
}
678
/*
 * Core routing-table mutation routine: add (RTM_ADD), delete
 * (RTM_DELETE), clone (RTM_RESOLVE) or look up (RTM_GET) an entry
 * described by info.  On success and when ret_nrt is non-NULL, a
 * referenced rtentry is returned through it.  Runs at splsoftnet.
 */
int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
{
	int s = splsoftnet();
	int error = 0;
	struct rtentry *rt, *crt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr_storage maskeddst;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
	int flags = info->rti_flags;
#define senderr(x) { error = x ; goto bad; }

	if ((rnh = rt_tables[dst->sa_family]) == NULL)
		senderr(ESRCH);
	/* A host route never carries a netmask. */
	if (flags & RTF_HOST)
		netmask = NULL;
	switch (req) {
	case RTM_DELETE:
		if (netmask) {
			/* Look up by the masked destination. */
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		if ((rn = rnh->rnh_lookup(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(dst->sa_family, rt);
		}
		if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic ("rtrequest delete");
		rt = (struct rtentry *)rn;
		/* Detach gateway route and clone parent references. */
		if (rt->rt_gwroute) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}
		if (rt->rt_parent) {
			rt->rt_parent->rt_refcnt--;
			rt->rt_parent = NULL;
		}
		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);
		rttrash++;
		/* Hand the dead entry to the caller or free it now. */
		if (ret_nrt)
			*ret_nrt = rt;
		else if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		/* Clone a host route from the RTF_CLONING route *ret_nrt. */
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			senderr(EINVAL);
		ifa = rt->rt_ifa;
		flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
		flags |= RTF_CLONED;
		gateway = rt->rt_gateway;
		flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if (info->rti_ifa == NULL && (error = rt_getifa(info)))
			senderr(error);
		ifa = info->rti_ifa;
	makeroute:
		/* Already at splsoftnet() so pool_get/pool_put are safe */
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		Bzero(rt, sizeof(*rt));
		rt->rt_flags = RTF_UP | flags;
		LIST_INIT(&rt->rt_timer);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__, __LINE__,
		    (void *)rt->_rt_key);
		if (rt_setkey(rt, dst, M_NOWAIT) == NULL ||
		    rt_setgate(rt, gateway) != 0) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__, __LINE__,
		    (void *)rt->_rt_key);
		/* Re-set the key to the (possibly masked) destination. */
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			rt_setkey(rt, (struct sockaddr *)&maskeddst, M_NOWAIT);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		} else {
			rt_setkey(rt, dst, M_NOWAIT);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		}
		rt_set_ifa(rt, ifa);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rt->rt_ifp = ifa->ifa_ifp;
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_parent = *ret_nrt;
			rt->rt_parent->rt_refcnt++;
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rn = rnh->rnh_addaddr(rt_getkey(rt), netmask, rnh,
		    rt->rt_nodes);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (rn == NULL && (crt = rtalloc1(rt_getkey(rt), 0)) != NULL) {
			/* overwrite cloned route */
			if ((crt->rt_flags & RTF_CLONED) != 0) {
				rtdeletemsg(crt);
				rn = rnh->rnh_addaddr(rt_getkey(rt),
				    netmask, rnh, rt->rt_nodes);
			}
			RTFREE(crt);
			RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
			    __LINE__, (void *)rt->_rt_key);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (rn == NULL) {
			/* Insertion failed (duplicate); undo everything. */
			IFAFREE(ifa);
			if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
				rtfree(rt->rt_parent);
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			rt_destroy(rt);
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(dst->sa_family, rt);
		}
		/* Table changed: invalidate all cached routes of the family. */
		rtflushall(dst->sa_family);
		break;
	case RTM_GET:
		if (netmask != NULL) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		rn = rnh->rnh_lookup(dst, netmask, rnh);
		if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0)
			senderr(ESRCH);
		if (ret_nrt != NULL) {
			rt = (struct rtentry *)rn;
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		break;
	}
bad:
	splx(s);
	return error;
}
856
/*
 * Set (or replace) the gateway of rt to a private copy of gate.
 * For RTF_GATEWAY routes, also resolve and cache the route to the
 * gateway itself in rt_gwroute.  Returns 0 or ENOMEM.
 */
int
rt_setgate(struct rtentry *rt, const struct sockaddr *gate)
{
	KASSERT(rt != rt->rt_gwroute);

	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);

	/* Drop any previously cached route to the old gateway. */
	if (rt->rt_gwroute) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	if (rt->rt_gateway != NULL)
		sockaddr_free(rt->rt_gateway);
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	if ((rt->rt_gateway = sockaddr_dup(gate, M_NOWAIT)) == NULL)
		return ENOMEM;
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);

	if (rt->rt_flags & RTF_GATEWAY) {
		KASSERT(rt->_rt_key != NULL);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		rt->rt_gwroute = rtalloc1(gate, 1);
		/*
		 * If we switched gateways, grab the MTU from the new
		 * gateway route if the current MTU is greater than the
		 * MTU of the gateway.
		 * Note that, if the MTU of gateway is 0, we will reset the
		 * MTU of the route to run PMTUD again from scratch. XXX
		 */
		KASSERT(rt->_rt_key != NULL);
		RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
		    __LINE__, (void *)rt->_rt_key);
		if (rt->rt_gwroute
		    && !(rt->rt_rmx.rmx_locks & RTV_MTU)
		    && rt->rt_rmx.rmx_mtu
		    && rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
			rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
		}
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("%s l.%d: rt->_rt_key = %p\n", __func__,
	    __LINE__, (void *)rt->_rt_key);
	return 0;
}
911
/*
 * Copy src into dst with each sa_data byte ANDed against the
 * corresponding netmask byte; bytes of dst beyond the shorter of
 * netmask/src are zeroed.  sa_len and sa_family are copied verbatim.
 */
void
rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
	const struct sockaddr *netmask)
{
	const char *netmaskp = &netmask->sa_data[0],
	    *srcp = &src->sa_data[0];
	char *dstp = &dst->sa_data[0];
	/* Bounds are computed from dstp, which tracks srcp in lockstep. */
	const char *maskend = dstp + MIN(netmask->sa_len, src->sa_len);
	const char *srcend = dstp + src->sa_len;

	dst->sa_len = src->sa_len;
	dst->sa_family = src->sa_family;

	while (dstp < maskend)
		*dstp++ = *srcp++ & *netmaskp++;
	if (dstp < srcend)
		memset(dstp, 0, (size_t)(srcend - dstp));
}
930
931 /*
932 * Set up or tear down a routing table entry, normally
933 * for an interface.
934 */
/*
 * Set up or tear down a routing table entry, normally
 * for an interface.
 *
 * cmd is RTM_ADD or RTM_DELETE; flags may include RTF_HOST to operate
 * on the destination address instead of the (masked) local address.
 * Emits an RTM_NEWADDR-style message via rt_newaddrmsg() on success.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct rtentry *rt;
	struct sockaddr *dst, *odst;
	struct sockaddr_storage maskeddst;
	struct rtentry *nrt = NULL;
	int error;
	struct rt_addrinfo info;

	dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
	if (cmd == RTM_DELETE) {
		if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
			/* Delete subnet route for this interface */
			odst = dst;
			dst = (struct sockaddr *)&maskeddst;
			rt_maskedcopy(odst, dst, ifa->ifa_netmask);
		}
		/* Refuse to delete a route that belongs to another ifaddr. */
		if ((rt = rtalloc1(dst, 0)) != NULL) {
			rt->rt_refcnt--;
			if (rt->rt_ifa != ifa)
				return (flags & RTF_HOST) ? EHOSTUNREACH
				    : ENETUNREACH;
		}
	}
	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags | ifa->ifa_flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	/*
	 * XXX here, it seems that we are assuming that ifa_netmask is NULL
	 * for RTF_HOST.  bsdi4 passes NULL explicitly (via intermediate
	 * variable) when RTF_HOST is 1.  still not sure if i can safely
	 * change it to meet bsdi4 behavior.
	 */
	info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
	error = rtrequest1(cmd, &info, &nrt);
	if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
		rt_newaddrmsg(cmd, ifa, error, nrt);
		if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
	}
	if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
		rt->rt_refcnt--;
		if (rt->rt_ifa != ifa) {
			/* The new route got someone else's ifaddr; fix it. */
			printf("rtinit: wrong ifa (%p) was (%p)\n", ifa,
			    rt->rt_ifa);
			if (rt->rt_ifa->ifa_rtrequest)
				rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
			rt_replace_ifa(rt, ifa);
			rt->rt_ifp = ifa->ifa_ifp;
			if (ifa->ifa_rtrequest)
				ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
		}
		rt_newaddrmsg(cmd, ifa, error, nrt);
	}
	return error;
}
996
997 /*
998 * Route timer routines. These routes allow functions to be called
999 * for various routes at any time. This is useful in supporting
1000 * path MTU discovery and redirect route deletion.
1001 *
1002 * This is similar to some BSDI internal functions, but it provides
1003 * for multiple queues for efficiency's sake...
1004 */
1005
/* All active rttimer queues, for the periodic rt_timer_timer() scan. */
LIST_HEAD(, rttimer_queue) rttimer_queue_head;
static int rt_init_done = 0;	/* set once rt_timer_init() has run */

/*
 * Fire a route timer: invoke its callback when one was supplied,
 * otherwise delete the route it is attached to.
 */
#define RTTIMER_CALLOUT(r) do { \
		if (r->rtt_func != NULL) { \
			(*r->rtt_func)(r->rtt_rt, r); \
		} else { \
			rtrequest((int) RTM_DELETE, \
				  rt_getkey(r->rtt_rt), \
				  0, 0, 0, 0); \
		} \
	} while (/*CONSTCOND*/0)
1018
1019 /*
1020 * Some subtle order problems with domain initialization mean that
1021 * we cannot count on this being run from rt_init before various
1022 * protocol initializations are done. Therefore, we make sure
1023 * that this is run when the first queue is added...
1024 */
1025
/*
 * Initialize the route-timer machinery: the queue list and the
 * periodic (once per second) rt_timer_timer() callout.  Must run
 * exactly once; asserted via rt_init_done.
 */
void
rt_timer_init(void)
{
	assert(rt_init_done == 0);

	LIST_INIT(&rttimer_queue_head);
	callout_init(&rt_timer_ch, 0);
	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
	rt_init_done = 1;
}
1036
/*
 * Allocate and register a new timer queue whose entries expire after
 * `timeout` seconds.  Lazily initializes the timer subsystem on first
 * use (see the ordering note above).  Returns NULL on allocation
 * failure; the caller owns the returned structure.
 */
struct rttimer_queue *
rt_timer_queue_create(u_int timeout)
{
	struct rttimer_queue *rtq;

	if (rt_init_done == 0)
		rt_timer_init();

	R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
	if (rtq == NULL)
		return NULL;
	Bzero(rtq, sizeof *rtq);

	rtq->rtq_timeout = timeout;
	rtq->rtq_count = 0;
	TAILQ_INIT(&rtq->rtq_head);
	LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);

	return rtq;
}
1057
/*
 * Change the expiry interval (seconds) of queue `rtq'.  Takes effect
 * for entries already queued as well, since rt_timer_timer() computes
 * expiry from rtq_timeout at scan time.
 */
void
rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
{

	rtq->rtq_timeout = timeout;
}
1064
/*
 * Unlink and free every timer on queue `rtq'.  If `destroy' is set,
 * each timer's expiry action is run (RTTIMER_CALLOUT) before the entry
 * is freed, as if it had timed out naturally.
 */
void
rt_timer_queue_remove_all(struct rttimer_queue *rtq, int destroy)
{
	struct rttimer *r;

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		/* Unlink from both the per-route list and this queue. */
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
		/* rtq_count should track the list exactly; warn on skew. */
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_remove_all: "
			    "rtq_count reached 0\n");
	}
}
1084
/*
 * Drain queue `rtq' (running expiry actions if `destroy' is set, see
 * rt_timer_queue_remove_all()) and unlink it from the global queue
 * list so the periodic scan no longer sees it.
 */
void
rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy)
{

	rt_timer_queue_remove_all(rtq, destroy);

	LIST_REMOVE(rtq, rtq_link);

	/*
	 * Caller is responsible for freeing the rttimer_queue structure.
	 */
}
1097
/*
 * Return the number of timers currently queued on `rtq'.
 */
unsigned long
rt_timer_count(struct rttimer_queue *rtq)
{
	return rtq->rtq_count;
}
1103
/*
 * Unlink and free every timer attached to route `rt', across all
 * queues.  If `destroy' is set, each timer's expiry action is run
 * before the entry is freed.
 */
void
rt_timer_remove_all(struct rtentry *rt, int destroy)
{
	struct rttimer *r;

	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		/* Unlink from the route's list and from the owning queue. */
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		/* Keep the owning queue's count consistent; warn on skew. */
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
	}
}
1122
/*
 * Arm a timer with action `func' on route `rt', queued on `queue'.
 * A route carries at most one timer per action: if a timer with the
 * same callback already exists it is unlinked and its memory recycled
 * instead of allocating a new entry.  The (re)armed timer is stamped
 * with the current uptime and appended to the tail of `queue', so each
 * queue stays sorted by arrival time (rt_timer_timer() relies on this).
 *
 * Returns 0 on success or ENOBUFS if a new rttimer could not be
 * allocated.
 */
int
rt_timer_add(struct rtentry *rt,
	void (*func)(struct rtentry *, struct rttimer *),
	struct rttimer_queue *queue)
{
	struct rttimer *r;
	int s;

	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
		if (r->rtt_func == func)
			break;
	}
	if (r != NULL) {
		/* Recycle the existing entry: unlink it everywhere. */
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_add: rtq_count reached 0\n");
	} else {
		/* pool_get must not race with the softnet-level users. */
		s = splsoftnet();
		r = pool_get(&rttimer_pool, PR_NOWAIT);
		splx(s);
		if (r == NULL)
			return ENOBUFS;
	}

	memset(r, 0, sizeof(*r));

	r->rtt_rt = rt;
	r->rtt_time = time_uptime;
	r->rtt_func = func;
	r->rtt_queue = queue;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	/* Tail insertion keeps the queue ordered by rtt_time. */
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	return 0;
}
1166
/* ARGSUSED */
/*
 * Periodic callout handler, re-armed every hz ticks.  Scans every
 * timer queue and fires (RTTIMER_CALLOUT) and frees each entry whose
 * timeout has passed.  Because rt_timer_add() appends at the tail and
 * all entries on a queue share rtq_timeout, each queue is in expiry
 * order and the scan can stop at the first unexpired entry.
 */
void
rt_timer_timer(void *arg)
{
	struct rttimer_queue *rtq;
	struct rttimer *r;
	int s;

	s = splsoftnet();
	LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < time_uptime) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			RTTIMER_CALLOUT(r);
			pool_put(&rttimer_pool, r);
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	splx(s);

	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
}
1193
1194 #ifdef RTCACHE_DEBUG
1195 #ifndef RTCACHE_DEBUG_SIZE
1196 #define RTCACHE_DEBUG_SIZE (1024 * 1024)
1197 #endif
1198 static const char *cache_caller[RTCACHE_DEBUG_SIZE];
1199 static struct route *cache_entry[RTCACHE_DEBUG_SIZE];
1200 size_t cache_cur;
1201 #endif
1202
/*
 * Look up the cached destination of `ro' in the routing table and
 * store the result in ro->ro_rt.  `flag' is passed to rtalloc1()
 * (1 from rtcache_init(), 0 from rtcache_init_noclone()).  A route
 * with no destination set is left uninitialized.  Under RTCACHE_DEBUG
 * the debug form also records the initializing caller and panics on
 * double initialization.
 */
#ifdef RTCACHE_DEBUG
static void
_rtcache_init_debug(const char *caller, struct route *ro, int flag)
#else
static void
_rtcache_init(struct route *ro, int flag)
#endif
{
#ifdef RTCACHE_DEBUG
	size_t i;
	/* A route must not be initialized twice; report who did it first. */
	for (i = 0; i < cache_cur; ++i) {
		if (cache_entry[i] == ro)
			panic("Reinit of route %p, initialised from %s", ro, cache_caller[i]);
	}
#endif

	if (rtcache_getdst(ro) == NULL)
		return;
	ro->ro_rt = rtalloc1(rtcache_getdst(ro), flag);
	if (ro->ro_rt != NULL) {
#ifdef RTCACHE_DEBUG
		if (cache_cur == RTCACHE_DEBUG_SIZE)
			panic("Route cache debug overflow");
		cache_caller[cache_cur] = caller;
		cache_entry[cache_cur] = ro;
		++cache_cur;
#endif
		rtcache(ro);
	}
}
1233
/*
 * Public wrappers around _rtcache_init():
 *   rtcache_init()          - initialize the cache, allowing cloning (flag 1)
 *   rtcache_init_noclone()  - initialize without cloning (flag 0)
 *   rtcache_update()        - drop any cached route and re-initialize,
 *                             with cloning controlled by `clone'
 * The RTCACHE_DEBUG variants additionally thread the caller's name
 * through for the double-init diagnostics.
 */
#ifdef RTCACHE_DEBUG
void
rtcache_init_debug(const char *caller, struct route *ro)
{
	_rtcache_init_debug(caller, ro, 1);
}

void
rtcache_init_noclone_debug(const char *caller, struct route *ro)
{
	_rtcache_init_debug(caller, ro, 0);
}

void
rtcache_update(struct route *ro, int clone)
{
	rtcache_clear(ro);
	_rtcache_init_debug(__func__, ro, clone);
}
#else
void
rtcache_init(struct route *ro)
{
	_rtcache_init(ro, 1);
}

void
rtcache_init_noclone(struct route *ro)
{
	_rtcache_init(ro, 0);
}

void
rtcache_update(struct route *ro, int clone)
{
	rtcache_clear(ro);
	_rtcache_init(ro, clone);
}
#endif
1273
/*
 * Copy route cache `old_ro' into `new_ro': duplicate the destination
 * sockaddr and share the cached rtentry, taking an extra reference on
 * it.  Does nothing if the source has no destination or the destination
 * copy fails.  `new_ro' is assumed to be uninitialized (the debug build
 * panics otherwise).
 */
#ifdef RTCACHE_DEBUG
void
rtcache_copy_debug(const char *caller, struct route *new_ro, const struct route *old_ro)
#else
void
rtcache_copy(struct route *new_ro, const struct route *old_ro)
#endif
{
	/* XXX i doubt this DTRT any longer --dyoung */
#ifdef RTCACHE_DEBUG
	size_t i;

	for (i = 0; i < cache_cur; ++i) {
		if (cache_entry[i] == new_ro)
			panic("Copy to initalised route %p (before %s)", new_ro, cache_caller[i]);
	}
#endif

	if (rtcache_getdst(old_ro) == NULL ||
	    rtcache_setdst(new_ro, rtcache_getdst(old_ro)) != 0)
		return;
	new_ro->ro_rt = old_ro->ro_rt;
	if (new_ro->ro_rt != NULL) {
#ifdef RTCACHE_DEBUG
		if (cache_cur == RTCACHE_DEBUG_SIZE)
			panic("Route cache debug overflow");
		cache_caller[cache_cur] = caller;
		cache_entry[cache_cur] = new_ro;
		++cache_cur;
#endif
		rtcache(new_ro);
		/* Both caches now reference the same rtentry. */
		++new_ro->ro_rt->rt_refcnt;
	}
}
1308
/*
 * Drop the cached rtentry of `ro' (rtflush() releases it) and mark the
 * cache empty.  The destination sockaddr is left intact; use
 * rtcache_free() to release that too.  Under RTCACHE_DEBUG the debug
 * bookkeeping arrays are compacted to remove this route's entry.
 */
void
rtcache_clear(struct route *ro)
{
#ifdef RTCACHE_DEBUG
	size_t j, i = cache_cur;
	/* Compact the debug arrays, dropping entries that match `ro'. */
	for (i = j = 0; i < cache_cur; ++i, ++j) {
		if (cache_entry[i] == ro) {
			if (ro->ro_rt == NULL)
				panic("Route cache manipulated (allocated by %s)", cache_caller[i]);
			--j;
		} else {
			cache_caller[j] = cache_caller[i];
			cache_entry[j] = cache_entry[i];
		}
	}
	if (ro->ro_rt != NULL) {
		/* Exactly one entry should have been removed. */
		if (i != j + 1)
			panic("Wrong entries after rtcache_free: %zu (expected %zu)", j, i - 1);
		--cache_cur;
	}
#endif

	if (ro->ro_rt != NULL)
		rtflush(ro);
	ro->ro_rt = NULL;
}
1335
1336 struct rtentry *
1337 rtcache_lookup2(struct route *ro, const struct sockaddr *dst, int clone,
1338 int *hitp)
1339 {
1340 const struct sockaddr *odst;
1341
1342 odst = rtcache_getdst(ro);
1343
1344 if (odst == NULL)
1345 ;
1346 else if (sockaddr_cmp(odst, dst) != 0)
1347 rtcache_free(ro);
1348 else if (rtcache_down(ro))
1349 rtcache_clear(ro);
1350
1351 if (ro->ro_rt == NULL) {
1352 *hitp = 0;
1353 rtcache_setdst(ro, dst);
1354 _rtcache_init(ro, clone);
1355 } else
1356 *hitp = 1;
1357
1358 return ro->ro_rt;
1359 }
1360
1361 void
1362 rtcache_free(struct route *ro)
1363 {
1364 rtcache_clear(ro);
1365 if (ro->ro_sa != NULL) {
1366 sockaddr_free(ro->ro_sa);
1367 ro->ro_sa = NULL;
1368 }
1369 }
1370
/*
 * Set the destination sockaddr of route cache `ro' to a copy of `sa',
 * clearing any cached rtentry in the process.  Existing ro_sa storage
 * is reused when the address family matches and the copy fits;
 * otherwise the old sockaddr is released and a new one is duplicated.
 *
 * Returns 0 on success or ENOMEM if duplication fails.
 */
int
rtcache_setdst(struct route *ro, const struct sockaddr *sa)
{
	KASSERT(sa != NULL);

	if (ro->ro_sa != NULL && ro->ro_sa->sa_family == sa->sa_family) {
		/* Same family: try to copy in place over the old sockaddr. */
		rtcache_clear(ro);
		if (sockaddr_copy(ro->ro_sa, ro->ro_sa->sa_len, sa) != NULL)
			return 0;
		/* Copy did not fit; fall through to duplicate. */
		sockaddr_free(ro->ro_sa);
	} else if (ro->ro_sa != NULL)
		rtcache_free(ro); /* free ro_sa, wrong family */

	if ((ro->ro_sa = sockaddr_dup(sa, M_NOWAIT)) == NULL)
		return ENOMEM;
	return 0;
}
1388
1389 static int
1390 rt_walktree_visitor(struct radix_node *rn, void *v)
1391 {
1392 struct rtwalk *rw = (struct rtwalk *)v;
1393
1394 return (*rw->rw_f)((struct rtentry *)rn, rw->rw_v);
1395 }
1396
1397 int
1398 rt_walktree(sa_family_t family, int (*f)(struct rtentry *, void *), void *v)
1399 {
1400 struct radix_node_head *rnh = rt_tables[family];
1401 struct rtwalk rw;
1402
1403 if (rnh == NULL)
1404 return 0;
1405
1406 rw.rw_f = f;
1407 rw.rw_v = v;
1408
1409 return rn_walktree(rnh, rt_walktree_visitor, &rw);
1410 }
1411