/*	$NetBSD: route.c,v 1.78 2006/12/04 00:52:47 dyoung Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)route.c	8.3 (Berkeley) 1/9/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: route.c,v 1.78 2006/12/04 00:52:47 dyoung Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/pool.h>

#include <net/if.h>
#include <net/route.h>
#include <net/raw_cb.h>

#include <netinet/in.h>
#include <netinet/in_var.h>


struct route_cb route_cb;
struct rtstat rtstat;
struct radix_node_head *rt_tables[AF_MAX+1];

int rttrash;			/* routes not in table but not freed */
struct sockaddr wildcard;	/* zero valued cookie for wildcard searches */

POOL_INIT(rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl", NULL);
POOL_INIT(rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl", NULL);

struct callout rt_timer_ch;	/* callout for rt_timer_timer() */

static int rtdeletemsg(struct rtentry *);
static int rtflushclone1(struct radix_node *, void *);
static void rtflushclone(struct radix_node_head *, struct rtentry *);

void
rtable_init(void **table)
{
	struct domain *dom;
	DOMAIN_FOREACH(dom)
		if (dom->dom_rtattach)
			dom->dom_rtattach(&table[dom->dom_family],
			    dom->dom_rtoffset);
}

void
route_init(void)
{

	rn_init();	/* initialize all zeroes, all ones, mask table */
	rtable_init((void **)rt_tables);
}

/*
 * Packet routing routines.
 */
void
rtalloc(struct route *ro)
{
	if (ro->ro_rt != NULL) {
		if (ro->ro_rt->rt_ifp != NULL &&
		    (ro->ro_rt->rt_flags & RTF_UP) != 0)
			return;
		RTFREE(ro->ro_rt);
	}
	ro->ro_rt = rtalloc1(&ro->ro_dst, 1);
}
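
/*
 * Example (illustrative sketch only; the function and the destination
 * below are hypothetical placeholders): a typical caller caches a route
 * in a struct route by filling in ro_dst and calling rtalloc(), then
 * releases its reference with RTFREE() when it is done with ro_rt.
 */
#if 0	/* illustrative sketch, not compiled */
static void
example_route_lookup(struct in_addr dst_addr)
{
	struct route ro;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ro.ro_dst;

	memset(&ro, 0, sizeof(ro));
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	sin->sin_addr = dst_addr;

	rtalloc(&ro);			/* sets ro.ro_rt or leaves it NULL */
	if (ro.ro_rt == NULL)
		return;			/* no route to the destination */

	/* ... use ro.ro_rt->rt_ifp, ro.ro_rt->rt_gateway, etc. ... */

	RTFREE(ro.ro_rt);		/* drop the reference when done */
}
#endif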

struct rtentry *
rtalloc1(const struct sockaddr *dst, int report)
{
	struct radix_node_head *rnh = rt_tables[dst->sa_family];
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt = NULL;
	struct rt_addrinfo info;
	int s = splsoftnet(), err = 0, msgtype = RTM_MISS;

	if (rnh && (rn = rnh->rnh_matchaddr(dst, rnh)) &&
	    ((rn->rn_flags & RNF_ROOT) == 0)) {
		newrt = rt = (struct rtentry *)rn;
		if (report && (rt->rt_flags & RTF_CLONING)) {
			err = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
			    &newrt);
			if (err) {
				newrt = rt;
				rt->rt_refcnt++;
				goto miss;
			}
			KASSERT(newrt != NULL);
			if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route */
			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = rt_key(rt);
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if (rt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] =
				    TAILQ_FIRST(&rt->rt_ifp->if_addrlist)->ifa_addr;
				info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, rt->rt_flags, 0);
		} else
			rt->rt_refcnt++;
	} else {
		rtstat.rts_unreach++;
	miss:	if (report) {
			memset((caddr_t)&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, err);
		}
	}
	splx(s);
	return (newrt);
}

void
rtfree(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if (rt == NULL)
		panic("rtfree");
	rt->rt_refcnt--;
	if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
		if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtfree 2");
		rttrash--;
		if (rt->rt_refcnt < 0) {
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}
		rt_timer_remove_all(rt, 0);
		ifa = rt->rt_ifa;
		rt->rt_ifa = NULL;
		IFAFREE(ifa);
		rt->rt_ifp = NULL;
		Free(rt_key(rt));
		pool_put(&rtentry_pool, rt);
	}
}

void
ifafree(struct ifaddr *ifa)
{

#ifdef DIAGNOSTIC
	if (ifa == NULL)
		panic("ifafree: null ifa");
	if (ifa->ifa_refcnt != 0)
		panic("ifafree: ifa_refcnt != 0 (%d)", ifa->ifa_refcnt);
#endif
#ifdef IFAREF_DEBUG
	printf("ifafree: freeing ifaddr %p\n", ifa);
#endif
	free(ifa, M_IFADDR);
}

/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, const struct sockaddr *src,
	struct rtentry **rtp)
{
	struct rtentry *rt;
	int error = 0;
	u_quad_t *stat = NULL;
	struct rt_addrinfo info;
	struct ifaddr *ifa;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	rt = rtalloc1(dst, 0);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
#define	equal(a1, a2) \
	((a1)->sa_len == (a2)->sa_len && \
	 memcmp((a1), (a2), (a1)->sa_len) == 0)
	if (!(flags & RTF_DONE) && rt &&
	    (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway))
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if ((rt == NULL) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
		create:
			if (rt)
				rtfree(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, &rt);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			stat = &rtstat.rts_newgateway;
			rt_setgate(rt, rt_key(rt), gateway);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		if (rtp && !error)
			*rtp = rt;
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	memset((caddr_t)&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, error);
}

/*
 * Delete a route and generate a message
 */
static int
rtdeletemsg(struct rtentry *rt)
{
	int error;
	struct rt_addrinfo info;

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset((caddr_t)&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	error = rtrequest1(RTM_DELETE, &info, &rt);

	rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);

	/* Adjust the refcount */
	if (error == 0 && rt->rt_refcnt <= 0) {
		rt->rt_refcnt++;
		rtfree(rt);
	}
	return (error);
}

static int
rtflushclone1(struct radix_node *rn, void *arg)
{
	struct rtentry *rt, *parent;

	rt = (struct rtentry *)rn;
	parent = (struct rtentry *)arg;
	if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
		rtdeletemsg(rt);
	return 0;
}

static void
rtflushclone(struct radix_node_head *rnh, struct rtentry *parent)
{

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
	if (!rnh->rnh_walktree)
		panic("rtflushclone: no rnh_walktree");
#endif
	rnh->rnh_walktree(rnh, rtflushclone1, (void *)parent);
}

/*
 * Routing table ioctl interface.
 */
int
rtioctl(u_long req, caddr_t data, struct lwp *l)
{
	return (EOPNOTSUPP);
}

struct ifaddr *
ifa_ifwithroute(int flags, const struct sockaddr *dst,
	const struct sockaddr *gateway)
{
	struct ifaddr *ifa;
	if ((flags & RTF_GATEWAY) == 0) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a pt to pt link
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		ifa = NULL;
		if (flags & RTF_HOST)
			ifa = ifa_ifwithdstaddr(dst);
		if (ifa == NULL)
			ifa = ifa_ifwithaddr(gateway);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr(gateway);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet(gateway);
	if (ifa == NULL) {
		struct rtentry *rt = rtalloc1(dst, 0);
		if (rt == NULL)
			return NULL;
		rt->rt_refcnt--;
		if ((ifa = rt->rt_ifa) == NULL)
			return NULL;
	}
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *oifa = ifa;
		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (ifa == 0)
			ifa = oifa;
	}
	return (ifa);
}

#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
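
/*
 * Worked example: ROUNDUP() rounds a sockaddr length up to a multiple of
 * sizeof(long), mapping 0 to sizeof(long).  With sizeof(long) == 8 (LP64),
 * ROUNDUP(5) == 8, ROUNDUP(16) == 16 and ROUNDUP(0) == 8; with
 * sizeof(long) == 4 (ILP32), ROUNDUP(3) == 4 and ROUNDUP(5) == 8.
 */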

int
rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
{
	struct rt_addrinfo info;

	memset(&info, 0, sizeof(info));
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	return rtrequest1(req, &info, ret_nrt);
}
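
/*
 * Example (illustrative sketch only; the function and both sockaddrs are
 * hypothetical): installing a dynamic host route through a gateway with
 * the rtrequest() convenience wrapper.  Because ret_nrt is non-NULL,
 * rtrequest1() returns a referenced entry which the caller must release
 * or keep.
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_add_host_route(const struct sockaddr *dst,
    const struct sockaddr *gateway)
{
	struct rtentry *rt = NULL;
	int error;

	error = rtrequest(RTM_ADD, dst, gateway, NULL,
	    RTF_UP | RTF_HOST | RTF_GATEWAY, &rt);
	if (error == 0 && rt != NULL)
		rt->rt_refcnt--;	/* drop the reference from rtrequest1() */
	return error;
}
#endif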

int
rt_getifa(struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
	const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];
	int flags = info->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl when protocol address
	 * is ambiguous
	 */
	if (info->rti_ifp == NULL && ifpaddr != NULL
	    && ifpaddr->sa_family == AF_LINK &&
	    (ifa = ifa_ifwithnet((const struct sockaddr *)ifpaddr)) != NULL)
		info->rti_ifp = ifa->ifa_ifp;
	if (info->rti_ifa == NULL && ifaaddr != NULL)
		info->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (info->rti_ifa == NULL) {
		const struct sockaddr *sa;

		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && info->rti_ifp != NULL)
			info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		else if (sa != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, sa, sa);
	}
	if ((ifa = info->rti_ifa) == NULL)
		return ENETUNREACH;
	if (ifa->ifa_getifa != NULL)
		info->rti_ifa = ifa = (*ifa->ifa_getifa)(ifa, dst);
	if (info->rti_ifp == NULL)
		info->rti_ifp = ifa->ifa_ifp;
	return 0;
}

int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
{
	int s = splsoftnet();
	int error = 0;
	struct rtentry *rt, *crt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr *ndst;
	struct sockaddr_storage deldst;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
	int flags = info->rti_flags;
#define senderr(x) { error = x ; goto bad; }

	if ((rnh = rt_tables[dst->sa_family]) == NULL)
		senderr(ESRCH);
	if (flags & RTF_HOST)
		netmask = NULL;
	switch (req) {
	case RTM_DELETE:
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&deldst, netmask);
			dst = (struct sockaddr *)&deldst;
		}
		if ((rn = rnh->rnh_lookup(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(rnh, rt);
		}
		if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtrequest delete");
		rt = (struct rtentry *)rn;
		if (rt->rt_gwroute) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}
		if (rt->rt_parent) {
			rt->rt_parent->rt_refcnt--;
			rt->rt_parent = NULL;
		}
		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);
		rttrash++;
		if (ret_nrt)
			*ret_nrt = rt;
		else if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			senderr(EINVAL);
		ifa = rt->rt_ifa;
		flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
		flags |= RTF_CLONED;
		gateway = rt->rt_gateway;
		if ((netmask = rt->rt_genmask) == NULL)
			flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if (info->rti_ifa == NULL && (error = rt_getifa(info)))
			senderr(error);
		ifa = info->rti_ifa;
	makeroute:
		/* Already at splsoftnet() so pool_get/pool_put are safe */
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		Bzero(rt, sizeof(*rt));
		rt->rt_flags = RTF_UP | flags;
		LIST_INIT(&rt->rt_timer);
		if (rt_setgate(rt, dst, gateway)) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}
		ndst = rt_key(rt);
		if (netmask) {
			rt_maskedcopy(dst, ndst, netmask);
		} else
			Bcopy(dst, ndst, dst->sa_len);
		rt_set_ifa(rt, ifa);
		rt->rt_ifp = ifa->ifa_ifp;
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_parent = *ret_nrt;
			rt->rt_parent->rt_refcnt++;
		}
		rn = rnh->rnh_addaddr(ndst, netmask, rnh, rt->rt_nodes);
		if (rn == NULL && (crt = rtalloc1(ndst, 0)) != NULL) {
			/* overwrite cloned route */
			if ((crt->rt_flags & RTF_CLONED) != 0) {
				rtdeletemsg(crt);
				rn = rnh->rnh_addaddr(ndst,
				    netmask, rnh, rt->rt_nodes);
			}
			RTFREE(crt);
		}
		if (rn == NULL) {
			IFAFREE(ifa);
			if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
				rtfree(rt->rt_parent);
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			Free(rt_key(rt));
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(rnh, rt);
		}
		break;
	}
bad:
	splx(s);
	return (error);
}

int
rt_setgate(struct rtentry *rt0, const struct sockaddr *dst,
	const struct sockaddr *gate)
{
	char *new, *old;
	u_int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
	struct rtentry *rt = rt0;

	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
		old = (caddr_t)rt_key(rt);
		R_Malloc(new, caddr_t, dlen + glen);
		if (new == NULL)
			return 1;
		Bzero(new, dlen + glen);
		rt->rt_nodes->rn_key = new;
	} else {
		new = __UNCONST(rt->rt_nodes->rn_key); /*XXXUNCONST*/
		old = NULL;
	}
	Bcopy(gate, (rt->rt_gateway = (struct sockaddr *)(new + dlen)), glen);
	if (old) {
		Bcopy(dst, new, dlen);
		Free(old);
	}
	if (rt->rt_gwroute) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	if (rt->rt_flags & RTF_GATEWAY) {
		rt->rt_gwroute = rtalloc1(gate, 1);
		/*
		 * If we switched gateways, grab the MTU from the new
		 * gateway route if the current MTU is greater than the
		 * MTU of the gateway.
		 * Note that, if the MTU of the gateway is 0, we will reset
		 * the MTU of the route to run PMTUD again from scratch. XXX
		 */
		if (rt->rt_gwroute
		    && !(rt->rt_rmx.rmx_locks & RTV_MTU)
		    && rt->rt_rmx.rmx_mtu
		    && rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
			rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
		}
	}
	return 0;
}

void
rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
	const struct sockaddr *netmask)
{
	const u_char *cp1 = (const u_char *)src;
	u_char *cp2 = (u_char *)dst;
	const u_char *cp3 = (const u_char *)netmask;
	u_char *cplim = cp2 + *cp3;
	u_char *cplim2 = cp2 + *cp1;

	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
	cp3 += 2;
	if (cplim > cplim2)
		cplim = cplim2;
	while (cp2 < cplim)
		*cp2++ = *cp1++ & *cp3++;
	if (cp2 < cplim2)
		memset(cp2, 0, (unsigned)(cplim2 - cp2));
}
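
/*
 * Worked example: for an AF_INET src of 192.0.2.123 and a netmask of
 * 255.255.255.0, rt_maskedcopy() leaves 192.0.2.0 in the address part of
 * dst, and any bytes of dst past the netmask's sa_len are cleared, so the
 * result can be used directly as the radix-tree key of the corresponding
 * network route.
 */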

/*
 * Set up or tear down a routing table entry, normally
 * for an interface.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct rtentry *rt;
	struct sockaddr *dst, *odst;
	struct sockaddr_storage deldst;
	struct rtentry *nrt = NULL;
	int error;
	struct rt_addrinfo info;

	dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
	if (cmd == RTM_DELETE) {
		if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
			/* Delete subnet route for this interface */
			odst = dst;
			dst = (struct sockaddr *)&deldst;
			rt_maskedcopy(odst, dst, ifa->ifa_netmask);
		}
		if ((rt = rtalloc1(dst, 0)) != NULL) {
			rt->rt_refcnt--;
			if (rt->rt_ifa != ifa)
				return (flags & RTF_HOST ? EHOSTUNREACH
				    : ENETUNREACH);
		}
	}
	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags | ifa->ifa_flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	/*
	 * XXX here, it seems that we are assuming that ifa_netmask is NULL
	 * for RTF_HOST.  bsdi4 passes NULL explicitly (via intermediate
	 * variable) when RTF_HOST is 1.  still not sure if i can safely
	 * change it to meet bsdi4 behavior.
	 */
	info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
	error = rtrequest1(cmd, &info, &nrt);
	if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
		rt_newaddrmsg(cmd, ifa, error, nrt);
		if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
	}
	if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
		rt->rt_refcnt--;
		if (rt->rt_ifa != ifa) {
			printf("rtinit: wrong ifa (%p) was (%p)\n", ifa,
			    rt->rt_ifa);
			if (rt->rt_ifa->ifa_rtrequest)
				rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
			rt_replace_ifa(rt, ifa);
			rt->rt_ifp = ifa->ifa_ifp;
			if (ifa->ifa_rtrequest)
				ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
		}
		rt_newaddrmsg(cmd, ifa, error, nrt);
	}
	return (error);
}
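
/*
 * Example (illustrative sketch only; the wrapper below is hypothetical):
 * address-family configuration code typically installs and removes the
 * route implied by an interface address with rtinit().  As the code above
 * shows, the destination is ifa_addr masked by ifa_netmask for a subnet
 * route, or ifa_dstaddr when RTF_HOST is passed (e.g. on point-to-point
 * interfaces).
 */
#if 0	/* illustrative sketch, not compiled */
static int
example_ifa_route(struct ifaddr *ifa, int up)
{
	if (up)
		return rtinit(ifa, RTM_ADD, RTF_UP);
	return rtinit(ifa, RTM_DELETE, 0);
}
#endif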

/*
 * Route timer routines.  These routines allow functions to be called
 * for various routes at any time.  This is useful in supporting
 * path MTU discovery and redirect route deletion.
 *
 * This is similar to some BSDI internal functions, but it provides
 * for multiple queues for efficiency's sake...
 */
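
/*
 * Example (illustrative sketch only; the callback, queue and timeout are
 * hypothetical): a protocol that wants per-route expiry, e.g. for path
 * MTU discovery, creates one queue with a fixed timeout and then attaches
 * timers to individual routes.  rt_timer_timer() invokes the callback
 * once the timeout (in seconds) has elapsed.
 */
#if 0	/* illustrative sketch, not compiled */
static struct rttimer_queue *example_rtq;

static void
example_route_expired(struct rtentry *rt, struct rttimer *rtt)
{
	/* Called from rt_timer_timer() when the route's timer fires. */
	rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt),
	    rt->rt_flags, NULL);
}

static void
example_arm_route_timer(struct rtentry *rt)
{
	if (example_rtq == NULL)
		example_rtq = rt_timer_queue_create(10 * 60);	/* seconds */
	if (example_rtq != NULL)
		rt_timer_add(rt, example_route_expired, example_rtq);
}
#endif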

LIST_HEAD(, rttimer_queue) rttimer_queue_head;
static int rt_init_done = 0;

#define RTTIMER_CALLOUT(r) do { \
	if (r->rtt_func != NULL) { \
		(*r->rtt_func)(r->rtt_rt, r); \
	} else { \
		rtrequest((int) RTM_DELETE, \
		    (struct sockaddr *)rt_key(r->rtt_rt), \
		    0, 0, 0, 0); \
	} \
} while (/*CONSTCOND*/0)

/*
 * Some subtle order problems with domain initialization mean that
 * we cannot count on this being run from rt_init before various
 * protocol initializations are done.  Therefore, we make sure
 * that this is run when the first queue is added...
 */

void
rt_timer_init(void)
{
	assert(rt_init_done == 0);

	LIST_INIT(&rttimer_queue_head);
	callout_init(&rt_timer_ch);
	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
	rt_init_done = 1;
}

struct rttimer_queue *
rt_timer_queue_create(u_int timeout)
{
	struct rttimer_queue *rtq;

	if (rt_init_done == 0)
		rt_timer_init();

	R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
	if (rtq == NULL)
		return (NULL);
	Bzero(rtq, sizeof *rtq);

	rtq->rtq_timeout = timeout;
	rtq->rtq_count = 0;
	TAILQ_INIT(&rtq->rtq_head);
	LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);

	return (rtq);
}

void
rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
{

	rtq->rtq_timeout = timeout;
}

void
rt_timer_queue_remove_all(struct rttimer_queue *rtq, int destroy)
{
	struct rttimer *r;

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_remove_all: "
			    "rtq_count reached 0\n");
	}
}

void
rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy)
{

	rt_timer_queue_remove_all(rtq, destroy);

	LIST_REMOVE(rtq, rtq_link);

	/*
	 * Caller is responsible for freeing the rttimer_queue structure.
	 */
}

unsigned long
rt_timer_count(struct rttimer_queue *rtq)
{
	return rtq->rtq_count;
}

void
rt_timer_remove_all(struct rtentry *rt, int destroy)
{
	struct rttimer *r;

	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
	}
}

int
rt_timer_add(struct rtentry *rt,
	void (*func)(struct rtentry *, struct rttimer *),
	struct rttimer_queue *queue)
{
	struct rttimer *r;
	int s;

	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	for (r = LIST_FIRST(&rt->rt_timer); r != NULL;
	     r = LIST_NEXT(r, rtt_link)) {
		if (r->rtt_func == func) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
			if (r->rtt_queue->rtq_count > 0)
				r->rtt_queue->rtq_count--;
			else
				printf("rt_timer_add: rtq_count reached 0\n");
			s = splsoftnet();
			pool_put(&rttimer_pool, r);
			splx(s);
			break;	/* only one per list, so we can quit... */
		}
	}

	s = splsoftnet();
	r = pool_get(&rttimer_pool, PR_NOWAIT);
	splx(s);
	if (r == NULL)
		return (ENOBUFS);
	Bzero(r, sizeof(*r));

	r->rtt_rt = rt;
	r->rtt_time = time_uptime;
	r->rtt_func = func;
	r->rtt_queue = queue;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	return (0);
}

/* ARGSUSED */
void
rt_timer_timer(void *arg)
{
	struct rttimer_queue *rtq;
	struct rttimer *r;
	int s;

	s = splsoftnet();
	for (rtq = LIST_FIRST(&rttimer_queue_head); rtq != NULL;
	     rtq = LIST_NEXT(rtq, rtq_link)) {
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < time_uptime) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			RTTIMER_CALLOUT(r);
			pool_put(&rttimer_pool, r);
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	splx(s);

	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
}