/*	$NetBSD: route.c,v 1.72 2006/10/05 17:35:19 tls Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1980, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)route.c	8.3 (Berkeley) 1/9/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: route.c,v 1.72 2006/10/05 17:35:19 tls Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/pool.h>

#include <net/if.h>
#include <net/route.h>
#include <net/raw_cb.h>

#include <netinet/in.h>
#include <netinet/in_var.h>


struct route_cb route_cb;
struct rtstat rtstat;
struct radix_node_head *rt_tables[AF_MAX+1];

int rttrash;			/* routes not in table but not freed */
struct sockaddr wildcard;	/* zero valued cookie for wildcard searches */

POOL_INIT(rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl", NULL);
POOL_INIT(rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl", NULL);

struct callout rt_timer_ch;	/* callout for rt_timer_timer() */

static int rtdeletemsg(struct rtentry *);
static int rtflushclone1(struct radix_node *, void *);
static void rtflushclone(struct radix_node_head *, struct rtentry *);

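/*
 * Routing table setup: rtable_init() walks the list of configured domains
 * and lets each one attach its own routing table; route_init() first
 * initializes the radix tree code (rn_init()) and then builds the
 * per-family tables in rt_tables[].
 */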
void
rtable_init(void **table)
{
	struct domain *dom;
	DOMAIN_FOREACH(dom)
		if (dom->dom_rtattach)
			dom->dom_rtattach(&table[dom->dom_family],
			    dom->dom_rtoffset);
}

void
route_init(void)
{

	rn_init();	/* initialize all zeroes, all ones, mask table */
	rtable_init((void **)rt_tables);
}

/*
 * Packet routing routines.
 */
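/*
 * rtalloc() fills in ro->ro_rt with a route for ro->ro_dst, reusing any
 * usable route already cached in the struct route.  rtalloc1() performs
 * the radix tree lookup; if the matched entry is RTF_CLONING and "report"
 * is set, it clones a child route via RTM_RESOLVE and announces it on the
 * routing socket, and it sends an RTM_MISS message when the lookup fails.
 * A route returned here holds an extra reference; release it with rtfree().
 */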
void
rtalloc(struct route *ro)
{
	if (ro->ro_rt && ro->ro_rt->rt_ifp && (ro->ro_rt->rt_flags & RTF_UP))
		return;		/* XXX */
	ro->ro_rt = rtalloc1(&ro->ro_dst, 1);
}

struct rtentry *
rtalloc1(const struct sockaddr *dst, int report)
{
	struct radix_node_head *rnh = rt_tables[dst->sa_family];
	struct rtentry *rt;
	struct radix_node *rn;
	struct rtentry *newrt = NULL;
	struct rt_addrinfo info;
	int s = splsoftnet(), err = 0, msgtype = RTM_MISS;

	if (rnh && (rn = rnh->rnh_matchaddr(dst, rnh)) &&
	    ((rn->rn_flags & RNF_ROOT) == 0)) {
		newrt = rt = (struct rtentry *)rn;
		if (report && (rt->rt_flags & RTF_CLONING)) {
			err = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
			    &newrt);
			if (err) {
				newrt = rt;
				rt->rt_refcnt++;
				goto miss;
			}
			KASSERT(newrt != NULL);
			if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) {
				msgtype = RTM_RESOLVE;
				goto miss;
			}
			/* Inform listeners of the new route */
			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = rt_key(rt);
			info.rti_info[RTAX_NETMASK] = rt_mask(rt);
			info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
			if (rt->rt_ifp != NULL) {
				info.rti_info[RTAX_IFP] =
				    TAILQ_FIRST(&rt->rt_ifp->if_addrlist)->ifa_addr;
				info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
			}
			rt_missmsg(RTM_ADD, &info, rt->rt_flags, 0);
		} else
			rt->rt_refcnt++;
	} else {
		rtstat.rts_unreach++;
	miss:	if (report) {
			memset((caddr_t)&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			rt_missmsg(msgtype, &info, 0, err);
		}
	}
	splx(s);
	return (newrt);
}

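/*
 * Drop a reference to a routing table entry.  Once the last reference is
 * gone and the route has been removed from the table (RTF_UP clear), its
 * timers, interface address reference and key storage are released and
 * the rtentry itself is returned to the pool.
 */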
void
rtfree(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if (rt == NULL)
		panic("rtfree");
	rt->rt_refcnt--;
	if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) {
		if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtfree 2");
		rttrash--;
		if (rt->rt_refcnt < 0) {
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}
		rt_timer_remove_all(rt, 0);
		ifa = rt->rt_ifa;
		IFAFREE(ifa);
		Free(rt_key(rt));
		pool_put(&rtentry_pool, rt);
	}
}

void
ifafree(struct ifaddr *ifa)
{

#ifdef DIAGNOSTIC
	if (ifa == NULL)
		panic("ifafree: null ifa");
	if (ifa->ifa_refcnt != 0)
		panic("ifafree: ifa_refcnt != 0 (%d)", ifa->ifa_refcnt);
#endif
#ifdef IFAREF_DEBUG
	printf("ifafree: freeing ifaddr %p\n", ifa);
#endif
	free(ifa, M_IFADDR);
}

/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, const struct sockaddr *src,
	struct rtentry **rtp)
{
	struct rtentry *rt;
	int error = 0;
	u_quad_t *stat = NULL;
	struct rt_addrinfo info;
	struct ifaddr *ifa;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	rt = rtalloc1(dst, 0);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
#define	equal(a1, a2) \
	((a1)->sa_len == (a2)->sa_len && \
	 memcmp((a1), (a2), (a1)->sa_len) == 0)
	if (!(flags & RTF_DONE) && rt &&
	    (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway))
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if ((rt == NULL) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
		create:
			if (rt)
				rtfree(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, &rt);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			stat = &rtstat.rts_newgateway;
			rt_setgate(rt, rt_key(rt), gateway);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		if (rtp && !error)
			*rtp = rt;
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	memset((caddr_t)&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, error);
}

/*
 * Delete a route and generate a message
 */
static int
rtdeletemsg(struct rtentry *rt)
{
	int error;
	struct rt_addrinfo info;

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset((caddr_t)&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	error = rtrequest1(RTM_DELETE, &info, &rt);

	rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);

	/* Adjust the refcount */
	if (error == 0 && rt->rt_refcnt <= 0) {
		rt->rt_refcnt++;
		rtfree(rt);
	}
	return (error);
}

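/*
 * Walk the routing tree and delete (with a routing-socket message) every
 * cloned route whose parent is the given cloning route.  Called when a
 * cloning route is deleted or re-added.
 */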
static int
rtflushclone1(struct radix_node *rn, void *arg)
{
	struct rtentry *rt, *parent;

	rt = (struct rtentry *)rn;
	parent = (struct rtentry *)arg;
	if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent == parent)
		rtdeletemsg(rt);
	return 0;
}

static void
rtflushclone(struct radix_node_head *rnh, struct rtentry *parent)
{

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
	if (!rnh->rnh_walktree)
		panic("rtflushclone: no rnh_walktree");
#endif
	rnh->rnh_walktree(rnh, rtflushclone1, (void *)parent);
}

/*
 * Routing table ioctl interface.
 */
int
rtioctl(u_long req, caddr_t data, struct lwp *l)
{
	return (EOPNOTSUPP);
}

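/*
 * Select the interface address a route with the given flags, destination
 * and gateway should be associated with, falling back to a routing lookup
 * when no directly attached address matches.
 */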
struct ifaddr *
ifa_ifwithroute(int flags, const struct sockaddr *dst,
	const struct sockaddr *gateway)
{
	struct ifaddr *ifa;
	if ((flags & RTF_GATEWAY) == 0) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a pt to pt link
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		ifa = NULL;
		if (flags & RTF_HOST)
			ifa = ifa_ifwithdstaddr(dst);
		if (ifa == NULL)
			ifa = ifa_ifwithaddr(gateway);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr(gateway);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet(gateway);
	if (ifa == NULL) {
		struct rtentry *rt = rtalloc1(dst, 0);
		if (rt == NULL)
			return NULL;
		rt->rt_refcnt--;
		if ((ifa = rt->rt_ifa) == NULL)
			return NULL;
	}
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *oifa = ifa;
		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (ifa == NULL)
			ifa = oifa;
	}
	return (ifa);
}

#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))

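/*
 * Convenience wrapper around rtrequest1(): build a struct rt_addrinfo
 * from the separate dst/gateway/netmask/flags arguments.  For example,
 * deleting an entry by its key can be written as
 *
 *	(void)rtrequest(RTM_DELETE, rt_key(rt), NULL, NULL, 0, NULL);
 *
 * which is essentially what the RTTIMER_CALLOUT() macro below does.
 */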
int
rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
{
	struct rt_addrinfo info;

	memset(&info, 0, sizeof(info));
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	return rtrequest1(req, &info, ret_nrt);
}

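/*
 * Resolve the interface (rti_ifp) and interface address (rti_ifa) for a
 * route request from whatever hints the caller supplied: an AF_LINK
 * "interface" sockaddr, an explicit interface address, or the destination
 * and gateway themselves.  Returns ENETUNREACH if no address can be found.
 */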
int
rt_getifa(struct rt_addrinfo *info)
{
	struct ifaddr *ifa;
	int error = 0;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
	const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];
	int flags = info->rti_flags;

	/*
	 * ifp may be specified by sockaddr_dl when protocol address
	 * is ambiguous
	 */
	if (info->rti_ifp == NULL && ifpaddr != NULL
	    && ifpaddr->sa_family == AF_LINK &&
	    (ifa = ifa_ifwithnet((const struct sockaddr *)ifpaddr)) != NULL)
		info->rti_ifp = ifa->ifa_ifp;
	if (info->rti_ifa == NULL && ifaaddr != NULL)
		info->rti_ifa = ifa_ifwithaddr(ifaaddr);
	if (info->rti_ifa == NULL) {
		const struct sockaddr *sa;

		sa = ifaaddr != NULL ? ifaaddr :
		    (gateway != NULL ? gateway : dst);
		if (sa != NULL && info->rti_ifp != NULL)
			info->rti_ifa = ifaof_ifpforaddr(sa, info->rti_ifp);
		else if (dst != NULL && gateway != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
		else if (sa != NULL)
			info->rti_ifa = ifa_ifwithroute(flags, sa, sa);
	}
	if ((ifa = info->rti_ifa) != NULL) {
		if (info->rti_ifp == NULL)
			info->rti_ifp = ifa->ifa_ifp;
	} else
		error = ENETUNREACH;
	return (error);
}

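/*
 * The workhorse of the routing table: add, delete or resolve (clone) an
 * entry described by "info".  On success, *ret_nrt (when non-NULL) refers
 * to the affected entry: RTM_ADD and RTM_RESOLVE return it with an extra
 * reference, while RTM_DELETE returns the unlinked entry and leaves the
 * caller responsible for releasing it.
 */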
int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
{
	int s = splsoftnet();
	int error = 0;
	struct rtentry *rt, *crt;
	struct radix_node *rn;
	struct radix_node_head *rnh;
	struct ifaddr *ifa;
	struct sockaddr *ndst;
	struct sockaddr_storage deldst;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
	int flags = info->rti_flags;
#define senderr(x) { error = x ; goto bad; }

	if ((rnh = rt_tables[dst->sa_family]) == NULL)
		senderr(ESRCH);
	if (flags & RTF_HOST)
		netmask = NULL;
	switch (req) {
	case RTM_DELETE:
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&deldst, netmask);
			dst = (struct sockaddr *)&deldst;
		}
		if ((rn = rnh->rnh_lookup(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		rt = (struct rtentry *)rn;
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(rnh, rt);
		}
		if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL)
			senderr(ESRCH);
		if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT))
			panic("rtrequest delete");
		rt = (struct rtentry *)rn;
		if (rt->rt_gwroute) {
			RTFREE(rt->rt_gwroute);
			rt->rt_gwroute = NULL;
		}
		if (rt->rt_parent) {
			rt->rt_parent->rt_refcnt--;
			rt->rt_parent = NULL;
		}
		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(RTM_DELETE, rt, info);
		rttrash++;
		if (ret_nrt)
			*ret_nrt = rt;
		else if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
		break;

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			senderr(EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			senderr(EINVAL);
		ifa = rt->rt_ifa;
		flags = rt->rt_flags & ~(RTF_CLONING | RTF_STATIC);
		flags |= RTF_CLONED;
		gateway = rt->rt_gateway;
		if ((netmask = rt->rt_genmask) == NULL)
			flags |= RTF_HOST;
		goto makeroute;

	case RTM_ADD:
		if (info->rti_ifa == NULL && (error = rt_getifa(info)))
			senderr(error);
		ifa = info->rti_ifa;
	makeroute:
		/* Already at splsoftnet() so pool_get/pool_put are safe */
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		Bzero(rt, sizeof(*rt));
		rt->rt_flags = RTF_UP | flags;
		LIST_INIT(&rt->rt_timer);
		if (rt_setgate(rt, dst, gateway)) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}
		ndst = rt_key(rt);
		if (netmask) {
			rt_maskedcopy(dst, ndst, netmask);
		} else
			Bcopy(dst, ndst, dst->sa_len);
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		rt->rt_ifp = ifa->ifa_ifp;
		if (req == RTM_RESOLVE) {
			rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */
			rt->rt_parent = *ret_nrt;
			rt->rt_parent->rt_refcnt++;
		}
		rn = rnh->rnh_addaddr(ndst, netmask, rnh, rt->rt_nodes);
		if (rn == NULL && (crt = rtalloc1(ndst, 0)) != NULL) {
			/* overwrite cloned route */
			if ((crt->rt_flags & RTF_CLONED) != 0) {
				rtdeletemsg(crt);
				rn = rnh->rnh_addaddr(ndst,
				    netmask, rnh, rt->rt_nodes);
			}
			RTFREE(crt);
		}
		if (rn == NULL) {
			IFAFREE(ifa);
			if ((rt->rt_flags & RTF_CLONED) != 0 && rt->rt_parent)
				rtfree(rt->rt_parent);
			if (rt->rt_gwroute)
				rtfree(rt->rt_gwroute);
			Free(rt_key(rt));
			pool_put(&rtentry_pool, rt);
			senderr(EEXIST);
		}
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt->rt_refcnt++;
		}
		if ((rt->rt_flags & RTF_CLONING) != 0) {
			/* clean up any cloned children */
			rtflushclone(rnh, rt);
		}
		break;
	}
bad:
	splx(s);
	return (error);
}

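/*
 * Set the gateway of a route, reallocating the key/gateway storage when
 * the new gateway does not fit in the existing allocation.  For indirect
 * (RTF_GATEWAY) routes this also looks up a route to the gateway itself
 * and may inherit a smaller MTU from it.
 */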
int
rt_setgate(struct rtentry *rt0, const struct sockaddr *dst,
	const struct sockaddr *gate)
{
	char *new, *old;
	u_int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
	struct rtentry *rt = rt0;

	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
		old = (caddr_t)rt_key(rt);
		R_Malloc(new, caddr_t, dlen + glen);
		if (new == NULL)
			return 1;
		Bzero(new, dlen + glen);
		rt->rt_nodes->rn_key = new;
	} else {
		new = __UNCONST(rt->rt_nodes->rn_key); /*XXXUNCONST*/
		old = NULL;
	}
	Bcopy(gate, (rt->rt_gateway = (struct sockaddr *)(new + dlen)), glen);
	if (old) {
		Bcopy(dst, new, dlen);
		Free(old);
	}
	if (rt->rt_gwroute) {
		RTFREE(rt->rt_gwroute);
		rt->rt_gwroute = NULL;
	}
	if (rt->rt_flags & RTF_GATEWAY) {
		rt->rt_gwroute = rtalloc1(gate, 1);
		/*
		 * If we switched gateways, grab the MTU from the new
		 * gateway route if the current MTU is greater than the
		 * MTU of the gateway.
		 * Note that, if the MTU of the gateway is 0, we will reset
		 * the MTU of the route to run PMTUD again from scratch. XXX
		 */
		if (rt->rt_gwroute
		    && !(rt->rt_rmx.rmx_locks & RTV_MTU)
		    && rt->rt_rmx.rmx_mtu
		    && rt->rt_rmx.rmx_mtu > rt->rt_gwroute->rt_rmx.rmx_mtu) {
			rt->rt_rmx.rmx_mtu = rt->rt_gwroute->rt_rmx.rmx_mtu;
		}
	}
	return 0;
}

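/*
 * Copy "src" into "dst", ANDing the address bytes with "netmask" and
 * zero-filling the remainder; sa_len and sa_family are copied verbatim.
 * E.g. masking 192.168.1.5 with a 255.255.255.0 netmask yields the
 * network key 192.168.1.0.
 */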
void
rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
	const struct sockaddr *netmask)
{
	const u_char *cp1 = (const u_char *)src;
	u_char *cp2 = (u_char *)dst;
	const u_char *cp3 = (const u_char *)netmask;
	u_char *cplim = cp2 + *cp3;
	u_char *cplim2 = cp2 + *cp1;

	*cp2++ = *cp1++; *cp2++ = *cp1++;	/* copies sa_len & sa_family */
	cp3 += 2;
	if (cplim > cplim2)
		cplim = cplim2;
	while (cp2 < cplim)
		*cp2++ = *cp1++ & *cp3++;
	if (cp2 < cplim2)
		memset(cp2, 0, (unsigned)(cplim2 - cp2));
}

/*
 * Set up or tear down a routing table entry, normally
 * for an interface.
 */
int
rtinit(struct ifaddr *ifa, int cmd, int flags)
{
	struct rtentry *rt;
	struct sockaddr *dst, *odst;
	struct sockaddr_storage deldst;
	struct rtentry *nrt = NULL;
	int error;
	struct rt_addrinfo info;

	dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
	if (cmd == RTM_DELETE) {
		if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
			/* Delete subnet route for this interface */
			odst = dst;
			dst = (struct sockaddr *)&deldst;
			rt_maskedcopy(odst, dst, ifa->ifa_netmask);
		}
		if ((rt = rtalloc1(dst, 0)) != NULL) {
			rt->rt_refcnt--;
			if (rt->rt_ifa != ifa)
				return (flags & RTF_HOST ? EHOSTUNREACH
				    : ENETUNREACH);
		}
	}
	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags | ifa->ifa_flags;
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	/*
	 * XXX here, it seems that we are assuming that ifa_netmask is NULL
	 * for RTF_HOST.  bsdi4 passes NULL explicitly (via intermediate
	 * variable) when RTF_HOST is 1.  still not sure if i can safely
	 * change it to meet bsdi4 behavior.
	 */
	info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
	error = rtrequest1(cmd, &info, &nrt);
	if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) {
		rt_newaddrmsg(cmd, ifa, error, nrt);
		if (rt->rt_refcnt <= 0) {
			rt->rt_refcnt++;
			rtfree(rt);
		}
	}
	if (cmd == RTM_ADD && error == 0 && (rt = nrt)) {
		rt->rt_refcnt--;
		if (rt->rt_ifa != ifa) {
			printf("rtinit: wrong ifa (%p) was (%p)\n", ifa,
			    rt->rt_ifa);
			if (rt->rt_ifa->ifa_rtrequest)
				rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, NULL);
			IFAFREE(rt->rt_ifa);
			rt->rt_ifa = ifa;
			rt->rt_ifp = ifa->ifa_ifp;
			IFAREF(ifa);
			if (ifa->ifa_rtrequest)
				ifa->ifa_rtrequest(RTM_ADD, rt, NULL);
		}
		rt_newaddrmsg(cmd, ifa, error, nrt);
	}
	return (error);
}

/*
 * Route timer routines.  These routines allow functions to be called
 * for various routes at any time.  This is useful in supporting
 * path MTU discovery and redirect route deletion.
 *
 * This is similar to some BSDI internal functions, but it provides
 * for multiple queues for efficiency's sake...
 */

LIST_HEAD(, rttimer_queue) rttimer_queue_head;
static int rt_init_done = 0;

#define RTTIMER_CALLOUT(r)	do {					\
		if (r->rtt_func != NULL) {				\
			(*r->rtt_func)(r->rtt_rt, r);			\
		} else {						\
			rtrequest((int) RTM_DELETE,			\
			    (struct sockaddr *)rt_key(r->rtt_rt),	\
			    0, 0, 0, 0);				\
		}							\
	} while (/*CONSTCOND*/0)

/*
 * Some subtle order problems with domain initialization mean that
 * we cannot count on this being run from rt_init before various
 * protocol initializations are done.  Therefore, we make sure
 * that this is run when the first queue is added...
 */

void
rt_timer_init(void)
{
	assert(rt_init_done == 0);

	LIST_INIT(&rttimer_queue_head);
	callout_init(&rt_timer_ch);
	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
	rt_init_done = 1;
}

struct rttimer_queue *
rt_timer_queue_create(u_int timeout)
{
	struct rttimer_queue *rtq;

	if (rt_init_done == 0)
		rt_timer_init();

	R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
	if (rtq == NULL)
		return (NULL);
	Bzero(rtq, sizeof *rtq);

	rtq->rtq_timeout = timeout;
	rtq->rtq_count = 0;
	TAILQ_INIT(&rtq->rtq_head);
	LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);

	return (rtq);
}

void
rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
{

	rtq->rtq_timeout = timeout;
}

void
rt_timer_queue_remove_all(struct rttimer_queue *rtq, int destroy)
{
	struct rttimer *r;

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_remove_all: "
			    "rtq_count reached 0\n");
	}
}

void
rt_timer_queue_destroy(struct rttimer_queue *rtq, int destroy)
{

	rt_timer_queue_remove_all(rtq, destroy);

	LIST_REMOVE(rtq, rtq_link);

	/*
	 * Caller is responsible for freeing the rttimer_queue structure.
	 */
}

unsigned long
rt_timer_count(struct rttimer_queue *rtq)
{
	return rtq->rtq_count;
}

void
rt_timer_remove_all(struct rtentry *rt, int destroy)
{
	struct rttimer *r;

	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (destroy)
			RTTIMER_CALLOUT(r);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		/* we are already at splsoftnet */
		pool_put(&rttimer_pool, r);
	}
}

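/*
 * Arrange for "func" to be called on "rt" once the queue's timeout has
 * expired; any existing timer for the same function on this route is
 * removed first.  A NULL "func" simply deletes the route when the timer
 * fires (see RTTIMER_CALLOUT()).
 */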
int
rt_timer_add(struct rtentry *rt,
	void (*func)(struct rtentry *, struct rttimer *),
	struct rttimer_queue *queue)
{
	struct rttimer *r;
	int s;

	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	for (r = LIST_FIRST(&rt->rt_timer); r != NULL;
	     r = LIST_NEXT(r, rtt_link)) {
		if (r->rtt_func == func) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
			if (r->rtt_queue->rtq_count > 0)
				r->rtt_queue->rtq_count--;
			else
				printf("rt_timer_add: rtq_count reached 0\n");
			s = splsoftnet();
			pool_put(&rttimer_pool, r);
			splx(s);
			break;	/* only one per list, so we can quit... */
		}
	}

	s = splsoftnet();
	r = pool_get(&rttimer_pool, PR_NOWAIT);
	splx(s);
	if (r == NULL)
		return (ENOBUFS);
	Bzero(r, sizeof(*r));

	r->rtt_rt = rt;
	r->rtt_time = time_uptime;
	r->rtt_func = func;
	r->rtt_queue = queue;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	return (0);
}

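/*
 * Periodic callout (rescheduled every hz ticks): scan every timer queue,
 * fire the entries whose timeout has passed, then reschedule ourselves.
 */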
/* ARGSUSED */
void
rt_timer_timer(void *arg)
{
	struct rttimer_queue *rtq;
	struct rttimer *r;
	int s;

	s = splsoftnet();
	for (rtq = LIST_FIRST(&rttimer_queue_head); rtq != NULL;
	     rtq = LIST_NEXT(rtq, rtq_link)) {
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < time_uptime) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			RTTIMER_CALLOUT(r);
			pool_put(&rttimer_pool, r);
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	splx(s);

	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
}