route.c revision 1.214 1 /* $NetBSD: route.c,v 1.214 2018/10/30 05:30:31 ozaki-r Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the project nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 */
61
62 /*
63 * Copyright (c) 1980, 1986, 1991, 1993
64 * The Regents of the University of California. All rights reserved.
65 *
66 * Redistribution and use in source and binary forms, with or without
67 * modification, are permitted provided that the following conditions
68 * are met:
69 * 1. Redistributions of source code must retain the above copyright
70 * notice, this list of conditions and the following disclaimer.
71 * 2. Redistributions in binary form must reproduce the above copyright
72 * notice, this list of conditions and the following disclaimer in the
73 * documentation and/or other materials provided with the distribution.
74 * 3. Neither the name of the University nor the names of its contributors
75 * may be used to endorse or promote products derived from this software
76 * without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
79 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
80 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
81 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
82 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
83 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
84 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
86 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
87 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
88 * SUCH DAMAGE.
89 *
90 * @(#)route.c 8.3 (Berkeley) 1/9/95
91 */
92
93 #ifdef _KERNEL_OPT
94 #include "opt_inet.h"
95 #include "opt_route.h"
96 #include "opt_net_mpsafe.h"
97 #endif
98
99 #include <sys/cdefs.h>
100 __KERNEL_RCSID(0, "$NetBSD: route.c,v 1.214 2018/10/30 05:30:31 ozaki-r Exp $");
101
102 #include <sys/param.h>
103 #ifdef RTFLUSH_DEBUG
104 #include <sys/sysctl.h>
105 #endif
106 #include <sys/systm.h>
107 #include <sys/callout.h>
108 #include <sys/proc.h>
109 #include <sys/mbuf.h>
110 #include <sys/socket.h>
111 #include <sys/socketvar.h>
112 #include <sys/domain.h>
113 #include <sys/kernel.h>
114 #include <sys/ioctl.h>
115 #include <sys/pool.h>
116 #include <sys/kauth.h>
117 #include <sys/workqueue.h>
118 #include <sys/syslog.h>
119 #include <sys/rwlock.h>
120 #include <sys/mutex.h>
121 #include <sys/cpu.h>
122
123 #include <net/if.h>
124 #include <net/if_dl.h>
125 #include <net/route.h>
126 #if defined(INET) || defined(INET6)
127 #include <net/if_llatbl.h>
128 #endif
129
130 #include <netinet/in.h>
131 #include <netinet/in_var.h>
132
/*
 * Route flags kept when an existing route is modified.
 * NOTE(review): the consumer of this mask is outside this chunk — confirm
 * against rtrequest1()'s RTM_CHANGE/RTM_ADD handling.
 */
#define PRESERVED_RTF (RTF_UP | RTF_GATEWAY | RTF_HOST | RTF_DONE | RTF_MASK)

/*
 * rtcache_debug() gates verbose route-cache diagnostics.  With
 * RTFLUSH_DEBUG it is a sysctl-settable flag (net.rtcache.debug);
 * otherwise it is constant 0 and the printfs compile away.
 */
#ifdef RTFLUSH_DEBUG
#define rtcache_debug() __predict_false(_rtcache_debug)
#else /* RTFLUSH_DEBUG */
#define rtcache_debug() 0
#endif /* RTFLUSH_DEBUG */

/* Trace a rtentry's reference count at the call site (RT_DEBUG only). */
#ifdef RT_DEBUG
#define RT_REFCNT_TRACE(rt) printf("%s:%d: rt=%p refcnt=%d\n", \
    __func__, __LINE__, (rt), (rt)->rt_refcnt)
#else
#define RT_REFCNT_TRACE(rt) do {} while (0)
#endif

/* Debug logging that compiles away unless RT_DEBUG is defined. */
#ifdef RT_DEBUG
#define dlog(level, fmt, args...) log(level, fmt, ##args)
#else
#define dlog(level, fmt, args...) do {} while (0)
#endif

struct rtstat rtstat;	/* routing statistics */

static int rttrash; /* routes not in table but not freed */

/* Allocation pools for rtentry and rttimer objects (set up in rt_init). */
static struct pool rtentry_pool;
static struct pool rttimer_pool;

/* Route-timer machinery: a callout that kicks work onto a workqueue. */
static struct callout rt_timer_ch; /* callout for rt_timer_timer() */
static struct workqueue *rt_timer_wq;
static struct work rt_timer_wk;

static void rt_timer_init(void);
static void rt_timer_queue_remove_all(struct rttimer_queue *);
static void rt_timer_remove_all(struct rtentry *);
static void rt_timer_timer(void *);
169
170 /*
171 * Locking notes:
172 * - The routing table is protected by a global rwlock
173 * - API: RT_RLOCK and friends
174 * - rtcaches are NOT protected by the framework
175 * - Callers must guarantee a rtcache isn't accessed simultaneously
 *   - How the constraint is guaranteed in the wild
177 * - Protect a rtcache by a mutex (e.g., inp_route)
178 * - Make rtcache per-CPU and allow only accesses from softint
179 * (e.g., ipforward_rt_percpu)
 * - References to a rtentry are managed by reference counting and psref
 *   - Reference counting is used for temporary references when a rtentry
 *     is fetched from the routing table
 *   - psref is used for temporary references when a rtentry is fetched
 *     from a rtcache
 *     - struct route (rtcache) has struct psref, so we cannot obtain
 *       a reference twice on the same struct route
 *   - Before destroying or updating a rtentry, we have to wait for
 *     all references left (see below for details)
189 * - APIs
190 * - An obtained rtentry via rtalloc1 or rtrequest* must be
191 * unreferenced by rt_unref
192 * - An obtained rtentry via rtcache_* must be unreferenced by
193 * rtcache_unref
194 * - TODO: once we get a lockless routing table, we should use only
195 * psref for rtentries
196 * - rtentry destruction
197 * - A rtentry is destroyed (freed) only when we call rtrequest(RTM_DELETE)
198 * - If a caller of rtrequest grabs a reference of a rtentry, the caller
199 * has a responsibility to destroy the rtentry by itself by calling
200 * rt_free
201 * - If not, rtrequest itself does that
202 * - If rt_free is called in softint, the actual destruction routine is
203 * deferred to a workqueue
204 * - rtentry update
205 * - When updating a rtentry, RTF_UPDATING flag is set
206 * - If a rtentry is set RTF_UPDATING, fetching the rtentry from
207 * the routing table or a rtcache results in either of the following
208 * cases:
209 * - if the caller runs in softint, the caller fails to fetch
210 * - otherwise, the caller waits for the update completed and retries
211 * to fetch (probably succeed to fetch for the second time)
212 * - rtcache invalidation
213 * - There is a global generation counter that is incremented when
214 * any routes have been added or deleted
215 * - When a rtcache caches a rtentry into itself, it also stores
216 * a snapshot of the generation counter
217 * - If the snapshot equals to the global counter, the cache is valid,
218 * otherwise the cache is invalidated
219 */
220
/*
 * Global lock for the routing table.
 */
static krwlock_t rt_lock __cacheline_aligned;
#ifdef NET_MPSAFE
#define RT_RLOCK() rw_enter(&rt_lock, RW_READER)
#define RT_WLOCK() rw_enter(&rt_lock, RW_WRITER)
#define RT_UNLOCK() rw_exit(&rt_lock)
#define RT_WLOCKED() rw_write_held(&rt_lock)
#define RT_ASSERT_WLOCK() KASSERT(rw_write_held(&rt_lock))
#else
/* Without NET_MPSAFE the table is serialized elsewhere; these are no-ops. */
#define RT_RLOCK() do {} while (0)
#define RT_WLOCK() do {} while (0)
#define RT_UNLOCK() do {} while (0)
#define RT_WLOCKED() true
#define RT_ASSERT_WLOCK() do {} while (0)
#endif

/*
 * Generation counter for rtcache validity; bumped by
 * rtcache_invalidate() whenever the table changes.
 * Protected by rt_lock (see rtcache_invalidate()).
 */
static uint64_t rtcache_generation;

/*
 * mutex and cv that are used to wait for references to a rtentry left
 * before updating the rtentry.
 */
static struct {
	kmutex_t lock;
	kcondvar_t cv;
	bool ongoing;		/* an update is in progress */
	const struct lwp *lwp;	/* the updating lwp, to let it bypass waits */
} rt_update_global __cacheline_aligned;

/*
 * A workqueue and stuff that are used to defer the destruction routine
 * of rtentries.
 */
static struct {
	struct workqueue *wq;
	struct work wk;
	kmutex_t lock;			/* protects queue and enqueued */
	SLIST_HEAD(, rtentry) queue;	/* rtentries awaiting _rt_free() */
	bool enqueued;			/* work already scheduled */
} rt_free_global __cacheline_aligned;

/* psref for rtentry */
static struct psref_class *rt_psref_class __read_mostly;

#ifdef RTFLUSH_DEBUG
static int _rtcache_debug = 0;
#endif /* RTFLUSH_DEBUG */

/* kauth listener authorizing route-socket requests (see route_listener_cb). */
static kauth_listener_t route_listener;

static int rtdeletemsg(struct rtentry *);

static void rt_maskedcopy(const struct sockaddr *,
    struct sockaddr *, const struct sockaddr *);

static void rtcache_invalidate(void);

static void rt_ref(struct rtentry *);

static struct rtentry *
    rtalloc1_locked(const struct sockaddr *, int, bool, bool);

static struct ifaddr *rt_getifa(struct rt_addrinfo *, struct psref *);
static struct ifnet *rt_getifp(struct rt_addrinfo *, struct psref *);
static struct ifaddr *ifa_ifwithroute_psref(int, const struct sockaddr *,
    const struct sockaddr *, struct psref *);

static void rtcache_ref(struct rtentry *, struct route *);

#ifdef NET_MPSAFE
static void rt_update_wait(void);
#endif

static bool rt_wait_ok(void);
static void rt_wait_refcnt(const char *, struct rtentry *, int);
static void rt_wait_psref(struct rtentry *);

#ifdef DDB
static void db_print_sa(const struct sockaddr *);
static void db_print_ifa(struct ifaddr *);
static int db_show_rtentry(struct rtentry *, void *);
#endif
305
#ifdef RTFLUSH_DEBUG
/*
 * Create the net.rtcache sysctl subtree with a single read-write
 * "debug" knob backed by _rtcache_debug (see rtcache_debug()).
 */
static void sysctl_net_rtcache_setup(struct sysctllog **);
static void
sysctl_net_rtcache_setup(struct sysctllog **clog)
{
	const struct sysctlnode *rnode;

	/* Parent "rtcache" node under net; bail silently on failure. */
	if (sysctl_createv(clog, 0, NULL, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE,
	    "rtcache", SYSCTL_DESCR("Route cache related settings"),
	    NULL, 0, NULL, 0, CTL_NET, CTL_CREATE, CTL_EOL) != 0)
		return;
	/* net.rtcache.debug toggles route-cache diagnostics. */
	if (sysctl_createv(clog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug route caches"),
	    NULL, 0, &_rtcache_debug, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
}
#endif /* RTFLUSH_DEBUG */
325
326 static inline void
327 rt_destroy(struct rtentry *rt)
328 {
329 if (rt->_rt_key != NULL)
330 sockaddr_free(rt->_rt_key);
331 if (rt->rt_gateway != NULL)
332 sockaddr_free(rt->rt_gateway);
333 if (rt_gettag(rt) != NULL)
334 sockaddr_free(rt_gettag(rt));
335 rt->_rt_key = rt->rt_gateway = rt->rt_tag = NULL;
336 }
337
338 static inline const struct sockaddr *
339 rt_setkey(struct rtentry *rt, const struct sockaddr *key, int flags)
340 {
341 if (rt->_rt_key == key)
342 goto out;
343
344 if (rt->_rt_key != NULL)
345 sockaddr_free(rt->_rt_key);
346 rt->_rt_key = sockaddr_dup(key, flags);
347 out:
348 rt->rt_nodes->rn_key = (const char *)rt->_rt_key;
349 return rt->_rt_key;
350 }
351
/*
 * Return the ifaddr associated with rt, letting the address family's
 * ifa_getifa hook (if any) substitute a fresher/more specific ifaddr
 * for the route's key.  If the hook returns a replacement, the route
 * is switched over to it via rt_replace_ifa().  Returns NULL only
 * when the hook fails.
 */
struct ifaddr *
rt_get_ifa(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if ((ifa = rt->rt_ifa) == NULL)
		return ifa;
	else if (ifa->ifa_getifa == NULL)
		return ifa;
#if 0
	/* Disabled seqno shortcut: skip the hook when nothing changed. */
	else if (ifa->ifa_seqno != NULL && *ifa->ifa_seqno == rt->rt_ifa_seqno)
		return ifa;
#endif
	else {
		ifa = (*ifa->ifa_getifa)(ifa, rt_getkey(rt));
		if (ifa == NULL)
			return NULL;
		rt_replace_ifa(rt, ifa);
		return ifa;
	}
}
373
/*
 * Set rt's ifaddr without touching reference counts; callers take the
 * reference (see rt_set_ifa()/rt_replace_ifa()).  Also snapshot the
 * address's sequence number, if it has one, so staleness can be
 * detected later.
 */
static void
rt_set_ifa1(struct rtentry *rt, struct ifaddr *ifa)
{
	rt->rt_ifa = ifa;
	if (ifa->ifa_seqno != NULL)
		rt->rt_ifa_seqno = *ifa->ifa_seqno;
}
381
382 /*
383 * Is this route the connected route for the ifa?
384 */
385 static int
386 rt_ifa_connected(const struct rtentry *rt, const struct ifaddr *ifa)
387 {
388 const struct sockaddr *key, *dst, *odst;
389 struct sockaddr_storage maskeddst;
390
391 key = rt_getkey(rt);
392 dst = rt->rt_flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
393 if (dst == NULL ||
394 dst->sa_family != key->sa_family ||
395 dst->sa_len != key->sa_len)
396 return 0;
397 if ((rt->rt_flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
398 odst = dst;
399 dst = (struct sockaddr *)&maskeddst;
400 rt_maskedcopy(odst, (struct sockaddr *)&maskeddst,
401 ifa->ifa_netmask);
402 }
403 return (memcmp(dst, key, dst->sa_len) == 0);
404 }
405
/*
 * Replace rt's ifaddr with ifa, transferring the IFA_ROUTE
 * ("connected route") marker from the old address to the new one
 * when appropriate, and moving the reference counts accordingly.
 */
void
rt_replace_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	struct ifaddr *old;

	if (rt->rt_ifa == ifa)
		return;

	/*
	 * If the old ifaddr owned this connected route, drop its marker
	 * and, when the new ifaddr is also connected via this route,
	 * hand the marker over.
	 */
	if (rt->rt_ifa &&
	    rt->rt_ifa != ifa &&
	    rt->rt_ifa->ifa_flags & IFA_ROUTE &&
	    rt_ifa_connected(rt, rt->rt_ifa))
	{
		RT_DPRINTF("rt->_rt_key = %p, ifa = %p, "
		    "replace deleted IFA_ROUTE\n",
		    (void *)rt->_rt_key, (void *)rt->rt_ifa);
		rt->rt_ifa->ifa_flags &= ~IFA_ROUTE;
		if (rt_ifa_connected(rt, ifa)) {
			RT_DPRINTF("rt->_rt_key = %p, ifa = %p, "
			    "replace added IFA_ROUTE\n",
			    (void *)rt->_rt_key, (void *)ifa);
			ifa->ifa_flags |= IFA_ROUTE;
		}
	}

	/* Take the new reference before dropping the old one. */
	ifaref(ifa);
	old = rt->rt_ifa;
	rt_set_ifa1(rt, ifa);
	ifafree(old);
}
436
/* Set rt's ifaddr, taking a reference on it for the route. */
static void
rt_set_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	ifaref(ifa);
	rt_set_ifa1(rt, ifa);
}
443
444 static int
445 route_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
446 void *arg0, void *arg1, void *arg2, void *arg3)
447 {
448 struct rt_msghdr *rtm;
449 int result;
450
451 result = KAUTH_RESULT_DEFER;
452 rtm = arg1;
453
454 if (action != KAUTH_NETWORK_ROUTE)
455 return result;
456
457 if (rtm->rtm_type == RTM_GET)
458 result = KAUTH_RESULT_ALLOW;
459
460 return result;
461 }
462
static void rt_free_work(struct work *, void *);

/*
 * One-time initialization of the routing subsystem: the deferred-free
 * machinery, psref class, update synchronization, allocation pools,
 * the radix/route-table backends, and the kauth route listener.
 */
void
rt_init(void)
{
	int error;

#ifdef RTFLUSH_DEBUG
	sysctl_net_rtcache_setup(NULL);
#endif

	/* Deferred destruction of rtentries freed from softint. */
	mutex_init(&rt_free_global.lock, MUTEX_DEFAULT, IPL_SOFTNET);
	SLIST_INIT(&rt_free_global.queue);
	rt_free_global.enqueued = false;

	rt_psref_class = psref_class_create("rtentry", IPL_SOFTNET);

	error = workqueue_create(&rt_free_global.wq, "rt_free",
	    rt_free_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	/* Serialization for in-place rtentry updates (rt_update_*). */
	mutex_init(&rt_update_global.lock, MUTEX_DEFAULT, IPL_SOFTNET);
	cv_init(&rt_update_global.cv, "rt_update");

	pool_init(&rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl",
	    NULL, IPL_SOFTNET);
	pool_init(&rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl",
	    NULL, IPL_SOFTNET);

	rn_init(); /* initialize all zeroes, all ones, mask table */
	rtbl_init();

	route_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    route_listener_cb, NULL);
}
499
/*
 * Invalidate all rtcaches by bumping the global generation counter;
 * caches holding an older snapshot are treated as stale on their
 * next lookup.  Must be called with the routing table write-locked,
 * which is what protects rtcache_generation.
 */
static void
rtcache_invalidate(void)
{

	RT_ASSERT_WLOCK();

	if (rtcache_debug())
		printf("%s: enter\n", __func__);

	rtcache_generation++;
}
511
#ifdef RT_DEBUG
/*
 * Log a one-line debug description of a rtentry: pointer, destination
 * key, gateway, flags and interface.
 */
static void
dump_rt(const struct rtentry *rt)
{
	char buf[512];

	log(LOG_DEBUG, "rt: ");
	log(LOG_DEBUG, "p=%p ", rt);
	if (rt->_rt_key == NULL) {
		log(LOG_DEBUG, "dst=(NULL) ");
	} else {
		sockaddr_format(rt->_rt_key, buf, sizeof(buf));
		log(LOG_DEBUG, "dst=%s ", buf);
	}
	if (rt->rt_gateway == NULL) {
		log(LOG_DEBUG, "gw=(NULL) ");
	} else {
		/*
		 * Bug fix: format the gateway itself; the original
		 * mistakenly re-formatted rt->_rt_key here, so the
		 * "gw=" field always repeated the destination.
		 */
		sockaddr_format(rt->rt_gateway, buf, sizeof(buf));
		log(LOG_DEBUG, "gw=%s ", buf);
	}
	log(LOG_DEBUG, "flags=%x ", rt->rt_flags);
	if (rt->rt_ifp == NULL) {
		log(LOG_DEBUG, "if=(NULL) ");
	} else {
		log(LOG_DEBUG, "if=%s ", rt->rt_ifp->if_xname);
	}
	log(LOG_DEBUG, "\n");
}
#endif /* RT_DEBUG */
541
/*
 * Packet routing routines. If success, refcnt of a returned rtentry
 * will be incremented. The caller has to rtfree it by itself.
 *
 * Look up the best-matching route for dst with the routing table
 * lock already held by the caller.
 *   report:  send an RTM_MISS routing-socket message on lookup failure
 *   wait_ok: the caller may sleep waiting for an in-flight update
 *   wlock:   the caller held the write lock (so reacquire it after
 *            waiting); otherwise the read lock is retaken
 */
struct rtentry *
rtalloc1_locked(const struct sockaddr *dst, int report, bool wait_ok,
    bool wlock)
{
	rtbl_t *rtbl;
	struct rtentry *rt;
	int s;

#ifdef NET_MPSAFE
retry:
#endif
	s = splsoftnet();
	rtbl = rt_gettable(dst->sa_family);
	if (rtbl == NULL)
		goto miss;

	rt = rt_matchaddr(rtbl, dst);
	if (rt == NULL)
		goto miss;

	/* Entries being deleted are invisible to lookups. */
	if (!ISSET(rt->rt_flags, RTF_UP))
		goto miss;

#ifdef NET_MPSAFE
	/*
	 * An entry under update cannot be handed out; either fail
	 * (softint / !wait_ok) or drop the lock, wait for the update
	 * to finish and retry the whole lookup.
	 */
	if (ISSET(rt->rt_flags, RTF_UPDATING) &&
	    /* XXX updater should be always able to acquire */
	    curlwp != rt_update_global.lwp) {
		if (!wait_ok || !rt_wait_ok())
			goto miss;
		RT_UNLOCK();
		splx(s);

		/* We can wait until the update is complete */
		rt_update_wait();

		if (wlock)
			RT_WLOCK();
		else
			RT_RLOCK();
		goto retry;
	}
#endif /* NET_MPSAFE */

	/* Success: hand out a referenced entry. */
	rt_ref(rt);
	RT_REFCNT_TRACE(rt);

	splx(s);
	return rt;
miss:
	rtstat.rts_unreach++;
	if (report) {
		struct rt_addrinfo info;

		memset(&info, 0, sizeof(info));
		info.rti_info[RTAX_DST] = dst;
		rt_missmsg(RTM_MISS, &info, 0, 0);
	}
	splx(s);
	return NULL;
}
606
607 struct rtentry *
608 rtalloc1(const struct sockaddr *dst, int report)
609 {
610 struct rtentry *rt;
611
612 RT_RLOCK();
613 rt = rtalloc1_locked(dst, report, true, false);
614 RT_UNLOCK();
615
616 return rt;
617 }
618
/* Take a reference to rt (atomically; may be called concurrently). */
static void
rt_ref(struct rtentry *rt)
{

	KASSERT(rt->rt_refcnt >= 0);
	atomic_inc_uint(&rt->rt_refcnt);
}
626
/*
 * Drop a reference to rt.  If the entry is being destroyed (not
 * RTF_UP) or updated (RTF_UPDATING), wake up any thread sleeping in
 * rt_wait_refcnt() for the count to drain.
 */
void
rt_unref(struct rtentry *rt)
{

	KASSERT(rt != NULL);
	KASSERTMSG(rt->rt_refcnt > 0, "refcnt=%d", rt->rt_refcnt);

	atomic_dec_uint(&rt->rt_refcnt);
	if (!ISSET(rt->rt_flags, RTF_UP) || ISSET(rt->rt_flags, RTF_UPDATING)) {
		mutex_enter(&rt_free_global.lock);
		cv_broadcast(&rt->rt_cv);
		mutex_exit(&rt_free_global.lock);
	}
}
641
/*
 * Return true if the current context may sleep, i.e. we are in
 * neither hard-interrupt nor soft-interrupt context.
 */
static bool
rt_wait_ok(void)
{

	KASSERT(!cpu_intr_p());
	return !cpu_softintr_p();
}
649
/*
 * Sleep until rt's reference count drops to cnt (0 for destruction,
 * 1 for an in-place update where the updater holds one reference).
 * Wakeups come from rt_unref() via rt->rt_cv.
 */
void
rt_wait_refcnt(const char *title, struct rtentry *rt, int cnt)
{
	mutex_enter(&rt_free_global.lock);
	while (rt->rt_refcnt > cnt) {
		dlog(LOG_DEBUG, "%s: %s waiting (refcnt=%d)\n",
		    __func__, title, rt->rt_refcnt);
		cv_wait(&rt->rt_cv, &rt_free_global.lock);
		dlog(LOG_DEBUG, "%s: %s waited (refcnt=%d)\n",
		    __func__, title, rt->rt_refcnt);
	}
	mutex_exit(&rt_free_global.lock);
}
663
/*
 * Wait for all psref readers of rt to drain (psref_target_destroy
 * blocks until then), then re-initialize the target so the entry can
 * be handed out through rtcaches again after an update.
 */
void
rt_wait_psref(struct rtentry *rt)
{

	psref_target_destroy(&rt->rt_psref, rt_psref_class);
	psref_target_init(&rt->rt_psref, rt_psref_class);
}
671
/*
 * Actually destroy a rtentry: wait for all remaining refcnt and psref
 * references to drain, release the attached ifaddr and sockaddrs, and
 * return the entry to its pool.  May sleep — softint callers must go
 * through the rt_free workqueue (see rt_free()).
 */
static void
_rt_free(struct rtentry *rt)
{
	struct ifaddr *ifa;

	/*
	 * Need to avoid a deadlock on rt_wait_refcnt of update
	 * and a conflict on psref_target_destroy of update.
	 */
#ifdef NET_MPSAFE
	rt_update_wait();
#endif

	RT_REFCNT_TRACE(rt);
	KASSERTMSG(rt->rt_refcnt >= 0, "refcnt=%d", rt->rt_refcnt);
	rt_wait_refcnt("free", rt, 0);
#ifdef NET_MPSAFE
	psref_target_destroy(&rt->rt_psref, rt_psref_class);
#endif

	rt_assert_inactive(rt);
	rttrash--;
	ifa = rt->rt_ifa;
	rt->rt_ifa = NULL;
	ifafree(ifa);
	rt->rt_ifp = NULL;
	cv_destroy(&rt->rt_cv);
	rt_destroy(rt);
	pool_put(&rtentry_pool, rt);
}
702
703 static void
704 rt_free_work(struct work *wk, void *arg)
705 {
706
707 for (;;) {
708 struct rtentry *rt;
709
710 mutex_enter(&rt_free_global.lock);
711 if ((rt = SLIST_FIRST(&rt_free_global.queue)) == NULL) {
712 rt_free_global.enqueued = false;
713 mutex_exit(&rt_free_global.lock);
714 return;
715 }
716 SLIST_REMOVE_HEAD(&rt_free_global.queue, rt_free);
717 mutex_exit(&rt_free_global.lock);
718 atomic_dec_uint(&rt->rt_refcnt);
719 _rt_free(rt);
720 }
721 }
722
/*
 * Drop the caller's reference and destroy rt.  When the caller may
 * sleep, destruction is done synchronously; from softint the entry
 * is queued to the rt_free workqueue instead (destruction must be
 * able to wait for remaining references).  The caller's reference is
 * kept while queued and released by rt_free_work().
 */
void
rt_free(struct rtentry *rt)
{

	KASSERT(rt->rt_refcnt > 0);
	if (rt_wait_ok()) {
		atomic_dec_uint(&rt->rt_refcnt);
		_rt_free(rt);
		return;
	}

	mutex_enter(&rt_free_global.lock);
	/* No need to add a reference here. */
	SLIST_INSERT_HEAD(&rt_free_global.queue, rt, rt_free);
	if (!rt_free_global.enqueued) {
		workqueue_enqueue(rt_free_global.wq, &rt_free_global.wk, NULL);
		rt_free_global.enqueued = true;
	}
	mutex_exit(&rt_free_global.lock);
}
743
#ifdef NET_MPSAFE
/*
 * Block until no in-place rtentry update (rt_update_prepare /
 * rt_update_finish section) is in progress.  May sleep; must not be
 * called from softint.
 */
static void
rt_update_wait(void)
{

	mutex_enter(&rt_update_global.lock);
	while (rt_update_global.ongoing) {
		dlog(LOG_DEBUG, "%s: waiting lwp=%p\n", __func__, curlwp);
		cv_wait(&rt_update_global.cv, &rt_update_global.lock);
		dlog(LOG_DEBUG, "%s: waited lwp=%p\n", __func__, curlwp);
	}
	mutex_exit(&rt_update_global.lock);
}
#endif
758
/*
 * Prepare to update rt in place: mark it RTF_UPDATING so lookups
 * either fail (softint) or wait, take the single global update token,
 * then wait until the caller's reference is the only one left and all
 * psref readers are gone.  Must be paired with rt_update_finish().
 * May sleep; not callable from softint.
 *
 * Returns ESRCH if the entry is already being destroyed, else 0.
 */
int
rt_update_prepare(struct rtentry *rt)
{

	dlog(LOG_DEBUG, "%s: updating rt=%p lwp=%p\n", __func__, rt, curlwp);

	RT_WLOCK();
	/* If the entry is being destroyed, don't proceed the update. */
	if (!ISSET(rt->rt_flags, RTF_UP)) {
		RT_UNLOCK();
		return ESRCH;
	}
	rt->rt_flags |= RTF_UPDATING;
	RT_UNLOCK();

	/* Only one update may be in flight at a time, system-wide. */
	mutex_enter(&rt_update_global.lock);
	while (rt_update_global.ongoing) {
		dlog(LOG_DEBUG, "%s: waiting ongoing updating rt=%p lwp=%p\n",
		    __func__, rt, curlwp);
		cv_wait(&rt_update_global.cv, &rt_update_global.lock);
		dlog(LOG_DEBUG, "%s: waited ongoing updating rt=%p lwp=%p\n",
		    __func__, rt, curlwp);
	}
	rt_update_global.ongoing = true;
	/* XXX need it to avoid rt_update_wait by updater itself. */
	rt_update_global.lwp = curlwp;
	mutex_exit(&rt_update_global.lock);

	/* Drain all references except the caller's own (cnt == 1). */
	rt_wait_refcnt("update", rt, 1);
	rt_wait_psref(rt);

	return 0;
}
792
/*
 * Complete an in-place update started by rt_update_prepare(): clear
 * RTF_UPDATING, release the global update token and wake any threads
 * blocked in rt_update_wait() or rt_update_prepare().
 */
void
rt_update_finish(struct rtentry *rt)
{

	RT_WLOCK();
	rt->rt_flags &= ~RTF_UPDATING;
	RT_UNLOCK();

	mutex_enter(&rt_update_global.lock);
	rt_update_global.ongoing = false;
	rt_update_global.lwp = NULL;
	cv_broadcast(&rt_update_global.cv);
	mutex_exit(&rt_update_global.lock);

	dlog(LOG_DEBUG, "%s: updated rt=%p lwp=%p\n", __func__, rt, curlwp);
}
809
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * On success with a non-NULL rtp, the affected rtentry (referenced)
 * is handed to the caller, who must release it.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
	const struct sockaddr *netmask, int flags, const struct sockaddr *src,
	struct rtentry **rtp)
{
	struct rtentry *rt;
	int error = 0;
	uint64_t *stat = NULL;
	struct rt_addrinfo info;
	struct ifaddr *ifa;
	struct psref psref;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet_psref(gateway, &psref)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	rt = rtalloc1(dst, 0);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong. If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
	if (!(flags & RTF_DONE) && rt &&
	    (sockaddr_cmp(src, rt->rt_gateway) != 0 || rt->rt_ifa != ifa))
		error = EINVAL;
	else {
		/* Reject redirects that point at one of our own addresses. */
		int s = pserialize_read_enter();
		struct ifaddr *_ifa;

		_ifa = ifa_ifwithaddr(gateway);
		if (_ifa != NULL)
			error = EHOSTUNREACH;
		pserialize_read_exit(s);
	}
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed. This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if (rt == NULL || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
		create:
			if (rt != NULL)
				rt_unref(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, &rt);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination. Should check about netmask!!!
			 */
#ifdef NET_MPSAFE
			KASSERT(!cpu_softintr_p());

			error = rt_update_prepare(rt);
			if (error == 0) {
#endif
				RT_WLOCK();
				error = rt_setgate(rt, gateway);
				if (error == 0) {
					rt->rt_flags |= RTF_MODIFIED;
					flags |= RTF_MODIFIED;
				}
				RT_UNLOCK();
#ifdef NET_MPSAFE
				rt_update_finish(rt);
			} else {
				/*
				 * If error != 0, the rtentry is being
				 * destroyed, so doing nothing doesn't
				 * matter.
				 */
			}
#endif
			stat = &rtstat.rts_newgateway;
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		/* Pass the referenced entry to the caller on success. */
		if (rtp != NULL && !error)
			*rtp = rt;
		else
			rt_unref(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	/* Always report the redirect on the routing socket. */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, error);
	ifa_release(ifa, &psref);
}
941
/*
 * Delete a route and generate a message.
 * It doesn't free a passed rt.
 */
static int
rtdeletemsg(struct rtentry *rt)
{
	int error;
	struct rt_addrinfo info;
	struct rtentry *retrt;

	/*
	 * Request the new route so that the entry is not actually
	 * deleted. That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_getkey(rt);
	info.rti_info[RTAX_NETMASK] = rt_mask(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	info.rti_flags = rt->rt_flags;
	error = rtrequest1(RTM_DELETE, &info, &retrt);

	/* Notify routing-socket listeners of the deletion (or failure). */
	rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);

	return error;
}
969
/*
 * Find the ifaddr to associate with a route to dst via gateway,
 * taking a psref on the result.  For interface routes the gateway is
 * one of our own addresses; for gateway routes the gateway must be
 * reachable on an attached network.  As a last resort the gateway is
 * looked up in the routing table, which must yield a non-gateway
 * route.  Returns NULL if no suitable address exists.
 *
 * NOTE(review): the rtalloc1_locked(..., wlock=true) call appears to
 * assume the caller holds the routing table write lock — confirm at
 * the call sites (rt_getifa path).
 */
static struct ifaddr *
ifa_ifwithroute_psref(int flags, const struct sockaddr *dst,
	const struct sockaddr *gateway, struct psref *psref)
{
	struct ifaddr *ifa = NULL;

	if ((flags & RTF_GATEWAY) == 0) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a pt to pt link
		 * we should search for the destination
		 * as our clue to the interface. Otherwise
		 * we can use the local address.
		 */
		if ((flags & RTF_HOST) && gateway->sa_family != AF_LINK)
			ifa = ifa_ifwithdstaddr_psref(dst, psref);
		if (ifa == NULL)
			ifa = ifa_ifwithaddr_psref(gateway, psref);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr_psref(gateway, psref);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet_psref(gateway, psref);
	if (ifa == NULL) {
		int s;
		struct rtentry *rt;

		/* Last resort: route to the gateway must be direct. */
		rt = rtalloc1_locked(gateway, 0, true, true);
		if (rt == NULL)
			return NULL;
		if (rt->rt_flags & RTF_GATEWAY) {
			rt_unref(rt);
			return NULL;
		}
		/*
		 * Just in case. May not need to do this workaround.
		 * Revisit when working on rtentry MP-ification.
		 */
		s = pserialize_read_enter();
		IFADDR_READER_FOREACH(ifa, rt->rt_ifp) {
			if (ifa == rt->rt_ifa)
				break;
		}
		if (ifa != NULL)
			ifa_acquire(ifa, psref);
		pserialize_read_exit(s);
		rt_unref(rt);
		if (ifa == NULL)
			return NULL;
	}
	/* Prefer an address in dst's family on the same interface. */
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *nifa;
		int s;

		s = pserialize_read_enter();
		nifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (nifa != NULL) {
			ifa_release(ifa, psref);
			ifa_acquire(nifa, psref);
			ifa = nifa;
		}
		pserialize_read_exit(s);
	}
	return ifa;
}
1040
1041 /*
1042 * If it suceeds and ret_nrt isn't NULL, refcnt of ret_nrt is incremented.
1043 * The caller has to rtfree it by itself.
1044 */
1045 int
1046 rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
1047 const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
1048 {
1049 struct rt_addrinfo info;
1050
1051 memset(&info, 0, sizeof(info));
1052 info.rti_flags = flags;
1053 info.rti_info[RTAX_DST] = dst;
1054 info.rti_info[RTAX_GATEWAY] = gateway;
1055 info.rti_info[RTAX_NETMASK] = netmask;
1056 return rtrequest1(req, &info, ret_nrt);
1057 }
1058
1059 /*
1060 * It's a utility function to add/remove a route to/from the routing table
1061 * and tell user processes the addition/removal on success.
1062 */
1063 int
1064 rtrequest_newmsg(const int req, const struct sockaddr *dst,
1065 const struct sockaddr *gateway, const struct sockaddr *netmask,
1066 const int flags)
1067 {
1068 int error;
1069 struct rtentry *ret_nrt = NULL;
1070
1071 KASSERT(req == RTM_ADD || req == RTM_DELETE);
1072
1073 error = rtrequest(req, dst, gateway, netmask, flags, &ret_nrt);
1074 if (error != 0)
1075 return error;
1076
1077 KASSERT(ret_nrt != NULL);
1078
1079 rt_newmsg(req, ret_nrt); /* tell user process */
1080 if (req == RTM_DELETE)
1081 rt_free(ret_nrt);
1082 else
1083 rt_unref(ret_nrt);
1084
1085 return 0;
1086 }
1087
/*
 * Resolve info->rti_ifp from the RTAX_IFP sockaddr (if any) and return it.
 * When an interface is found, the returned ifp holds a reference bound to
 * psref (via if_get_byindex).  Returns NULL when rti_ifp was already set
 * (nothing is acquired) or when no interface could be determined.
 */
static struct ifnet *
rt_getifp(struct rt_addrinfo *info, struct psref *psref)
{
	const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];

	if (info->rti_ifp != NULL)
		return NULL;
	/*
	 * ifp may be specified by sockaddr_dl when protocol address
	 * is ambiguous
	 */
	if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
		struct ifaddr *ifa;
		int s = pserialize_read_enter();

		ifa = ifa_ifwithnet(ifpaddr);
		if (ifa != NULL)
			info->rti_ifp = if_get_byindex(ifa->ifa_ifp->if_index,
			    psref);
		pserialize_read_exit(s);
	}

	return info->rti_ifp;
}
1112
/*
 * Look up the ifaddr to back a route, trying in order: an explicit
 * RTAX_IFA address, an address on info->rti_ifp matching the request,
 * then ifa_ifwithroute_psref() on dst/gateway.  On success, a reference
 * is held via psref and info->rti_ifa (and possibly info->rti_ifp) are
 * filled in.
 */
static struct ifaddr *
rt_getifa(struct rt_addrinfo *info, struct psref *psref)
{
	struct ifaddr *ifa = NULL;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
	int flags = info->rti_flags;
	const struct sockaddr *sa;

	if (info->rti_ifa == NULL && ifaaddr != NULL) {
		ifa = ifa_ifwithaddr_psref(ifaaddr, psref);
		if (ifa != NULL)
			goto got;
	}

	/* Fall back to the most specific address we were given. */
	sa = ifaaddr != NULL ? ifaaddr :
	    (gateway != NULL ? gateway : dst);
	if (sa != NULL && info->rti_ifp != NULL)
		ifa = ifaof_ifpforaddr_psref(sa, info->rti_ifp, psref);
	else if (dst != NULL && gateway != NULL)
		ifa = ifa_ifwithroute_psref(flags, dst, gateway, psref);
	else if (sa != NULL)
		ifa = ifa_ifwithroute_psref(flags, sa, sa, psref);
	if (ifa == NULL)
		return NULL;
got:
	if (ifa->ifa_getifa != NULL) {
		/* FIXME ifa_getifa is NOMPSAFE */
		ifa = (*ifa->ifa_getifa)(ifa, dst);
		if (ifa == NULL)
			return NULL;
		/*
		 * NOTE(review): the reference on the previous ifa is not
		 * released before psref is re-used for the new one --
		 * confirm whether ifa_getifa returning a different ifa
		 * can leak that reference.
		 */
		ifa_acquire(ifa, psref);
	}
	info->rti_ifa = ifa;
	if (info->rti_ifp == NULL)
		info->rti_ifp = ifa->ifa_ifp;
	return ifa;
}
1152
/*
 * If it succeeds and ret_nrt isn't NULL, refcnt of ret_nrt is incremented.
 * The caller has to rtfree it by itself.
 */
/*
 * Process an RTM_ADD/RTM_DELETE/RTM_GET request against the routing
 * table of dst's address family.  Runs under the RT write lock; the
 * lock is dropped early on paths that must call code taking other
 * locks (rt_timer_remove_all, lltable_prefix_free).
 */
int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
{
	int s = splsoftnet(), ss;
	int error = 0, rc;
	struct rtentry *rt;
	rtbl_t *rtbl;
	struct ifaddr *ifa = NULL;
	struct sockaddr_storage maskeddst;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
	int flags = info->rti_flags;
	struct psref psref_ifp, psref_ifa;
	int bound = 0;
	struct ifnet *ifp = NULL;
	bool need_to_release_ifa = true;
	bool need_unlock = true;
#define senderr(x) { error = x ; goto bad; }

	RT_WLOCK();

	bound = curlwp_bind();
	if ((rtbl = rt_gettable(dst->sa_family)) == NULL)
		senderr(ESRCH);
	/* Host routes carry no netmask. */
	if (flags & RTF_HOST)
		netmask = NULL;
	switch (req) {
	case RTM_DELETE:
		/* Mask the key so it matches how it was inserted. */
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		if ((rt = rt_lookup(rtbl, dst, netmask)) == NULL)
			senderr(ESRCH);
		if ((rt = rt_deladdr(rtbl, dst, netmask)) == NULL)
			senderr(ESRCH);
		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa)) {
			if (ifa->ifa_flags & IFA_ROUTE &&
			    rt_ifa_connected(rt, ifa)) {
				RT_DPRINTF("rt->_rt_key = %p, ifa = %p, "
				    "deleted IFA_ROUTE\n",
				    (void *)rt->_rt_key, (void *)ifa);
				ifa->ifa_flags &= ~IFA_ROUTE;
			}
			if (ifa->ifa_rtrequest)
				ifa->ifa_rtrequest(RTM_DELETE, rt, info);
			ifa = NULL;
		}
		rttrash++;
		if (ret_nrt) {
			*ret_nrt = rt;
			rt_ref(rt);
			RT_REFCNT_TRACE(rt);
		}
		rtcache_invalidate();
		/*
		 * Drop the RT lock before calling into code that takes
		 * other locks (timers, lltable).
		 */
		RT_UNLOCK();
		need_unlock = false;
		rt_timer_remove_all(rt);
#if defined(INET) || defined(INET6)
		if (netmask != NULL)
			lltable_prefix_free(dst->sa_family, dst, netmask, 0);
#endif
		if (ret_nrt == NULL) {
			/* Adjust the refcount */
			rt_ref(rt);
			RT_REFCNT_TRACE(rt);
			rt_free(rt);
		}
		break;

	case RTM_ADD:
		if (info->rti_ifa == NULL) {
			ifp = rt_getifp(info, &psref_ifp);
			ifa = rt_getifa(info, &psref_ifa);
			if (ifa == NULL)
				senderr(ENETUNREACH);
		} else {
			/* Caller should have a reference of ifa */
			ifa = info->rti_ifa;
			need_to_release_ifa = false;
		}
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		memset(rt, 0, sizeof(*rt));
		rt->rt_flags = RTF_UP | flags;
		LIST_INIT(&rt->rt_timer);

		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			rt_setkey(rt, (struct sockaddr *)&maskeddst, M_NOWAIT);
		} else {
			rt_setkey(rt, dst, M_NOWAIT);
		}
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (rt_getkey(rt) == NULL ||
		    rt_setgate(rt, gateway) != 0) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}

		rt_set_ifa(rt, ifa);
		if (info->rti_info[RTAX_TAG] != NULL) {
			const struct sockaddr *tag;
			tag = rt_settag(rt, info->rti_info[RTAX_TAG]);
			if (tag == NULL)
				senderr(ENOBUFS);
		}
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);

		/* Prefer the interface named by RTAX_IFP, if resolvable. */
		ss = pserialize_read_enter();
		if (info->rti_info[RTAX_IFP] != NULL) {
			struct ifaddr *ifa2;
			ifa2 = ifa_ifwithnet(info->rti_info[RTAX_IFP]);
			if (ifa2 != NULL)
				rt->rt_ifp = ifa2->ifa_ifp;
			else
				rt->rt_ifp = ifa->ifa_ifp;
		} else
			rt->rt_ifp = ifa->ifa_ifp;
		pserialize_read_exit(ss);
		cv_init(&rt->rt_cv, "rtentry");
		psref_target_init(&rt->rt_psref, rt_psref_class);

		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		rc = rt_addaddr(rtbl, rt, netmask);
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (rc != 0) {
			ifafree(ifa); /* for rt_set_ifa above */
			cv_destroy(&rt->rt_cv);
			rt_destroy(rt);
			pool_put(&rtentry_pool, rt);
			senderr(rc);
		}
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		if (need_to_release_ifa)
			ifa_release(ifa, &psref_ifa);
		ifa = NULL;
		if_put(ifp, &psref_ifp);
		ifp = NULL;
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt_ref(rt);
			RT_REFCNT_TRACE(rt);
		}
		rtcache_invalidate();
		RT_UNLOCK();
		need_unlock = false;
		break;
	case RTM_GET:
		if (netmask != NULL) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		if ((rt = rt_lookup(rtbl, dst, netmask)) == NULL)
			senderr(ESRCH);
		if (ret_nrt != NULL) {
			*ret_nrt = rt;
			rt_ref(rt);
			RT_REFCNT_TRACE(rt);
		}
		break;
	}
bad:
	if (need_to_release_ifa)
		ifa_release(ifa, &psref_ifa);
	if_put(ifp, &psref_ifp);
	curlwp_bindx(bound);
	if (need_unlock)
		RT_UNLOCK();
	splx(s);
	return error;
}
1339
/*
 * Replace rt's gateway address with a private copy of gate.
 * Caller must hold the RT write lock.  Returns ENOMEM if the new
 * sockaddr cannot be duplicated.
 */
int
rt_setgate(struct rtentry *rt, const struct sockaddr *gate)
{
	struct sockaddr *new, *old;

	KASSERT(RT_WLOCKED());
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);

	new = sockaddr_dup(gate, M_ZERO | M_NOWAIT);
	if (new == NULL)
		return ENOMEM;

	old = rt->rt_gateway;
	rt->rt_gateway = new;
	if (old != NULL)
		sockaddr_free(old);

	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);

	if (rt->rt_flags & RTF_GATEWAY) {
		struct rtentry *gwrt;

		gwrt = rtalloc1_locked(gate, 1, false, true);
		/*
		 * If we switched gateways, grab the MTU from the new
		 * gateway route if the current MTU is greater than the
		 * MTU of gateway.
		 * Note that, if the MTU of gateway is 0, we will reset the
		 * MTU of the route to run PMTUD again from scratch. XXX
		 */
		if (gwrt != NULL) {
			KASSERT(gwrt->_rt_key != NULL);
			RT_DPRINTF("gwrt->_rt_key = %p\n", gwrt->_rt_key);
			if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0 &&
			    rt->rt_rmx.rmx_mtu &&
			    rt->rt_rmx.rmx_mtu > gwrt->rt_rmx.rmx_mtu) {
				rt->rt_rmx.rmx_mtu = gwrt->rt_rmx.rmx_mtu;
			}
			rt_unref(gwrt);
		}
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
	return 0;
}
1387
/*
 * Pick the ifaddr (and its ifp) a route should use after a change
 * request, honoring the -ifp and -ifa hints in info.  On success the
 * ifaddr holds a reference via psref and *ifp holds one via psref_ifp;
 * on failure both are NULL and no references are held.
 */
static struct ifaddr *
rt_update_get_ifa(const struct rt_addrinfo info, const struct rtentry *rt,
    struct ifnet **ifp, struct psref *psref_ifp, struct psref *psref)
{
	struct ifaddr *ifa = NULL;

	*ifp = NULL;
	if (info.rti_info[RTAX_IFP] != NULL) {
		ifa = ifa_ifwithnet_psref(info.rti_info[RTAX_IFP], psref);
		if (ifa == NULL)
			goto next;
		*ifp = ifa->ifa_ifp;
		if_acquire(*ifp, psref_ifp);
		if (info.rti_info[RTAX_IFA] == NULL &&
		    info.rti_info[RTAX_GATEWAY] == NULL)
			goto next;
		/* Drop the link-level ifa; look up the protocol one. */
		ifa_release(ifa, psref);
		if (info.rti_info[RTAX_IFA] == NULL) {
			/* route change <dst> <gw> -ifp <if> */
			ifa = ifaof_ifpforaddr_psref(info.rti_info[RTAX_GATEWAY],
			    *ifp, psref);
		} else {
			/* route change <dst> -ifp <if> -ifa <addr> */
			ifa = ifa_ifwithaddr_psref(info.rti_info[RTAX_IFA], psref);
			if (ifa != NULL)
				goto out;
			ifa = ifaof_ifpforaddr_psref(info.rti_info[RTAX_IFA],
			    *ifp, psref);
		}
		goto out;
	}
next:
	if (info.rti_info[RTAX_IFA] != NULL) {
		/* route change <dst> <gw> -ifa <addr> */
		ifa = ifa_ifwithaddr_psref(info.rti_info[RTAX_IFA], psref);
		if (ifa != NULL)
			goto out;
	}
	if (info.rti_info[RTAX_GATEWAY] != NULL) {
		/* route change <dst> <gw> */
		ifa = ifa_ifwithroute_psref(rt->rt_flags, rt_getkey(rt),
		    info.rti_info[RTAX_GATEWAY], psref);
	}
out:
	/* Derive the ifp from the ifaddr if we did not have one yet. */
	if (ifa != NULL && *ifp == NULL) {
		*ifp = ifa->ifa_ifp;
		if_acquire(*ifp, psref_ifp);
	}
	/* No usable ifaddr: do not hand back a dangling ifp reference. */
	if (ifa == NULL && *ifp != NULL) {
		if_put(*ifp, psref_ifp);
		*ifp = NULL;
	}
	return ifa;
}
1442
/*
 * Apply an RTM_CHANGE-style update to rt: gateway, tag, backing
 * ifaddr/ifp, metrics and flags.  Holds the RT write lock for the
 * duration.  Returns 0 or an errno.
 */
int
rt_update(struct rtentry *rt, struct rt_addrinfo *info, void *rtm)
{
	int error = 0;
	struct ifnet *ifp = NULL, *new_ifp = NULL;
	struct ifaddr *ifa = NULL, *new_ifa;
	struct psref psref_ifa, psref_new_ifa, psref_ifp, psref_new_ifp;
	bool newgw, ifp_changed = false;

	RT_WLOCK();
	/*
	 * New gateway could require new ifaddr, ifp;
	 * flags may also be different; ifp may be specified
	 * by ll sockaddr when protocol address is ambiguous
	 */
	newgw = info->rti_info[RTAX_GATEWAY] != NULL &&
	    sockaddr_cmp(info->rti_info[RTAX_GATEWAY], rt->rt_gateway) != 0;

	if (newgw || info->rti_info[RTAX_IFP] != NULL ||
	    info->rti_info[RTAX_IFA] != NULL) {
		ifp = rt_getifp(info, &psref_ifp);
		/* info refers ifp so we need to keep a reference */
		ifa = rt_getifa(info, &psref_ifa);
		if (ifa == NULL) {
			error = ENETUNREACH;
			goto out;
		}
	}
	if (newgw) {
		error = rt_setgate(rt, info->rti_info[RTAX_GATEWAY]);
		if (error != 0)
			goto out;
	}
	if (info->rti_info[RTAX_TAG]) {
		const struct sockaddr *tag;
		tag = rt_settag(rt, info->rti_info[RTAX_TAG]);
		if (tag == NULL) {
			error = ENOBUFS;
			goto out;
		}
	}
	/*
	 * Determine the ifaddr/ifp that should back the route now,
	 * taking the -ifp/-ifa/gateway hints in info into account.
	 */
	new_ifa = rt_update_get_ifa(*info, rt, &new_ifp, &psref_new_ifp,
	    &psref_new_ifa);
	if (new_ifa != NULL) {
		ifa_release(ifa, &psref_ifa);
		ifa = new_ifa;
	}
	if (ifa) {
		struct ifaddr *oifa = rt->rt_ifa;
		if (oifa != ifa && !ifa_is_destroying(ifa) &&
		    new_ifp != NULL && !if_is_deactivated(new_ifp)) {
			/* Tell the old ifaddr the route is going away. */
			if (oifa && oifa->ifa_rtrequest)
				oifa->ifa_rtrequest(RTM_DELETE, rt, info);
			rt_replace_ifa(rt, ifa);
			rt->rt_ifp = new_ifp;
			ifp_changed = true;
		}
		if (new_ifa == NULL)
			ifa_release(ifa, &psref_ifa);
	}
	ifa_release(new_ifa, &psref_new_ifa);
	if (new_ifp && rt->rt_ifp != new_ifp && !if_is_deactivated(new_ifp)) {
		rt->rt_ifp = new_ifp;
		ifp_changed = true;
	}
	rt_setmetrics(rtm, rt);
	if (rt->rt_flags != info->rti_flags) {
		rt->rt_flags = (info->rti_flags & ~PRESERVED_RTF) |
		    (rt->rt_flags & PRESERVED_RTF);
	}
	if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
		rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, info);
#if defined(INET) || defined(INET6)
	/* Stale L2 entries for the prefix belong to the old interface. */
	if (ifp_changed && rt_mask(rt) != NULL)
		lltable_prefix_free(rt_getkey(rt)->sa_family, rt_getkey(rt),
		    rt_mask(rt), 0);
#else
	(void)ifp_changed; /* XXX gcc */
#endif
out:
	if_put(new_ifp, &psref_new_ifp);
	if_put(ifp, &psref_ifp);

	RT_UNLOCK();

	return error;
}
1535
1536 static void
1537 rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
1538 const struct sockaddr *netmask)
1539 {
1540 const char *netmaskp = &netmask->sa_data[0],
1541 *srcp = &src->sa_data[0];
1542 char *dstp = &dst->sa_data[0];
1543 const char *maskend = (char *)dst + MIN(netmask->sa_len, src->sa_len);
1544 const char *srcend = (char *)dst + src->sa_len;
1545
1546 dst->sa_len = src->sa_len;
1547 dst->sa_family = src->sa_family;
1548
1549 while (dstp < maskend)
1550 *dstp++ = *srcp++ & *netmaskp++;
1551 if (dstp < srcend)
1552 memset(dstp, 0, (size_t)(srcend - dstp));
1553 }
1554
1555 /*
1556 * Inform the routing socket of a route change.
1557 */
1558 void
1559 rt_newmsg(const int cmd, const struct rtentry *rt)
1560 {
1561 struct rt_addrinfo info;
1562
1563 memset((void *)&info, 0, sizeof(info));
1564 info.rti_info[RTAX_DST] = rt_getkey(rt);
1565 info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
1566 info.rti_info[RTAX_NETMASK] = rt_mask(rt);
1567 if (rt->rt_ifp) {
1568 info.rti_info[RTAX_IFP] = rt->rt_ifp->if_dl->ifa_addr;
1569 info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
1570 }
1571
1572 rt_missmsg(cmd, &info, rt->rt_flags, 0);
1573 }
1574
1575 /*
1576 * Set up or tear down a routing table entry, normally
1577 * for an interface.
1578 */
1579 int
1580 rtinit(struct ifaddr *ifa, int cmd, int flags)
1581 {
1582 struct rtentry *rt;
1583 struct sockaddr *dst, *odst;
1584 struct sockaddr_storage maskeddst;
1585 struct rtentry *nrt = NULL;
1586 int error;
1587 struct rt_addrinfo info;
1588
1589 dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
1590 if (cmd == RTM_DELETE) {
1591 if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
1592 /* Delete subnet route for this interface */
1593 odst = dst;
1594 dst = (struct sockaddr *)&maskeddst;
1595 rt_maskedcopy(odst, dst, ifa->ifa_netmask);
1596 }
1597 if ((rt = rtalloc1(dst, 0)) != NULL) {
1598 if (rt->rt_ifa != ifa) {
1599 rt_unref(rt);
1600 return (flags & RTF_HOST) ? EHOSTUNREACH
1601 : ENETUNREACH;
1602 }
1603 rt_unref(rt);
1604 }
1605 }
1606 memset(&info, 0, sizeof(info));
1607 info.rti_ifa = ifa;
1608 info.rti_flags = flags | ifa->ifa_flags;
1609 info.rti_info[RTAX_DST] = dst;
1610 info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1611
1612 /*
1613 * XXX here, it seems that we are assuming that ifa_netmask is NULL
1614 * for RTF_HOST. bsdi4 passes NULL explicitly (via intermediate
1615 * variable) when RTF_HOST is 1. still not sure if i can safely
1616 * change it to meet bsdi4 behavior.
1617 */
1618 if (cmd != RTM_LLINFO_UPD)
1619 info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
1620 error = rtrequest1((cmd == RTM_LLINFO_UPD) ? RTM_GET : cmd, &info,
1621 &nrt);
1622 if (error != 0)
1623 return error;
1624
1625 rt = nrt;
1626 RT_REFCNT_TRACE(rt);
1627 switch (cmd) {
1628 case RTM_DELETE:
1629 rt_newmsg(cmd, rt);
1630 rt_free(rt);
1631 break;
1632 case RTM_LLINFO_UPD:
1633 if (cmd == RTM_LLINFO_UPD && ifa->ifa_rtrequest != NULL)
1634 ifa->ifa_rtrequest(RTM_LLINFO_UPD, rt, &info);
1635 rt_newmsg(RTM_CHANGE, rt);
1636 rt_unref(rt);
1637 break;
1638 case RTM_ADD:
1639 /*
1640 * XXX it looks just reverting rt_ifa replaced by ifa_rtrequest
1641 * called via rtrequest1. Can we just prevent the replacement
1642 * somehow and remove the following code? And also doesn't
1643 * calling ifa_rtrequest(RTM_ADD) replace rt_ifa again?
1644 */
1645 if (rt->rt_ifa != ifa) {
1646 printf("rtinit: wrong ifa (%p) was (%p)\n", ifa,
1647 rt->rt_ifa);
1648 #ifdef NET_MPSAFE
1649 KASSERT(!cpu_softintr_p());
1650
1651 error = rt_update_prepare(rt);
1652 if (error == 0) {
1653 #endif
1654 if (rt->rt_ifa->ifa_rtrequest != NULL) {
1655 rt->rt_ifa->ifa_rtrequest(RTM_DELETE,
1656 rt, &info);
1657 }
1658 rt_replace_ifa(rt, ifa);
1659 rt->rt_ifp = ifa->ifa_ifp;
1660 if (ifa->ifa_rtrequest != NULL)
1661 ifa->ifa_rtrequest(RTM_ADD, rt, &info);
1662 #ifdef NET_MPSAFE
1663 rt_update_finish(rt);
1664 } else {
1665 /*
1666 * If error != 0, the rtentry is being
1667 * destroyed, so doing nothing doesn't
1668 * matter.
1669 */
1670 }
1671 #endif
1672 }
1673 rt_newmsg(cmd, rt);
1674 rt_unref(rt);
1675 RT_REFCNT_TRACE(rt);
1676 break;
1677 }
1678 return error;
1679 }
1680
1681 /*
1682 * Create a local route entry for the address.
1683 * Announce the addition of the address and the route to the routing socket.
1684 */
/*
 * Ensure ifa's address has a loopbacked RTF_HOST|RTF_LOCAL route and
 * announce the addition on the routing socket.  Returns the rtrequest1
 * error (0 if the route already existed).
 */
int
rt_ifa_addlocal(struct ifaddr *ifa)
{
	struct rtentry *rt;
	int e;

	/* If there is no loopback entry, allocate one. */
	rt = rtalloc1(ifa->ifa_addr, 0);
#ifdef RT_DEBUG
	if (rt != NULL)
		dump_rt(rt);
#endif
	if (rt == NULL || (rt->rt_flags & RTF_HOST) == 0 ||
	    (rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0)
	{
		struct rt_addrinfo info;
		struct rtentry *nrt;

		memset(&info, 0, sizeof(info));
		info.rti_flags = RTF_HOST | RTF_LOCAL;
		info.rti_info[RTAX_DST] = ifa->ifa_addr;
		info.rti_info[RTAX_GATEWAY] =
		    (const struct sockaddr *)ifa->ifa_ifp->if_sadl;
		info.rti_ifa = ifa;
		nrt = NULL;
		e = rtrequest1(RTM_ADD, &info, &nrt);
		/* Make sure the new route points at the given ifaddr. */
		if (nrt && ifa != nrt->rt_ifa)
			rt_replace_ifa(nrt, ifa);
		rt_newaddrmsg(RTM_ADD, ifa, e, nrt);
		if (nrt != NULL) {
#ifdef RT_DEBUG
			dump_rt(nrt);
#endif
			rt_unref(nrt);
			RT_REFCNT_TRACE(nrt);
		}
	} else {
		/* A suitable loopback host route already exists. */
		e = 0;
		rt_newaddrmsg(RTM_NEWADDR, ifa, 0, NULL);
	}
	if (rt != NULL)
		rt_unref(rt);
	return e;
}
1729
1730 /*
1731 * Remove the local route entry for the address.
1732 * Announce the removal of the address and the route to the routing socket.
1733 */
/*
 * Remove (or re-point to alt_ifa) the loopbacked host route for ifa's
 * address and announce the removal on the routing socket.  Returns the
 * rtdeletemsg error, or 0.
 */
int
rt_ifa_remlocal(struct ifaddr *ifa, struct ifaddr *alt_ifa)
{
	struct rtentry *rt;
	int e = 0;

	rt = rtalloc1(ifa->ifa_addr, 0);

	/*
	 * Before deleting, check if a corresponding loopbacked
	 * host route surely exists. With this check, we can avoid
	 * deleting an interface direct route whose destination is
	 * the same as the address being removed. This can happen
	 * when removing a subnet-router anycast address on an
	 * interface attached to a shared medium.
	 */
	if (rt != NULL &&
	    (rt->rt_flags & RTF_HOST) &&
	    (rt->rt_ifp->if_flags & IFF_LOOPBACK))
	{
		/* If we cannot replace the route's ifaddr with the equivalent
		 * ifaddr of another interface, I believe it is safest to
		 * delete the route.
		 */
		if (alt_ifa == NULL) {
			e = rtdeletemsg(rt);
			if (e == 0) {
				rt_unref(rt);
				rt_free(rt);
				rt = NULL;
			}
			rt_newaddrmsg(RTM_DELADDR, ifa, 0, NULL);
		} else {
			/* Keep the route, just back it by alt_ifa. */
			rt_replace_ifa(rt, alt_ifa);
			rt_newmsg(RTM_CHANGE, rt);
		}
	} else
		rt_newaddrmsg(RTM_DELADDR, ifa, 0, NULL);
	if (rt != NULL)
		rt_unref(rt);
	return e;
}
1776
/*
 * Route timer routines.  These routines allow functions to be called
 * for various routes at any time.  This is useful in supporting
 * path MTU discovery and redirect route deletion.
 *
 * This is similar to some BSDI internal functions, but it provides
 * for multiple queues for efficiency's sake...
 */
1785
/* All active route timer queues; list is manipulated under the RT lock. */
LIST_HEAD(, rttimer_queue) rttimer_queue_head;
/* Set once rt_timer_init() has run (lazily, on first queue creation). */
static int rt_init_done = 0;
1788
1789 /*
1790 * Some subtle order problems with domain initialization mean that
1791 * we cannot count on this being run from rt_init before various
1792 * protocol initializations are done. Therefore, we make sure
1793 * that this is run when the first queue is added...
1794 */
1795
1796 static void rt_timer_work(struct work *, void *);
1797
/*
 * One-time setup of the route timer machinery: the RT lock, the queue
 * list, the driving callout and the workqueue that services it.
 * Called lazily from rt_timer_queue_create() (see the comment above
 * about domain initialization order).
 */
static void
rt_timer_init(void)
{
	int error;

	assert(rt_init_done == 0);

	/* XXX should be in rt_init */
	rw_init(&rt_lock);

	LIST_INIT(&rttimer_queue_head);
	callout_init(&rt_timer_ch, CALLOUT_MPSAFE);
	error = workqueue_create(&rt_timer_wq, "rt_timer",
	    rt_timer_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);
	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
	rt_init_done = 1;
}
1817
1818 struct rttimer_queue *
1819 rt_timer_queue_create(u_int timeout)
1820 {
1821 struct rttimer_queue *rtq;
1822
1823 if (rt_init_done == 0)
1824 rt_timer_init();
1825
1826 R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
1827 if (rtq == NULL)
1828 return NULL;
1829 memset(rtq, 0, sizeof(*rtq));
1830
1831 rtq->rtq_timeout = timeout;
1832 TAILQ_INIT(&rtq->rtq_head);
1833 RT_WLOCK();
1834 LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);
1835 RT_UNLOCK();
1836
1837 return rtq;
1838 }
1839
1840 void
1841 rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
1842 {
1843
1844 rtq->rtq_timeout = timeout;
1845 }
1846
/*
 * Fire and drain every timer on the queue.  The RT lock is dropped
 * around each callback; the callback receives a reference on the
 * rtentry and is responsible for releasing it.
 */
static void
rt_timer_queue_remove_all(struct rttimer_queue *rtq)
{
	struct rttimer *r;

	RT_ASSERT_WLOCK();

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		rt_ref(r->rtt_rt); /* XXX */
		RT_REFCNT_TRACE(r->rtt_rt);
		/* Callback may take other locks; cannot hold RT lock. */
		RT_UNLOCK();
		(*r->rtt_func)(r->rtt_rt, r);
		pool_put(&rttimer_pool, r);
		RT_WLOCK();
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_remove_all: "
			    "rtq_count reached 0\n");
	}
}
1870
1871 void
1872 rt_timer_queue_destroy(struct rttimer_queue *rtq)
1873 {
1874
1875 RT_WLOCK();
1876 rt_timer_queue_remove_all(rtq);
1877 LIST_REMOVE(rtq, rtq_link);
1878 RT_UNLOCK();
1879
1880 /*
1881 * Caller is responsible for freeing the rttimer_queue structure.
1882 */
1883 }
1884
1885 unsigned long
1886 rt_timer_count(struct rttimer_queue *rtq)
1887 {
1888 return rtq->rtq_count;
1889 }
1890
/*
 * Cancel (without firing) all timers attached to rt, removing them
 * from their queues.
 */
static void
rt_timer_remove_all(struct rtentry *rt)
{
	struct rttimer *r;

	RT_WLOCK();
	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		pool_put(&rttimer_pool, r);
	}
	RT_UNLOCK();
}
1908
/*
 * Arm a timer on rt that fires func after queue's timeout elapses.
 * If rt already has a timer with the same callback, that entry is
 * re-armed (re-stamped and moved to the queue's tail) rather than
 * adding a second one.  Returns ENOBUFS if a new entry cannot be
 * allocated.
 */
int
rt_timer_add(struct rtentry *rt,
	void (*func)(struct rtentry *, struct rttimer *),
	struct rttimer_queue *queue)
{
	struct rttimer *r;

	KASSERT(func != NULL);
	RT_WLOCK();
	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
		if (r->rtt_func == func)
			break;
	}
	if (r != NULL) {
		/* Reuse the existing entry after unhooking it. */
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_add: rtq_count reached 0\n");
	} else {
		r = pool_get(&rttimer_pool, PR_NOWAIT);
		if (r == NULL) {
			RT_UNLOCK();
			return ENOBUFS;
		}
	}

	memset(r, 0, sizeof(*r));

	r->rtt_rt = rt;
	r->rtt_time = time_uptime;
	r->rtt_func = func;
	r->rtt_queue = queue;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	RT_UNLOCK();

	return 0;
}
1955
1956 static void
1957 rt_timer_work(struct work *wk, void *arg)
1958 {
1959 struct rttimer_queue *rtq;
1960 struct rttimer *r;
1961
1962 RT_WLOCK();
1963 LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
1964 while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
1965 (r->rtt_time + rtq->rtq_timeout) < time_uptime) {
1966 LIST_REMOVE(r, rtt_link);
1967 TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
1968 /*
1969 * Take a reference to avoid the rtentry is freed
1970 * accidentally after RT_UNLOCK. The callback
1971 * (rtt_func) must rt_unref it by itself.
1972 */
1973 rt_ref(r->rtt_rt);
1974 RT_REFCNT_TRACE(r->rtt_rt);
1975 RT_UNLOCK();
1976 (*r->rtt_func)(r->rtt_rt, r);
1977 pool_put(&rttimer_pool, r);
1978 RT_WLOCK();
1979 if (rtq->rtq_count > 0)
1980 rtq->rtq_count--;
1981 else
1982 printf("rt_timer_timer: rtq_count reached 0\n");
1983 }
1984 }
1985 RT_UNLOCK();
1986
1987 callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
1988 }
1989
/*
 * Callout handler: defer the actual timer scan to the workqueue so
 * that it runs in thread context rather than softint context.
 */
static void
rt_timer_timer(void *arg)
{

	workqueue_enqueue(rt_timer_wq, &rt_timer_wk, NULL);
}
1996
/*
 * Fill an empty route cache from the routing table; flag is passed to
 * rtalloc1() as its "clone" argument.  Only an RTF_UP entry is cached.
 * Returns the cached entry (no extra reference for the caller) or NULL.
 */
static struct rtentry *
_rtcache_init(struct route *ro, int flag)
{
	struct rtentry *rt;

	rtcache_invariants(ro);
	KASSERT(ro->_ro_rt == NULL);

	if (rtcache_getdst(ro) == NULL)
		return NULL;
	rt = rtalloc1(rtcache_getdst(ro), flag);
	if (rt != NULL) {
		RT_RLOCK();
		if (ISSET(rt->rt_flags, RTF_UP)) {
			ro->_ro_rt = rt;
			ro->ro_rtcache_generation = rtcache_generation;
			rtcache_ref(rt, ro);
		}
		RT_UNLOCK();
		rt_unref(rt);
	}

	rtcache_invariants(ro);
	return ro->_ro_rt;
}
2022
struct rtentry *
rtcache_init(struct route *ro)
{

	/* Populate the cache, allowing route cloning. */
	return _rtcache_init(ro, 1);
}
2029
struct rtentry *
rtcache_init_noclone(struct route *ro)
{

	/* Populate the cache without route cloning. */
	return _rtcache_init(ro, 0);
}
2036
2037 struct rtentry *
2038 rtcache_update(struct route *ro, int clone)
2039 {
2040
2041 ro->_ro_rt = NULL;
2042 return _rtcache_init(ro, clone);
2043 }
2044
/*
 * Copy old_ro's destination and cached rtentry pointer into new_ro.
 * The temporary reference taken on the old cache's entry is dropped
 * before returning; new_ro shares the rtentry pointer and the current
 * cache generation.
 */
void
rtcache_copy(struct route *new_ro, struct route *old_ro)
{
	struct rtentry *rt;
	int ret;

	KASSERT(new_ro != old_ro);
	rtcache_invariants(new_ro);
	rtcache_invariants(old_ro);

	rt = rtcache_validate(old_ro);

	if (rtcache_getdst(old_ro) == NULL)
		goto out;
	ret = rtcache_setdst(new_ro, rtcache_getdst(old_ro));
	if (ret != 0)
		goto out;

	RT_RLOCK();
	new_ro->_ro_rt = rt;
	new_ro->ro_rtcache_generation = rtcache_generation;
	RT_UNLOCK();
	rtcache_invariants(new_ro);
out:
	rtcache_unref(rt, old_ro);
	return;
}
2072
#if defined(RT_DEBUG) && defined(NET_MPSAFE)
/* Debug aid: log psref acquire/release events for a cached route. */
static void
rtcache_trace(const char *func, struct rtentry *rt, struct route *ro)
{
	char dst[64];

	sockaddr_format(ro->ro_sa, dst, 64);
	printf("trace: %s:\tdst=%s cpu=%d lwp=%p psref=%p target=%p\n", func, dst,
	    cpu_index(curcpu()), curlwp, &ro->ro_psref, &rt->rt_psref);
}
#define RTCACHE_PSREF_TRACE(rt, ro) rtcache_trace(__func__, (rt), (ro))
#else
#define RTCACHE_PSREF_TRACE(rt, ro) do {} while (0)
#endif
2087
/*
 * Take a psref reference on rt on behalf of the cache ro (NET_MPSAFE
 * only; otherwise a no-op).  The current LWP is bound while the
 * reference is held; rtcache_unref() undoes both.
 */
static void
rtcache_ref(struct rtentry *rt, struct route *ro)
{

	KASSERT(rt != NULL);

#ifdef NET_MPSAFE
	RTCACHE_PSREF_TRACE(rt, ro);
	ro->ro_bound = curlwp_bind();
	psref_acquire(&ro->ro_psref, &rt->rt_psref, rt_psref_class);
#endif
}
2100
/*
 * Release the reference taken by rtcache_ref() and unbind the LWP.
 * Safe to call with rt == NULL (no-op).
 */
void
rtcache_unref(struct rtentry *rt, struct route *ro)
{

	if (rt == NULL)
		return;

#ifdef NET_MPSAFE
	psref_release(&ro->ro_psref, &rt->rt_psref, rt_psref_class);
	curlwp_bindx(ro->ro_bound);
	RTCACHE_PSREF_TRACE(rt, ro);
#endif
}
2114
/*
 * Return the cached rtentry if it is still usable, otherwise NULL.
 * A non-NULL result holds a reference (NET_MPSAFE) that the caller
 * releases with rtcache_unref().  If the entry is being updated
 * (RTF_UPDATING) and waiting is permitted, block until the update
 * completes and retry.
 */
struct rtentry *
rtcache_validate(struct route *ro)
{
	struct rtentry *rt = NULL;

#ifdef NET_MPSAFE
retry:
#endif
	rtcache_invariants(ro);
	RT_RLOCK();
	if (ro->ro_rtcache_generation != rtcache_generation) {
		/* The cache is invalidated */
		rt = NULL;
		goto out;
	}

	rt = ro->_ro_rt;
	if (rt == NULL)
		goto out;

	if ((rt->rt_flags & RTF_UP) == 0) {
		rt = NULL;
		goto out;
	}
#ifdef NET_MPSAFE
	if (ISSET(rt->rt_flags, RTF_UPDATING)) {
		if (rt_wait_ok()) {
			RT_UNLOCK();

			/* We can wait until the update is complete */
			rt_update_wait();
			goto retry;
		} else {
			rt = NULL;
		}
	} else
#endif
		rtcache_ref(rt, ro);
out:
	RT_UNLOCK();
	return rt;
}
2157
/*
 * Look up dst via the route cache: reuse the cached entry when the
 * destination matches and the entry is still valid (a hit, *hitp = 1),
 * otherwise re-key the cache to dst and repopulate it from the routing
 * table (a miss, *hitp = 0).
 */
struct rtentry *
rtcache_lookup2(struct route *ro, const struct sockaddr *dst,
    int clone, int *hitp)
{
	const struct sockaddr *odst;
	struct rtentry *rt = NULL;

	odst = rtcache_getdst(ro);
	if (odst == NULL)
		goto miss;

	if (sockaddr_cmp(odst, dst) != 0) {
		/* Cached destination differs; drop the stale cache. */
		rtcache_free(ro);
		goto miss;
	}

	rt = rtcache_validate(ro);
	if (rt == NULL) {
		ro->_ro_rt = NULL;
		goto miss;
	}

	rtcache_invariants(ro);

	if (hitp != NULL)
		*hitp = 1;
	return rt;
miss:
	if (hitp != NULL)
		*hitp = 0;
	if (rtcache_setdst(ro, dst) == 0)
		rt = _rtcache_init(ro, clone);

	rtcache_invariants(ro);

	return rt;
}
2195
2196 void
2197 rtcache_free(struct route *ro)
2198 {
2199
2200 ro->_ro_rt = NULL;
2201 if (ro->ro_sa != NULL) {
2202 sockaddr_free(ro->ro_sa);
2203 ro->ro_sa = NULL;
2204 }
2205 rtcache_invariants(ro);
2206 }
2207
/*
 * Set the cache's destination to a private copy of sa, invalidating
 * any cached rtentry.  An existing sockaddr of the same family is
 * overwritten in place; otherwise it is freed and a new one duplicated.
 * Returns ENOMEM if the duplicate cannot be allocated.
 */
int
rtcache_setdst(struct route *ro, const struct sockaddr *sa)
{
	KASSERT(sa != NULL);

	rtcache_invariants(ro);
	if (ro->ro_sa != NULL) {
		if (ro->ro_sa->sa_family == sa->sa_family) {
			ro->_ro_rt = NULL;
			sockaddr_copy(ro->ro_sa, ro->ro_sa->sa_len, sa);
			rtcache_invariants(ro);
			return 0;
		}
		/* free ro_sa, wrong family */
		rtcache_free(ro);
	}

	KASSERT(ro->_ro_rt == NULL);

	if ((ro->ro_sa = sockaddr_dup(sa, M_ZERO | M_NOWAIT)) == NULL) {
		rtcache_invariants(ro);
		return ENOMEM;
	}
	rtcache_invariants(ro);
	return 0;
}
2234
2235 const struct sockaddr *
2236 rt_settag(struct rtentry *rt, const struct sockaddr *tag)
2237 {
2238 if (rt->rt_tag != tag) {
2239 if (rt->rt_tag != NULL)
2240 sockaddr_free(rt->rt_tag);
2241 rt->rt_tag = sockaddr_dup(tag, M_ZERO | M_NOWAIT);
2242 }
2243 return rt->rt_tag;
2244 }
2245
2246 struct sockaddr *
2247 rt_gettag(const struct rtentry *rt)
2248 {
2249 return rt->rt_tag;
2250 }
2251
2252 int
2253 rt_check_reject_route(const struct rtentry *rt, const struct ifnet *ifp)
2254 {
2255
2256 if ((rt->rt_flags & RTF_REJECT) != 0) {
2257 /* Mimic looutput */
2258 if (ifp->if_flags & IFF_LOOPBACK)
2259 return (rt->rt_flags & RTF_HOST) ?
2260 EHOSTUNREACH : ENETUNREACH;
2261 else if (rt->rt_rmx.rmx_expire == 0 ||
2262 time_uptime < rt->rt_rmx.rmx_expire)
2263 return (rt->rt_flags & RTF_GATEWAY) ?
2264 EHOSTUNREACH : EHOSTDOWN;
2265 }
2266
2267 return 0;
2268 }
2269
/*
 * Repeatedly search the routing table of the given family for entries
 * matching predicate f and delete them via rtrequest(RTM_DELETE).
 * Each iteration takes a reference and drops the RT lock before the
 * delete; ESRCH (already deleted by someone else) is tolerated.
 */
void
rt_delete_matched_entries(sa_family_t family, int (*f)(struct rtentry *, void *),
    void *v)
{

	for (;;) {
		int s;
		int error;
		struct rtentry *rt, *retrt = NULL;

		RT_RLOCK();
		s = splsoftnet();
		rt = rtbl_search_matched_entry(family, f, v);
		if (rt == NULL) {
			splx(s);
			RT_UNLOCK();
			return;
		}
		rt_ref(rt);
		splx(s);
		RT_UNLOCK();

		error = rtrequest(RTM_DELETE, rt_getkey(rt), rt->rt_gateway,
		    rt_mask(rt), rt->rt_flags, &retrt);
		if (error == 0) {
			KASSERT(retrt == rt);
			KASSERT((retrt->rt_flags & RTF_UP) == 0);
			retrt->rt_ifp = NULL;
			rt_unref(rt);
			rt_free(retrt);
		} else if (error == ESRCH) {
			/* Someone deleted the entry already. */
			rt_unref(rt);
		} else {
			log(LOG_ERR, "%s: unable to delete rtentry @ %p, "
			    "error = %d\n", rt->rt_ifp->if_xname, rt, error);
			/* XXX how to treat this case? */
		}
	}
}
2310
/*
 * Apply f(rt, v) to every routing entry of the given address family.
 * A non-zero return from f aborts the walk and is returned here.
 * Caller must hold the routing lock; see rt_walktree() for the
 * locking wrapper.
 */
static int
rt_walktree_locked(sa_family_t family, int (*f)(struct rtentry *, void *),
    void *v)
{

	return rtbl_walktree(family, f, v);
}
2318
2319 int
2320 rt_walktree(sa_family_t family, int (*f)(struct rtentry *, void *), void *v)
2321 {
2322 int error;
2323
2324 RT_RLOCK();
2325 error = rt_walktree_locked(family, f, v);
2326 RT_UNLOCK();
2327
2328 return error;
2329 }
2330
2331 #ifdef DDB
2332
2333 #include <machine/db_machdep.h>
2334 #include <ddb/db_interface.h>
2335 #include <ddb/db_output.h>
2336
2337 #define rt_expire rt_rmx.rmx_expire
2338
2339 static void
2340 db_print_sa(const struct sockaddr *sa)
2341 {
2342 int len;
2343 const u_char *p;
2344
2345 if (sa == NULL) {
2346 db_printf("[NULL]");
2347 return;
2348 }
2349
2350 p = (const u_char *)sa;
2351 len = sa->sa_len;
2352 db_printf("[");
2353 while (len > 0) {
2354 db_printf("%d", *p);
2355 p++; len--;
2356 if (len) db_printf(",");
2357 }
2358 db_printf("]\n");
2359 }
2360
/*
 * Dump an ifaddr for ddb: its address, destination address, netmask,
 * and flags/refcount/metric.  A NULL pointer prints nothing.
 */
static void
db_print_ifa(struct ifaddr *ifa)
{
	if (ifa == NULL)
		return;
	db_printf(" ifa_addr=");
	db_print_sa(ifa->ifa_addr);
	db_printf(" ifa_dsta=");
	db_print_sa(ifa->ifa_dstaddr);
	db_printf(" ifa_mask=");
	db_print_sa(ifa->ifa_netmask);
	db_printf(" flags=0x%x,refcnt=%d,metric=%d\n",
	    ifa->ifa_flags,
	    ifa->ifa_refcnt,
	    ifa->ifa_metric);
}
2377
/*
 * Function to pass to rt_walktree().
 * Dumps one rtentry for ddb: flags/refcnt/use/expire, the key, mask
 * and gateway sockaddrs, the associated interface and ifaddr, and the
 * gwroute/llinfo pointers.  The walk argument "w" is unused.
 * Return non-zero error to abort walk.
 */
static int
db_show_rtentry(struct rtentry *rt, void *w)
{
	db_printf("rtentry=%p", rt);

	/* rt_expire is rt_rmx.rmx_expire (see the #define above). */
	db_printf(" flags=0x%x refcnt=%d use=%"PRId64" expire=%"PRId64"\n",
	    rt->rt_flags, rt->rt_refcnt,
	    rt->rt_use, (uint64_t)rt->rt_expire);

	db_printf(" key="); db_print_sa(rt_getkey(rt));
	db_printf(" mask="); db_print_sa(rt_mask(rt));
	db_printf(" gw="); db_print_sa(rt->rt_gateway);

	db_printf(" ifp=%p ", rt->rt_ifp);
	if (rt->rt_ifp)
		db_printf("(%s)", rt->rt_ifp->if_xname);
	else
		db_printf("(NULL)");

	db_printf(" ifa=%p\n", rt->rt_ifa);
	db_print_ifa(rt->rt_ifa);

	db_printf(" gwroute=%p llinfo=%p\n",
	    rt->rt_gwroute, rt->rt_llinfo);

	/* 0 keeps the tree walk going. */
	return 0;
}
2409
/*
 * Function to print the route trees.
 * Use this from ddb: "show routes"
 *
 * NOTE(review): despite the original "all the route trees" wording,
 * only the AF_INET tree is walked here; other families (e.g.
 * AF_INET6) are not printed.  The addr/have_addr/count/modif
 * arguments from ddb are ignored.
 */
void
db_show_routes(db_expr_t addr, bool have_addr,
    db_expr_t count, const char *modif)
{

	/* Taking RT_LOCK will fail if LOCKDEBUG is enabled. */
	rt_walktree_locked(AF_INET, db_show_rtentry, NULL);
}
2422 #endif
2423