route.c revision 1.221 1 /* $NetBSD: route.c,v 1.221 2019/09/19 04:46:29 ozaki-r Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Kevin M. Lahey of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the project nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 */
61
62 /*
63 * Copyright (c) 1980, 1986, 1991, 1993
64 * The Regents of the University of California. All rights reserved.
65 *
66 * Redistribution and use in source and binary forms, with or without
67 * modification, are permitted provided that the following conditions
68 * are met:
69 * 1. Redistributions of source code must retain the above copyright
70 * notice, this list of conditions and the following disclaimer.
71 * 2. Redistributions in binary form must reproduce the above copyright
72 * notice, this list of conditions and the following disclaimer in the
73 * documentation and/or other materials provided with the distribution.
74 * 3. Neither the name of the University nor the names of its contributors
75 * may be used to endorse or promote products derived from this software
76 * without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
79 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
80 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
81 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
82 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
83 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
84 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
86 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
87 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
88 * SUCH DAMAGE.
89 *
90 * @(#)route.c 8.3 (Berkeley) 1/9/95
91 */
92
93 #ifdef _KERNEL_OPT
94 #include "opt_inet.h"
95 #include "opt_route.h"
96 #include "opt_net_mpsafe.h"
97 #endif
98
99 #include <sys/cdefs.h>
100 __KERNEL_RCSID(0, "$NetBSD: route.c,v 1.221 2019/09/19 04:46:29 ozaki-r Exp $");
101
102 #include <sys/param.h>
103 #ifdef RTFLUSH_DEBUG
104 #include <sys/sysctl.h>
105 #endif
106 #include <sys/systm.h>
107 #include <sys/callout.h>
108 #include <sys/proc.h>
109 #include <sys/mbuf.h>
110 #include <sys/socket.h>
111 #include <sys/socketvar.h>
112 #include <sys/domain.h>
113 #include <sys/kernel.h>
114 #include <sys/ioctl.h>
115 #include <sys/pool.h>
116 #include <sys/kauth.h>
117 #include <sys/workqueue.h>
118 #include <sys/syslog.h>
119 #include <sys/rwlock.h>
120 #include <sys/mutex.h>
121 #include <sys/cpu.h>
122 #include <sys/kmem.h>
123
124 #include <net/if.h>
125 #include <net/if_dl.h>
126 #include <net/route.h>
127 #if defined(INET) || defined(INET6)
128 #include <net/if_llatbl.h>
129 #endif
130
131 #include <netinet/in.h>
132 #include <netinet/in_var.h>
133
134 #define PRESERVED_RTF (RTF_UP | RTF_GATEWAY | RTF_HOST | RTF_DONE | RTF_MASK)
135
136 #ifdef RTFLUSH_DEBUG
137 #define rtcache_debug() __predict_false(_rtcache_debug)
138 #else /* RTFLUSH_DEBUG */
139 #define rtcache_debug() 0
140 #endif /* RTFLUSH_DEBUG */
141
142 #ifdef RT_DEBUG
143 #define RT_REFCNT_TRACE(rt) printf("%s:%d: rt=%p refcnt=%d\n", \
144 __func__, __LINE__, (rt), (rt)->rt_refcnt)
145 #else
146 #define RT_REFCNT_TRACE(rt) do {} while (0)
147 #endif
148
149 #ifdef RT_DEBUG
150 #define dlog(level, fmt, args...) log(level, fmt, ##args)
151 #else
152 #define dlog(level, fmt, args...) do {} while (0)
153 #endif
154
155 struct rtstat rtstat;
156
157 static int rttrash; /* routes not in table but not freed */
158
159 static struct pool rtentry_pool;
160 static struct pool rttimer_pool;
161
162 static struct callout rt_timer_ch; /* callout for rt_timer_timer() */
163 static struct workqueue *rt_timer_wq;
164 static struct work rt_timer_wk;
165
166 static void rt_timer_init(void);
167 static void rt_timer_queue_remove_all(struct rttimer_queue *);
168 static void rt_timer_remove_all(struct rtentry *);
169 static void rt_timer_timer(void *);
170
171 /*
172 * Locking notes:
173 * - The routing table is protected by a global rwlock
174 * - API: RT_RLOCK and friends
175 * - rtcaches are NOT protected by the framework
176 * - Callers must guarantee a rtcache isn't accessed simultaneously
 *     - How the constraint is guaranteed in the wild
178 * - Protect a rtcache by a mutex (e.g., inp_route)
179 * - Make rtcache per-CPU and allow only accesses from softint
180 * (e.g., ipforward_rt_percpu)
 * - References to a rtentry are managed by reference counting and psref
 *   - Reference counting is used for temporary references when a rtentry
 *     is fetched from the routing table
 *   - psref is used for temporary references when a rtentry is fetched
 *     from a rtcache
186 * - struct route (rtcache) has struct psref, so we cannot obtain
187 * a reference twice on the same struct route
 *   - Before destroying or updating a rtentry, we have to wait for
189 * all references left (see below for details)
190 * - APIs
191 * - An obtained rtentry via rtalloc1 or rtrequest* must be
192 * unreferenced by rt_unref
193 * - An obtained rtentry via rtcache_* must be unreferenced by
194 * rtcache_unref
195 * - TODO: once we get a lockless routing table, we should use only
196 * psref for rtentries
197 * - rtentry destruction
198 * - A rtentry is destroyed (freed) only when we call rtrequest(RTM_DELETE)
199 * - If a caller of rtrequest grabs a reference of a rtentry, the caller
200 * has a responsibility to destroy the rtentry by itself by calling
201 * rt_free
202 * - If not, rtrequest itself does that
203 * - If rt_free is called in softint, the actual destruction routine is
204 * deferred to a workqueue
205 * - rtentry update
206 * - When updating a rtentry, RTF_UPDATING flag is set
207 * - If a rtentry is set RTF_UPDATING, fetching the rtentry from
208 * the routing table or a rtcache results in either of the following
209 * cases:
210 * - if the caller runs in softint, the caller fails to fetch
211 * - otherwise, the caller waits for the update completed and retries
212 * to fetch (probably succeed to fetch for the second time)
213 * - rtcache invalidation
214 * - There is a global generation counter that is incremented when
215 * any routes have been added or deleted
216 * - When a rtcache caches a rtentry into itself, it also stores
217 * a snapshot of the generation counter
218 * - If the snapshot equals to the global counter, the cache is valid,
219 * otherwise the cache is invalidated
220 */
221
222 /*
223 * Global lock for the routing table.
224 */
225 static krwlock_t rt_lock __cacheline_aligned;
226 #ifdef NET_MPSAFE
227 #define RT_RLOCK() rw_enter(&rt_lock, RW_READER)
228 #define RT_WLOCK() rw_enter(&rt_lock, RW_WRITER)
229 #define RT_UNLOCK() rw_exit(&rt_lock)
230 #define RT_WLOCKED() rw_write_held(&rt_lock)
231 #define RT_ASSERT_WLOCK() KASSERT(rw_write_held(&rt_lock))
232 #else
233 #define RT_RLOCK() do {} while (0)
234 #define RT_WLOCK() do {} while (0)
235 #define RT_UNLOCK() do {} while (0)
236 #define RT_WLOCKED() true
237 #define RT_ASSERT_WLOCK() do {} while (0)
238 #endif
239
240 static uint64_t rtcache_generation;
241
242 /*
243 * mutex and cv that are used to wait for references to a rtentry left
244 * before updating the rtentry.
245 */
246 static struct {
247 kmutex_t lock;
248 kcondvar_t cv;
249 bool ongoing;
250 const struct lwp *lwp;
251 } rt_update_global __cacheline_aligned;
252
253 /*
254 * A workqueue and stuff that are used to defer the destruction routine
255 * of rtentries.
256 */
257 static struct {
258 struct workqueue *wq;
259 struct work wk;
260 kmutex_t lock;
261 SLIST_HEAD(, rtentry) queue;
262 bool enqueued;
263 } rt_free_global __cacheline_aligned;
264
265 /* psref for rtentry */
266 static struct psref_class *rt_psref_class __read_mostly;
267
268 #ifdef RTFLUSH_DEBUG
269 static int _rtcache_debug = 0;
270 #endif /* RTFLUSH_DEBUG */
271
272 static kauth_listener_t route_listener;
273
274 static int rtdeletemsg(struct rtentry *);
275
276 static void rt_maskedcopy(const struct sockaddr *,
277 struct sockaddr *, const struct sockaddr *);
278
279 static void rtcache_invalidate(void);
280
281 static void rt_ref(struct rtentry *);
282
283 static struct rtentry *
284 rtalloc1_locked(const struct sockaddr *, int, bool, bool);
285
286 static struct ifaddr *rt_getifa(struct rt_addrinfo *, struct psref *);
287 static struct ifnet *rt_getifp(struct rt_addrinfo *, struct psref *);
288 static struct ifaddr *ifa_ifwithroute_psref(int, const struct sockaddr *,
289 const struct sockaddr *, struct psref *);
290
291 static void rtcache_ref(struct rtentry *, struct route *);
292
293 #ifdef NET_MPSAFE
294 static void rt_update_wait(void);
295 #endif
296
297 static bool rt_wait_ok(void);
298 static void rt_wait_refcnt(const char *, struct rtentry *, int);
299 static void rt_wait_psref(struct rtentry *);
300
301 #ifdef DDB
302 static void db_print_sa(const struct sockaddr *);
303 static void db_print_ifa(struct ifaddr *);
304 static int db_show_rtentry(struct rtentry *, void *);
305 #endif
306
307 #ifdef RTFLUSH_DEBUG
static void sysctl_net_rtcache_setup(struct sysctllog **);
/*
 * Create the net.rtcache sysctl subtree with a single read-write
 * "debug" knob backed by _rtcache_debug (RTFLUSH_DEBUG builds only).
 * Errors are silently ignored; the knob is best-effort.
 */
static void
sysctl_net_rtcache_setup(struct sysctllog **clog)
{
	const struct sysctlnode *rnode;

	/* Create the "net.rtcache" node... */
	if (sysctl_createv(clog, 0, NULL, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE,
	    "rtcache", SYSCTL_DESCR("Route cache related settings"),
	    NULL, 0, NULL, 0, CTL_NET, CTL_CREATE, CTL_EOL) != 0)
		return;
	/* ...and the writable integer "debug" underneath it. */
	if (sysctl_createv(clog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug route caches"),
	    NULL, 0, &_rtcache_debug, 0, CTL_CREATE, CTL_EOL) != 0)
		return;
}
325 #endif /* RTFLUSH_DEBUG */
326
327 static inline void
328 rt_destroy(struct rtentry *rt)
329 {
330 if (rt->_rt_key != NULL)
331 sockaddr_free(rt->_rt_key);
332 if (rt->rt_gateway != NULL)
333 sockaddr_free(rt->rt_gateway);
334 if (rt_gettag(rt) != NULL)
335 sockaddr_free(rt_gettag(rt));
336 rt->_rt_key = rt->rt_gateway = rt->rt_tag = NULL;
337 }
338
/*
 * Replace the destination key of a rtentry with a private copy of
 * "key" (no-op if "key" is already the stored sockaddr), then make
 * the radix node point at the stored copy.  Returns the stored key,
 * or NULL if sockaddr_dup failed to allocate.
 */
static inline const struct sockaddr *
rt_setkey(struct rtentry *rt, const struct sockaddr *key, int flags)
{
	if (rt->_rt_key == key)
		goto out;

	if (rt->_rt_key != NULL)
		sockaddr_free(rt->_rt_key);
	rt->_rt_key = sockaddr_dup(key, flags);
out:
	/* Keep the radix node's key in sync with the stored sockaddr. */
	rt->rt_nodes->rn_key = (const char *)rt->_rt_key;
	return rt->_rt_key;
}
352
/*
 * Return the ifaddr associated with a rtentry.  If the ifaddr has an
 * ifa_getifa hook, ask it for the (possibly different) ifaddr to use
 * for the route's key and install that one on the rtentry.  Returns
 * NULL only if the hook fails to produce an ifaddr.
 */
struct ifaddr *
rt_get_ifa(struct rtentry *rt)
{
	struct ifaddr *ifa;

	if ((ifa = rt->rt_ifa) == NULL)
		return ifa;
	else if (ifa->ifa_getifa == NULL)
		return ifa;
#if 0
	/* Disabled seqno-based shortcut that would skip the hook. */
	else if (ifa->ifa_seqno != NULL && *ifa->ifa_seqno == rt->rt_ifa_seqno)
		return ifa;
#endif
	else {
		ifa = (*ifa->ifa_getifa)(ifa, rt_getkey(rt));
		if (ifa == NULL)
			return NULL;
		rt_replace_ifa(rt, ifa);
		return ifa;
	}
}
374
375 static void
376 rt_set_ifa1(struct rtentry *rt, struct ifaddr *ifa)
377 {
378 rt->rt_ifa = ifa;
379 if (ifa->ifa_seqno != NULL)
380 rt->rt_ifa_seqno = *ifa->ifa_seqno;
381 }
382
383 /*
384 * Is this route the connected route for the ifa?
385 */
386 static int
387 rt_ifa_connected(const struct rtentry *rt, const struct ifaddr *ifa)
388 {
389 const struct sockaddr *key, *dst, *odst;
390 struct sockaddr_storage maskeddst;
391
392 key = rt_getkey(rt);
393 dst = rt->rt_flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
394 if (dst == NULL ||
395 dst->sa_family != key->sa_family ||
396 dst->sa_len != key->sa_len)
397 return 0;
398 if ((rt->rt_flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
399 odst = dst;
400 dst = (struct sockaddr *)&maskeddst;
401 rt_maskedcopy(odst, (struct sockaddr *)&maskeddst,
402 ifa->ifa_netmask);
403 }
404 return (memcmp(dst, key, dst->sa_len) == 0);
405 }
406
/*
 * Replace the ifaddr of a rtentry with "ifa".  When this rtentry is
 * the connected route of the old ifaddr, the IFA_ROUTE marker is
 * moved to the new ifaddr if the route is also connected for it.
 * The new ifa gains a reference; the old one is released.
 */
void
rt_replace_ifa(struct rtentry *rt, struct ifaddr *ifa)
{
	struct ifaddr *old;

	if (rt->rt_ifa == ifa)
		return;

	if (rt->rt_ifa &&
	    rt->rt_ifa != ifa &&
	    rt->rt_ifa->ifa_flags & IFA_ROUTE &&
	    rt_ifa_connected(rt, rt->rt_ifa))
	{
		RT_DPRINTF("rt->_rt_key = %p, ifa = %p, "
		    "replace deleted IFA_ROUTE\n",
		    (void *)rt->_rt_key, (void *)rt->rt_ifa);
		rt->rt_ifa->ifa_flags &= ~IFA_ROUTE;
		if (rt_ifa_connected(rt, ifa)) {
			RT_DPRINTF("rt->_rt_key = %p, ifa = %p, "
			    "replace added IFA_ROUTE\n",
			    (void *)rt->_rt_key, (void *)ifa);
			ifa->ifa_flags |= IFA_ROUTE;
		}
	}

	/* Reference the new ifa before dropping the old one. */
	ifaref(ifa);
	old = rt->rt_ifa;
	rt_set_ifa1(rt, ifa);
	ifafree(old);
}
437
/*
 * Attach "ifa" to the rtentry, taking a reference on the ifaddr on
 * behalf of the route.
 */
static void
rt_set_ifa(struct rtentry *rt, struct ifaddr *ifa)
{

	ifaref(ifa);
	rt_set_ifa1(rt, ifa);
}
444
445 static int
446 route_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
447 void *arg0, void *arg1, void *arg2, void *arg3)
448 {
449 struct rt_msghdr *rtm;
450 int result;
451
452 result = KAUTH_RESULT_DEFER;
453 rtm = arg1;
454
455 if (action != KAUTH_NETWORK_ROUTE)
456 return result;
457
458 if (rtm->rtm_type == RTM_GET)
459 result = KAUTH_RESULT_ALLOW;
460
461 return result;
462 }
463
464 static void rt_free_work(struct work *, void *);
465
/*
 * One-time initialization of the routing code: the deferred-free
 * machinery, the psref class for rtentries, the update lock/cv,
 * memory pools, the radix tree and the kauth route listener.
 */
void
rt_init(void)
{
	int error;

#ifdef RTFLUSH_DEBUG
	sysctl_net_rtcache_setup(NULL);
#endif

	mutex_init(&rt_free_global.lock, MUTEX_DEFAULT, IPL_SOFTNET);
	SLIST_INIT(&rt_free_global.queue);
	rt_free_global.enqueued = false;

	rt_psref_class = psref_class_create("rtentry", IPL_SOFTNET);

	/* Workqueue used by rt_free when called from softint context. */
	error = workqueue_create(&rt_free_global.wq, "rt_free",
	    rt_free_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&rt_update_global.lock, MUTEX_DEFAULT, IPL_SOFTNET);
	cv_init(&rt_update_global.cv, "rt_update");

	pool_init(&rtentry_pool, sizeof(struct rtentry), 0, 0, 0, "rtentpl",
	    NULL, IPL_SOFTNET);
	pool_init(&rttimer_pool, sizeof(struct rttimer), 0, 0, 0, "rttmrpl",
	    NULL, IPL_SOFTNET);

	rn_init();	/* initialize all zeroes, all ones, mask table */
	rtbl_init();

	route_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    route_listener_cb, NULL);
}
500
/*
 * Invalidate every rtcache by bumping the global generation counter;
 * caches compare their stored snapshot against it on next use.
 * Must be called with the routing table write-locked.
 */
static void
rtcache_invalidate(void)
{

	RT_ASSERT_WLOCK();

	if (rtcache_debug())
		printf("%s: enter\n", __func__);

	rtcache_generation++;
}
512
#ifdef RT_DEBUG
/*
 * Log a one-line summary of a rtentry (pointer, destination, gateway,
 * flags and interface) at LOG_DEBUG.  Debug builds only.
 */
static void
dump_rt(const struct rtentry *rt)
{
	char buf[512];

	log(LOG_DEBUG, "rt: ");
	log(LOG_DEBUG, "p=%p ", rt);
	if (rt->_rt_key == NULL) {
		log(LOG_DEBUG, "dst=(NULL) ");
	} else {
		sockaddr_format(rt->_rt_key, buf, sizeof(buf));
		log(LOG_DEBUG, "dst=%s ", buf);
	}
	if (rt->rt_gateway == NULL) {
		log(LOG_DEBUG, "gw=(NULL) ");
	} else {
		/*
		 * Fix: format the gateway sockaddr; the original
		 * mistakenly formatted rt->_rt_key here, so "gw="
		 * repeated the destination.
		 */
		sockaddr_format(rt->rt_gateway, buf, sizeof(buf));
		log(LOG_DEBUG, "gw=%s ", buf);
	}
	log(LOG_DEBUG, "flags=%x ", rt->rt_flags);
	if (rt->rt_ifp == NULL) {
		log(LOG_DEBUG, "if=(NULL) ");
	} else {
		log(LOG_DEBUG, "if=%s ", rt->rt_ifp->if_xname);
	}
	log(LOG_DEBUG, "\n");
}
#endif /* RT_DEBUG */
542
/*
 * Packet routing routines. If success, refcnt of a returned rtentry
 * will be incremented. The caller has to rtfree it by itself.
 *
 * Must be called with the routing table lock held (read or write,
 * indicated by "wlock" so the lock can be re-taken after waiting).
 */
struct rtentry *
rtalloc1_locked(const struct sockaddr *dst, int report, bool wait_ok,
    bool wlock)
{
	rtbl_t *rtbl;
	struct rtentry *rt;
	int s;

#ifdef NET_MPSAFE
retry:
#endif
	s = splsoftnet();
	rtbl = rt_gettable(dst->sa_family);
	if (rtbl == NULL)
		goto miss;

	rt = rt_matchaddr(rtbl, dst);
	if (rt == NULL)
		goto miss;

	/* An entry being destroyed counts as a miss. */
	if (!ISSET(rt->rt_flags, RTF_UP))
		goto miss;

#ifdef NET_MPSAFE
	/*
	 * The entry is being updated by someone else: drop the lock,
	 * wait for the update to finish and retry — unless waiting is
	 * not allowed or not possible (softint), which is a miss.
	 */
	if (ISSET(rt->rt_flags, RTF_UPDATING) &&
	    /* XXX updater should be always able to acquire */
	    curlwp != rt_update_global.lwp) {
		if (!wait_ok || !rt_wait_ok())
			goto miss;
		RT_UNLOCK();
		splx(s);

		/* We can wait until the update is complete */
		rt_update_wait();

		/* Re-take the lock in the mode the caller held it. */
		if (wlock)
			RT_WLOCK();
		else
			RT_RLOCK();
		goto retry;
	}
#endif /* NET_MPSAFE */

	rt_ref(rt);
	RT_REFCNT_TRACE(rt);

	splx(s);
	return rt;
miss:
	rtstat.rts_unreach++;
	if (report) {
		struct rt_addrinfo info;

		/* Report the failed lookup on the routing socket. */
		memset(&info, 0, sizeof(info));
		info.rti_info[RTAX_DST] = dst;
		rt_missmsg(RTM_MISS, &info, 0, 0);
	}
	splx(s);
	return NULL;
}
607
608 struct rtentry *
609 rtalloc1(const struct sockaddr *dst, int report)
610 {
611 struct rtentry *rt;
612
613 RT_RLOCK();
614 rt = rtalloc1_locked(dst, report, true, false);
615 RT_UNLOCK();
616
617 return rt;
618 }
619
/*
 * Take a reference on a rtentry.  The caller must already guarantee
 * the entry cannot go away (e.g., holds the table lock or another
 * reference).
 */
static void
rt_ref(struct rtentry *rt)
{

	KASSERT(rt->rt_refcnt >= 0);
	atomic_inc_uint(&rt->rt_refcnt);
}
627
/*
 * Drop a reference obtained via rtalloc1 or rtrequest*.  If the entry
 * is being destroyed or updated (RTF_UP cleared or RTF_UPDATING set),
 * wake up anyone sleeping in rt_wait_refcnt for the count to drain.
 */
void
rt_unref(struct rtentry *rt)
{

	KASSERT(rt != NULL);
	KASSERTMSG(rt->rt_refcnt > 0, "refcnt=%d", rt->rt_refcnt);

	atomic_dec_uint(&rt->rt_refcnt);
	if (!ISSET(rt->rt_flags, RTF_UP) || ISSET(rt->rt_flags, RTF_UPDATING)) {
		/* The lock orders the wakeup with rt_wait_refcnt's wait. */
		mutex_enter(&rt_free_global.lock);
		cv_broadcast(&rt->rt_cv);
		mutex_exit(&rt_free_global.lock);
	}
}
642
/*
 * Return true if the current context is allowed to sleep, i.e., we
 * are neither in hard interrupt (asserted) nor softint context.
 */
static bool
rt_wait_ok(void)
{

	KASSERT(!cpu_intr_p());
	return !cpu_softintr_p();
}
650
/*
 * Sleep until the reference count of "rt" drops to "cnt" (0 when
 * freeing; 1 when updating, i.e., only the caller's own reference is
 * left).  Wakeups come from rt_unref via rt->rt_cv.  "title" is only
 * used in debug logging.
 */
void
rt_wait_refcnt(const char *title, struct rtentry *rt, int cnt)
{
	mutex_enter(&rt_free_global.lock);
	while (rt->rt_refcnt > cnt) {
		dlog(LOG_DEBUG, "%s: %s waiting (refcnt=%d)\n",
		    __func__, title, rt->rt_refcnt);
		cv_wait(&rt->rt_cv, &rt_free_global.lock);
		dlog(LOG_DEBUG, "%s: %s waited (refcnt=%d)\n",
		    __func__, title, rt->rt_refcnt);
	}
	mutex_exit(&rt_free_global.lock);
}
664
/*
 * Wait until all psref readers of "rt" have released it, then
 * re-initialize the psref target so the entry can be published again.
 */
void
rt_wait_psref(struct rtentry *rt)
{

	psref_target_destroy(&rt->rt_psref, rt_psref_class);
	psref_target_init(&rt->rt_psref, rt_psref_class);
}
672
/*
 * Actually destroy a rtentry that has been removed from the table:
 * wait out any in-flight update and all remaining references/psrefs,
 * then release the ifaddr, the sockaddrs and the entry itself.
 * Must be called in a sleepable context.
 */
static void
_rt_free(struct rtentry *rt)
{
	struct ifaddr *ifa;

	/*
	 * Need to avoid a deadlock on rt_wait_refcnt of update
	 * and a conflict on psref_target_destroy of update.
	 */
#ifdef NET_MPSAFE
	rt_update_wait();
#endif

	RT_REFCNT_TRACE(rt);
	KASSERTMSG(rt->rt_refcnt >= 0, "refcnt=%d", rt->rt_refcnt);
	/* Wait until nobody references the entry anymore. */
	rt_wait_refcnt("free", rt, 0);
#ifdef NET_MPSAFE
	psref_target_destroy(&rt->rt_psref, rt_psref_class);
#endif

	rt_assert_inactive(rt);
	rttrash--;
	ifa = rt->rt_ifa;
	rt->rt_ifa = NULL;
	ifafree(ifa);
	rt->rt_ifp = NULL;
	cv_destroy(&rt->rt_cv);
	rt_destroy(rt);
	pool_put(&rtentry_pool, rt);
}
703
/*
 * Workqueue handler that drains the deferred-free queue filled by
 * rt_free when called from softint context, destroying each entry in
 * a context that may sleep.
 */
static void
rt_free_work(struct work *wk, void *arg)
{

	for (;;) {
		struct rtentry *rt;

		mutex_enter(&rt_free_global.lock);
		if ((rt = SLIST_FIRST(&rt_free_global.queue)) == NULL) {
			rt_free_global.enqueued = false;
			mutex_exit(&rt_free_global.lock);
			return;
		}
		SLIST_REMOVE_HEAD(&rt_free_global.queue, rt_free);
		mutex_exit(&rt_free_global.lock);
		/* Drop the reference held by rt_free's caller. */
		atomic_dec_uint(&rt->rt_refcnt);
		_rt_free(rt);
	}
}
723
/*
 * Release the caller's reference and destroy the rtentry.  If the
 * current context cannot sleep (softint), queue the entry for the
 * rt_free workqueue instead of destroying it inline.
 */
void
rt_free(struct rtentry *rt)
{

	KASSERT(rt->rt_refcnt > 0);
	if (rt_wait_ok()) {
		atomic_dec_uint(&rt->rt_refcnt);
		_rt_free(rt);
		return;
	}

	mutex_enter(&rt_free_global.lock);
	/* No need to add a reference here; rt_free_work drops ours. */
	SLIST_INSERT_HEAD(&rt_free_global.queue, rt, rt_free);
	if (!rt_free_global.enqueued) {
		workqueue_enqueue(rt_free_global.wq, &rt_free_global.wk, NULL);
		rt_free_global.enqueued = true;
	}
	mutex_exit(&rt_free_global.lock);
}
744
#ifdef NET_MPSAFE
/*
 * Block until no rtentry update (rt_update_prepare/rt_update_finish)
 * is in progress.  Must not be called by the updating LWP itself
 * (see rt_update_global.lwp).
 */
static void
rt_update_wait(void)
{

	mutex_enter(&rt_update_global.lock);
	while (rt_update_global.ongoing) {
		dlog(LOG_DEBUG, "%s: waiting lwp=%p\n", __func__, curlwp);
		cv_wait(&rt_update_global.cv, &rt_update_global.lock);
		dlog(LOG_DEBUG, "%s: waited lwp=%p\n", __func__, curlwp);
	}
	mutex_exit(&rt_update_global.lock);
}
#endif
759
/*
 * Prepare to update a rtentry in place: mark it RTF_UPDATING, become
 * the single system-wide updater, then wait until only the caller's
 * own reference remains and no psref readers are left.  Returns
 * ESRCH if the entry is already being destroyed.  Must be paired
 * with rt_update_finish.
 */
int
rt_update_prepare(struct rtentry *rt)
{

	dlog(LOG_DEBUG, "%s: updating rt=%p lwp=%p\n", __func__, rt, curlwp);

	RT_WLOCK();
	/* If the entry is being destroyed, don't proceed the update. */
	if (!ISSET(rt->rt_flags, RTF_UP)) {
		RT_UNLOCK();
		return ESRCH;
	}
	rt->rt_flags |= RTF_UPDATING;
	RT_UNLOCK();

	mutex_enter(&rt_update_global.lock);
	/* Only one update may be in flight at a time. */
	while (rt_update_global.ongoing) {
		dlog(LOG_DEBUG, "%s: waiting ongoing updating rt=%p lwp=%p\n",
		    __func__, rt, curlwp);
		cv_wait(&rt_update_global.cv, &rt_update_global.lock);
		dlog(LOG_DEBUG, "%s: waited ongoing updating rt=%p lwp=%p\n",
		    __func__, rt, curlwp);
	}
	rt_update_global.ongoing = true;
	/* XXX need it to avoid rt_update_wait by updater itself. */
	rt_update_global.lwp = curlwp;
	mutex_exit(&rt_update_global.lock);

	/* Wait for all references but the caller's own (refcnt == 1). */
	rt_wait_refcnt("update", rt, 1);
	rt_wait_psref(rt);

	return 0;
}
793
/*
 * Finish an in-place rtentry update started by rt_update_prepare:
 * clear RTF_UPDATING and wake up everyone waiting for the update.
 */
void
rt_update_finish(struct rtentry *rt)
{

	RT_WLOCK();
	rt->rt_flags &= ~RTF_UPDATING;
	RT_UNLOCK();

	mutex_enter(&rt_update_global.lock);
	rt_update_global.ongoing = false;
	rt_update_global.lwp = NULL;
	cv_broadcast(&rt_update_global.cv);
	mutex_exit(&rt_update_global.lock);

	dlog(LOG_DEBUG, "%s: updated rt=%p lwp=%p\n", __func__, rt, curlwp);
}
810
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * N.B.: must be called at splsoftnet
 */
void
rtredirect(const struct sockaddr *dst, const struct sockaddr *gateway,
    const struct sockaddr *netmask, int flags, const struct sockaddr *src,
    struct rtentry **rtp)
{
	struct rtentry *rt;
	int error = 0;
	uint64_t *stat = NULL;
	struct rt_addrinfo info;
	struct ifaddr *ifa;
	struct psref psref;

	/* verify the gateway is directly reachable */
	if ((ifa = ifa_ifwithnet_psref(gateway, &psref)) == NULL) {
		error = ENETUNREACH;
		goto out;
	}
	rt = rtalloc1(dst, 0);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
	if (!(flags & RTF_DONE) && rt &&
	    (sockaddr_cmp(src, rt->rt_gateway) != 0 || rt->rt_ifa != ifa))
		error = EINVAL;
	else {
		int s = pserialize_read_enter();
		struct ifaddr *_ifa;

		/* A redirect to one of our own addresses is a loop. */
		_ifa = ifa_ifwithaddr(gateway);
		if (_ifa != NULL)
			error = EHOSTUNREACH;
		pserialize_read_exit(s);
	}
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if (rt == NULL || (rt_mask(rt) && rt_mask(rt)->sa_len < 2))
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (rt->rt_flags & RTF_GATEWAY) {
		if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
		create:
			if (rt != NULL)
				rt_unref(rt);
			flags |= RTF_GATEWAY | RTF_DYNAMIC;
			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_info[RTAX_NETMASK] = netmask;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest1(RTM_ADD, &info, &rt);
			if (rt != NULL)
				flags = rt->rt_flags;
			stat = &rtstat.rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
#ifdef NET_MPSAFE
			KASSERT(!cpu_softintr_p());

			error = rt_update_prepare(rt);
			if (error == 0) {
#endif
				RT_WLOCK();
				error = rt_setgate(rt, gateway);
				if (error == 0) {
					rt->rt_flags |= RTF_MODIFIED;
					flags |= RTF_MODIFIED;
				}
				RT_UNLOCK();
#ifdef NET_MPSAFE
				rt_update_finish(rt);
			} else {
				/*
				 * If error != 0, the rtentry is being
				 * destroyed, so doing nothing doesn't
				 * matter.
				 */
			}
#endif
			stat = &rtstat.rts_newgateway;
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		/* Hand the route to the caller if wanted, else drop it. */
		if (rtp != NULL && !error)
			*rtp = rt;
		else
			rt_unref(rt);
	}
out:
	if (error)
		rtstat.rts_badredirect++;
	else if (stat != NULL)
		(*stat)++;
	/* Report the redirect (and any error) on the routing socket. */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_NETMASK] = netmask;
	info.rti_info[RTAX_AUTHOR] = src;
	rt_missmsg(RTM_REDIRECT, &info, flags, error);
	ifa_release(ifa, &psref);
}
942
943 /*
944 * Delete a route and generate a message.
945 * It doesn't free a passed rt.
946 */
947 static int
948 rtdeletemsg(struct rtentry *rt)
949 {
950 int error;
951 struct rt_addrinfo info;
952 struct rtentry *retrt;
953
954 /*
955 * Request the new route so that the entry is not actually
956 * deleted. That will allow the information being reported to
957 * be accurate (and consistent with route_output()).
958 */
959 memset(&info, 0, sizeof(info));
960 info.rti_info[RTAX_DST] = rt_getkey(rt);
961 info.rti_info[RTAX_NETMASK] = rt_mask(rt);
962 info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
963 info.rti_flags = rt->rt_flags;
964 error = rtrequest1(RTM_DELETE, &info, &retrt);
965
966 rt_missmsg(RTM_DELETE, &info, info.rti_flags, error);
967
968 return error;
969 }
970
/*
 * Find the ifaddr to attach to a route with the given flags, dst and
 * gateway, taking a psref on the result.  Falls back to a routing
 * table lookup of the gateway when no interface address matches
 * directly.  Returns NULL if no suitable ifaddr is found; otherwise
 * the caller must drop the reference with ifa_release.
 */
static struct ifaddr *
ifa_ifwithroute_psref(int flags, const struct sockaddr *dst,
    const struct sockaddr *gateway, struct psref *psref)
{
	struct ifaddr *ifa = NULL;

	if ((flags & RTF_GATEWAY) == 0) {
		/*
		 * If we are adding a route to an interface,
		 * and the interface is a pt to pt link
		 * we should search for the destination
		 * as our clue to the interface.  Otherwise
		 * we can use the local address.
		 */
		if ((flags & RTF_HOST) && gateway->sa_family != AF_LINK)
			ifa = ifa_ifwithdstaddr_psref(dst, psref);
		if (ifa == NULL)
			ifa = ifa_ifwithaddr_psref(gateway, psref);
	} else {
		/*
		 * If we are adding a route to a remote net
		 * or host, the gateway may still be on the
		 * other end of a pt to pt link.
		 */
		ifa = ifa_ifwithdstaddr_psref(gateway, psref);
	}
	if (ifa == NULL)
		ifa = ifa_ifwithnet_psref(gateway, psref);
	if (ifa == NULL) {
		int s;
		struct rtentry *rt;

		/* Last resort: use the ifa of the route to the gateway. */
		rt = rtalloc1_locked(gateway, 0, true, true);
		if (rt == NULL)
			return NULL;
		/* The gateway must be reachable without another gateway. */
		if (rt->rt_flags & RTF_GATEWAY) {
			rt_unref(rt);
			return NULL;
		}
		/*
		 * Just in case. May not need to do this workaround.
		 * Revisit when working on rtentry MP-ification.
		 */
		s = pserialize_read_enter();
		IFADDR_READER_FOREACH(ifa, rt->rt_ifp) {
			if (ifa == rt->rt_ifa)
				break;
		}
		if (ifa != NULL)
			ifa_acquire(ifa, psref);
		pserialize_read_exit(s);
		rt_unref(rt);
		if (ifa == NULL)
			return NULL;
	}
	if (ifa->ifa_addr->sa_family != dst->sa_family) {
		struct ifaddr *nifa;
		int s;

		/* Prefer an address in the same family as dst, if any. */
		s = pserialize_read_enter();
		nifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
		if (nifa != NULL) {
			ifa_release(ifa, psref);
			ifa_acquire(nifa, psref);
			ifa = nifa;
		}
		pserialize_read_exit(s);
	}
	return ifa;
}
1041
1042 /*
1043 * If it suceeds and ret_nrt isn't NULL, refcnt of ret_nrt is incremented.
1044 * The caller has to rtfree it by itself.
1045 */
1046 int
1047 rtrequest(int req, const struct sockaddr *dst, const struct sockaddr *gateway,
1048 const struct sockaddr *netmask, int flags, struct rtentry **ret_nrt)
1049 {
1050 struct rt_addrinfo info;
1051
1052 memset(&info, 0, sizeof(info));
1053 info.rti_flags = flags;
1054 info.rti_info[RTAX_DST] = dst;
1055 info.rti_info[RTAX_GATEWAY] = gateway;
1056 info.rti_info[RTAX_NETMASK] = netmask;
1057 return rtrequest1(req, &info, ret_nrt);
1058 }
1059
1060 /*
1061 * It's a utility function to add/remove a route to/from the routing table
1062 * and tell user processes the addition/removal on success.
1063 */
1064 int
1065 rtrequest_newmsg(const int req, const struct sockaddr *dst,
1066 const struct sockaddr *gateway, const struct sockaddr *netmask,
1067 const int flags)
1068 {
1069 int error;
1070 struct rtentry *ret_nrt = NULL;
1071
1072 KASSERT(req == RTM_ADD || req == RTM_DELETE);
1073
1074 error = rtrequest(req, dst, gateway, netmask, flags, &ret_nrt);
1075 if (error != 0)
1076 return error;
1077
1078 KASSERT(ret_nrt != NULL);
1079
1080 rt_newmsg(req, ret_nrt); /* tell user process */
1081 if (req == RTM_DELETE)
1082 rt_free(ret_nrt);
1083 else
1084 rt_unref(ret_nrt);
1085
1086 return 0;
1087 }
1088
1089 static struct ifnet *
1090 rt_getifp(struct rt_addrinfo *info, struct psref *psref)
1091 {
1092 const struct sockaddr *ifpaddr = info->rti_info[RTAX_IFP];
1093
1094 if (info->rti_ifp != NULL)
1095 return NULL;
1096 /*
1097 * ifp may be specified by sockaddr_dl when protocol address
1098 * is ambiguous
1099 */
1100 if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
1101 struct ifaddr *ifa;
1102 int s = pserialize_read_enter();
1103
1104 ifa = ifa_ifwithnet(ifpaddr);
1105 if (ifa != NULL)
1106 info->rti_ifp = if_get_byindex(ifa->ifa_ifp->if_index,
1107 psref);
1108 pserialize_read_exit(s);
1109 }
1110
1111 return info->rti_ifp;
1112 }
1113
/*
 * Resolve the ifaddr to associate with a route, using (in order of
 * preference) RTAX_IFA, the gateway, or the destination.  On success
 * the returned ifaddr is referenced via psref and recorded in
 * info->rti_ifa; info->rti_ifp is filled in from it if still unset.
 */
static struct ifaddr *
rt_getifa(struct rt_addrinfo *info, struct psref *psref)
{
	struct ifaddr *ifa = NULL;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *ifaaddr = info->rti_info[RTAX_IFA];
	int flags = info->rti_flags;
	const struct sockaddr *sa;

	/* First try an exact match on the explicitly given address. */
	if (info->rti_ifa == NULL && ifaaddr != NULL) {
		ifa = ifa_ifwithaddr_psref(ifaaddr, psref);
		if (ifa != NULL)
			goto got;
	}

	/* Otherwise derive it from ifa addr, gateway, or destination. */
	sa = ifaaddr != NULL ? ifaaddr :
	    (gateway != NULL ? gateway : dst);
	if (sa != NULL && info->rti_ifp != NULL)
		ifa = ifaof_ifpforaddr_psref(sa, info->rti_ifp, psref);
	else if (dst != NULL && gateway != NULL)
		ifa = ifa_ifwithroute_psref(flags, dst, gateway, psref);
	else if (sa != NULL)
		ifa = ifa_ifwithroute_psref(flags, sa, sa, psref);
	if (ifa == NULL)
		return NULL;
got:
	if (ifa->ifa_getifa != NULL) {
		/* FIXME ifa_getifa is NOMPSAFE */
		ifa = (*ifa->ifa_getifa)(ifa, dst);
		if (ifa == NULL)
			return NULL;
		/*
		 * NOTE(review): the psref held on the previous ifa does not
		 * appear to be released before re-acquiring here — confirm
		 * this is intended (see the NOMPSAFE FIXME above).
		 */
		ifa_acquire(ifa, psref);
	}
	info->rti_ifa = ifa;
	if (info->rti_ifp == NULL)
		info->rti_ifp = ifa->ifa_ifp;
	return ifa;
}
1153
1154 /*
1155 * If it suceeds and ret_nrt isn't NULL, refcnt of ret_nrt is incremented.
1156 * The caller has to rtfree it by itself.
1157 */
int
rtrequest1(int req, struct rt_addrinfo *info, struct rtentry **ret_nrt)
{
	int s = splsoftnet(), ss;
	int error = 0, rc;
	struct rtentry *rt;
	rtbl_t *rtbl;
	struct ifaddr *ifa = NULL;
	struct sockaddr_storage maskeddst;
	const struct sockaddr *dst = info->rti_info[RTAX_DST];
	const struct sockaddr *gateway = info->rti_info[RTAX_GATEWAY];
	const struct sockaddr *netmask = info->rti_info[RTAX_NETMASK];
	int flags = info->rti_flags;
	struct psref psref_ifp, psref_ifa;
	int bound = 0;
	struct ifnet *ifp = NULL;
	bool need_to_release_ifa = true;
	bool need_unlock = true;
#define senderr(x) { error = x ; goto bad; }

	RT_WLOCK();

	bound = curlwp_bind();
	if ((rtbl = rt_gettable(dst->sa_family)) == NULL)
		senderr(ESRCH);
	/* Host routes carry no mask. */
	if (flags & RTF_HOST)
		netmask = NULL;
	switch (req) {
	case RTM_DELETE:
		/* Apply the netmask so the lookup key matches the stored key. */
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		if ((rt = rt_lookup(rtbl, dst, netmask)) == NULL)
			senderr(ESRCH);
		if ((rt = rt_deladdr(rtbl, dst, netmask)) == NULL)
			senderr(ESRCH);
		rt->rt_flags &= ~RTF_UP;
		if ((ifa = rt->rt_ifa)) {
			if (ifa->ifa_flags & IFA_ROUTE &&
			    rt_ifa_connected(rt, ifa)) {
				RT_DPRINTF("rt->_rt_key = %p, ifa = %p, "
				    "deleted IFA_ROUTE\n",
				    (void *)rt->_rt_key, (void *)ifa);
				ifa->ifa_flags &= ~IFA_ROUTE;
			}
			if (ifa->ifa_rtrequest)
				ifa->ifa_rtrequest(RTM_DELETE, rt, info);
			/* Not psref-held here; keep the bad: path from releasing it. */
			ifa = NULL;
		}
		rttrash++;
		if (ret_nrt) {
			*ret_nrt = rt;
			rt_ref(rt);
			RT_REFCNT_TRACE(rt);
		}
		rtcache_invalidate();
		/* Drop the table lock before calling out to timers/lltable. */
		RT_UNLOCK();
		need_unlock = false;
		rt_timer_remove_all(rt);
#if defined(INET) || defined(INET6)
		if (netmask != NULL)
			lltable_prefix_free(dst->sa_family, dst, netmask, 0);
#endif
		if (ret_nrt == NULL) {
			/* Adjust the refcount */
			rt_ref(rt);
			RT_REFCNT_TRACE(rt);
			rt_free(rt);
		}
		break;

	case RTM_ADD:
		if (info->rti_ifa == NULL) {
			/* Resolve the outgoing interface/address ourselves. */
			ifp = rt_getifp(info, &psref_ifp);
			ifa = rt_getifa(info, &psref_ifa);
			if (ifa == NULL)
				senderr(ENETUNREACH);
		} else {
			/* Caller should have a reference of ifa */
			ifa = info->rti_ifa;
			need_to_release_ifa = false;
		}
		rt = pool_get(&rtentry_pool, PR_NOWAIT);
		if (rt == NULL)
			senderr(ENOBUFS);
		memset(rt, 0, sizeof(*rt));
		rt->rt_flags = RTF_UP | (flags & ~RTF_DONTCHANGEIFA);
		LIST_INIT(&rt->rt_timer);

		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (netmask) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			rt_setkey(rt, (struct sockaddr *)&maskeddst, M_NOWAIT);
		} else {
			rt_setkey(rt, dst, M_NOWAIT);
		}
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (rt_getkey(rt) == NULL ||
		    rt_setgate(rt, gateway) != 0) {
			pool_put(&rtentry_pool, rt);
			senderr(ENOBUFS);
		}

		rt_set_ifa(rt, ifa);
		if (info->rti_info[RTAX_TAG] != NULL) {
			const struct sockaddr *tag;
			tag = rt_settag(rt, info->rti_info[RTAX_TAG]);
			if (tag == NULL)
				senderr(ENOBUFS);
		}
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);

		/* An explicit RTAX_IFP takes precedence over the ifa's ifp. */
		ss = pserialize_read_enter();
		if (info->rti_info[RTAX_IFP] != NULL) {
			struct ifaddr *ifa2;
			ifa2 = ifa_ifwithnet(info->rti_info[RTAX_IFP]);
			if (ifa2 != NULL)
				rt->rt_ifp = ifa2->ifa_ifp;
			else
				rt->rt_ifp = ifa->ifa_ifp;
		} else
			rt->rt_ifp = ifa->ifa_ifp;
		pserialize_read_exit(ss);
		cv_init(&rt->rt_cv, "rtentry");
		psref_target_init(&rt->rt_psref, rt_psref_class);

		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		rc = rt_addaddr(rtbl, rt, netmask);
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (rc != 0) {
			ifafree(ifa); /* for rt_set_ifa above */
			cv_destroy(&rt->rt_cv);
			rt_destroy(rt);
			pool_put(&rtentry_pool, rt);
			senderr(rc);
		}
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (ifa->ifa_rtrequest)
			ifa->ifa_rtrequest(req, rt, info);
		if (need_to_release_ifa)
			ifa_release(ifa, &psref_ifa);
		ifa = NULL;
		if_put(ifp, &psref_ifp);
		ifp = NULL;
		RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
		if (ret_nrt) {
			*ret_nrt = rt;
			rt_ref(rt);
			RT_REFCNT_TRACE(rt);
		}
		rtcache_invalidate();
		RT_UNLOCK();
		need_unlock = false;
		break;
	case RTM_GET:
		/* Lookup only; a reference is returned via ret_nrt. */
		if (netmask != NULL) {
			rt_maskedcopy(dst, (struct sockaddr *)&maskeddst,
			    netmask);
			dst = (struct sockaddr *)&maskeddst;
		}
		if ((rt = rt_lookup(rtbl, dst, netmask)) == NULL)
			senderr(ESRCH);
		if (ret_nrt != NULL) {
			*ret_nrt = rt;
			rt_ref(rt);
			RT_REFCNT_TRACE(rt);
		}
		break;
	}
bad:
	/* Common exit: release any held references, lock, and spl. */
	if (need_to_release_ifa)
		ifa_release(ifa, &psref_ifa);
	if_put(ifp, &psref_ifp);
	curlwp_bindx(bound);
	if (need_unlock)
		RT_UNLOCK();
	splx(s);
	return error;
}
1340
/*
 * Replace the gateway of rt with a copy of gate.  For indirect
 * (RTF_GATEWAY) routes, also clamp the route MTU to the gateway
 * route's MTU.  Must be called with the routing-table write lock held.
 * Returns 0 or ENOMEM.
 */
int
rt_setgate(struct rtentry *rt, const struct sockaddr *gate)
{
	struct sockaddr *new, *old;

	KASSERT(RT_WLOCKED());
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);

	new = sockaddr_dup(gate, M_ZERO | M_NOWAIT);
	if (new == NULL)
		return ENOMEM;

	/* Swap in the copy, then free the previous gateway (if any). */
	old = rt->rt_gateway;
	rt->rt_gateway = new;
	if (old != NULL)
		sockaddr_free(old);

	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);

	if (rt->rt_flags & RTF_GATEWAY) {
		struct rtentry *gwrt;

		gwrt = rtalloc1_locked(gate, 1, false, true);
		/*
		 * If we switched gateways, grab the MTU from the new
		 * gateway route if the current MTU is greater than the
		 * MTU of the gateway.
		 * Note that, if the MTU of gateway is 0, we will reset the
		 * MTU of the route to run PMTUD again from scratch. XXX
		 */
		if (gwrt != NULL) {
			KASSERT(gwrt->_rt_key != NULL);
			RT_DPRINTF("gwrt->_rt_key = %p\n", gwrt->_rt_key);
			if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0 &&
			    rt->rt_rmx.rmx_mtu &&
			    rt->rt_rmx.rmx_mtu > gwrt->rt_rmx.rmx_mtu) {
				rt->rt_rmx.rmx_mtu = gwrt->rt_rmx.rmx_mtu;
			}
			rt_unref(gwrt);
		}
	}
	KASSERT(rt->_rt_key != NULL);
	RT_DPRINTF("rt->_rt_key = %p\n", (void *)rt->_rt_key);
	return 0;
}
1388
/*
 * Select the new ifaddr (and interface) for a route change, based on
 * which of RTAX_IFP/RTAX_IFA/RTAX_GATEWAY were supplied.  On success
 * the returned ifaddr is held via psref and *ifp is set (also held,
 * via psref_ifp); on failure both are NULL with no references held.
 */
static struct ifaddr *
rt_update_get_ifa(const struct rt_addrinfo info, const struct rtentry *rt,
    struct ifnet **ifp, struct psref *psref_ifp, struct psref *psref)
{
	struct ifaddr *ifa = NULL;

	*ifp = NULL;
	if (info.rti_info[RTAX_IFP] != NULL) {
		ifa = ifa_ifwithnet_psref(info.rti_info[RTAX_IFP], psref);
		if (ifa == NULL)
			goto next;
		*ifp = ifa->ifa_ifp;
		if_acquire(*ifp, psref_ifp);
		if (info.rti_info[RTAX_IFA] == NULL &&
		    info.rti_info[RTAX_GATEWAY] == NULL)
			goto next;
		/* The link-level ifa was only needed to pick the ifp. */
		ifa_release(ifa, psref);
		if (info.rti_info[RTAX_IFA] == NULL) {
			/* route change <dst> <gw> -ifp <if> */
			ifa = ifaof_ifpforaddr_psref(info.rti_info[RTAX_GATEWAY],
			    *ifp, psref);
		} else {
			/* route change <dst> -ifp <if> -ifa <addr> */
			ifa = ifa_ifwithaddr_psref(info.rti_info[RTAX_IFA], psref);
			if (ifa != NULL)
				goto out;
			ifa = ifaof_ifpforaddr_psref(info.rti_info[RTAX_IFA],
			    *ifp, psref);
		}
		goto out;
	}
next:
	if (info.rti_info[RTAX_IFA] != NULL) {
		/* route change <dst> <gw> -ifa <addr> */
		ifa = ifa_ifwithaddr_psref(info.rti_info[RTAX_IFA], psref);
		if (ifa != NULL)
			goto out;
	}
	if (info.rti_info[RTAX_GATEWAY] != NULL) {
		/* route change <dst> <gw> */
		ifa = ifa_ifwithroute_psref(rt->rt_flags, rt_getkey(rt),
		    info.rti_info[RTAX_GATEWAY], psref);
	}
out:
	/* Derive the ifp from the ifa if one wasn't picked above. */
	if (ifa != NULL && *ifp == NULL) {
		*ifp = ifa->ifa_ifp;
		if_acquire(*ifp, psref_ifp);
	}
	/* No ifa means failure: drop any ifp reference taken earlier. */
	if (ifa == NULL && *ifp != NULL) {
		if_put(*ifp, psref_ifp);
		*ifp = NULL;
	}
	return ifa;
}
1443
/*
 * Modify an existing route in place: gateway, tag, ifaddr/interface,
 * metrics and flags, per info/rtm.  Takes the routing-table write lock
 * for the duration.  Returns 0 or an errno.
 */
int
rt_update(struct rtentry *rt, struct rt_addrinfo *info, void *rtm)
{
	int error = 0;
	struct ifnet *ifp = NULL, *new_ifp = NULL;
	struct ifaddr *ifa = NULL, *new_ifa;
	struct psref psref_ifa, psref_new_ifa, psref_ifp, psref_new_ifp;
	bool newgw, ifp_changed = false;

	RT_WLOCK();
	/*
	 * New gateway could require new ifaddr, ifp;
	 * flags may also be different; ifp may be specified
	 * by ll sockaddr when protocol address is ambiguous
	 */
	newgw = info->rti_info[RTAX_GATEWAY] != NULL &&
	    sockaddr_cmp(info->rti_info[RTAX_GATEWAY], rt->rt_gateway) != 0;

	if (newgw || info->rti_info[RTAX_IFP] != NULL ||
	    info->rti_info[RTAX_IFA] != NULL) {
		ifp = rt_getifp(info, &psref_ifp);
		/* info refers ifp so we need to keep a reference */
		ifa = rt_getifa(info, &psref_ifa);
		if (ifa == NULL) {
			error = ENETUNREACH;
			goto out;
		}
	}
	if (newgw) {
		error = rt_setgate(rt, info->rti_info[RTAX_GATEWAY]);
		if (error != 0)
			goto out;
	}
	if (info->rti_info[RTAX_TAG]) {
		const struct sockaddr *tag;
		tag = rt_settag(rt, info->rti_info[RTAX_TAG]);
		if (tag == NULL) {
			error = ENOBUFS;
			goto out;
		}
	}
	/*
	 * New gateway could require new ifaddr, ifp;
	 * flags may also be different; ifp may be specified
	 * by ll sockaddr when protocol address is ambiguous
	 */
	new_ifa = rt_update_get_ifa(*info, rt, &new_ifp, &psref_new_ifp,
	    &psref_new_ifa);
	if (new_ifa != NULL) {
		/* Prefer the freshly resolved ifaddr over rt_getifa's. */
		ifa_release(ifa, &psref_ifa);
		ifa = new_ifa;
	}
	if (ifa) {
		struct ifaddr *oifa = rt->rt_ifa;
		/* Only switch to a live ifaddr on a live interface. */
		if (oifa != ifa && !ifa_is_destroying(ifa) &&
		    new_ifp != NULL && !if_is_deactivated(new_ifp)) {
			if (oifa && oifa->ifa_rtrequest)
				oifa->ifa_rtrequest(RTM_DELETE, rt, info);
			rt_replace_ifa(rt, ifa);
			rt->rt_ifp = new_ifp;
			ifp_changed = true;
		}
		if (new_ifa == NULL)
			ifa_release(ifa, &psref_ifa);
		/* To avoid ifa_release below */
		ifa = NULL;
	}
	ifa_release(new_ifa, &psref_new_ifa);
	if (new_ifp && rt->rt_ifp != new_ifp && !if_is_deactivated(new_ifp)) {
		rt->rt_ifp = new_ifp;
		ifp_changed = true;
	}
	rt_setmetrics(rtm, rt);
	/* Update flags, preserving the kernel-managed PRESERVED_RTF bits. */
	if (rt->rt_flags != info->rti_flags) {
		rt->rt_flags = (info->rti_flags & ~PRESERVED_RTF) |
		    (rt->rt_flags & PRESERVED_RTF);
	}
	if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
		rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, info);
#if defined(INET) || defined(INET6)
	/* Flush stale link-layer entries for the prefix on ifp change. */
	if (ifp_changed && rt_mask(rt) != NULL)
		lltable_prefix_free(rt_getkey(rt)->sa_family, rt_getkey(rt),
		    rt_mask(rt), 0);
#else
	(void)ifp_changed; /* XXX gcc */
#endif
out:
	ifa_release(ifa, &psref_ifa);
	if_put(new_ifp, &psref_new_ifp);
	if_put(ifp, &psref_ifp);

	RT_UNLOCK();

	return error;
}
1539
1540 static void
1541 rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst,
1542 const struct sockaddr *netmask)
1543 {
1544 const char *netmaskp = &netmask->sa_data[0],
1545 *srcp = &src->sa_data[0];
1546 char *dstp = &dst->sa_data[0];
1547 const char *maskend = (char *)dst + MIN(netmask->sa_len, src->sa_len);
1548 const char *srcend = (char *)dst + src->sa_len;
1549
1550 dst->sa_len = src->sa_len;
1551 dst->sa_family = src->sa_family;
1552
1553 while (dstp < maskend)
1554 *dstp++ = *srcp++ & *netmaskp++;
1555 if (dstp < srcend)
1556 memset(dstp, 0, (size_t)(srcend - dstp));
1557 }
1558
1559 /*
1560 * Inform the routing socket of a route change.
1561 */
1562 void
1563 rt_newmsg(const int cmd, const struct rtentry *rt)
1564 {
1565 struct rt_addrinfo info;
1566
1567 memset((void *)&info, 0, sizeof(info));
1568 info.rti_info[RTAX_DST] = rt_getkey(rt);
1569 info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
1570 info.rti_info[RTAX_NETMASK] = rt_mask(rt);
1571 if (rt->rt_ifp) {
1572 info.rti_info[RTAX_IFP] = rt->rt_ifp->if_dl->ifa_addr;
1573 info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
1574 }
1575
1576 rt_missmsg(cmd, &info, rt->rt_flags, 0);
1577 }
1578
1579 /*
1580 * Set up or tear down a routing table entry, normally
1581 * for an interface.
1582 */
1583 int
1584 rtinit(struct ifaddr *ifa, int cmd, int flags)
1585 {
1586 struct rtentry *rt;
1587 struct sockaddr *dst, *odst;
1588 struct sockaddr_storage maskeddst;
1589 struct rtentry *nrt = NULL;
1590 int error;
1591 struct rt_addrinfo info;
1592
1593 dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr;
1594 if (cmd == RTM_DELETE) {
1595 if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
1596 /* Delete subnet route for this interface */
1597 odst = dst;
1598 dst = (struct sockaddr *)&maskeddst;
1599 rt_maskedcopy(odst, dst, ifa->ifa_netmask);
1600 }
1601 if ((rt = rtalloc1(dst, 0)) != NULL) {
1602 if (rt->rt_ifa != ifa) {
1603 rt_unref(rt);
1604 return (flags & RTF_HOST) ? EHOSTUNREACH
1605 : ENETUNREACH;
1606 }
1607 rt_unref(rt);
1608 }
1609 }
1610 memset(&info, 0, sizeof(info));
1611 info.rti_ifa = ifa;
1612 info.rti_flags = flags | ifa->ifa_flags | RTF_DONTCHANGEIFA;
1613 info.rti_info[RTAX_DST] = dst;
1614 info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1615
1616 /*
1617 * XXX here, it seems that we are assuming that ifa_netmask is NULL
1618 * for RTF_HOST. bsdi4 passes NULL explicitly (via intermediate
1619 * variable) when RTF_HOST is 1. still not sure if i can safely
1620 * change it to meet bsdi4 behavior.
1621 */
1622 if (cmd != RTM_LLINFO_UPD)
1623 info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;
1624 error = rtrequest1((cmd == RTM_LLINFO_UPD) ? RTM_GET : cmd, &info,
1625 &nrt);
1626 if (error != 0)
1627 return error;
1628
1629 rt = nrt;
1630 RT_REFCNT_TRACE(rt);
1631 switch (cmd) {
1632 case RTM_DELETE:
1633 rt_newmsg(cmd, rt);
1634 rt_free(rt);
1635 break;
1636 case RTM_LLINFO_UPD:
1637 if (cmd == RTM_LLINFO_UPD && ifa->ifa_rtrequest != NULL)
1638 ifa->ifa_rtrequest(RTM_LLINFO_UPD, rt, &info);
1639 rt_newmsg(RTM_CHANGE, rt);
1640 rt_unref(rt);
1641 break;
1642 case RTM_ADD:
1643 KASSERT(rt->rt_ifa == ifa);
1644 rt_newmsg(cmd, rt);
1645 rt_unref(rt);
1646 RT_REFCNT_TRACE(rt);
1647 break;
1648 }
1649 return error;
1650 }
1651
1652 /*
1653 * Create a local route entry for the address.
1654 * Announce the addition of the address and the route to the routing socket.
1655 */
int
rt_ifa_addlocal(struct ifaddr *ifa)
{
	struct rtentry *rt;
	int e;

	/* If there is no loopback entry, allocate one. */
	rt = rtalloc1(ifa->ifa_addr, 0);
#ifdef RT_DEBUG
	if (rt != NULL)
		dump_rt(rt);
#endif
	/*
	 * Add the local route only if no loopbacked host route already
	 * covers the address exactly.
	 */
	if (rt == NULL || (rt->rt_flags & RTF_HOST) == 0 ||
	    (rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0)
	{
		struct rt_addrinfo info;
		struct rtentry *nrt;

		memset(&info, 0, sizeof(info));
		info.rti_flags = RTF_HOST | RTF_LOCAL | RTF_DONTCHANGEIFA;
		info.rti_info[RTAX_DST] = ifa->ifa_addr;
		info.rti_info[RTAX_GATEWAY] =
		    (const struct sockaddr *)ifa->ifa_ifp->if_sadl;
		info.rti_ifa = ifa;
		nrt = NULL;
		e = rtrequest1(RTM_ADD, &info, &nrt);
		/* Announce both the address and the route. */
		rt_addrmsg_rt(RTM_ADD, ifa, e, nrt);
		if (nrt != NULL) {
			KASSERT(nrt->rt_ifa == ifa);
#ifdef RT_DEBUG
			dump_rt(nrt);
#endif
			rt_unref(nrt);
			RT_REFCNT_TRACE(nrt);
		}
	} else {
		/* Route already present; only announce the address. */
		e = 0;
		rt_addrmsg(RTM_NEWADDR, ifa);
	}
	if (rt != NULL)
		rt_unref(rt);
	return e;
}
1699
1700 /*
1701 * Remove the local route entry for the address.
1702 * Announce the removal of the address and the route to the routing socket.
1703 */
int
rt_ifa_remlocal(struct ifaddr *ifa, struct ifaddr *alt_ifa)
{
	struct rtentry *rt;
	int e = 0;

	rt = rtalloc1(ifa->ifa_addr, 0);

	/*
	 * Before deleting, check if a corresponding loopbacked
	 * host route surely exists.  With this check, we can avoid
	 * deleting an interface direct route whose destination is
	 * the same as the address being removed.  This can happen
	 * when removing a subnet-router anycast address on an
	 * interface attached to a shared medium.
	 */
	if (rt != NULL &&
	    (rt->rt_flags & RTF_HOST) &&
	    (rt->rt_ifp->if_flags & IFF_LOOPBACK))
	{
		/* If we cannot replace the route's ifaddr with the equivalent
		 * ifaddr of another interface, I believe it is safest to
		 * delete the route.
		 */
		if (alt_ifa == NULL) {
			e = rtdeletemsg(rt);
			if (e == 0) {
				/* Drop our lookup reference, then destroy. */
				rt_unref(rt);
				rt_free(rt);
				rt = NULL;
			}
			rt_addrmsg(RTM_DELADDR, ifa);
		} else {
#ifdef NET_MPSAFE
			/* Serialize against concurrent users of the rtentry. */
			int error = rt_update_prepare(rt);
			if (error == 0) {
				rt_replace_ifa(rt, alt_ifa);
				rt_update_finish(rt);
			} else {
				/*
				 * If error != 0, the rtentry is being
				 * destroyed, so doing nothing doesn't
				 * matter.
				 */
			}
#else
			rt_replace_ifa(rt, alt_ifa);
#endif
			rt_newmsg(RTM_CHANGE, rt);
		}
	} else
		rt_addrmsg(RTM_DELADDR, ifa);
	if (rt != NULL)
		rt_unref(rt);
	return e;
}
1760
1761 /*
1762 * Route timer routines. These routes allow functions to be called
1763 * for various routes at any time. This is useful in supporting
1764 * path MTU discovery and redirect route deletion.
1765 *
1766 * This is similar to some BSDI internal functions, but it provides
1767 * for multiple queues for efficiency's sake...
1768 */
1769
/* All active route timer queues, linked through rtq_link. */
LIST_HEAD(, rttimer_queue) rttimer_queue_head;
/* Nonzero once rt_timer_init() has run; initialization is lazy. */
static int rt_init_done = 0;
1772
1773 /*
1774 * Some subtle order problems with domain initialization mean that
1775 * we cannot count on this being run from rt_init before various
1776 * protocol initializations are done. Therefore, we make sure
1777 * that this is run when the first queue is added...
1778 */
1779
1780 static void rt_timer_work(struct work *, void *);
1781
/*
 * One-time setup of the route timer machinery: the queue list, the
 * periodic callout and the workqueue that does the actual expiry work.
 * Called lazily from rt_timer_queue_create().
 */
static void
rt_timer_init(void)
{
	int error;

	assert(rt_init_done == 0);

	/* XXX should be in rt_init */
	rw_init(&rt_lock);

	LIST_INIT(&rttimer_queue_head);
	callout_init(&rt_timer_ch, CALLOUT_MPSAFE);
	error = workqueue_create(&rt_timer_wq, "rt_timer",
	    rt_timer_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);
	/* Fire once per second; rt_timer_timer defers to the workqueue. */
	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
	rt_init_done = 1;
}
1801
/*
 * Allocate a new route timer queue with the given expiry period (in
 * seconds) and link it onto the global queue list.  Returns NULL on
 * allocation failure.
 */
struct rttimer_queue *
rt_timer_queue_create(u_int timeout)
{
	struct rttimer_queue *rtq;

	/* The timer machinery is initialized lazily on first use. */
	if (rt_init_done == 0)
		rt_timer_init();

	R_Malloc(rtq, struct rttimer_queue *, sizeof *rtq);
	if (rtq == NULL)
		return NULL;
	memset(rtq, 0, sizeof(*rtq));

	rtq->rtq_timeout = timeout;
	TAILQ_INIT(&rtq->rtq_head);
	/* Publish the queue under the routing write lock. */
	RT_WLOCK();
	LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);
	RT_UNLOCK();

	return rtq;
}
1823
/*
 * Change the expiry period of a timer queue.  Affects already-queued
 * entries too, since expiry is computed from rtq_timeout at scan time
 * in rt_timer_work().
 */
void
rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
{

	rtq->rtq_timeout = timeout;
}
1830
/*
 * Fire and remove every timer on the queue.  Called with the routing
 * write lock held; the lock is dropped around each callback (which may
 * itself take routing locks) and re-taken afterwards.
 */
static void
rt_timer_queue_remove_all(struct rttimer_queue *rtq)
{
	struct rttimer *r;

	RT_ASSERT_WLOCK();

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		/* Hold the rtentry across the unlocked callback. */
		rt_ref(r->rtt_rt);	/* XXX */
		RT_REFCNT_TRACE(r->rtt_rt);
		RT_UNLOCK();
		(*r->rtt_func)(r->rtt_rt, r);
		pool_put(&rttimer_pool, r);
		RT_WLOCK();
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_remove_all: "
			    "rtq_count reached 0\n");
	}
}
1854
/*
 * Fire all pending timers on the queue and unlink it from the global
 * list.  The rttimer_queue structure itself is NOT freed here.
 */
void
rt_timer_queue_destroy(struct rttimer_queue *rtq)
{

	RT_WLOCK();
	rt_timer_queue_remove_all(rtq);
	LIST_REMOVE(rtq, rtq_link);
	RT_UNLOCK();

	/*
	 * Caller is responsible for freeing the rttimer_queue structure.
	 */
}
1868
/* Return the number of timers currently on the queue. */
unsigned long
rt_timer_count(struct rttimer_queue *rtq)
{
	return rtq->rtq_count;
}
1874
/*
 * Remove (without firing) all timers attached to the given rtentry.
 * Used when a route is deleted.
 */
static void
rt_timer_remove_all(struct rtentry *rt)
{
	struct rttimer *r;

	RT_WLOCK();
	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		pool_put(&rttimer_pool, r);
	}
	RT_UNLOCK();
}
1892
/*
 * Arm a timer on rt that will invoke func when the queue's timeout
 * elapses.  At most one timer per (route, func) pair exists: an
 * existing one is recycled.  Returns 0 or ENOBUFS.
 */
int
rt_timer_add(struct rtentry *rt,
	void (*func)(struct rtentry *, struct rttimer *),
	struct rttimer_queue *queue)
{
	struct rttimer *r;

	KASSERT(func != NULL);
	RT_WLOCK();
	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
		if (r->rtt_func == func)
			break;
	}
	if (r != NULL) {
		/* Recycle the existing entry; it is re-queued below. */
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_add: rtq_count reached 0\n");
	} else {
		r = pool_get(&rttimer_pool, PR_NOWAIT);
		if (r == NULL) {
			RT_UNLOCK();
			return ENOBUFS;
		}
	}

	memset(r, 0, sizeof(*r));

	r->rtt_rt = rt;
	r->rtt_time = time_uptime;
	r->rtt_func = func;
	r->rtt_queue = queue;
	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
	/* Tail insertion keeps each queue ordered by arming time. */
	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
	r->rtt_queue->rtq_count++;

	RT_UNLOCK();

	return 0;
}
1939
/*
 * Workqueue handler: expire due timers on every queue, then re-arm the
 * periodic callout.  Queues are FIFO by arming time (see rt_timer_add),
 * so only the head of each queue needs checking.
 */
static void
rt_timer_work(struct work *wk, void *arg)
{
	struct rttimer_queue *rtq;
	struct rttimer *r;

	RT_WLOCK();
	LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < time_uptime) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			/*
			 * Take a reference to avoid the rtentry is freed
			 * accidentally after RT_UNLOCK.  The callback
			 * (rtt_func) must rt_unref it by itself.
			 */
			rt_ref(r->rtt_rt);
			RT_REFCNT_TRACE(r->rtt_rt);
			RT_UNLOCK();
			(*r->rtt_func)(r->rtt_rt, r);
			pool_put(&rttimer_pool, r);
			RT_WLOCK();
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	RT_UNLOCK();

	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
}
1973
/* Callout handler: defer the actual timer scan to the workqueue. */
static void
rt_timer_timer(void *arg)
{

	workqueue_enqueue(rt_timer_wq, &rt_timer_wk, NULL);
}
1980
/*
 * Look up a route for the cached destination and, if it is RTF_UP,
 * install it in the cache with a reference (psref under NET_MPSAFE).
 * flag is passed through to rtalloc1().  Returns the cached rtentry
 * or NULL.
 */
static struct rtentry *
_rtcache_init(struct route *ro, int flag)
{
	struct rtentry *rt;

	rtcache_invariants(ro);
	KASSERT(ro->_ro_rt == NULL);

	if (rtcache_getdst(ro) == NULL)
		return NULL;
	rt = rtalloc1(rtcache_getdst(ro), flag);
	if (rt != NULL) {
		RT_RLOCK();
		if (ISSET(rt->rt_flags, RTF_UP)) {
			/*
			 * Record the generation so rtcache_validate() can
			 * detect a later cache invalidation.
			 */
			ro->_ro_rt = rt;
			ro->ro_rtcache_generation = rtcache_generation;
			rtcache_ref(rt, ro);
		}
		RT_UNLOCK();
		rt_unref(rt);
	}

	rtcache_invariants(ro);
	return ro->_ro_rt;
}
2006
/* Initialize the route cache, allowing route cloning in the lookup. */
struct rtentry *
rtcache_init(struct route *ro)
{

	return _rtcache_init(ro, 1);
}
2013
/* Initialize the route cache without cloning in the lookup. */
struct rtentry *
rtcache_init_noclone(struct route *ro)
{

	return _rtcache_init(ro, 0);
}
2020
/* Discard the cached route and look it up afresh. */
struct rtentry *
rtcache_update(struct route *ro, int clone)
{

	ro->_ro_rt = NULL;
	return _rtcache_init(ro, clone);
}
2028
/*
 * Copy the destination and, when it validates, the cached route of
 * old_ro into new_ro.  The reference taken on old_ro's route is
 * dropped before returning.
 */
void
rtcache_copy(struct route *new_ro, struct route *old_ro)
{
	struct rtentry *rt;
	int ret;

	KASSERT(new_ro != old_ro);
	rtcache_invariants(new_ro);
	rtcache_invariants(old_ro);

	rt = rtcache_validate(old_ro);

	if (rtcache_getdst(old_ro) == NULL)
		goto out;
	ret = rtcache_setdst(new_ro, rtcache_getdst(old_ro));
	if (ret != 0)
		goto out;

	/* Publish the route and generation under the read lock. */
	RT_RLOCK();
	new_ro->_ro_rt = rt;
	new_ro->ro_rtcache_generation = rtcache_generation;
	RT_UNLOCK();
	rtcache_invariants(new_ro);
out:
	rtcache_unref(rt, old_ro);
	return;
}
2056
#if defined(RT_DEBUG) && defined(NET_MPSAFE)
/* Debug helper: log psref acquire/release events for a cached route. */
static void
rtcache_trace(const char *func, struct rtentry *rt, struct route *ro)
{
	char dst[64];

	sockaddr_format(ro->ro_sa, dst, 64);
	printf("trace: %s:\tdst=%s cpu=%d lwp=%p psref=%p target=%p\n", func, dst,
	    cpu_index(curcpu()), curlwp, &ro->ro_psref, &rt->rt_psref);
}
#define RTCACHE_PSREF_TRACE(rt, ro)	rtcache_trace(__func__, (rt), (ro))
#else
#define RTCACHE_PSREF_TRACE(rt, ro)	do {} while (0)
#endif
2071
/*
 * Take a reference on a cached route.  Under NET_MPSAFE this is a
 * psref acquisition bound to the current LWP; otherwise a no-op.
 */
static void
rtcache_ref(struct rtentry *rt, struct route *ro)
{

	KASSERT(rt != NULL);

#ifdef NET_MPSAFE
	RTCACHE_PSREF_TRACE(rt, ro);
	/* psref requires the LWP to stay bound to its CPU. */
	ro->ro_bound = curlwp_bind();
	/* XXX Use a real caller's address */
	PSREF_DEBUG_FILL_RETURN_ADDRESS(&ro->ro_psref);
	psref_acquire(&ro->ro_psref, &rt->rt_psref, rt_psref_class);
#endif
}
2086
/*
 * Drop the reference taken by rtcache_ref().  Safe to call with a
 * NULL route (no-op).
 */
void
rtcache_unref(struct rtentry *rt, struct route *ro)
{

	if (rt == NULL)
		return;

#ifdef NET_MPSAFE
	psref_release(&ro->ro_psref, &rt->rt_psref, rt_psref_class);
	curlwp_bindx(ro->ro_bound);
	RTCACHE_PSREF_TRACE(rt, ro);
#endif
}
2100
/*
 * Return the cached route if it is still usable (generation matches
 * and RTF_UP), taking a reference on it; otherwise NULL.  Under
 * NET_MPSAFE a route being updated (RTF_UPDATING) is waited for when
 * the context allows sleeping, then the check is retried.
 */
struct rtentry *
rtcache_validate(struct route *ro)
{
	struct rtentry *rt = NULL;

#ifdef NET_MPSAFE
retry:
#endif
	rtcache_invariants(ro);
	RT_RLOCK();
	if (ro->ro_rtcache_generation != rtcache_generation) {
		/* The cache is invalidated */
		rt = NULL;
		goto out;
	}

	rt = ro->_ro_rt;
	if (rt == NULL)
		goto out;

	if ((rt->rt_flags & RTF_UP) == 0) {
		rt = NULL;
		goto out;
	}
#ifdef NET_MPSAFE
	if (ISSET(rt->rt_flags, RTF_UPDATING)) {
		if (rt_wait_ok()) {
			RT_UNLOCK();

			/* We can wait until the update is complete */
			rt_update_wait();
			goto retry;
		} else {
			rt = NULL;
		}
	} else
#endif
		rtcache_ref(rt, ro);
out:
	RT_UNLOCK();
	return rt;
}
2143
/*
 * Look up dst through the route cache.  On a hit (same destination and
 * still-valid route) return the cached, referenced route; on a miss,
 * re-point the cache at dst and do a fresh lookup (cloning if clone is
 * nonzero).  *hitp reports hit (1) or miss (0) when non-NULL.
 */
struct rtentry *
rtcache_lookup2(struct route *ro, const struct sockaddr *dst,
	int clone, int *hitp)
{
	const struct sockaddr *odst;
	struct rtentry *rt = NULL;

	odst = rtcache_getdst(ro);
	if (odst == NULL)
		goto miss;

	/* Destination changed: the cached route is for another dst. */
	if (sockaddr_cmp(odst, dst) != 0) {
		rtcache_free(ro);
		goto miss;
	}

	rt = rtcache_validate(ro);
	if (rt == NULL) {
		ro->_ro_rt = NULL;
		goto miss;
	}

	rtcache_invariants(ro);

	if (hitp != NULL)
		*hitp = 1;
	return rt;
miss:
	if (hitp != NULL)
		*hitp = 0;
	if (rtcache_setdst(ro, dst) == 0)
		rt = _rtcache_init(ro, clone);

	rtcache_invariants(ro);

	return rt;
}
2181
2182 void
2183 rtcache_free(struct route *ro)
2184 {
2185
2186 ro->_ro_rt = NULL;
2187 if (ro->ro_sa != NULL) {
2188 sockaddr_free(ro->ro_sa);
2189 ro->ro_sa = NULL;
2190 }
2191 rtcache_invariants(ro);
2192 }
2193
/*
 * Set the cache's destination to a copy of sa, dropping any cached
 * route.  An existing sockaddr of the same family is overwritten in
 * place; otherwise it is freed and a new one allocated.  Returns 0 or
 * ENOMEM.
 */
int
rtcache_setdst(struct route *ro, const struct sockaddr *sa)
{
	KASSERT(sa != NULL);

	rtcache_invariants(ro);
	if (ro->ro_sa != NULL) {
		if (ro->ro_sa->sa_family == sa->sa_family) {
			/* Same family: reuse the existing allocation. */
			ro->_ro_rt = NULL;
			sockaddr_copy(ro->ro_sa, ro->ro_sa->sa_len, sa);
			rtcache_invariants(ro);
			return 0;
		}
		/* free ro_sa, wrong family */
		rtcache_free(ro);
	}

	KASSERT(ro->_ro_rt == NULL);

	if ((ro->ro_sa = sockaddr_dup(sa, M_ZERO | M_NOWAIT)) == NULL) {
		rtcache_invariants(ro);
		return ENOMEM;
	}
	rtcache_invariants(ro);
	return 0;
}
2220
/* Per-CPU constructor: allocate this CPU's struct route. */
static void
rtcache_percpu_init_cpu(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct route **rop = p;

	/*
	 * We can't have struct route as percpu data because it can be destroyed
	 * over a memory enlargement processing of percpu.
	 */
	*rop = kmem_zalloc(sizeof(**rop), KM_SLEEP);
}
2232
2233 percpu_t *
2234 rtcache_percpu_alloc(void)
2235 {
2236 percpu_t *pc;
2237
2238 pc = percpu_alloc(sizeof(struct route *));
2239 percpu_foreach(pc, rtcache_percpu_init_cpu, NULL);
2240
2241 return pc;
2242 }
2243
2244 const struct sockaddr *
2245 rt_settag(struct rtentry *rt, const struct sockaddr *tag)
2246 {
2247 if (rt->rt_tag != tag) {
2248 if (rt->rt_tag != NULL)
2249 sockaddr_free(rt->rt_tag);
2250 rt->rt_tag = sockaddr_dup(tag, M_ZERO | M_NOWAIT);
2251 }
2252 return rt->rt_tag;
2253 }
2254
/* Return the route's tag; may be NULL. */
struct sockaddr *
rt_gettag(const struct rtentry *rt)
{
	return rt->rt_tag;
}
2260
2261 int
2262 rt_check_reject_route(const struct rtentry *rt, const struct ifnet *ifp)
2263 {
2264
2265 if ((rt->rt_flags & RTF_REJECT) != 0) {
2266 /* Mimic looutput */
2267 if (ifp->if_flags & IFF_LOOPBACK)
2268 return (rt->rt_flags & RTF_HOST) ?
2269 EHOSTUNREACH : ENETUNREACH;
2270 else if (rt->rt_rmx.rmx_expire == 0 ||
2271 time_uptime < rt->rt_rmx.rmx_expire)
2272 return (rt->rt_flags & RTF_GATEWAY) ?
2273 EHOSTUNREACH : EHOSTDOWN;
2274 }
2275
2276 return 0;
2277 }
2278
/*
 * Delete every rtentry of 'family' for which the predicate f(rt, v)
 * matches.  Entries are found and deleted one at a time: the routing
 * table lock cannot be held across rtrequest(), so each iteration
 * re-searches under RT_RLOCK, takes a reference, drops the lock, and
 * then issues RTM_DELETE on the referenced entry.
 */
void
rt_delete_matched_entries(sa_family_t family, int (*f)(struct rtentry *, void *),
    void *v)
{

	for (;;) {
		int s;
		int error;
		struct rtentry *rt, *retrt = NULL;

		RT_RLOCK();
		s = splsoftnet();
		rt = rtbl_search_matched_entry(family, f, v);
		if (rt == NULL) {
			/* No more matching entries; we are done. */
			splx(s);
			RT_UNLOCK();
			return;
		}
		/* Hold a reference so rt stays valid after dropping the lock. */
		rt_ref(rt);
		splx(s);
		RT_UNLOCK();

		error = rtrequest(RTM_DELETE, rt_getkey(rt), rt->rt_gateway,
		    rt_mask(rt), rt->rt_flags, &retrt);
		if (error == 0) {
			KASSERT(retrt == rt);
			KASSERT((retrt->rt_flags & RTF_UP) == 0);
			retrt->rt_ifp = NULL;
			rt_unref(rt);
			rt_free(retrt);
		} else if (error == ESRCH) {
			/* Someone deleted the entry already. */
			rt_unref(rt);
		} else {
			log(LOG_ERR, "%s: unable to delete rtentry @ %p, "
			    "error = %d\n", rt->rt_ifp->if_xname, rt, error);
			/* XXX how to treat this case? */
		}
	}
}
2319
2320 static int
2321 rt_walktree_locked(sa_family_t family, int (*f)(struct rtentry *, void *),
2322 void *v)
2323 {
2324
2325 return rtbl_walktree(family, f, v);
2326 }
2327
2328 int
2329 rt_walktree(sa_family_t family, int (*f)(struct rtentry *, void *), void *v)
2330 {
2331 int error;
2332
2333 RT_RLOCK();
2334 error = rt_walktree_locked(family, f, v);
2335 RT_UNLOCK();
2336
2337 return error;
2338 }
2339
2340 #ifdef DDB
2341
2342 #include <machine/db_machdep.h>
2343 #include <ddb/db_interface.h>
2344 #include <ddb/db_output.h>
2345
2346 #define rt_expire rt_rmx.rmx_expire
2347
2348 static void
2349 db_print_sa(const struct sockaddr *sa)
2350 {
2351 int len;
2352 const u_char *p;
2353
2354 if (sa == NULL) {
2355 db_printf("[NULL]");
2356 return;
2357 }
2358
2359 p = (const u_char *)sa;
2360 len = sa->sa_len;
2361 db_printf("[");
2362 while (len > 0) {
2363 db_printf("%d", *p);
2364 p++; len--;
2365 if (len) db_printf(",");
2366 }
2367 db_printf("]\n");
2368 }
2369
/* DDB helper: dump the addresses and counters of an ifaddr (NULL ok). */
static void
db_print_ifa(struct ifaddr *ifa)
{
	if (ifa == NULL)
		return;
	db_printf("  ifa_addr=");
	db_print_sa(ifa->ifa_addr);
	db_printf("  ifa_dsta=");
	db_print_sa(ifa->ifa_dstaddr);
	db_printf("  ifa_mask=");
	db_print_sa(ifa->ifa_netmask);
	db_printf("  flags=0x%x,refcnt=%d,metric=%d\n",
	    ifa->ifa_flags,
	    ifa->ifa_refcnt,
	    ifa->ifa_metric);
}
2386
2387 /*
2388 * Function to pass to rt_walktree().
2389 * Return non-zero error to abort walk.
2390 */
/*
 * Function to pass to rt_walktree().
 * Return non-zero error to abort walk.
 *
 * Dumps one rtentry: flags/counters, key/mask/gateway sockaddrs, the
 * interface and address it resolves to, and the linked gwroute/llinfo
 * pointers.  Always returns 0 so the whole tree is printed.
 */
static int
db_show_rtentry(struct rtentry *rt, void *w)
{
	db_printf("rtentry=%p", rt);

	db_printf(" flags=0x%x refcnt=%d use=%"PRId64" expire=%"PRId64"\n",
	    rt->rt_flags, rt->rt_refcnt,
	    rt->rt_use, (uint64_t)rt->rt_expire);

	db_printf(" key="); db_print_sa(rt_getkey(rt));
	db_printf(" mask="); db_print_sa(rt_mask(rt));
	db_printf(" gw="); db_print_sa(rt->rt_gateway);

	db_printf(" ifp=%p ", rt->rt_ifp);
	if (rt->rt_ifp)
		db_printf("(%s)", rt->rt_ifp->if_xname);
	else
		db_printf("(NULL)");

	db_printf(" ifa=%p\n", rt->rt_ifa);
	db_print_ifa(rt->rt_ifa);

	db_printf(" gwroute=%p llinfo=%p\n",
	    rt->rt_gwroute, rt->rt_llinfo);

	return 0;
}
2418
2419 /*
2420 * Function to print all the route trees.
2421 * Use this from ddb: "show routes"
2422 */
void
db_show_routes(db_expr_t addr, bool have_addr,
    db_expr_t count, const char *modif)
{
	/* NOTE: only the AF_INET tree is walked here. */

	/* Taking RT_LOCK will fail if LOCKDEBUG is enabled. */
	rt_walktree_locked(AF_INET, db_show_rtentry, NULL);
}
2431 #endif
2432