xref: /openbsd-src/sys/net/route.c (revision 8550894424f8a4aa4aafb6cd57229dd6ed7cd9dd)
1 /*	$OpenBSD: route.c,v 1.415 2023/01/21 17:35:01 mvs Exp $	*/
2 /*	$NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the project nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1980, 1986, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)route.c	8.2 (Berkeley) 11/15/93
62  */
63 
64 /*
65  *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
66  *
67  * NRL grants permission for redistribution and use in source and binary
68  * forms, with or without modification, of the software and documentation
69  * created at NRL provided that the following conditions are met:
70  *
71  * 1. Redistributions of source code must retain the above copyright
72  *    notice, this list of conditions and the following disclaimer.
73  * 2. Redistributions in binary form must reproduce the above copyright
74  *    notice, this list of conditions and the following disclaimer in the
75  *    documentation and/or other materials provided with the distribution.
76  * 3. All advertising materials mentioning features or use of this software
77  *    must display the following acknowledgements:
78  *	This product includes software developed by the University of
79  *	California, Berkeley and its contributors.
80  *	This product includes software developed at the Information
81  *	Technology Division, US Naval Research Laboratory.
82  * 4. Neither the name of the NRL nor the names of its contributors
83  *    may be used to endorse or promote products derived from this software
84  *    without specific prior written permission.
85  *
86  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
87  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
88  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
89  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
90  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
91  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
92  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
93  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
94  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
95  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
96  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97  *
98  * The views and conclusions contained in the software and documentation
99  * are those of the authors and should not be interpreted as representing
100  * official policies, either expressed or implied, of the US Naval
101  * Research Laboratory (NRL).
102  */
103 
104 #include <sys/param.h>
105 #include <sys/systm.h>
106 #include <sys/mbuf.h>
107 #include <sys/socket.h>
108 #include <sys/socketvar.h>
109 #include <sys/timeout.h>
110 #include <sys/domain.h>
111 #include <sys/ioctl.h>
112 #include <sys/kernel.h>
113 #include <sys/queue.h>
114 #include <sys/pool.h>
115 #include <sys/atomic.h>
116 #include <sys/rwlock.h>
117 
118 #include <net/if.h>
119 #include <net/if_var.h>
120 #include <net/if_dl.h>
121 #include <net/route.h>
122 
123 #include <netinet/in.h>
124 #include <netinet/ip_var.h>
125 #include <netinet/in_var.h>
126 
127 #ifdef INET6
128 #include <netinet/ip6.h>
129 #include <netinet6/ip6_var.h>
130 #include <netinet6/in6_var.h>
131 #endif
132 
133 #ifdef MPLS
134 #include <netmpls/mpls.h>
135 #endif
136 
137 #ifdef BFD
138 #include <net/bfd.h>
139 #endif
140 
/*
 * Round a sockaddr length up to the next multiple of sizeof(long);
 * zero rounds up to sizeof(long) so an empty address still gets a
 * minimal allocation.  The argument is fully parenthesized so that
 * expressions such as ROUNDUP(x & 15) expand correctly (the original
 * form `a>0' bound the comparison into the argument expression).
 */
#define ROUNDUP(a) \
	((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
142 
/*
 * Serializes route modifications; presumably taken via the RT_LOCK()/
 * RT_UNLOCK() macros used in rt_clone() — confirm against route.h.
 */
struct rwlock rt_lock = RWLOCK_INITIALIZER("rtlck");

/* Give some jitter to hash, to avoid synchronization between routers. */
static uint32_t		rt_hashjitter;

extern unsigned int	rtmap_limit;

struct cpumem *		rtcounters;	/* per-CPU routing statistics */
int			rttrash;	/* routes not in table but not freed */

struct pool	rtentry_pool;		/* pool for rtentry structures */
struct pool	rttimer_pool;		/* pool for rttimer structures */

/* Local prototypes; see the definitions below for details. */
int	rt_setgwroute(struct rtentry *, u_int);
void	rt_putgwroute(struct rtentry *);
int	rtflushclone1(struct rtentry *, void *, u_int);
int	rtflushclone(struct rtentry *, unsigned int);
int	rt_ifa_purge_walker(struct rtentry *, void *, unsigned int);
struct rtentry *rt_match(struct sockaddr *, uint32_t *, int, unsigned int);
int	rt_clone(struct rtentry **, struct sockaddr *, unsigned int);
struct sockaddr *rt_plentosa(sa_family_t, int, struct sockaddr_in6 *);
static int rt_copysa(struct sockaddr *, struct sockaddr *, struct sockaddr **);

/* Highest route label ID handed out dynamically. */
#define	LABELID_MAX	50000

/* Name <-> numeric ID mapping for route labels. */
struct rt_label {
	TAILQ_ENTRY(rt_label)	rtl_entry;	/* entry on rt_labels list */
	char			rtl_name[RTLABEL_LEN];	/* label name */
	u_int16_t		rtl_id;		/* numeric label ID */
	int			rtl_ref;	/* reference count */
};

TAILQ_HEAD(rt_labels, rt_label)	rt_labels = TAILQ_HEAD_INITIALIZER(rt_labels);
176 
/*
 * One-time initialization of the routing layer: per-CPU statistics
 * counters, the rtentry pool, the multipath hash jitter and (if
 * configured) the BFD subsystem.
 */
void
route_init(void)
{
	rtcounters = counters_alloc(rts_ncounters);

	pool_init(&rtentry_pool, sizeof(struct rtentry), 0, IPL_MPFLOOR, 0,
	    "rtentry", NULL);

	/* Zero means "uninitialized", so retry until non-zero. */
	while (rt_hashjitter == 0)
		rt_hashjitter = arc4random();

#ifdef BFD
	bfdinit();
#endif
}
192 
193 /*
194  * Returns 1 if the (cached) ``rt'' entry is still valid, 0 otherwise.
195  */
196 int
197 rtisvalid(struct rtentry *rt)
198 {
199 	if (rt == NULL)
200 		return (0);
201 
202 	if (!ISSET(rt->rt_flags, RTF_UP))
203 		return (0);
204 
205 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
206 		KASSERT(rt->rt_gwroute != NULL);
207 		KASSERT(!ISSET(rt->rt_gwroute->rt_flags, RTF_GATEWAY));
208 		if (!ISSET(rt->rt_gwroute->rt_flags, RTF_UP))
209 			return (0);
210 	}
211 
212 	return (1);
213 }
214 
/*
 * Do the actual lookup for rtalloc(9), do not use directly!
 *
 * Return the best matching entry for the destination ``dst''.
 *
 * "RT_RESOLVE" means that a corresponding L2 entry should
 * be added to the routing table and resolved (via ARP or
 * NDP), if it does not exist.
 */
struct rtentry *
rt_match(struct sockaddr *dst, uint32_t *src, int flags, unsigned int tableid)
{
	struct rtentry		*rt = NULL;

	rt = rtable_match(tableid, dst, src);
	if (rt == NULL) {
		rtstat_inc(rts_unreach);
		return (NULL);
	}

	/*
	 * Spawn an L2 entry on demand.  A failed clone is not fatal:
	 * the cloning route itself is returned in that case.
	 */
	if (ISSET(rt->rt_flags, RTF_CLONING) && ISSET(flags, RT_RESOLVE))
		rt_clone(&rt, dst, tableid);

	rt->rt_use++;	/* usage statistic */
	return (rt);
}
241 
/*
 * Create and resolve a cloned (L2) entry for ``dst'' from the cloning
 * route ``*rtp''.  On success ``*rtp'' is replaced by the new entry
 * and the old reference is dropped; on failure ``*rtp'' is left
 * untouched and an RTM_MISS message is sent to listeners.
 */
int
rt_clone(struct rtentry **rtp, struct sockaddr *dst, unsigned int rtableid)
{
	struct rt_addrinfo	 info;
	struct rtentry		*rt = *rtp;
	int			 error = 0;

	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;

	/*
	 * The priority of cloned route should be different
	 * to avoid conflict with /32 cloning routes.
	 *
	 * It should also be higher to let the ARP layer find
	 * cloned routes instead of the cloning one.
	 */
	RT_LOCK();
	error = rtrequest(RTM_RESOLVE, &info, rt->rt_priority - 1, &rt,
	    rtableid);
	RT_UNLOCK();
	if (error) {
		rtm_miss(RTM_MISS, &info, 0, RTP_NONE, 0, error, rtableid);
	} else {
		/* Inform listeners of the new route */
		rtm_send(rt, RTM_ADD, 0, rtableid);
		rtfree(*rtp);
		*rtp = rt;
	}
	return (error);
}
273 
/*
 * Originated from bridge_hash() in if_bridge.c
 *
 * One 32-bit mixing round for rt_hash().  Each argument is evaluated
 * many times, so only plain variables may be passed.
 */
#define mix(a, b, c) do {						\
	a -= b; a -= c; a ^= (c >> 13);					\
	b -= c; b -= a; b ^= (a << 8);					\
	c -= a; c -= b; c ^= (b >> 13);					\
	a -= b; a -= c; a ^= (c >> 12);					\
	b -= c; b -= a; b ^= (a << 16);					\
	c -= a; c -= b; c ^= (b >> 5);					\
	a -= b; a -= c; a ^= (c >> 3);					\
	b -= c; b -= a; b ^= (a << 10);					\
	c -= a; c -= b; c ^= (b >> 15);					\
} while (0)
288 
289 int
290 rt_hash(struct rtentry *rt, struct sockaddr *dst, uint32_t *src)
291 {
292 	uint32_t a, b, c;
293 
294 	if (src == NULL || !rtisvalid(rt) || !ISSET(rt->rt_flags, RTF_MPATH))
295 		return (-1);
296 
297 	a = b = 0x9e3779b9;
298 	c = rt_hashjitter;
299 
300 	switch (dst->sa_family) {
301 	case AF_INET:
302 	    {
303 		struct sockaddr_in *sin;
304 
305 		if (!ipmultipath)
306 			return (-1);
307 
308 		sin = satosin(dst);
309 		a += sin->sin_addr.s_addr;
310 		b += src[0];
311 		mix(a, b, c);
312 		break;
313 	    }
314 #ifdef INET6
315 	case AF_INET6:
316 	    {
317 		struct sockaddr_in6 *sin6;
318 
319 		if (!ip6_multipath)
320 			return (-1);
321 
322 		sin6 = satosin6(dst);
323 		a += sin6->sin6_addr.s6_addr32[0];
324 		b += sin6->sin6_addr.s6_addr32[2];
325 		c += src[0];
326 		mix(a, b, c);
327 		a += sin6->sin6_addr.s6_addr32[1];
328 		b += sin6->sin6_addr.s6_addr32[3];
329 		c += src[1];
330 		mix(a, b, c);
331 		a += sin6->sin6_addr.s6_addr32[2];
332 		b += sin6->sin6_addr.s6_addr32[1];
333 		c += src[2];
334 		mix(a, b, c);
335 		a += sin6->sin6_addr.s6_addr32[3];
336 		b += sin6->sin6_addr.s6_addr32[0];
337 		c += src[3];
338 		mix(a, b, c);
339 		break;
340 	    }
341 #endif /* INET6 */
342 	}
343 
344 	return (c & 0xffff);
345 }
346 
347 /*
348  * Allocate a route, potentially using multipath to select the peer.
349  */
350 struct rtentry *
351 rtalloc_mpath(struct sockaddr *dst, uint32_t *src, unsigned int rtableid)
352 {
353 	return (rt_match(dst, src, RT_RESOLVE, rtableid));
354 }
355 
356 /*
357  * Look in the routing table for the best matching entry for
358  * ``dst''.
359  *
360  * If a route with a gateway is found and its next hop is no
361  * longer valid, try to cache it.
362  */
363 struct rtentry *
364 rtalloc(struct sockaddr *dst, int flags, unsigned int rtableid)
365 {
366 	return (rt_match(dst, NULL, flags, rtableid));
367 }
368 
/*
 * Cache the route entry corresponding to a reachable next hop in
 * the gateway entry ``rt''.
 *
 * Returns 0 on success, ENOENT when no next hop entry can be found,
 * EHOSTUNREACH/ENETUNREACH when the one found cannot be used.
 */
int
rt_setgwroute(struct rtentry *rt, u_int rtableid)
{
	struct rtentry *prt, *nhrt;
	unsigned int rdomain = rtable_l2(rtableid);
	int error;

	NET_ASSERT_LOCKED();

	KASSERT(ISSET(rt->rt_flags, RTF_GATEWAY));

	/* If we cannot find a valid next hop bail. */
	nhrt = rt_match(rt->rt_gateway, NULL, RT_RESOLVE, rdomain);
	if (nhrt == NULL)
		return (ENOENT);

	/* Next hop entry must be on the same interface. */
	if (nhrt->rt_ifidx != rt->rt_ifidx) {
		struct sockaddr_in6	sa_mask;

		/* Only a cloned L2 entry can be re-resolved elsewhere. */
		if (!ISSET(nhrt->rt_flags, RTF_LLINFO) ||
		    !ISSET(nhrt->rt_flags, RTF_CLONED)) {
			rtfree(nhrt);
			return (EHOSTUNREACH);
		}

		/*
		 * We found a L2 entry, so we might have multiple
		 * RTF_CLONING routes for the same subnet.  Query
		 * the first route of the multipath chain and iterate
		 * until we find the correct one.
		 */
		prt = rtable_lookup(rdomain, rt_key(nhrt->rt_parent),
		    rt_plen2mask(nhrt->rt_parent, &sa_mask), NULL, RTP_ANY);
		rtfree(nhrt);

		while (prt != NULL && prt->rt_ifidx != rt->rt_ifidx)
			prt = rtable_iterate(prt);

		/* We found nothing or a non-cloning MPATH route. */
		if (prt == NULL || !ISSET(prt->rt_flags, RTF_CLONING)) {
			rtfree(prt);
			return (EHOSTUNREACH);
		}

		/* Clone an L2 entry on the interface ``rt'' points to. */
		error = rt_clone(&prt, rt->rt_gateway, rdomain);
		if (error) {
			rtfree(prt);
			return (error);
		}
		nhrt = prt;
	}

	/*
	 * Next hop must be reachable, this also prevents rtentry
	 * loops for example when rt->rt_gwroute points to rt.
	 */
	if (ISSET(nhrt->rt_flags, RTF_CLONING|RTF_GATEWAY)) {
		rtfree(nhrt);
		return (ENETUNREACH);
	}

	/* Next hop is valid so remove possible old cache. */
	rt_putgwroute(rt);
	KASSERT(rt->rt_gwroute == NULL);

	/*
	 * If the MTU of next hop is 0, this will reset the MTU of the
	 * route to run PMTUD again from scratch.
	 */
	if (!ISSET(rt->rt_locks, RTV_MTU) && (rt->rt_mtu > nhrt->rt_mtu))
		rt->rt_mtu = nhrt->rt_mtu;

	/*
	 * To avoid reference counting problems when writing link-layer
	 * addresses in an outgoing packet, we ensure that the lifetime
	 * of a cached entry is greater than the bigger lifetime of the
	 * gateway entries it is pointed by.
	 */
	nhrt->rt_flags |= RTF_CACHED;
	nhrt->rt_cachecnt++;

	/* Transfer our rt_match() reference into rt_gwroute. */
	rt->rt_gwroute = nhrt;

	return (0);
}
459 
/*
 * Invalidate the cached route entry of the gateway entry ``rt''.
 *
 * Drops the RTF_CACHED marker once the last gateway entry pointing
 * at the next hop goes away, then releases the rt_gwroute reference.
 * No-op for non-gateway routes or when nothing is cached.
 */
void
rt_putgwroute(struct rtentry *rt)
{
	struct rtentry *nhrt = rt->rt_gwroute;

	NET_ASSERT_LOCKED();

	if (!ISSET(rt->rt_flags, RTF_GATEWAY) || nhrt == NULL)
		return;

	KASSERT(ISSET(nhrt->rt_flags, RTF_CACHED));
	KASSERT(nhrt->rt_cachecnt > 0);

	--nhrt->rt_cachecnt;
	if (nhrt->rt_cachecnt == 0)
		nhrt->rt_flags &= ~RTF_CACHED;

	rtfree(rt->rt_gwroute);
	rt->rt_gwroute = NULL;
}
483 
/*
 * Take a new reference on ``rt''.  Released with rtfree().
 */
void
rtref(struct rtentry *rt)
{
	refcnt_take(&rt->rt_refcnt);
}
489 
/*
 * Release a reference on ``rt'' and destroy the entry once the last
 * reference is gone.  NULL is tolerated.
 */
void
rtfree(struct rtentry *rt)
{
	if (rt == NULL)
		return;

	/* Only the releaser of the last reference tears the entry down. */
	if (refcnt_rele(&rt->rt_refcnt) == 0)
		return;

	/* A dying entry must already be off the table. */
	KASSERT(!ISSET(rt->rt_flags, RTF_UP));
	KASSERT(!RT_ROOT(rt));
	atomic_dec_int(&rttrash);

	KERNEL_LOCK();
	rt_timer_remove_all(rt);
	ifafree(rt->rt_ifa);
	rtlabel_unref(rt->rt_labelid);
#ifdef MPLS
	rt_mpls_clear(rt);
#endif
	free(rt->rt_gateway, M_RTABLE, ROUNDUP(rt->rt_gateway->sa_len));
	free(rt_key(rt), M_RTABLE, rt_key(rt)->sa_len);
	KERNEL_UNLOCK();

	pool_put(&rtentry_pool, rt);
}
516 
/*
 * Take a new reference on the interface address ``ifa'' and return it
 * for convenient chaining.  Released with ifafree().
 */
struct ifaddr *
ifaref(struct ifaddr *ifa)
{
	refcnt_take(&ifa->ifa_refcnt);
	return ifa;
}
523 
524 void
525 ifafree(struct ifaddr *ifa)
526 {
527 	if (refcnt_rele(&ifa->ifa_refcnt) == 0)
528 		return;
529 	free(ifa, M_IFADDR, 0);
530 }
531 
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * ``src'' is the router claiming to send the redirect; it is only
 * believed if it matches the gateway currently used for ``dst''.
 * On success ``*rtp'' (if not NULL) receives a reference to the
 * updated or newly created route.
 */
void
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
    struct sockaddr *src, struct rtentry **rtp, unsigned int rdomain)
{
	struct rtentry		*rt;
	int			 error = 0;
	enum rtstat_counters	 stat = rts_ncounters;	/* "no counter" */
	struct rt_addrinfo	 info;
	struct ifaddr		*ifa;
	unsigned int		 ifidx = 0;
	int			 flags = RTF_GATEWAY|RTF_HOST;
	uint8_t			 prio = RTP_NONE;

	NET_ASSERT_LOCKED();

	/* verify the gateway is directly reachable */
	rt = rtalloc(gateway, 0, rdomain);
	if (!rtisvalid(rt) || ISSET(rt->rt_flags, RTF_GATEWAY)) {
		rtfree(rt);
		error = ENETUNREACH;
		goto out;
	}
	ifidx = rt->rt_ifidx;
	ifa = rt->rt_ifa;
	rtfree(rt);
	rt = NULL;

	rt = rtable_lookup(rdomain, dst, NULL, NULL, RTP_ANY);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
#define	equal(a1, a2) \
	((a1)->sa_len == (a2)->sa_len && \
	 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
	if (rt != NULL && (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway, rdomain) != NULL ||
	    (gateway->sa_family == AF_INET &&
	    in_broadcast(satosin(gateway)->sin_addr, rdomain)))
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if (rt == NULL)
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
		if (!ISSET(rt->rt_flags, RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
create:
			rtfree(rt);
			flags |= RTF_DYNAMIC;
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest(RTM_ADD, &info, RTP_DEFAULT, &rt,
			    rdomain);
			if (error == 0) {
				flags = rt->rt_flags;
				prio = rt->rt_priority;
			}
			stat = rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			prio = rt->rt_priority;
			stat = rts_newgateway;
			rt_setgate(rt, gateway, rdomain);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		if (rtp && !error)
			*rtp = rt;
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat_inc(rts_badredirect);
	else if (stat != rts_ncounters)
		rtstat_inc(stat);
	/* Always notify listeners, even about a rejected redirect. */
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_AUTHOR] = src;
	rtm_miss(RTM_REDIRECT, &info, flags, prio, ifidx, error, rdomain);
}
648 
/*
 * Delete a route and generate a message
 *
 * The rt_addrinfo snapshot is built before deletion so the RTM_DELETE
 * message reports consistent information.  The extra reference
 * returned by rtrequest_delete() is released here on success; the
 * caller keeps (and must still release) its own reference.
 */
int
rtdeletemsg(struct rtentry *rt, struct ifnet *ifp, u_int tableid)
{
	int			error;
	struct rt_addrinfo	info;
	struct sockaddr_rtlabel sa_rl;
	struct sockaddr_in6	sa_mask;

	KASSERT(rt->rt_ifidx == ifp->if_index);

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	if (!ISSET(rt->rt_flags, RTF_HOST))
		info.rti_info[RTAX_NETMASK] = rt_plen2mask(rt, &sa_mask);
	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(rt->rt_labelid, &sa_rl);
	info.rti_flags = rt->rt_flags;
	info.rti_info[RTAX_IFP] = sdltosa(ifp->if_sadl);
	info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
	error = rtrequest_delete(&info, rt->rt_priority, ifp, &rt, tableid);
	rtm_miss(RTM_DELETE, &info, info.rti_flags, rt->rt_priority,
	    rt->rt_ifidx, error, tableid);
	if (error == 0)
		rtfree(rt);
	return (error);
}
683 
684 static inline int
685 rtequal(struct rtentry *a, struct rtentry *b)
686 {
687 	if (a == b)
688 		return 1;
689 
690 	if (memcmp(rt_key(a), rt_key(b), rt_key(a)->sa_len) == 0 &&
691 	    rt_plen(a) == rt_plen(b))
692 		return 1;
693 	else
694 		return 0;
695 }
696 
/*
 * rtable_walk() callback used by rtflushclone(): decide whether the
 * cloned entry ``rt'' descends from the cloning route ``arg''.
 *
 * Returns EEXIST to make the walk stop so the caller can delete
 * ``rt'' and restart, 0 to keep the entry and continue.
 */
int
rtflushclone1(struct rtentry *rt, void *arg, u_int id)
{
	struct rtentry *cloningrt = arg;
	struct ifnet *ifp;

	if (!ISSET(rt->rt_flags, RTF_CLONED))
		return 0;

	/* Cached route must stay alive as long as their parent are alive. */
	if (ISSET(rt->rt_flags, RTF_CACHED) && (rt->rt_parent != cloningrt))
		return 0;

	if (!rtequal(rt->rt_parent, cloningrt))
		return 0;
	/*
	 * This happens when an interface with a RTF_CLONING route is
	 * being detached.  In this case it's safe to bail because all
	 * the routes are being purged by rt_ifa_purge().
	 */
	ifp = if_get(rt->rt_ifidx);
	if (ifp == NULL)
		return 0;

	if_put(ifp);
	return EEXIST;
}
724 
/*
 * Delete all cloned children of the cloning route ``parent''.
 *
 * Each candidate flagged by rtflushclone1() (EEXIST) is deleted
 * outside of the table walk, then the walk restarts (EAGAIN) until
 * no candidate remains.
 */
int
rtflushclone(struct rtentry *parent, unsigned int rtableid)
{
	struct rtentry *rt = NULL;
	struct ifnet *ifp;
	int error;

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
#endif

	do {
		error = rtable_walk(rtableid, rt_key(parent)->sa_family, &rt,
		    rtflushclone1, parent);
		if (rt != NULL && error == EEXIST) {
			ifp = if_get(rt->rt_ifidx);
			if (ifp == NULL) {
				error = EAGAIN;
			} else {
				error = rtdeletemsg(rt, ifp, rtableid);
				if (error == 0)
					error = EAGAIN;
				if_put(ifp);
			}
		}
		rtfree(rt);
		rt = NULL;
	} while (error == EAGAIN);

	return error;

}
758 
/*
 * Remove the route described by ``info'' from table ``tableid''.
 *
 * If ``ifp'' is given the route must point to that interface.  On
 * success a referenced entry is handed back via ``ret_nrt'' if not
 * NULL, otherwise it is released.
 */
int
rtrequest_delete(struct rt_addrinfo *info, u_int8_t prio, struct ifnet *ifp,
    struct rtentry **ret_nrt, u_int tableid)
{
	struct rtentry	*rt;
	int		 error;

	NET_ASSERT_LOCKED();

	if (!rtable_exists(tableid))
		return (EAFNOSUPPORT);
	rt = rtable_lookup(tableid, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY], prio);
	if (rt == NULL)
		return (ESRCH);

	/* Make sure that's the route the caller want to delete. */
	if (ifp != NULL && ifp->if_index != rt->rt_ifidx) {
		rtfree(rt);
		return (ESRCH);
	}

#ifdef BFD
	if (ISSET(rt->rt_flags, RTF_BFD))
		bfdclear(rt);
#endif

	error = rtable_delete(tableid, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], rt);
	if (error != 0) {
		rtfree(rt);
		return (ESRCH);
	}

	/* Release next hop cache before flushing cloned entries. */
	rt_putgwroute(rt);

	/* Clean up any cloned children. */
	if (ISSET(rt->rt_flags, RTF_CLONING))
		rtflushclone(rt, tableid);

	rtfree(rt->rt_parent);
	rt->rt_parent = NULL;

	rt->rt_flags &= ~RTF_UP;

	/*
	 * NOTE(review): the check above tolerates ifp == NULL but it is
	 * dereferenced unconditionally here -- callers appear to always
	 * pass a valid interface; confirm.
	 */
	KASSERT(ifp->if_index == rt->rt_ifidx);
	ifp->if_rtrequest(ifp, RTM_DELETE, rt);

	atomic_inc_int(&rttrash);

	if (ret_nrt != NULL)
		*ret_nrt = rt;
	else
		rtfree(rt);

	return (0);
}
817 
/*
 * Add (RTM_ADD) or clone (RTM_RESOLVE) a route in table ``tableid''.
 *
 * RTM_DELETE is rejected; use rtrequest_delete().  A ``prio'' of 0
 * selects a default priority derived from the interface.  For
 * RTM_RESOLVE, ``*ret_nrt'' must hold the cloning parent on entry.
 * On success a referenced entry is returned via ``ret_nrt'' if not
 * NULL.
 */
int
rtrequest(int req, struct rt_addrinfo *info, u_int8_t prio,
    struct rtentry **ret_nrt, u_int tableid)
{
	struct ifnet		*ifp;
	struct rtentry		*rt, *crt;
	struct ifaddr		*ifa;
	struct sockaddr		*ndst;
	struct sockaddr_rtlabel	*sa_rl, sa_rl2;
	struct sockaddr_dl	 sa_dl = { sizeof(sa_dl), AF_LINK };
	int			 error;

	NET_ASSERT_LOCKED();

	if (!rtable_exists(tableid))
		return (EAFNOSUPPORT);
	if (info->rti_flags & RTF_HOST)
		info->rti_info[RTAX_NETMASK] = NULL;
	switch (req) {
	case RTM_DELETE:
		return (EINVAL);

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			return (EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			return (EINVAL);
		KASSERT(rt->rt_ifa->ifa_ifp != NULL);
		info->rti_ifa = rt->rt_ifa;
		/* The clone inherits the parent's flags minus cloning bits. */
		info->rti_flags = rt->rt_flags | (RTF_CLONED|RTF_HOST);
		info->rti_flags &= ~(RTF_CLONING|RTF_CONNECTED|RTF_STATIC);
		info->rti_info[RTAX_GATEWAY] = sdltosa(&sa_dl);
		info->rti_info[RTAX_LABEL] =
		    rtlabel_id2sa(rt->rt_labelid, &sa_rl2);
		/* FALLTHROUGH */

	case RTM_ADD:
		if (info->rti_ifa == NULL)
			return (EINVAL);
		ifa = info->rti_ifa;
		ifp = ifa->ifa_ifp;
		if (prio == 0)
			prio = ifp->if_priority + RTP_STATIC;

		/* Build a canonical (truncated and masked) key. */
		error = rt_copysa(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK], &ndst);
		if (error)
			return (error);

		rt = pool_get(&rtentry_pool, PR_NOWAIT | PR_ZERO);
		if (rt == NULL) {
			free(ndst, M_RTABLE, ndst->sa_len);
			return (ENOBUFS);
		}

		refcnt_init(&rt->rt_refcnt);
		rt->rt_flags = info->rti_flags | RTF_UP;
		rt->rt_priority = prio;	/* init routing priority */
		LIST_INIT(&rt->rt_timer);

		/* Check the link state if the table supports it. */
		if (rtable_mpath_capable(tableid, ndst->sa_family) &&
		    !ISSET(rt->rt_flags, RTF_LOCAL) &&
		    (!LINK_STATE_IS_UP(ifp->if_link_state) ||
		    !ISSET(ifp->if_flags, IFF_UP))) {
			rt->rt_flags &= ~RTF_UP;
			rt->rt_priority |= RTP_DOWN;
		}

		if (info->rti_info[RTAX_LABEL] != NULL) {
			sa_rl = (struct sockaddr_rtlabel *)
			    info->rti_info[RTAX_LABEL];
			rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label);
		}

#ifdef MPLS
		/* We have to allocate additional space for MPLS infos */
		if (info->rti_flags & RTF_MPLS &&
		    (info->rti_info[RTAX_SRC] != NULL ||
		    info->rti_info[RTAX_DST]->sa_family == AF_MPLS)) {
			error = rt_mpls_set(rt, info->rti_info[RTAX_SRC],
			    info->rti_mpls);
			if (error) {
				free(ndst, M_RTABLE, ndst->sa_len);
				pool_put(&rtentry_pool, rt);
				return (error);
			}
		} else
			rt_mpls_clear(rt);
#endif

		rt->rt_ifa = ifaref(ifa);
		rt->rt_ifidx = ifp->if_index;
		/*
		 * Copy metrics and a back pointer from the cloned
		 * route's parent.
		 */
		if (ISSET(rt->rt_flags, RTF_CLONED)) {
			rtref(*ret_nrt);
			rt->rt_parent = *ret_nrt;
			rt->rt_rmx = (*ret_nrt)->rt_rmx;
		}

		/*
		 * We must set rt->rt_gateway before adding ``rt'' to
		 * the routing table because the radix MPATH code use
		 * it to (re)order routes.
		 */
		if ((error = rt_setgate(rt, info->rti_info[RTAX_GATEWAY],
		    tableid))) {
			ifafree(ifa);
			rtfree(rt->rt_parent);
			rt_putgwroute(rt);
			free(rt->rt_gateway, M_RTABLE,
			    ROUNDUP(rt->rt_gateway->sa_len));
			free(ndst, M_RTABLE, ndst->sa_len);
			pool_put(&rtentry_pool, rt);
			return (error);
		}

		error = rtable_insert(tableid, ndst,
		    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY],
		    rt->rt_priority, rt);
		if (error != 0 &&
		    (crt = rtable_match(tableid, ndst, NULL)) != NULL) {
			/* overwrite cloned route */
			if (ISSET(crt->rt_flags, RTF_CLONED) &&
			    !ISSET(crt->rt_flags, RTF_CACHED)) {
				struct ifnet *cifp;

				cifp = if_get(crt->rt_ifidx);
				KASSERT(cifp != NULL);
				rtdeletemsg(crt, cifp, tableid);
				if_put(cifp);

				error = rtable_insert(tableid, ndst,
				    info->rti_info[RTAX_NETMASK],
				    info->rti_info[RTAX_GATEWAY],
				    rt->rt_priority, rt);
			}
			rtfree(crt);
		}
		if (error != 0) {
			ifafree(ifa);
			rtfree(rt->rt_parent);
			rt_putgwroute(rt);
			free(rt->rt_gateway, M_RTABLE,
			    ROUNDUP(rt->rt_gateway->sa_len));
			free(ndst, M_RTABLE, ndst->sa_len);
			pool_put(&rtentry_pool, rt);
			return (EEXIST);
		}
		/* Let the interface layer set up per-route state. */
		ifp->if_rtrequest(ifp, req, rt);

		if_group_routechange(info->rti_info[RTAX_DST],
			info->rti_info[RTAX_NETMASK]);

		if (ret_nrt != NULL)
			*ret_nrt = rt;
		else
			rtfree(rt);
		break;
	}

	return (0);
}
984 
/*
 * Set the gateway of ``rt'' to a private copy of ``gate'' and, for
 * RTF_GATEWAY routes, (re)cache the next hop entry.
 *
 * The gateway buffer is only reallocated when the rounded size
 * changes, so an in-place update cannot lose the old address on
 * allocation failure.
 */
int
rt_setgate(struct rtentry *rt, struct sockaddr *gate, u_int rtableid)
{
	int glen = ROUNDUP(gate->sa_len);
	struct sockaddr *sa;

	if (rt->rt_gateway == NULL || glen != ROUNDUP(rt->rt_gateway->sa_len)) {
		sa = malloc(glen, M_RTABLE, M_NOWAIT);
		if (sa == NULL)
			return (ENOBUFS);
		if (rt->rt_gateway != NULL) {
			free(rt->rt_gateway, M_RTABLE,
			    ROUNDUP(rt->rt_gateway->sa_len));
		}
		rt->rt_gateway = sa;
	}
	memmove(rt->rt_gateway, gate, glen);

	if (ISSET(rt->rt_flags, RTF_GATEWAY))
		return (rt_setgwroute(rt, rtableid));

	return (0);
}
1008 
1009 /*
1010  * Return the route entry containing the next hop link-layer
1011  * address corresponding to ``rt''.
1012  */
1013 struct rtentry *
1014 rt_getll(struct rtentry *rt)
1015 {
1016 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
1017 		KASSERT(rt->rt_gwroute != NULL);
1018 		return (rt->rt_gwroute);
1019 	}
1020 
1021 	return (rt);
1022 }
1023 
/*
 * Copy ``src'' into ``dst'' masked by ``netmask'' byte by byte
 * (dst = src & netmask).  The masked copy is bounded by the shorter
 * of the two sa_len values; any remaining tail of ``dst'' up to the
 * source length is zeroed.
 */
void
rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
    struct sockaddr *netmask)
{
	u_char	*cp1 = (u_char *)src;
	u_char	*cp2 = (u_char *)dst;
	u_char	*cp3 = (u_char *)netmask;
	u_char	*cplim = cp2 + *cp3;	/* *cp3 is netmask->sa_len */
	u_char	*cplim2 = cp2 + *cp1;	/* *cp1 is src->sa_len */

	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
	cp3 += 2;	/* skip the mask's sa_len/sa_family bytes */
	if (cplim > cplim2)
		cplim = cplim2;
	while (cp2 < cplim)
		*cp2++ = *cp1++ & *cp3++;
	if (cp2 < cplim2)
		bzero(cp2, cplim2 - cp2);
}
1043 
1044 /*
1045  * allocate new sockaddr structure based on the user supplied src and mask
1046  * that is useable for the routing table.
1047  */
static int
rt_copysa(struct sockaddr *src, struct sockaddr *mask, struct sockaddr **dst)
{
	/* maskarray[r] keeps the top ``r'' bits of a partial byte. */
	static const u_char maskarray[] = {
	    0x0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe };
	struct sockaddr *ndst;
	const struct domain *dp;
	u_char *csrc, *cdst;
	int i, plen;

	/*
	 * Find the domain matching the family of ``src''; domains with
	 * dom_rtoffset == 0 do not participate in routing and are skipped.
	 */
	for (i = 0; (dp = domains[i]) != NULL; i++) {
		if (dp->dom_rtoffset == 0)
			continue;
		if (src->sa_family == dp->dom_family)
			break;
	}
	if (dp == NULL)
		return (EAFNOSUPPORT);

	if (src->sa_len < dp->dom_sasize)
		return (EINVAL);

	/* Convert the netmask to a prefix length; reject invalid masks. */
	plen = rtable_satoplen(src->sa_family, mask);
	if (plen == -1)
		return (EINVAL);

	ndst = malloc(dp->dom_sasize, M_RTABLE, M_NOWAIT|M_ZERO);
	if (ndst == NULL)
		return (ENOBUFS);

	ndst->sa_family = src->sa_family;
	ndst->sa_len = dp->dom_sasize;

	csrc = (u_char *)src + dp->dom_rtoffset;
	cdst = (u_char *)ndst + dp->dom_rtoffset;

	/*
	 * Copy only the first ``plen'' bits of the address; the rest
	 * stays zero from M_ZERO, so the resulting key is pre-masked.
	 */
	memcpy(cdst, csrc, plen / 8);
	if (plen % 8 != 0)
		cdst[plen / 8] = csrc[plen / 8] & maskarray[plen % 8];

	*dst = ndst;
	return (0);
}
1091 
/*
 * Install the route corresponding to address ``dst'' of ``ifa'' in
 * routing table ``rdomain'' and announce it to userland.
 */
int
rt_ifa_add(struct ifaddr *ifa, int flags, struct sockaddr *dst,
    unsigned int rdomain)
{
	struct ifnet		*ifp = ifa->ifa_ifp;
	struct rtentry		*rt;
	struct sockaddr_rtlabel	 sa_rl;
	struct rt_addrinfo	 info;
	uint8_t			 prio = ifp->if_priority + RTP_STATIC;
	int			 error;

	KASSERT(rdomain == rtable_l2(rdomain));

	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	/* Link-layer routes use the interface address as gateway. */
	if (flags & RTF_LLINFO)
		info.rti_info[RTAX_GATEWAY] = sdltosa(ifp->if_sadl);
	else
		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);

#ifdef MPLS
	if ((flags & RTF_MPLS) == RTF_MPLS)
		info.rti_mpls = MPLS_OP_POP;
#endif /* MPLS */

	/* Network routes carry the interface netmask, host routes none. */
	if ((flags & RTF_HOST) == 0)
		info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;

	/* Local/broadcast and connected routes get fixed priorities. */
	if (flags & (RTF_LOCAL|RTF_BROADCAST))
		prio = RTP_LOCAL;

	if (flags & RTF_CONNECTED)
		prio = ifp->if_priority + RTP_CONNECTED;

	error = rtrequest(RTM_ADD, &info, prio, &rt, rdomain);
	if (error == 0) {
		/*
		 * A local route is created for every address configured
		 * on an interface, so use this information to notify
		 * userland that a new address has been added.
		 */
		if (flags & RTF_LOCAL)
			rtm_addr(RTM_NEWADDR, ifa);
		rtm_send(rt, RTM_ADD, 0, rdomain);
		rtfree(rt);
	}
	return (error);
}
1143 
/*
 * Remove the route for address ``dst'' of ``ifa'' from routing table
 * ``rdomain'' and announce the deletion to userland.
 */
int
rt_ifa_del(struct ifaddr *ifa, int flags, struct sockaddr *dst,
    unsigned int rdomain)
{
	struct ifnet		*ifp = ifa->ifa_ifp;
	struct rtentry		*rt;
	struct mbuf		*m = NULL;
	struct sockaddr		*deldst;
	struct rt_addrinfo	 info;
	struct sockaddr_rtlabel	 sa_rl;
	uint8_t			 prio = ifp->if_priority + RTP_STATIC;
	int			 error;

	KASSERT(rdomain == rtable_l2(rdomain));

	/*
	 * Network routes are keyed on the masked destination, so build
	 * a masked copy of ``dst'' in a temporary mbuf first.
	 */
	if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
		m = m_get(M_DONTWAIT, MT_SONAME);
		if (m == NULL)
			return (ENOBUFS);
		deldst = mtod(m, struct sockaddr *);
		rt_maskedcopy(dst, deldst, ifa->ifa_netmask);
		dst = deldst;
	}

	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	if ((flags & RTF_LLINFO) == 0)
		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);

	if ((flags & RTF_HOST) == 0)
		info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;

	/* Mirror the priority selection done by rt_ifa_add(). */
	if (flags & (RTF_LOCAL|RTF_BROADCAST))
		prio = RTP_LOCAL;

	if (flags & RTF_CONNECTED)
		prio = ifp->if_priority + RTP_CONNECTED;

	rtable_clearsource(rdomain, ifa->ifa_addr);
	error = rtrequest_delete(&info, prio, ifp, &rt, rdomain);
	if (error == 0) {
		rtm_send(rt, RTM_DELETE, 0, rdomain);
		if (flags & RTF_LOCAL)
			rtm_addr(RTM_DELADDR, ifa);
		rtfree(rt);
	}
	/* m_free(NULL) is a no-op. */
	m_free(m);

	return (error);
}
1197 
1198 /*
1199  * Add ifa's address as a local rtentry.
1200  */
int
rt_ifa_addlocal(struct ifaddr *ifa)
{
	struct ifnet *ifp = ifa->ifa_ifp;
	struct rtentry *rt;
	u_int flags = RTF_HOST|RTF_LOCAL;
	int error = 0;

	/*
	 * If the configured address correspond to the magical "any"
	 * address do not add a local route entry because that might
	 * corrupt the routing tree which uses this value for the
	 * default routes.
	 */
	switch (ifa->ifa_addr->sa_family) {
	case AF_INET:
		if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY)
			return (0);
		break;
#ifdef INET6
	case AF_INET6:
		if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr,
		    &in6addr_any))
			return (0);
		break;
#endif
	default:
		break;
	}

	/* Loopback and point-to-point interfaces carry no link-layer info. */
	if (!ISSET(ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT)))
		flags |= RTF_LLINFO;

	/* If there is no local entry, allocate one. */
	rt = rtalloc(ifa->ifa_addr, 0, ifp->if_rdomain);
	if (rt == NULL || ISSET(rt->rt_flags, flags) != flags) {
		error = rt_ifa_add(ifa, flags | RTF_MPATH, ifa->ifa_addr,
		    ifp->if_rdomain);
	}
	rtfree(rt);

	return (error);
}
1244 
1245 /*
1246  * Remove local rtentry of ifa's address if it exists.
1247  */
int
rt_ifa_dellocal(struct ifaddr *ifa)
{
	struct ifnet *ifp = ifa->ifa_ifp;
	struct rtentry *rt;
	u_int flags = RTF_HOST|RTF_LOCAL;
	int error = 0;

	/*
	 * We do not add local routes for such address, so do not bother
	 * removing them.
	 */
	switch (ifa->ifa_addr->sa_family) {
	case AF_INET:
		if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY)
			return (0);
		break;
#ifdef INET6
	case AF_INET6:
		if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr,
		    &in6addr_any))
			return (0);
		break;
#endif
	default:
		break;
	}

	/* Must match the flags rt_ifa_addlocal() installed the route with. */
	if (!ISSET(ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT)))
		flags |= RTF_LLINFO;

	/*
	 * Before deleting, check if a corresponding local host
	 * route surely exists.  With this check, we can avoid to
	 * delete an interface direct route whose destination is same
	 * as the address being removed.  This can happen when removing
	 * a subnet-router anycast address on an interface attached
	 * to a shared medium.
	 */
	rt = rtalloc(ifa->ifa_addr, 0, ifp->if_rdomain);
	if (rt != NULL && ISSET(rt->rt_flags, flags) == flags) {
		error = rt_ifa_del(ifa, flags, ifa->ifa_addr,
		    ifp->if_rdomain);
	}
	rtfree(rt);

	return (error);
}
1296 
1297 /*
1298  * Remove all addresses attached to ``ifa''.
1299  */
void
rt_ifa_purge(struct ifaddr *ifa)
{
	struct ifnet		*ifp = ifa->ifa_ifp;
	struct rtentry		*rt = NULL;
	unsigned int		 rtableid;
	int			 error, af = ifa->ifa_addr->sa_family;

	KASSERT(ifp != NULL);

	for (rtableid = 0; rtableid < rtmap_limit; rtableid++) {
		/* skip rtables that are not in the rdomain of the ifp */
		if (rtable_l2(rtableid) != ifp->if_rdomain)
			continue;

		/*
		 * The walker flags matching routes with EEXIST; they are
		 * deleted here, outside of the table walk, and the walk
		 * is restarted (EAGAIN) until no route of ``ifa'' is left.
		 */
		do {
			error = rtable_walk(rtableid, af, &rt,
			    rt_ifa_purge_walker, ifa);
			if (rt != NULL && error == EEXIST) {
				error = rtdeletemsg(rt, ifp, rtableid);
				if (error == 0)
					error = EAGAIN;
			}
			rtfree(rt);
			rt = NULL;
		} while (error == EAGAIN);

		if (error == EAFNOSUPPORT)
			error = 0;

		if (error)
			break;
	}
}
1334 
1335 int
1336 rt_ifa_purge_walker(struct rtentry *rt, void *vifa, unsigned int rtableid)
1337 {
1338 	struct ifaddr		*ifa = vifa;
1339 
1340 	if (rt->rt_ifa == ifa)
1341 		return EEXIST;
1342 
1343 	return 0;
1344 }
1345 
1346 /*
1347  * Route timer routines.  These routes allow functions to be called
1348  * for various routes at any time.  This is useful in supporting
1349  * path MTU discovery and redirect route deletion.
1350  *
1351  * This is similar to some BSDI internal functions, but it provides
1352  * for multiple queues for efficiency's sake...
1353  */
1354 
/*
 * Locking legend for struct rttimer fields:
 *	[I]	immutable after initialization
 *	[T]	protected by rttimer_mtx
 */
struct mutex			rttimer_mtx;

struct rttimer {
	TAILQ_ENTRY(rttimer)	rtt_next;	/* [T] entry on timer queue */
	LIST_ENTRY(rttimer)	rtt_link;	/* [T] timers per rtentry */
	struct timeout		rtt_timeout;	/* [I] timeout for this entry */
	struct rttimer_queue	*rtt_queue;	/* [I] back pointer to queue */
	struct rtentry		*rtt_rt;	/* [T] back pointer to route */
	time_t			rtt_expire;	/* [I] rt expire time */
	u_int			rtt_tableid;	/* [I] rtable id of rtt_rt */
};
1366 
/*
 * Run the action associated with an expired timer: the queue callback
 * if one was registered, otherwise delete the route if it is a
 * dynamically created host route (e.g. installed by an ICMP redirect).
 */
#define RTTIMER_CALLOUT(r)	{					\
	if (r->rtt_queue->rtq_func != NULL) {				\
		(*r->rtt_queue->rtq_func)(r->rtt_rt, r->rtt_tableid);	\
	} else {							\
		struct ifnet *ifp;					\
									\
		ifp = if_get(r->rtt_rt->rt_ifidx);			\
		if (ifp != NULL &&					\
		    (r->rtt_rt->rt_flags & (RTF_DYNAMIC|RTF_HOST)) ==	\
		    (RTF_DYNAMIC|RTF_HOST))				\
			rtdeletemsg(r->rtt_rt, ifp, r->rtt_tableid);	\
		if_put(ifp);						\
	}								\
}
1381 
1382 /*
1383  * Some subtle order problems with domain initialization mean that
1384  * we cannot count on this being run from rt_init before various
1385  * protocol initializations are done.  Therefore, we make sure
1386  * that this is run when the first queue is added...
1387  */
1388 
void
rt_timer_init(void)
{
	/* Pool and mutex must be usable from MP-safe network code. */
	pool_init(&rttimer_pool, sizeof(struct rttimer), 0,
	    IPL_MPFLOOR, 0, "rttmr", NULL);
	mtx_init(&rttimer_mtx, IPL_MPFLOOR);
}
1396 
1397 void
1398 rt_timer_queue_init(struct rttimer_queue *rtq, int timeout,
1399     void (*func)(struct rtentry *, u_int))
1400 {
1401 	rtq->rtq_timeout = timeout;
1402 	rtq->rtq_count = 0;
1403 	rtq->rtq_func = func;
1404 	TAILQ_INIT(&rtq->rtq_head);
1405 }
1406 
/*
 * Change the expiry period of a timer queue.  Timers already armed
 * keep the timeout they were added with (see rt_timer_add()).
 */
void
rt_timer_queue_change(struct rttimer_queue *rtq, int timeout)
{
	mtx_enter(&rttimer_mtx);
	rtq->rtq_timeout = timeout;
	mtx_leave(&rttimer_mtx);
}
1414 
/*
 * Remove and run every timer on ``rtq''.  Timers are unlinked under
 * the mutex and collected on a local list so the callouts can run
 * without holding rttimer_mtx.
 *
 * NOTE(review): unlike rt_timer_unlink(), the armed timeout is not
 * cancelled here -- confirm callers serialize against rt_timer_timer()
 * (NET_ASSERT_LOCKED below suggests the net lock provides this).
 */
void
rt_timer_queue_flush(struct rttimer_queue *rtq)
{
	struct rttimer		*r;
	TAILQ_HEAD(, rttimer)	 rttlist;

	NET_ASSERT_LOCKED();

	TAILQ_INIT(&rttlist);
	mtx_enter(&rttimer_mtx);
	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		TAILQ_INSERT_TAIL(&rttlist, r, rtt_next);
		KASSERT(rtq->rtq_count > 0);
		rtq->rtq_count--;
	}
	mtx_leave(&rttimer_mtx);

	while ((r = TAILQ_FIRST(&rttlist)) != NULL) {
		TAILQ_REMOVE(&rttlist, r, rtt_next);
		RTTIMER_CALLOUT(r);
		pool_put(&rttimer_pool, r);
	}
}
1440 
/*
 * Return the number of timers on ``rtq''.  Read without the mutex,
 * so the value is only a snapshot.
 */
unsigned long
rt_timer_queue_count(struct rttimer_queue *rtq)
{
	return (rtq->rtq_count);
}
1446 
/*
 * Detach ``r'' from its route and queue.  Returns ``r'' when the
 * caller is responsible for freeing it, or NULL when the timeout has
 * already fired, in which case rt_timer_timer() finishes the cleanup.
 */
static inline struct rttimer *
rt_timer_unlink(struct rttimer *r)
{
	MUTEX_ASSERT_LOCKED(&rttimer_mtx);

	LIST_REMOVE(r, rtt_link);
	/* Tell a concurrently firing handler the route link is gone. */
	r->rtt_rt = NULL;

	if (timeout_del(&r->rtt_timeout) == 0) {
		/* timeout fired, so rt_timer_timer will do the cleanup */
		return NULL;
	}

	TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
	KASSERT(r->rtt_queue->rtq_count > 0);
	r->rtt_queue->rtq_count--;
	return r;
}
1465 
/*
 * Destroy every timer attached to ``rt''.  Timers are unlinked under
 * rttimer_mtx and freed afterwards, outside of the mutex.
 */
void
rt_timer_remove_all(struct rtentry *rt)
{
	struct rttimer		*r;
	TAILQ_HEAD(, rttimer)	 rttlist;

	TAILQ_INIT(&rttlist);
	mtx_enter(&rttimer_mtx);
	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		/* NULL means the timeout fired and will free the timer. */
		r = rt_timer_unlink(r);
		if (r != NULL)
			TAILQ_INSERT_TAIL(&rttlist, r, rtt_next);
	}
	mtx_leave(&rttimer_mtx);

	while ((r = TAILQ_FIRST(&rttlist)) != NULL) {
		TAILQ_REMOVE(&rttlist, r, rtt_next);
		pool_put(&rttimer_pool, r);
	}
}
1486 
1487 time_t
1488 rt_timer_get_expire(const struct rtentry *rt)
1489 {
1490 	const struct rttimer	*r;
1491 	time_t			 expire = 0;
1492 
1493 	mtx_enter(&rttimer_mtx);
1494 	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
1495 		if (expire == 0 || expire > r->rtt_expire)
1496 			expire = r->rtt_expire;
1497 	}
1498 	mtx_leave(&rttimer_mtx);
1499 
1500 	return expire;
1501 }
1502 
/*
 * Arm a new timer for ``rt'' on ``queue''.  If the route already has
 * a timer on that queue it is replaced, so at most one timer exists
 * per (route, queue) pair.  Returns 0 or ENOBUFS.
 */
int
rt_timer_add(struct rtentry *rt, struct rttimer_queue *queue, u_int rtableid)
{
	struct rttimer	*r, *rnew;

	rnew = pool_get(&rttimer_pool, PR_NOWAIT | PR_ZERO);
	if (rnew == NULL)
		return (ENOBUFS);

	rnew->rtt_rt = rt;
	rnew->rtt_queue = queue;
	rnew->rtt_tableid = rtableid;
	rnew->rtt_expire = getuptime() + queue->rtq_timeout;
	timeout_set_proc(&rnew->rtt_timeout, rt_timer_timer, rnew);

	mtx_enter(&rttimer_mtx);
	/*
	 * If there's already a timer with this action, destroy it before
	 * we add a new one.
	 */
	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
		if (r->rtt_queue == queue) {
			r = rt_timer_unlink(r);
			break;  /* only one per list, so we can quit... */
		}
	}

	LIST_INSERT_HEAD(&rt->rt_timer, rnew, rtt_link);
	TAILQ_INSERT_TAIL(&queue->rtq_head, rnew, rtt_next);
	timeout_add_sec(&rnew->rtt_timeout, queue->rtq_timeout);
	rnew->rtt_queue->rtq_count++;
	mtx_leave(&rttimer_mtx);

	/* Free the replaced timer, if any, outside of the mutex. */
	if (r != NULL)
		pool_put(&rttimer_pool, r);

	return (0);
}
1541 
/*
 * Timeout handler: remove the expired timer from its route and queue,
 * run the associated callout, then free the timer.  rtt_rt is NULL
 * when rt_timer_unlink() already detached the timer, in which case
 * only the queue bookkeeping and the free remain.
 */
void
rt_timer_timer(void *arg)
{
	struct rttimer		*r = arg;
	struct rttimer_queue	*rtq = r->rtt_queue;

	/* Lock order here: NET_LOCK() before rttimer_mtx. */
	NET_LOCK();
	mtx_enter(&rttimer_mtx);

	if (r->rtt_rt != NULL)
		LIST_REMOVE(r, rtt_link);
	TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
	KASSERT(rtq->rtq_count > 0);
	rtq->rtq_count--;

	mtx_leave(&rttimer_mtx);

	if (r->rtt_rt != NULL)
		RTTIMER_CALLOUT(r);
	NET_UNLOCK();

	pool_put(&rttimer_pool, r);
}
1565 
1566 #ifdef MPLS
/*
 * Attach MPLS label/operation information from ``src'' to ``rt''.
 * The sockaddr is validated before any allocation takes place.
 */
int
rt_mpls_set(struct rtentry *rt, struct sockaddr *src, uint8_t op)
{
	struct sockaddr_mpls	*psa_mpls = (struct sockaddr_mpls *)src;
	struct rt_mpls		*rt_mpls;

	/* A label sockaddr is mandatory for every operation except POP. */
	if (psa_mpls == NULL && op != MPLS_OP_POP)
		return (EOPNOTSUPP);
	if (psa_mpls != NULL && psa_mpls->smpls_len != sizeof(*psa_mpls))
		return (EINVAL);
	if (psa_mpls != NULL && psa_mpls->smpls_family != AF_MPLS)
		return (EAFNOSUPPORT);

	/*
	 * NOTE(review): assumes rt->rt_llinfo is NULL on entry; an
	 * already attached buffer would be leaked here -- confirm
	 * callers invoke rt_mpls_clear() first.
	 */
	rt->rt_llinfo = malloc(sizeof(struct rt_mpls), M_TEMP, M_NOWAIT|M_ZERO);
	if (rt->rt_llinfo == NULL)
		return (ENOMEM);

	rt_mpls = (struct rt_mpls *)rt->rt_llinfo;
	if (psa_mpls != NULL)
		rt_mpls->mpls_label = psa_mpls->smpls_label;
	rt_mpls->mpls_operation = op;
	/* XXX: set experimental bits */
	rt->rt_flags |= RTF_MPLS;

	return (0);
}
1593 
1594 void
1595 rt_mpls_clear(struct rtentry *rt)
1596 {
1597 	if (rt->rt_llinfo != NULL && rt->rt_flags & RTF_MPLS) {
1598 		free(rt->rt_llinfo, M_TEMP, sizeof(struct rt_mpls));
1599 		rt->rt_llinfo = NULL;
1600 	}
1601 	rt->rt_flags &= ~RTF_MPLS;
1602 }
1603 #endif
1604 
/*
 * Return the id of label ``name'', allocating a new id (and taking a
 * reference) if the label does not exist yet.  Returns 0 for the empty
 * name, when no id is left, or on allocation failure.
 */
u_int16_t
rtlabel_name2id(char *name)
{
	struct rt_label		*label, *p;
	u_int16_t		 new_id = 1;

	if (!name[0])
		return (0);

	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
		if (strcmp(name, label->rtl_name) == 0) {
			label->rtl_ref++;
			return (label->rtl_id);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */
	/* The list is kept sorted by rtl_id; see the inserts below. */
	TAILQ_FOREACH(p, &rt_labels, rtl_entry) {
		if (p->rtl_id != new_id)
			break;
		new_id = p->rtl_id + 1;
	}
	if (new_id > LABELID_MAX)
		return (0);

	label = malloc(sizeof(*label), M_RTABLE, M_NOWAIT|M_ZERO);
	if (label == NULL)
		return (0);
	strlcpy(label->rtl_name, name, sizeof(label->rtl_name));
	label->rtl_id = new_id;
	label->rtl_ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, label, rtl_entry);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(&rt_labels, label, rtl_entry);

	return (label->rtl_id);
}
1647 
1648 const char *
1649 rtlabel_id2name(u_int16_t id)
1650 {
1651 	struct rt_label	*label;
1652 
1653 	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
1654 		if (label->rtl_id == id)
1655 			return (label->rtl_name);
1656 
1657 	return (NULL);
1658 }
1659 
1660 struct sockaddr *
1661 rtlabel_id2sa(u_int16_t labelid, struct sockaddr_rtlabel *sa_rl)
1662 {
1663 	const char	*label;
1664 
1665 	if (labelid == 0 || (label = rtlabel_id2name(labelid)) == NULL)
1666 		return (NULL);
1667 
1668 	bzero(sa_rl, sizeof(*sa_rl));
1669 	sa_rl->sr_len = sizeof(*sa_rl);
1670 	sa_rl->sr_family = AF_UNSPEC;
1671 	strlcpy(sa_rl->sr_label, label, sizeof(sa_rl->sr_label));
1672 
1673 	return ((struct sockaddr *)sa_rl);
1674 }
1675 
1676 void
1677 rtlabel_unref(u_int16_t id)
1678 {
1679 	struct rt_label	*p, *next;
1680 
1681 	if (id == 0)
1682 		return;
1683 
1684 	TAILQ_FOREACH_SAFE(p, &rt_labels, rtl_entry, next) {
1685 		if (id == p->rtl_id) {
1686 			if (--p->rtl_ref == 0) {
1687 				TAILQ_REMOVE(&rt_labels, p, rtl_entry);
1688 				free(p, M_RTABLE, sizeof(*p));
1689 			}
1690 			break;
1691 		}
1692 	}
1693 }
1694 
/*
 * Walk every routing table of ``ifp''s rdomain and update the state
 * of the multipath-capable routes attached to the interface (used
 * after a link state change).
 */
int
rt_if_track(struct ifnet *ifp)
{
	unsigned int rtableid;
	struct rtentry *rt = NULL;
	int i, error = 0;

	for (rtableid = 0; rtableid < rtmap_limit; rtableid++) {
		/* skip rtables that are not in the rdomain of the ifp */
		if (rtable_l2(rtableid) != ifp->if_rdomain)
			continue;
		for (i = 1; i <= AF_MAX; i++) {
			if (!rtable_mpath_capable(rtableid, i))
				continue;

			/*
			 * Routes flagged EEXIST by the walker are
			 * deleted outside of the walk; the walk is then
			 * restarted (EAGAIN) until none remain.
			 */
			do {
				error = rtable_walk(rtableid, i, &rt,
				    rt_if_linkstate_change, ifp);
				if (rt != NULL && error == EEXIST) {
					error = rtdeletemsg(rt, ifp, rtableid);
					if (error == 0)
						error = EAGAIN;
				}
				rtfree(rt);
				rt = NULL;
			} while (error == EAGAIN);

			if (error == EAFNOSUPPORT)
				error = 0;

			if (error)
				break;
		}
	}

	return (error);
}
1732 
/*
 * rtable_walk() callback used by rt_if_track(): bring routes of
 * ``ifp'' up or down according to the interface's link state.
 * Returning EEXIST asks the caller to delete the route.
 */
int
rt_if_linkstate_change(struct rtentry *rt, void *arg, u_int id)
{
	struct ifnet *ifp = arg;
	struct sockaddr_in6 sa_mask;
	int error;

	if (rt->rt_ifidx != ifp->if_index)
		return (0);

	/* Local routes are always usable. */
	if (rt->rt_flags & RTF_LOCAL) {
		rt->rt_flags |= RTF_UP;
		return (0);
	}

	if (LINK_STATE_IS_UP(ifp->if_link_state) && ifp->if_flags & IFF_UP) {
		if (ISSET(rt->rt_flags, RTF_UP))
			return (0);

		/* bring route up */
		rt->rt_flags |= RTF_UP;
		error = rtable_mpath_reprio(id, rt_key(rt), rt_plen(rt),
		    rt->rt_priority & RTP_MASK, rt);
	} else {
		/*
		 * Remove redirected and cloned routes (mainly ARP)
		 * from down interfaces so we have a chance to get
		 * new routes from a better source.
		 */
		if (ISSET(rt->rt_flags, RTF_CLONED|RTF_DYNAMIC) &&
		    !ISSET(rt->rt_flags, RTF_CACHED|RTF_BFD)) {
			return (EEXIST);
		}

		if (!ISSET(rt->rt_flags, RTF_UP))
			return (0);

		/* take route down */
		rt->rt_flags &= ~RTF_UP;
		error = rtable_mpath_reprio(id, rt_key(rt), rt_plen(rt),
		    rt->rt_priority | RTP_DOWN, rt);
	}
	if_group_routechange(rt_key(rt), rt_plen2mask(rt, &sa_mask));

	return (error);
}
1780 
/*
 * Build a netmask sockaddr for family ``af'' and prefix length
 * ``plen'' in the caller-provided storage.  Returns NULL for
 * plen == -1 (no mask) or an unsupported family.
 */
struct sockaddr *
rt_plentosa(sa_family_t af, int plen, struct sockaddr_in6 *sa_mask)
{
	struct sockaddr_in	*sin = (struct sockaddr_in *)sa_mask;
#ifdef INET6
	struct sockaddr_in6	*sin6 = (struct sockaddr_in6 *)sa_mask;
#endif

	KASSERT(plen >= 0 || plen == -1);

	if (plen == -1)
		return (NULL);

	/* sockaddr_in6 storage is large enough for both families. */
	memset(sa_mask, 0, sizeof(*sa_mask));

	switch (af) {
	case AF_INET:
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		in_prefixlen2mask(&sin->sin_addr, plen);
		break;
#ifdef INET6
	case AF_INET6:
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		in6_prefixlen2mask(&sin6->sin6_addr, plen);
		break;
#endif /* INET6 */
	default:
		return (NULL);
	}

	return ((struct sockaddr *)sa_mask);
}
1815 
/* Return the netmask of ``rt'' as a sockaddr built in ``sa_mask''. */
struct sockaddr *
rt_plen2mask(struct rtentry *rt, struct sockaddr_in6 *sa_mask)
{
	return (rt_plentosa(rt_key(rt)->sa_family, rt_plen(rt), sa_mask));
}
1821 
1822 #ifdef DDB
1823 #include <machine/db_machdep.h>
1824 #include <ddb/db_output.h>
1825 
1826 void	db_print_sa(struct sockaddr *);
1827 void	db_print_ifa(struct ifaddr *);
1828 
1829 void
1830 db_print_sa(struct sockaddr *sa)
1831 {
1832 	int len;
1833 	u_char *p;
1834 
1835 	if (sa == NULL) {
1836 		db_printf("[NULL]");
1837 		return;
1838 	}
1839 
1840 	p = (u_char *)sa;
1841 	len = sa->sa_len;
1842 	db_printf("[");
1843 	while (len > 0) {
1844 		db_printf("%d", *p);
1845 		p++;
1846 		len--;
1847 		if (len)
1848 			db_printf(",");
1849 	}
1850 	db_printf("]\n");
1851 }
1852 
/* Dump the addresses, flags and refcount of ``ifa''; NULL is skipped. */
void
db_print_ifa(struct ifaddr *ifa)
{
	if (ifa == NULL)
		return;
	db_printf("  ifa_addr=");
	db_print_sa(ifa->ifa_addr);
	db_printf("  ifa_dsta=");
	db_print_sa(ifa->ifa_dstaddr);
	db_printf("  ifa_mask=");
	db_print_sa(ifa->ifa_netmask);
	db_printf("  flags=0x%x, refcnt=%u, metric=%d\n",
	    ifa->ifa_flags, ifa->ifa_refcnt.r_refs, ifa->ifa_metric);
}
1867 
1868 /*
1869  * Function to pass to rtable_walk().
1870  * Return non-zero error to abort walk.
1871  */
int
db_show_rtentry(struct rtentry *rt, void *w, unsigned int id)
{
	db_printf("rtentry=%p", rt);

	db_printf(" flags=0x%x refcnt=%u use=%llu expire=%lld\n",
	    rt->rt_flags, rt->rt_refcnt.r_refs, rt->rt_use, rt->rt_expire);

	db_printf(" key="); db_print_sa(rt_key(rt));
	db_printf(" plen=%d", rt_plen(rt));
	db_printf(" gw="); db_print_sa(rt->rt_gateway);
	db_printf(" ifidx=%u ", rt->rt_ifidx);
	db_printf(" ifa=%p\n", rt->rt_ifa);
	db_print_ifa(rt->rt_ifa);

	db_printf(" gwroute=%p llinfo=%p priority=%d\n",
	    rt->rt_gwroute, rt->rt_llinfo, rt->rt_priority);
	/* Always continue the walk. */
	return (0);
}
1891 
1892 /*
1893  * Function to print all the route trees.
1894  */
int
db_show_rtable(int af, unsigned int rtableid)
{
	db_printf("Route tree for af %d, rtableid %u\n", af, rtableid);
	/* db_show_rtentry() prints each entry; walk errors are ignored. */
	rtable_walk(rtableid, af, NULL, db_show_rtentry, NULL);
	return (0);
}
1902 #endif /* DDB */
1903