xref: /openbsd-src/sys/net/route.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
1 /*	$OpenBSD: route.c,v 1.406 2022/04/20 17:58:22 bluhm Exp $	*/
2 /*	$NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the project nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1980, 1986, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)route.c	8.2 (Berkeley) 11/15/93
62  */
63 
64 /*
65  *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
66  *
67  * NRL grants permission for redistribution and use in source and binary
68  * forms, with or without modification, of the software and documentation
69  * created at NRL provided that the following conditions are met:
70  *
71  * 1. Redistributions of source code must retain the above copyright
72  *    notice, this list of conditions and the following disclaimer.
73  * 2. Redistributions in binary form must reproduce the above copyright
74  *    notice, this list of conditions and the following disclaimer in the
75  *    documentation and/or other materials provided with the distribution.
76  * 3. All advertising materials mentioning features or use of this software
77  *    must display the following acknowledgements:
78  *	This product includes software developed by the University of
79  *	California, Berkeley and its contributors.
80  *	This product includes software developed at the Information
81  *	Technology Division, US Naval Research Laboratory.
82  * 4. Neither the name of the NRL nor the names of its contributors
83  *    may be used to endorse or promote products derived from this software
84  *    without specific prior written permission.
85  *
86  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
87  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
88  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
89  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
90  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
91  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
92  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
93  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
94  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
95  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
96  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97  *
98  * The views and conclusions contained in the software and documentation
99  * are those of the authors and should not be interpreted as representing
100  * official policies, either expressed or implied, of the US Naval
101  * Research Laboratory (NRL).
102  */
103 
104 #include <sys/param.h>
105 #include <sys/systm.h>
106 #include <sys/mbuf.h>
107 #include <sys/socket.h>
108 #include <sys/socketvar.h>
109 #include <sys/timeout.h>
110 #include <sys/domain.h>
111 #include <sys/ioctl.h>
112 #include <sys/kernel.h>
113 #include <sys/queue.h>
114 #include <sys/pool.h>
115 #include <sys/atomic.h>
116 
117 #include <net/if.h>
118 #include <net/if_var.h>
119 #include <net/if_dl.h>
120 #include <net/route.h>
121 
122 #include <netinet/in.h>
123 #include <netinet/ip_var.h>
124 #include <netinet/in_var.h>
125 
126 #ifdef INET6
127 #include <netinet/ip6.h>
128 #include <netinet6/ip6_var.h>
129 #include <netinet6/in6_var.h>
130 #endif
131 
132 #ifdef MPLS
133 #include <netmpls/mpls.h>
134 #endif
135 
136 #ifdef BFD
137 #include <net/bfd.h>
138 #endif
139 
/*
 * Round a sockaddr length up to the next multiple of sizeof(long);
 * 0 (or a negative value) maps to sizeof(long) so an empty address
 * still occupies one alignment unit.  Used to size allocations for
 * variable-length sockaddrs.  Argument is parenthesized so expression
 * arguments expand correctly.
 */
#define ROUNDUP(a) \
    ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
141 
/* Give some jitter to hash, to avoid synchronization between routers. */
static uint32_t		rt_hashjitter;

extern unsigned int	rtmap_limit;

struct cpumem *		rtcounters;	/* per-CPU routing statistics */
int			rttrash;	/* routes not in table but not freed */
int			ifatrash;	/* ifas not in ifp list but not free */

struct pool	rtentry_pool;		/* pool for rtentry structures */
struct pool	rttimer_pool;		/* pool for rttimer structures */
struct pool	rttimer_queue_pool;	/* pool for rttimer_queue structures */

/* Local helpers; see the function definitions below for details. */
int	rt_setgwroute(struct rtentry *, u_int);
void	rt_putgwroute(struct rtentry *);
int	rtflushclone1(struct rtentry *, void *, u_int);
int	rtflushclone(struct rtentry *, unsigned int);
int	rt_ifa_purge_walker(struct rtentry *, void *, unsigned int);
struct rtentry *rt_match(struct sockaddr *, uint32_t *, int, unsigned int);
int	rt_clone(struct rtentry **, struct sockaddr *, unsigned int);
struct sockaddr *rt_plentosa(sa_family_t, int, struct sockaddr_in6 *);
static int rt_copysa(struct sockaddr *, struct sockaddr *, struct sockaddr **);

#ifdef DDB
void	db_print_sa(struct sockaddr *);
void	db_print_ifa(struct ifaddr *);
int	db_show_rtentry(struct rtentry *, void *, unsigned int);
#endif

#define	LABELID_MAX	50000

/* Route label: maps a user-visible name to a numeric id, refcounted. */
struct rt_label {
	TAILQ_ENTRY(rt_label)	rtl_entry;	/* link in rt_labels list */
	char			rtl_name[RTLABEL_LEN];	/* label name */
	u_int16_t		rtl_id;		/* numeric identifier */
	int			rtl_ref;	/* reference count */
};

TAILQ_HEAD(rt_labels, rt_label)	rt_labels = TAILQ_HEAD_INITIALIZER(rt_labels);
181 
/*
 * Global initialization of the routing layer: allocate the per-CPU
 * statistics counters and the rtentry pool, seed the multipath hash
 * jitter and, when compiled in, initialize the BFD subsystem.
 */
void
route_init(void)
{
	rtcounters = counters_alloc(rts_ncounters);

	pool_init(&rtentry_pool, sizeof(struct rtentry), 0, IPL_MPFLOOR, 0,
	    "rtentry", NULL);

	/* A zero jitter would defeat the per-boot hash randomization. */
	while (rt_hashjitter == 0)
		rt_hashjitter = arc4random();

#ifdef BFD
	bfdinit();
#endif
}
197 
198 /*
199  * Returns 1 if the (cached) ``rt'' entry is still valid, 0 otherwise.
200  */
201 int
202 rtisvalid(struct rtentry *rt)
203 {
204 	if (rt == NULL)
205 		return (0);
206 
207 	if (!ISSET(rt->rt_flags, RTF_UP))
208 		return (0);
209 
210 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
211 		KASSERT(rt->rt_gwroute != NULL);
212 		KASSERT(!ISSET(rt->rt_gwroute->rt_flags, RTF_GATEWAY));
213 		if (!ISSET(rt->rt_gwroute->rt_flags, RTF_UP))
214 			return (0);
215 	}
216 
217 	return (1);
218 }
219 
/*
 * Do the actual lookup for rtalloc(9), do not use directly!
 *
 * Return the best matching entry for the destination ``dst''.
 *
 * "RT_RESOLVE" means that a corresponding L2 entry should
 * be added to the routing table and resolved (via ARP or
 * NDP), if it does not exist.
 */
struct rtentry *
rt_match(struct sockaddr *dst, uint32_t *src, int flags, unsigned int tableid)
{
	struct rtentry		*rt = NULL;

	rt = rtable_match(tableid, dst, src);
	if (rt == NULL) {
		rtstat_inc(rts_unreach);
		return (NULL);
	}

	/*
	 * The rt_clone() error is deliberately ignored: on failure
	 * ``rt'' is left pointing at the cloning route itself.
	 */
	if (ISSET(rt->rt_flags, RTF_CLONING) && ISSET(flags, RT_RESOLVE))
		rt_clone(&rt, dst, tableid);

	rt->rt_use++;
	return (rt);
}
246 
/*
 * Clone an L2 entry for ``dst'' from the cloning route ``*rtp''.
 * On success ``*rtp'' is replaced by the (referenced) cloned entry
 * and the old reference is dropped; on error ``*rtp'' is untouched
 * and an RTM_MISS message is sent to listeners.
 */
int
rt_clone(struct rtentry **rtp, struct sockaddr *dst, unsigned int rtableid)
{
	struct rt_addrinfo	 info;
	struct rtentry		*rt = *rtp;
	int			 error = 0;

	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = dst;

	/*
	 * The priority of cloned route should be different
	 * to avoid conflict with /32 cloning routes.
	 *
	 * It should also be higher to let the ARP layer find
	 * cloned routes instead of the cloning one.
	 */
	KERNEL_LOCK();
	error = rtrequest(RTM_RESOLVE, &info, rt->rt_priority - 1, &rt,
	    rtableid);
	KERNEL_UNLOCK();
	if (error) {
		rtm_miss(RTM_MISS, &info, 0, RTP_NONE, 0, error, rtableid);
	} else {
		/* Inform listeners of the new route */
		rtm_send(rt, RTM_ADD, 0, rtableid);
		rtfree(*rtp);
		*rtp = rt;
	}
	return (error);
}
278 
/*
 * Originated from bridge_hash() in if_bridge.c
 *
 * Reversible 32-bit mixing step (the shift/subtract constants match
 * Bob Jenkins' classic mix()); each invocation diffuses the three
 * accumulators into one another.
 */
#define mix(a, b, c) do {						\
	a -= b; a -= c; a ^= (c >> 13);					\
	b -= c; b -= a; b ^= (a << 8);					\
	c -= a; c -= b; c ^= (b >> 13);					\
	a -= b; a -= c; a ^= (c >> 12);					\
	b -= c; b -= a; b ^= (a << 16);					\
	c -= a; c -= b; c ^= (b >> 5);					\
	a -= b; a -= c; a ^= (c >> 3);					\
	b -= c; b -= a; b ^= (a << 10);					\
	c -= a; c -= b; c ^= (b >> 15);					\
} while (0)
293 
/*
 * Compute a 16-bit multipath selection hash over the destination
 * address and the source cookie ``src''.  Returns -1 when multipath
 * does not apply (no source, invalid or non-MPATH route, or the
 * per-family multipath sysctl is disabled).
 */
int
rt_hash(struct rtentry *rt, struct sockaddr *dst, uint32_t *src)
{
	uint32_t a, b, c;

	if (src == NULL || !rtisvalid(rt) || !ISSET(rt->rt_flags, RTF_MPATH))
		return (-1);

	/* Golden-ratio seed plus per-boot jitter (see route_init()). */
	a = b = 0x9e3779b9;
	c = rt_hashjitter;

	switch (dst->sa_family) {
	case AF_INET:
	    {
		struct sockaddr_in *sin;

		if (!ipmultipath)
			return (-1);

		sin = satosin(dst);
		a += sin->sin_addr.s_addr;
		b += src[0];
		mix(a, b, c);
		break;
	    }
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 *sin6;

		if (!ip6_multipath)
			return (-1);

		sin6 = satosin6(dst);
		a += sin6->sin6_addr.s6_addr32[0];
		b += sin6->sin6_addr.s6_addr32[2];
		c += src[0];
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[1];
		b += sin6->sin6_addr.s6_addr32[3];
		c += src[1];
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[2];
		b += sin6->sin6_addr.s6_addr32[1];
		c += src[2];
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[3];
		b += sin6->sin6_addr.s6_addr32[0];
		c += src[3];
		mix(a, b, c);
		break;
	    }
#endif /* INET6 */
	}

	/* Other families fall through unmixed and hash to the jitter. */
	return (c & 0xffff);
}
351 
/*
 * Allocate a route, potentially using multipath to select the peer.
 * The ``src'' cookie feeds rt_hash() inside the table lookup.
 */
struct rtentry *
rtalloc_mpath(struct sockaddr *dst, uint32_t *src, unsigned int rtableid)
{
	return (rt_match(dst, src, RT_RESOLVE, rtableid));
}
360 
/*
 * Look in the routing table for the best matching entry for
 * ``dst''.
 *
 * ``flags'' may contain RT_RESOLVE, in which case an L2 entry is
 * cloned (and resolved via ARP/NDP) when the match is a cloning
 * route; see rt_match().
 */
struct rtentry *
rtalloc(struct sockaddr *dst, int flags, unsigned int rtableid)
{
	return (rt_match(dst, NULL, flags, rtableid));
}
373 
/*
 * Cache the route entry corresponding to a reachable next hop in
 * the gateway entry ``rt''.
 *
 * Returns 0 on success, ENOENT/EHOSTUNREACH/ENETUNREACH when no
 * usable next hop can be found or cloned.
 */
int
rt_setgwroute(struct rtentry *rt, u_int rtableid)
{
	struct rtentry *prt, *nhrt;
	unsigned int rdomain = rtable_l2(rtableid);
	int error;

	NET_ASSERT_LOCKED();

	KASSERT(ISSET(rt->rt_flags, RTF_GATEWAY));

	/* If we cannot find a valid next hop bail. */
	nhrt = rt_match(rt->rt_gateway, NULL, RT_RESOLVE, rdomain);
	if (nhrt == NULL)
		return (ENOENT);

	/* Next hop entry must be on the same interface. */
	if (nhrt->rt_ifidx != rt->rt_ifidx) {
		struct sockaddr_in6	sa_mask;

		/* Only a cloned L2 entry can be re-resolved elsewhere. */
		if (!ISSET(nhrt->rt_flags, RTF_LLINFO) ||
		    !ISSET(nhrt->rt_flags, RTF_CLONED)) {
			rtfree(nhrt);
			return (EHOSTUNREACH);
		}

		/*
		 * We found a L2 entry, so we might have multiple
		 * RTF_CLONING routes for the same subnet.  Query
		 * the first route of the multipath chain and iterate
		 * until we find the correct one.
		 */
		prt = rtable_lookup(rdomain, rt_key(nhrt->rt_parent),
		    rt_plen2mask(nhrt->rt_parent, &sa_mask), NULL, RTP_ANY);
		rtfree(nhrt);

		while (prt != NULL && prt->rt_ifidx != rt->rt_ifidx)
			prt = rtable_iterate(prt);

		/* We found nothing or a non-cloning MPATH route. */
		if (prt == NULL || !ISSET(prt->rt_flags, RTF_CLONING)) {
			rtfree(prt);
			return (EHOSTUNREACH);
		}

		error = rt_clone(&prt, rt->rt_gateway, rdomain);
		if (error) {
			rtfree(prt);
			return (error);
		}
		nhrt = prt;
	}

	/*
	 * Next hop must be reachable, this also prevents rtentry
	 * loops for example when rt->rt_gwroute points to rt.
	 */
	if (ISSET(nhrt->rt_flags, RTF_CLONING|RTF_GATEWAY)) {
		rtfree(nhrt);
		return (ENETUNREACH);
	}

	/* Next hop is valid so remove possible old cache. */
	rt_putgwroute(rt);
	KASSERT(rt->rt_gwroute == NULL);

	/*
	 * If the MTU of next hop is 0, this will reset the MTU of the
	 * route to run PMTUD again from scratch.
	 */
	if (!ISSET(rt->rt_locks, RTV_MTU) && (rt->rt_mtu > nhrt->rt_mtu))
		rt->rt_mtu = nhrt->rt_mtu;

	/*
	 * To avoid reference counting problems when writing link-layer
	 * addresses in an outgoing packet, we ensure that the lifetime
	 * of a cached entry is greater than the bigger lifetime of the
	 * gateway entries it is pointed by.
	 */
	nhrt->rt_flags |= RTF_CACHED;
	nhrt->rt_cachecnt++;

	/* The rt_match() reference is handed over to the cache. */
	rt->rt_gwroute = nhrt;

	return (0);
}
464 
465 /*
466  * Invalidate the cached route entry of the gateway entry ``rt''.
467  */
468 void
469 rt_putgwroute(struct rtentry *rt)
470 {
471 	struct rtentry *nhrt = rt->rt_gwroute;
472 
473 	NET_ASSERT_LOCKED();
474 
475 	if (!ISSET(rt->rt_flags, RTF_GATEWAY) || nhrt == NULL)
476 		return;
477 
478 	KASSERT(ISSET(nhrt->rt_flags, RTF_CACHED));
479 	KASSERT(nhrt->rt_cachecnt > 0);
480 
481 	--nhrt->rt_cachecnt;
482 	if (nhrt->rt_cachecnt == 0)
483 		nhrt->rt_flags &= ~RTF_CACHED;
484 
485 	rtfree(rt->rt_gwroute);
486 	rt->rt_gwroute = NULL;
487 }
488 
/* Take an additional reference on ``rt''; release with rtfree(). */
void
rtref(struct rtentry *rt)
{
	atomic_inc_int(&rt->rt_refcnt);
}
494 
/*
 * Drop a reference on ``rt'' and destroy the entry when the last
 * reference goes away.  NULL is accepted and ignored.
 */
void
rtfree(struct rtentry *rt)
{
	int		 refcnt;

	if (rt == NULL)
		return;

	refcnt = (int)atomic_dec_int_nv(&rt->rt_refcnt);
	if (refcnt <= 0) {
		/* Entry must already have been removed from the table. */
		KASSERT(!ISSET(rt->rt_flags, RTF_UP));
		KASSERT(!RT_ROOT(rt));
		atomic_dec_int(&rttrash);
		/* Negative count means over-release; leak rather than crash. */
		if (refcnt < 0) {
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}

		KERNEL_LOCK();
		rt_timer_remove_all(rt);
		ifafree(rt->rt_ifa);
		rtlabel_unref(rt->rt_labelid);
#ifdef MPLS
		rt_mpls_clear(rt);
#endif
		free(rt->rt_gateway, M_RTABLE, ROUNDUP(rt->rt_gateway->sa_len));
		free(rt_key(rt), M_RTABLE, rt_key(rt)->sa_len);
		KERNEL_UNLOCK();

		pool_put(&rtentry_pool, rt);
	}
}
527 
528 void
529 ifafree(struct ifaddr *ifa)
530 {
531 	if (ifa == NULL)
532 		panic("ifafree");
533 	if (ifa->ifa_refcnt == 0) {
534 		ifatrash--;
535 		free(ifa, M_IFADDR, 0);
536 	} else
537 		ifa->ifa_refcnt--;
538 }
539 
/*
 * Force a routing table entry to the specified
 * destination to go through the given gateway.
 * Normally called as a result of a routing redirect
 * message from the network layer.
 *
 * On success and when ``rtp'' is non-NULL, the affected (referenced)
 * entry is returned through it.  An RTM_REDIRECT message is always
 * sent to listeners at the end.
 */
void
rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
    struct sockaddr *src, struct rtentry **rtp, unsigned int rdomain)
{
	struct rtentry		*rt;
	int			 error = 0;
	enum rtstat_counters	 stat = rts_ncounters;	/* "no stat" marker */
	struct rt_addrinfo	 info;
	struct ifaddr		*ifa;
	unsigned int		 ifidx = 0;
	int			 flags = RTF_GATEWAY|RTF_HOST;
	uint8_t			 prio = RTP_NONE;

	NET_ASSERT_LOCKED();

	/* verify the gateway is directly reachable */
	rt = rtalloc(gateway, 0, rdomain);
	if (!rtisvalid(rt) || ISSET(rt->rt_flags, RTF_GATEWAY)) {
		rtfree(rt);
		error = ENETUNREACH;
		goto out;
	}
	ifidx = rt->rt_ifidx;
	ifa = rt->rt_ifa;
	rtfree(rt);
	rt = NULL;

	rt = rtable_lookup(rdomain, dst, NULL, NULL, RTP_ANY);
	/*
	 * If the redirect isn't from our current router for this dst,
	 * it's either old or wrong.  If it redirects us to ourselves,
	 * we have a routing loop, perhaps as a result of an interface
	 * going down recently.
	 */
#define	equal(a1, a2) \
	((a1)->sa_len == (a2)->sa_len && \
	 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
	if (rt != NULL && (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
		error = EINVAL;
	else if (ifa_ifwithaddr(gateway, rdomain) != NULL ||
	    (gateway->sa_family == AF_INET &&
	    in_broadcast(satosin(gateway)->sin_addr, rdomain)))
		error = EHOSTUNREACH;
	if (error)
		goto done;
	/*
	 * Create a new entry if we just got back a wildcard entry
	 * or the lookup failed.  This is necessary for hosts
	 * which use routing redirects generated by smart gateways
	 * to dynamically build the routing tables.
	 */
	if (rt == NULL)
		goto create;
	/*
	 * Don't listen to the redirect if it's
	 * for a route to an interface.
	 */
	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
		if (!ISSET(rt->rt_flags, RTF_HOST)) {
			/*
			 * Changing from route to net => route to host.
			 * Create new route, rather than smashing route to net.
			 */
create:
			rtfree(rt);
			flags |= RTF_DYNAMIC;
			bzero(&info, sizeof(info));
			info.rti_info[RTAX_DST] = dst;
			info.rti_info[RTAX_GATEWAY] = gateway;
			info.rti_ifa = ifa;
			info.rti_flags = flags;
			rt = NULL;
			error = rtrequest(RTM_ADD, &info, RTP_DEFAULT, &rt,
			    rdomain);
			if (error == 0) {
				flags = rt->rt_flags;
				prio = rt->rt_priority;
			}
			stat = rts_dynamic;
		} else {
			/*
			 * Smash the current notion of the gateway to
			 * this destination.  Should check about netmask!!!
			 */
			rt->rt_flags |= RTF_MODIFIED;
			flags |= RTF_MODIFIED;
			prio = rt->rt_priority;
			stat = rts_newgateway;
			rt_setgate(rt, gateway, rdomain);
		}
	} else
		error = EHOSTUNREACH;
done:
	if (rt) {
		if (rtp && !error)
			*rtp = rt;	/* hand the reference to the caller */
		else
			rtfree(rt);
	}
out:
	if (error)
		rtstat_inc(rts_badredirect);
	else if (stat != rts_ncounters)
		rtstat_inc(stat);
	bzero((caddr_t)&info, sizeof(info));
	info.rti_info[RTAX_DST] = dst;
	info.rti_info[RTAX_GATEWAY] = gateway;
	info.rti_info[RTAX_AUTHOR] = src;
	rtm_miss(RTM_REDIRECT, &info, flags, prio, ifidx, error, rdomain);
}
656 
/*
 * Delete a route and generate a message
 *
 * ``rt'' must belong to ``ifp''.  On success the caller's reference
 * is dropped here; on error it is kept.
 */
int
rtdeletemsg(struct rtentry *rt, struct ifnet *ifp, u_int tableid)
{
	int			error;
	struct rt_addrinfo	info;
	struct sockaddr_rtlabel sa_rl;
	struct sockaddr_in6	sa_mask;

	KASSERT(rt->rt_ifidx == ifp->if_index);

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	if (!ISSET(rt->rt_flags, RTF_HOST))
		info.rti_info[RTAX_NETMASK] = rt_plen2mask(rt, &sa_mask);
	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(rt->rt_labelid, &sa_rl);
	info.rti_flags = rt->rt_flags;
	info.rti_info[RTAX_IFP] = sdltosa(ifp->if_sadl);
	info.rti_info[RTAX_IFA] = rt->rt_ifa->ifa_addr;
	error = rtrequest_delete(&info, rt->rt_priority, ifp, &rt, tableid);
	rtm_miss(RTM_DELETE, &info, info.rti_flags, rt->rt_priority,
	    rt->rt_ifidx, error, tableid);
	if (error == 0)
		rtfree(rt);
	return (error);
}
691 
692 static inline int
693 rtequal(struct rtentry *a, struct rtentry *b)
694 {
695 	if (a == b)
696 		return 1;
697 
698 	if (memcmp(rt_key(a), rt_key(b), rt_key(a)->sa_len) == 0 &&
699 	    rt_plen(a) == rt_plen(b))
700 		return 1;
701 	else
702 		return 0;
703 }
704 
705 int
706 rtflushclone1(struct rtentry *rt, void *arg, u_int id)
707 {
708 	struct rtentry *cloningrt = arg;
709 	struct ifnet *ifp;
710 
711 	if (!ISSET(rt->rt_flags, RTF_CLONED))
712 		return 0;
713 
714 	/* Cached route must stay alive as long as their parent are alive. */
715 	if (ISSET(rt->rt_flags, RTF_CACHED) && (rt->rt_parent != cloningrt))
716 		return 0;
717 
718 	if (!rtequal(rt->rt_parent, cloningrt))
719 		return 0;
720 	/*
721 	 * This happens when an interface with a RTF_CLONING route is
722 	 * being detached.  In this case it's safe to bail because all
723 	 * the routes are being purged by rt_ifa_purge().
724 	 */
725 	ifp = if_get(rt->rt_ifidx);
726 	if (ifp == NULL)
727 		return 0;
728 
729 	if_put(ifp);
730 	return EEXIST;
731 }
732 
/*
 * Delete every route cloned from ``parent''.  The walk is restarted
 * (EAGAIN) after each deletion because rtdeletemsg() modifies the
 * table under the walker.
 */
int
rtflushclone(struct rtentry *parent, unsigned int rtableid)
{
	struct rtentry *rt = NULL;
	struct ifnet *ifp;
	int error;

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
#endif

	do {
		/* rtflushclone1() returns EEXIST for each matching clone. */
		error = rtable_walk(rtableid, rt_key(parent)->sa_family, &rt,
		    rtflushclone1, parent);
		if (rt != NULL && error == EEXIST) {
			ifp = if_get(rt->rt_ifidx);
			if (ifp == NULL) {
				error = EAGAIN;
			} else {
				error = rtdeletemsg(rt, ifp, rtableid);
				if (error == 0)
					error = EAGAIN;
				if_put(ifp);
			}
		}
		rtfree(rt);
		rt = NULL;
	} while (error == EAGAIN);

	return error;

}
766 
/*
 * Remove the route described by ``info'' from table ``tableid''.
 * When ``ifp'' is given, only an entry on that interface matches.
 * On success the (referenced) entry is handed back via ``ret_nrt''
 * if the caller asked for it, otherwise released.
 */
int
rtrequest_delete(struct rt_addrinfo *info, u_int8_t prio, struct ifnet *ifp,
    struct rtentry **ret_nrt, u_int tableid)
{
	struct rtentry	*rt;
	int		 error;

	NET_ASSERT_LOCKED();

	if (!rtable_exists(tableid))
		return (EAFNOSUPPORT);
	rt = rtable_lookup(tableid, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY], prio);
	if (rt == NULL)
		return (ESRCH);

	/* Make sure that's the route the caller want to delete. */
	if (ifp != NULL && ifp->if_index != rt->rt_ifidx) {
		rtfree(rt);
		return (ESRCH);
	}

#ifdef BFD
	if (ISSET(rt->rt_flags, RTF_BFD))
		bfdclear(rt);
#endif

	error = rtable_delete(tableid, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], rt);
	if (error != 0) {
		rtfree(rt);
		return (ESRCH);
	}

	/* Release next hop cache before flushing cloned entries. */
	rt_putgwroute(rt);

	/* Clean up any cloned children. */
	if (ISSET(rt->rt_flags, RTF_CLONING))
		rtflushclone(rt, tableid);

	rtfree(rt->rt_parent);
	rt->rt_parent = NULL;

	rt->rt_flags &= ~RTF_UP;

	/*
	 * NOTE(review): ``ifp'' is dereferenced unconditionally here
	 * although the match check above tolerates a NULL ``ifp'';
	 * presumably all callers pass a valid interface -- confirm.
	 */
	KASSERT(ifp->if_index == rt->rt_ifidx);
	ifp->if_rtrequest(ifp, RTM_DELETE, rt);

	atomic_inc_int(&rttrash);

	if (ret_nrt != NULL)
		*ret_nrt = rt;
	else
		rtfree(rt);

	return (0);
}
825 
/*
 * Add (RTM_ADD) or clone (RTM_RESOLVE) a route.  RTM_DELETE is
 * rejected here; deletions go through rtrequest_delete().  For
 * RTM_RESOLVE, ``*ret_nrt'' holds the cloning (parent) route on
 * entry.  On success the new (referenced) entry is returned via
 * ``ret_nrt'' when the caller asked for it.
 */
int
rtrequest(int req, struct rt_addrinfo *info, u_int8_t prio,
    struct rtentry **ret_nrt, u_int tableid)
{
	struct ifnet		*ifp;
	struct rtentry		*rt, *crt;
	struct ifaddr		*ifa;
	struct sockaddr		*ndst;
	struct sockaddr_rtlabel	*sa_rl, sa_rl2;
	struct sockaddr_dl	 sa_dl = { sizeof(sa_dl), AF_LINK };
	int			 error;

	NET_ASSERT_LOCKED();

	if (!rtable_exists(tableid))
		return (EAFNOSUPPORT);
	if (info->rti_flags & RTF_HOST)
		info->rti_info[RTAX_NETMASK] = NULL;
	switch (req) {
	case RTM_DELETE:
		return (EINVAL);

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			return (EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			return (EINVAL);
		KASSERT(rt->rt_ifa->ifa_ifp != NULL);
		/* Derive the clone's attributes from the parent. */
		info->rti_ifa = rt->rt_ifa;
		info->rti_flags = rt->rt_flags | (RTF_CLONED|RTF_HOST);
		info->rti_flags &= ~(RTF_CLONING|RTF_CONNECTED|RTF_STATIC);
		info->rti_info[RTAX_GATEWAY] = sdltosa(&sa_dl);
		info->rti_info[RTAX_LABEL] =
		    rtlabel_id2sa(rt->rt_labelid, &sa_rl2);
		/* FALLTHROUGH */

	case RTM_ADD:
		if (info->rti_ifa == NULL)
			return (EINVAL);
		ifa = info->rti_ifa;
		ifp = ifa->ifa_ifp;
		if (prio == 0)
			prio = ifp->if_priority + RTP_STATIC;

		/* Allocate a masked copy of the destination key. */
		error = rt_copysa(info->rti_info[RTAX_DST],
		    info->rti_info[RTAX_NETMASK], &ndst);
		if (error)
			return (error);

		rt = pool_get(&rtentry_pool, PR_NOWAIT | PR_ZERO);
		if (rt == NULL) {
			free(ndst, M_RTABLE, ndst->sa_len);
			return (ENOBUFS);
		}

		rt->rt_refcnt = 1;
		rt->rt_flags = info->rti_flags | RTF_UP;
		rt->rt_priority = prio;	/* init routing priority */
		LIST_INIT(&rt->rt_timer);

		/* Check the link state if the table supports it. */
		if (rtable_mpath_capable(tableid, ndst->sa_family) &&
		    !ISSET(rt->rt_flags, RTF_LOCAL) &&
		    (!LINK_STATE_IS_UP(ifp->if_link_state) ||
		    !ISSET(ifp->if_flags, IFF_UP))) {
			rt->rt_flags &= ~RTF_UP;
			rt->rt_priority |= RTP_DOWN;
		}

		if (info->rti_info[RTAX_LABEL] != NULL) {
			sa_rl = (struct sockaddr_rtlabel *)
			    info->rti_info[RTAX_LABEL];
			rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label);
		}

#ifdef MPLS
		/* We have to allocate additional space for MPLS infos */
		if (info->rti_flags & RTF_MPLS &&
		    (info->rti_info[RTAX_SRC] != NULL ||
		    info->rti_info[RTAX_DST]->sa_family == AF_MPLS)) {
			error = rt_mpls_set(rt, info->rti_info[RTAX_SRC],
			    info->rti_mpls);
			if (error) {
				free(ndst, M_RTABLE, ndst->sa_len);
				pool_put(&rtentry_pool, rt);
				return (error);
			}
		} else
			rt_mpls_clear(rt);
#endif

		ifa->ifa_refcnt++;
		rt->rt_ifa = ifa;
		rt->rt_ifidx = ifp->if_index;
		/*
		 * Copy metrics and a back pointer from the cloned
		 * route's parent.
		 */
		if (ISSET(rt->rt_flags, RTF_CLONED)) {
			rtref(*ret_nrt);
			rt->rt_parent = *ret_nrt;
			rt->rt_rmx = (*ret_nrt)->rt_rmx;
		}

		/*
		 * We must set rt->rt_gateway before adding ``rt'' to
		 * the routing table because the radix MPATH code use
		 * it to (re)order routes.
		 */
		if ((error = rt_setgate(rt, info->rti_info[RTAX_GATEWAY],
		    tableid))) {
			/* Unwind everything acquired above. */
			ifafree(ifa);
			rtfree(rt->rt_parent);
			rt_putgwroute(rt);
			free(rt->rt_gateway, M_RTABLE,
			    ROUNDUP(rt->rt_gateway->sa_len));
			free(ndst, M_RTABLE, ndst->sa_len);
			pool_put(&rtentry_pool, rt);
			return (error);
		}

		error = rtable_insert(tableid, ndst,
		    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY],
		    rt->rt_priority, rt);
		if (error != 0 &&
		    (crt = rtable_match(tableid, ndst, NULL)) != NULL) {
			/* overwrite cloned route */
			if (ISSET(crt->rt_flags, RTF_CLONED) &&
			    !ISSET(crt->rt_flags, RTF_CACHED)) {
				struct ifnet *cifp;

				cifp = if_get(crt->rt_ifidx);
				KASSERT(cifp != NULL);
				rtdeletemsg(crt, cifp, tableid);
				if_put(cifp);

				error = rtable_insert(tableid, ndst,
				    info->rti_info[RTAX_NETMASK],
				    info->rti_info[RTAX_GATEWAY],
				    rt->rt_priority, rt);
			}
			rtfree(crt);
		}
		if (error != 0) {
			/* Same unwinding as the rt_setgate() failure above. */
			ifafree(ifa);
			rtfree(rt->rt_parent);
			rt_putgwroute(rt);
			free(rt->rt_gateway, M_RTABLE,
			    ROUNDUP(rt->rt_gateway->sa_len));
			free(ndst, M_RTABLE, ndst->sa_len);
			pool_put(&rtentry_pool, rt);
			return (EEXIST);
		}
		ifp->if_rtrequest(ifp, req, rt);

		if_group_routechange(info->rti_info[RTAX_DST],
			info->rti_info[RTAX_NETMASK]);

		if (ret_nrt != NULL)
			*ret_nrt = rt;
		else
			rtfree(rt);
		break;
	}

	return (0);
}
993 
/*
 * Set (or replace) the gateway address of ``rt'' and, for gateway
 * routes, refresh the cached next hop.  Note that on rt_setgwroute()
 * failure the gateway address has already been changed.
 */
int
rt_setgate(struct rtentry *rt, struct sockaddr *gate, u_int rtableid)
{
	int glen = ROUNDUP(gate->sa_len);
	struct sockaddr *sa;

	/* Reallocate only when the rounded size actually changes. */
	if (rt->rt_gateway == NULL || glen != ROUNDUP(rt->rt_gateway->sa_len)) {
		sa = malloc(glen, M_RTABLE, M_NOWAIT);
		if (sa == NULL)
			return (ENOBUFS);
		if (rt->rt_gateway != NULL) {
			free(rt->rt_gateway, M_RTABLE,
			    ROUNDUP(rt->rt_gateway->sa_len));
		}
		rt->rt_gateway = sa;
	}
	memmove(rt->rt_gateway, gate, glen);

	if (ISSET(rt->rt_flags, RTF_GATEWAY))
		return (rt_setgwroute(rt, rtableid));

	return (0);
}
1017 
1018 /*
1019  * Return the route entry containing the next hop link-layer
1020  * address corresponding to ``rt''.
1021  */
1022 struct rtentry *
1023 rt_getll(struct rtentry *rt)
1024 {
1025 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
1026 		KASSERT(rt->rt_gwroute != NULL);
1027 		return (rt->rt_gwroute);
1028 	}
1029 
1030 	return (rt);
1031 }
1032 
1033 void
1034 rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
1035     struct sockaddr *netmask)
1036 {
1037 	u_char	*cp1 = (u_char *)src;
1038 	u_char	*cp2 = (u_char *)dst;
1039 	u_char	*cp3 = (u_char *)netmask;
1040 	u_char	*cplim = cp2 + *cp3;
1041 	u_char	*cplim2 = cp2 + *cp1;
1042 
1043 	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1044 	cp3 += 2;
1045 	if (cplim > cplim2)
1046 		cplim = cplim2;
1047 	while (cp2 < cplim)
1048 		*cp2++ = *cp1++ & *cp3++;
1049 	if (cp2 < cplim2)
1050 		bzero(cp2, cplim2 - cp2);
1051 }
1052 
1053 /*
1054  * allocate new sockaddr structure based on the user supplied src and mask
1055  * that is useable for the routing table.
1056  */
1057 static int
1058 rt_copysa(struct sockaddr *src, struct sockaddr *mask, struct sockaddr **dst)
1059 {
1060 	static const u_char maskarray[] = {
1061 	    0x0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe };
1062 	struct sockaddr *ndst;
1063 	const struct domain *dp;
1064 	u_char *csrc, *cdst;
1065 	int i, plen;
1066 
1067 	for (i = 0; (dp = domains[i]) != NULL; i++) {
1068 		if (dp->dom_rtoffset == 0)
1069 			continue;
1070 		if (src->sa_family == dp->dom_family)
1071 			break;
1072 	}
1073 	if (dp == NULL)
1074 		return (EAFNOSUPPORT);
1075 
1076 	if (src->sa_len < dp->dom_sasize)
1077 		return (EINVAL);
1078 
1079 	plen = rtable_satoplen(src->sa_family, mask);
1080 	if (plen == -1)
1081 		return (EINVAL);
1082 
1083 	ndst = malloc(dp->dom_sasize, M_RTABLE, M_NOWAIT|M_ZERO);
1084 	if (ndst == NULL)
1085 		return (ENOBUFS);
1086 
1087 	ndst->sa_family = src->sa_family;
1088 	ndst->sa_len = dp->dom_sasize;
1089 
1090 	csrc = (u_char *)src + dp->dom_rtoffset;
1091 	cdst = (u_char *)ndst + dp->dom_rtoffset;
1092 
1093 	memcpy(cdst, csrc, plen / 8);
1094 	if (plen % 8 != 0)
1095 		cdst[plen / 8] = csrc[plen / 8] & maskarray[plen % 8];
1096 
1097 	*dst = ndst;
1098 	return (0);
1099 }
1100 
/*
 * Install a route for address ``dst'' of interface address ``ifa'' in
 * routing table ``rdomain''.  Gateway, netmask, label and priority are
 * derived from ``ifa'', its interface and ``flags''.  On success a
 * routing socket message is sent; returns 0 or an errno from rtrequest().
 */
int
rt_ifa_add(struct ifaddr *ifa, int flags, struct sockaddr *dst,
    unsigned int rdomain)
{
	struct ifnet		*ifp = ifa->ifa_ifp;
	struct rtentry		*rt;
	struct sockaddr_rtlabel	 sa_rl;
	struct rt_addrinfo	 info;
	uint8_t			 prio = ifp->if_priority + RTP_STATIC;
	int			 error;

	KASSERT(rdomain == rtable_l2(rdomain));

	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	/* Link-layer routes use the interface's link address as gateway. */
	if (flags & RTF_LLINFO)
		info.rti_info[RTAX_GATEWAY] = sdltosa(ifp->if_sadl);
	else
		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);

#ifdef MPLS
	if ((flags & RTF_MPLS) == RTF_MPLS)
		info.rti_mpls = MPLS_OP_POP;
#endif /* MPLS */

	/* Host routes carry no netmask. */
	if ((flags & RTF_HOST) == 0)
		info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;

	/* Local/broadcast routes get the highest priority... */
	if (flags & (RTF_LOCAL|RTF_BROADCAST))
		prio = RTP_LOCAL;

	/* ...connected routes one scaled by the interface priority. */
	if (flags & RTF_CONNECTED)
		prio = ifp->if_priority + RTP_CONNECTED;

	error = rtrequest(RTM_ADD, &info, prio, &rt, rdomain);
	if (error == 0) {
		/*
		 * A local route is created for every address configured
		 * on an interface, so use this information to notify
		 * userland that a new address has been added.
		 */
		if (flags & RTF_LOCAL)
			rtm_addr(RTM_NEWADDR, ifa);
		rtm_send(rt, RTM_ADD, 0, rdomain);
		rtfree(rt);
	}
	return (error);
}
1152 
/*
 * Remove the route for address ``dst'' of interface address ``ifa''
 * from routing table ``rdomain''.  Counterpart of rt_ifa_add(); on
 * success a routing socket message is sent.
 */
int
rt_ifa_del(struct ifaddr *ifa, int flags, struct sockaddr *dst,
    unsigned int rdomain)
{
	struct ifnet		*ifp = ifa->ifa_ifp;
	struct rtentry		*rt;
	struct mbuf		*m = NULL;
	struct sockaddr		*deldst;
	struct rt_addrinfo	 info;
	struct sockaddr_rtlabel	 sa_rl;
	uint8_t			 prio = ifp->if_priority + RTP_STATIC;
	int			 error;

	KASSERT(rdomain == rtable_l2(rdomain));

	/*
	 * For network routes, rebuild the masked destination key in a
	 * temporary mbuf so the lookup matches what was inserted.
	 */
	if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
		m = m_get(M_DONTWAIT, MT_SONAME);
		if (m == NULL)
			return (ENOBUFS);
		deldst = mtod(m, struct sockaddr *);
		rt_maskedcopy(dst, deldst, ifa->ifa_netmask);
		dst = deldst;
	}

	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	if ((flags & RTF_LLINFO) == 0)
		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);

	if ((flags & RTF_HOST) == 0)
		info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;

	/* Priority selection mirrors rt_ifa_add(). */
	if (flags & (RTF_LOCAL|RTF_BROADCAST))
		prio = RTP_LOCAL;

	if (flags & RTF_CONNECTED)
		prio = ifp->if_priority + RTP_CONNECTED;

	rtable_clearsource(rdomain, ifa->ifa_addr);
	error = rtrequest_delete(&info, prio, ifp, &rt, rdomain);
	if (error == 0) {
		rtm_send(rt, RTM_DELETE, 0, rdomain);
		/* Notify userland that the address itself is going away. */
		if (flags & RTF_LOCAL)
			rtm_addr(RTM_DELADDR, ifa);
		rtfree(rt);
	}
	/* m_free(NULL) is a no-op, safe for the host-route case. */
	m_free(m);

	return (error);
}
1206 
1207 /*
1208  * Add ifa's address as a local rtentry.
1209  */
1210 int
1211 rt_ifa_addlocal(struct ifaddr *ifa)
1212 {
1213 	struct ifnet *ifp = ifa->ifa_ifp;
1214 	struct rtentry *rt;
1215 	u_int flags = RTF_HOST|RTF_LOCAL;
1216 	int error = 0;
1217 
1218 	/*
1219 	 * If the configured address correspond to the magical "any"
1220 	 * address do not add a local route entry because that might
1221 	 * corrupt the routing tree which uses this value for the
1222 	 * default routes.
1223 	 */
1224 	switch (ifa->ifa_addr->sa_family) {
1225 	case AF_INET:
1226 		if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY)
1227 			return (0);
1228 		break;
1229 #ifdef INET6
1230 	case AF_INET6:
1231 		if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr,
1232 		    &in6addr_any))
1233 			return (0);
1234 		break;
1235 #endif
1236 	default:
1237 		break;
1238 	}
1239 
1240 	if (!ISSET(ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT)))
1241 		flags |= RTF_LLINFO;
1242 
1243 	/* If there is no local entry, allocate one. */
1244 	rt = rtalloc(ifa->ifa_addr, 0, ifp->if_rdomain);
1245 	if (rt == NULL || ISSET(rt->rt_flags, flags) != flags) {
1246 		error = rt_ifa_add(ifa, flags | RTF_MPATH, ifa->ifa_addr,
1247 		    ifp->if_rdomain);
1248 	}
1249 	rtfree(rt);
1250 
1251 	return (error);
1252 }
1253 
1254 /*
1255  * Remove local rtentry of ifa's address if it exists.
1256  */
1257 int
1258 rt_ifa_dellocal(struct ifaddr *ifa)
1259 {
1260 	struct ifnet *ifp = ifa->ifa_ifp;
1261 	struct rtentry *rt;
1262 	u_int flags = RTF_HOST|RTF_LOCAL;
1263 	int error = 0;
1264 
1265 	/*
1266 	 * We do not add local routes for such address, so do not bother
1267 	 * removing them.
1268 	 */
1269 	switch (ifa->ifa_addr->sa_family) {
1270 	case AF_INET:
1271 		if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY)
1272 			return (0);
1273 		break;
1274 #ifdef INET6
1275 	case AF_INET6:
1276 		if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr,
1277 		    &in6addr_any))
1278 			return (0);
1279 		break;
1280 #endif
1281 	default:
1282 		break;
1283 	}
1284 
1285 	if (!ISSET(ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT)))
1286 		flags |= RTF_LLINFO;
1287 
1288 	/*
1289 	 * Before deleting, check if a corresponding local host
1290 	 * route surely exists.  With this check, we can avoid to
1291 	 * delete an interface direct route whose destination is same
1292 	 * as the address being removed.  This can happen when removing
1293 	 * a subnet-router anycast address on an interface attached
1294 	 * to a shared medium.
1295 	 */
1296 	rt = rtalloc(ifa->ifa_addr, 0, ifp->if_rdomain);
1297 	if (rt != NULL && ISSET(rt->rt_flags, flags) == flags) {
1298 		error = rt_ifa_del(ifa, flags, ifa->ifa_addr,
1299 		    ifp->if_rdomain);
1300 	}
1301 	rtfree(rt);
1302 
1303 	return (error);
1304 }
1305 
1306 /*
1307  * Remove all addresses attached to ``ifa''.
1308  */
/*
 * Remove all addresses attached to ``ifa''.
 */
void
rt_ifa_purge(struct ifaddr *ifa)
{
	struct ifnet		*ifp = ifa->ifa_ifp;
	struct rtentry		*rt = NULL;
	unsigned int		 rtableid;
	int			 error, af = ifa->ifa_addr->sa_family;

	KASSERT(ifp != NULL);

	for (rtableid = 0; rtableid < rtmap_limit; rtableid++) {
		/* skip rtables that are not in the rdomain of the ifp */
		if (rtable_l2(rtableid) != ifp->if_rdomain)
			continue;

		/*
		 * The walker stops with EEXIST on a matching route;
		 * delete it here and restart the walk (EAGAIN) until
		 * no match remains.
		 */
		do {
			error = rtable_walk(rtableid, af, &rt,
			    rt_ifa_purge_walker, ifa);
			if (rt != NULL && error == EEXIST) {
				error = rtdeletemsg(rt, ifp, rtableid);
				if (error == 0)
					error = EAGAIN;
			}
			rtfree(rt);
			rt = NULL;
		} while (error == EAGAIN);

		if (error == EAFNOSUPPORT)
			error = 0;

		if (error)
			break;
	}
}
1343 
1344 int
1345 rt_ifa_purge_walker(struct rtentry *rt, void *vifa, unsigned int rtableid)
1346 {
1347 	struct ifaddr		*ifa = vifa;
1348 
1349 	if (rt->rt_ifa == ifa)
1350 		return EEXIST;
1351 
1352 	return 0;
1353 }
1354 
1355 /*
1356  * Route timer routines.  These routes allow functions to be called
1357  * for various routes at any time.  This is useful in supporting
1358  * path MTU discovery and redirect route deletion.
1359  *
1360  * This is similar to some BSDI internal functions, but it provides
1361  * for multiple queues for efficiency's sake...
1362  */
1363 
/* Global list of all active route-timer queues. */
LIST_HEAD(, rttimer_queue)	rttimer_queue_head;

/*
 * Run the action of rttimer ``r'': invoke its callback if one was
 * registered, otherwise delete the associated route.
 */
#define RTTIMER_CALLOUT(r)	{					\
	if (r->rtt_func != NULL) {					\
		(*r->rtt_func)(r->rtt_rt, r);				\
	} else {							\
		struct ifnet *ifp;					\
									\
		ifp = if_get(r->rtt_rt->rt_ifidx);			\
		if (ifp != NULL)					\
			rtdeletemsg(r->rtt_rt, ifp, r->rtt_tableid);	\
		if_put(ifp);						\
	}								\
}
1378 
1379 /*
1380  * Some subtle order problems with domain initialization mean that
1381  * we cannot count on this being run from rt_init before various
1382  * protocol initializations are done.  Therefore, we make sure
1383  * that this is run when the first queue is added...
1384  */
1385 
/*
 * One-time initialization of the route-timer machinery: create the
 * pools for timers and timer queues, initialize the global queue
 * list and start the periodic rt_timer_timer() tick.
 */
void
rt_timer_init(void)
{
	/* static: the timeout must outlive this function. */
	static struct timeout	rt_timer_timeout;

	pool_init(&rttimer_pool, sizeof(struct rttimer), 0,
	    IPL_MPFLOOR, 0, "rttmr", NULL);
	pool_init(&rttimer_queue_pool, sizeof(struct rttimer_queue), 0,
	    IPL_MPFLOOR, 0, "rttmrq", NULL);

	LIST_INIT(&rttimer_queue_head);
	timeout_set_proc(&rt_timer_timeout, rt_timer_timer, &rt_timer_timeout);
	timeout_add_sec(&rt_timer_timeout, 1);
}
1400 
1401 struct rttimer_queue *
1402 rt_timer_queue_create(int timeout)
1403 {
1404 	struct rttimer_queue	*rtq;
1405 
1406 	rtq = pool_get(&rttimer_queue_pool, PR_WAITOK | PR_ZERO);
1407 
1408 	rtq->rtq_timeout = timeout;
1409 	rtq->rtq_count = 0;
1410 	TAILQ_INIT(&rtq->rtq_head);
1411 	LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);
1412 
1413 	return (rtq);
1414 }
1415 
/*
 * Change the expiry interval of queue ``rtq''; existing timers are
 * compared against the new timeout on the next rt_timer_timer() tick.
 */
void
rt_timer_queue_change(struct rttimer_queue *rtq, int timeout)
{
	rtq->rtq_timeout = timeout;
}
1421 
/*
 * Tear down timer queue ``rtq'': run and free every timer still
 * linked on it, then unhook the queue and return it to its pool.
 */
void
rt_timer_queue_destroy(struct rttimer_queue *rtq)
{
	struct rttimer	*r;

	NET_ASSERT_LOCKED();

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		/* Pending actions still fire before the timer is freed. */
		RTTIMER_CALLOUT(r);
		pool_put(&rttimer_pool, r);
		/* rtq_count must never underflow; warn if it would. */
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_destroy: rtq_count reached 0\n");
	}

	LIST_REMOVE(rtq, rtq_link);
	pool_put(&rttimer_queue_pool, rtq);
}
1443 
/*
 * Return the number of timers currently linked on queue ``rtq''.
 */
unsigned long
rt_timer_queue_count(struct rttimer_queue *rtq)
{
	return (rtq->rtq_count);
}
1449 
/*
 * Unlink and free every timer attached to route ``rt'', keeping the
 * per-queue counters consistent.  The timer callbacks are NOT run.
 */
void
rt_timer_remove_all(struct rtentry *rt)
{
	struct rttimer	*r;

	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		/* rtq_count must never underflow; warn if it would. */
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		pool_put(&rttimer_pool, r);
	}
}
1465 
1466 int
1467 rt_timer_add(struct rtentry *rt, void (*func)(struct rtentry *,
1468     struct rttimer *), struct rttimer_queue *queue, u_int rtableid)
1469 {
1470 	struct rttimer	*r;
1471 	time_t		 current_time;
1472 
1473 	current_time = getuptime();
1474 	rt->rt_expire = current_time + queue->rtq_timeout;
1475 
1476 	/*
1477 	 * If there's already a timer with this action, destroy it before
1478 	 * we add a new one.
1479 	 */
1480 	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
1481 		if (r->rtt_func == func) {
1482 			LIST_REMOVE(r, rtt_link);
1483 			TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
1484 			if (r->rtt_queue->rtq_count > 0)
1485 				r->rtt_queue->rtq_count--;
1486 			else
1487 				printf("rt_timer_add: rtq_count reached 0\n");
1488 			pool_put(&rttimer_pool, r);
1489 			break;  /* only one per list, so we can quit... */
1490 		}
1491 	}
1492 
1493 	r = pool_get(&rttimer_pool, PR_NOWAIT | PR_ZERO);
1494 	if (r == NULL)
1495 		return (ENOBUFS);
1496 
1497 	r->rtt_rt = rt;
1498 	r->rtt_time = current_time;
1499 	r->rtt_func = func;
1500 	r->rtt_queue = queue;
1501 	r->rtt_tableid = rtableid;
1502 	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
1503 	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
1504 	r->rtt_queue->rtq_count++;
1505 
1506 	return (0);
1507 }
1508 
/*
 * Periodic tick scheduled by rt_timer_init(): expire every timer
 * whose time has passed, run its action and re-arm the timeout for
 * one second later.
 */
void
rt_timer_timer(void *arg)
{
	struct timeout		*to = (struct timeout *)arg;
	struct rttimer_queue	*rtq;
	struct rttimer		*r;
	time_t			 current_time;

	current_time = getuptime();

	NET_LOCK();
	LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
		/*
		 * Timers are appended at the tail (rt_timer_add), so
		 * each queue is ordered oldest first; stop at the
		 * first entry that has not expired yet.
		 */
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < current_time) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			RTTIMER_CALLOUT(r);
			pool_put(&rttimer_pool, r);
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	NET_UNLOCK();

	timeout_add_sec(to, 1);
}
1537 
1538 #ifdef MPLS
1539 int
1540 rt_mpls_set(struct rtentry *rt, struct sockaddr *src, uint8_t op)
1541 {
1542 	struct sockaddr_mpls	*psa_mpls = (struct sockaddr_mpls *)src;
1543 	struct rt_mpls		*rt_mpls;
1544 
1545 	if (psa_mpls == NULL && op != MPLS_OP_POP)
1546 		return (EOPNOTSUPP);
1547 	if (psa_mpls != NULL && psa_mpls->smpls_len != sizeof(*psa_mpls))
1548 		return (EINVAL);
1549 	if (psa_mpls != NULL && psa_mpls->smpls_family != AF_MPLS)
1550 		return (EAFNOSUPPORT);
1551 
1552 	rt->rt_llinfo = malloc(sizeof(struct rt_mpls), M_TEMP, M_NOWAIT|M_ZERO);
1553 	if (rt->rt_llinfo == NULL)
1554 		return (ENOMEM);
1555 
1556 	rt_mpls = (struct rt_mpls *)rt->rt_llinfo;
1557 	if (psa_mpls != NULL)
1558 		rt_mpls->mpls_label = psa_mpls->smpls_label;
1559 	rt_mpls->mpls_operation = op;
1560 	/* XXX: set experimental bits */
1561 	rt->rt_flags |= RTF_MPLS;
1562 
1563 	return (0);
1564 }
1565 
1566 void
1567 rt_mpls_clear(struct rtentry *rt)
1568 {
1569 	if (rt->rt_llinfo != NULL && rt->rt_flags & RTF_MPLS) {
1570 		free(rt->rt_llinfo, M_TEMP, sizeof(struct rt_mpls));
1571 		rt->rt_llinfo = NULL;
1572 	}
1573 	rt->rt_flags &= ~RTF_MPLS;
1574 }
1575 #endif
1576 
/*
 * Look up or create the numeric id for route label ``name'' and take
 * a reference on it.  Returns 0 for the empty name, on allocation
 * failure, or when all ids up to LABELID_MAX are in use.
 */
u_int16_t
rtlabel_name2id(char *name)
{
	struct rt_label		*label, *p;
	u_int16_t		 new_id = 1;

	if (!name[0])
		return (0);

	/* Existing label: bump the refcount and reuse its id. */
	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
		if (strcmp(name, label->rtl_name) == 0) {
			label->rtl_ref++;
			return (label->rtl_id);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */
	TAILQ_FOREACH(p, &rt_labels, rtl_entry) {
		if (p->rtl_id != new_id)
			break;
		new_id = p->rtl_id + 1;
	}
	if (new_id > LABELID_MAX)
		return (0);

	label = malloc(sizeof(*label), M_RTABLE, M_NOWAIT|M_ZERO);
	if (label == NULL)
		return (0);
	strlcpy(label->rtl_name, name, sizeof(label->rtl_name));
	label->rtl_id = new_id;
	label->rtl_ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, label, rtl_entry);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(&rt_labels, label, rtl_entry);

	return (label->rtl_id);
}
1619 
1620 const char *
1621 rtlabel_id2name(u_int16_t id)
1622 {
1623 	struct rt_label	*label;
1624 
1625 	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
1626 		if (label->rtl_id == id)
1627 			return (label->rtl_name);
1628 
1629 	return (NULL);
1630 }
1631 
1632 struct sockaddr *
1633 rtlabel_id2sa(u_int16_t labelid, struct sockaddr_rtlabel *sa_rl)
1634 {
1635 	const char	*label;
1636 
1637 	if (labelid == 0 || (label = rtlabel_id2name(labelid)) == NULL)
1638 		return (NULL);
1639 
1640 	bzero(sa_rl, sizeof(*sa_rl));
1641 	sa_rl->sr_len = sizeof(*sa_rl);
1642 	sa_rl->sr_family = AF_UNSPEC;
1643 	strlcpy(sa_rl->sr_label, label, sizeof(sa_rl->sr_label));
1644 
1645 	return ((struct sockaddr *)sa_rl);
1646 }
1647 
1648 void
1649 rtlabel_unref(u_int16_t id)
1650 {
1651 	struct rt_label	*p, *next;
1652 
1653 	if (id == 0)
1654 		return;
1655 
1656 	TAILQ_FOREACH_SAFE(p, &rt_labels, rtl_entry, next) {
1657 		if (id == p->rtl_id) {
1658 			if (--p->rtl_ref == 0) {
1659 				TAILQ_REMOVE(&rt_labels, p, rtl_entry);
1660 				free(p, M_RTABLE, sizeof(*p));
1661 			}
1662 			break;
1663 		}
1664 	}
1665 }
1666 
/*
 * Walk every mpath-capable routing table in the rdomain of ``ifp''
 * and let rt_if_linkstate_change() adjust the routes bound to this
 * interface to its current link state.
 */
int
rt_if_track(struct ifnet *ifp)
{
	unsigned int rtableid;
	struct rtentry *rt = NULL;
	int i, error = 0;

	for (rtableid = 0; rtableid < rtmap_limit; rtableid++) {
		/* skip rtables that are not in the rdomain of the ifp */
		if (rtable_l2(rtableid) != ifp->if_rdomain)
			continue;
		for (i = 1; i <= AF_MAX; i++) {
			if (!rtable_mpath_capable(rtableid, i))
				continue;

			/*
			 * The walker returns EEXIST for routes to be
			 * deleted; delete them here and restart the walk
			 * (EAGAIN) until none remain.
			 */
			do {
				error = rtable_walk(rtableid, i, &rt,
				    rt_if_linkstate_change, ifp);
				if (rt != NULL && error == EEXIST) {
					error = rtdeletemsg(rt, ifp, rtableid);
					if (error == 0)
						error = EAGAIN;
				}
				rtfree(rt);
				rt = NULL;
			} while (error == EAGAIN);

			if (error == EAFNOSUPPORT)
				error = 0;

			if (error)
				break;
		}
	}

	return (error);
}
1704 
/*
 * rtable_walk() callback: adjust route ``rt'' to the current link
 * state of interface ``arg'' in routing table ``id''.  Routes are
 * reprioritized up or down; cloned/redirected routes on a down link
 * are flagged for deletion by returning EEXIST.
 */
int
rt_if_linkstate_change(struct rtentry *rt, void *arg, u_int id)
{
	struct ifnet *ifp = arg;
	struct sockaddr_in6 sa_mask;
	int error;

	/* Only routes bound to this interface are affected. */
	if (rt->rt_ifidx != ifp->if_index)
		return (0);

	/* Local routes are always usable. */
	if (rt->rt_flags & RTF_LOCAL) {
		rt->rt_flags |= RTF_UP;
		return (0);
	}

	if (LINK_STATE_IS_UP(ifp->if_link_state) && ifp->if_flags & IFF_UP) {
		if (ISSET(rt->rt_flags, RTF_UP))
			return (0);

		/* bring route up */
		rt->rt_flags |= RTF_UP;
		error = rtable_mpath_reprio(id, rt_key(rt), rt_plen(rt),
		    rt->rt_priority & RTP_MASK, rt);
	} else {
		/*
		 * Remove redirected and cloned routes (mainly ARP)
		 * from down interfaces so we have a chance to get
		 * new routes from a better source.
		 */
		if (ISSET(rt->rt_flags, RTF_CLONED|RTF_DYNAMIC) &&
		    !ISSET(rt->rt_flags, RTF_CACHED|RTF_BFD)) {
			return (EEXIST);
		}

		if (!ISSET(rt->rt_flags, RTF_UP))
			return (0);

		/* take route down */
		rt->rt_flags &= ~RTF_UP;
		error = rtable_mpath_reprio(id, rt_key(rt), rt_plen(rt),
		    rt->rt_priority | RTP_DOWN, rt);
	}
	if_group_routechange(rt_key(rt), rt_plen2mask(rt, &sa_mask));

	return (error);
}
1752 
/*
 * Build a netmask sockaddr for address family ``af'' from prefix
 * length ``plen'' in the caller-provided storage ``sa_mask'' (a
 * sockaddr_in6 is large enough for both families).  Returns NULL
 * for plen == -1 or an unsupported family.
 */
struct sockaddr *
rt_plentosa(sa_family_t af, int plen, struct sockaddr_in6 *sa_mask)
{
	struct sockaddr_in	*sin = (struct sockaddr_in *)sa_mask;
#ifdef INET6
	struct sockaddr_in6	*sin6 = (struct sockaddr_in6 *)sa_mask;
#endif

	KASSERT(plen >= 0 || plen == -1);

	/* -1 denotes "no valid prefix length"; no mask can be built. */
	if (plen == -1)
		return (NULL);

	memset(sa_mask, 0, sizeof(*sa_mask));

	switch (af) {
	case AF_INET:
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		in_prefixlen2mask(&sin->sin_addr, plen);
		break;
#ifdef INET6
	case AF_INET6:
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		in6_prefixlen2mask(&sin6->sin6_addr, plen);
		break;
#endif /* INET6 */
	default:
		return (NULL);
	}

	return ((struct sockaddr *)sa_mask);
}
1787 
1788 struct sockaddr *
1789 rt_plen2mask(struct rtentry *rt, struct sockaddr_in6 *sa_mask)
1790 {
1791 	return (rt_plentosa(rt_key(rt)->sa_family, rt_plen(rt), sa_mask));
1792 }
1793 
1794 #ifdef DDB
1795 #include <machine/db_machdep.h>
1796 #include <ddb/db_output.h>
1797 
1798 void
1799 db_print_sa(struct sockaddr *sa)
1800 {
1801 	int len;
1802 	u_char *p;
1803 
1804 	if (sa == NULL) {
1805 		db_printf("[NULL]");
1806 		return;
1807 	}
1808 
1809 	p = (u_char *)sa;
1810 	len = sa->sa_len;
1811 	db_printf("[");
1812 	while (len > 0) {
1813 		db_printf("%d", *p);
1814 		p++;
1815 		len--;
1816 		if (len)
1817 			db_printf(",");
1818 	}
1819 	db_printf("]\n");
1820 }
1821 
/*
 * Dump the addresses, flags, refcount and metric of ``ifa'' to the
 * ddb console; silently ignores a NULL argument.
 */
void
db_print_ifa(struct ifaddr *ifa)
{
	if (ifa == NULL)
		return;
	db_printf("  ifa_addr=");
	db_print_sa(ifa->ifa_addr);
	db_printf("  ifa_dsta=");
	db_print_sa(ifa->ifa_dstaddr);
	db_printf("  ifa_mask=");
	db_print_sa(ifa->ifa_netmask);
	db_printf("  flags=0x%x, refcnt=%d, metric=%d\n",
	    ifa->ifa_flags, ifa->ifa_refcnt, ifa->ifa_metric);
}
1836 
1837 /*
1838  * Function to pass to rtable_walk().
1839  * Return non-zero error to abort walk.
1840  */
int
db_show_rtentry(struct rtentry *rt, void *w, unsigned int id)
{
	/* ``w'' is the unused walk argument; ``id'' is the rtable id. */
	db_printf("rtentry=%p", rt);

	db_printf(" flags=0x%x refcnt=%d use=%llu expire=%lld rtableid=%u\n",
	    rt->rt_flags, rt->rt_refcnt, rt->rt_use, rt->rt_expire, id);

	db_printf(" key="); db_print_sa(rt_key(rt));
	db_printf(" plen=%d", rt_plen(rt));
	db_printf(" gw="); db_print_sa(rt->rt_gateway);
	db_printf(" ifidx=%u ", rt->rt_ifidx);
	db_printf(" ifa=%p\n", rt->rt_ifa);
	db_print_ifa(rt->rt_ifa);

	db_printf(" gwroute=%p llinfo=%p\n", rt->rt_gwroute, rt->rt_llinfo);
	/* Always 0: never abort the walk. */
	return (0);
}
1859 
1860 /*
1861  * Function to print all the route trees.
1862  * Use this from ddb:  "call db_show_arptab"
1863  */
int
db_show_arptab(void)
{
	/* NOTE(review): only rtable 0 / AF_INET is dumped here. */
	db_printf("Route tree for AF_INET\n");
	rtable_walk(0, AF_INET, NULL, db_show_rtentry, NULL);
	return (0);
}
1871 #endif /* DDB */
1872