/* xref: /openbsd-src/sys/net/route.c (revision f763167468dba5339ed4b14b7ecaca2a397ab0f6) */
1 /*	$OpenBSD: route.c,v 1.367 2017/09/05 10:56:04 mpi Exp $	*/
2 /*	$NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $	*/
3 
4 /*
5  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the project nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1980, 1986, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)route.c	8.2 (Berkeley) 11/15/93
62  */
63 
64 /*
65  *	@(#)COPYRIGHT	1.1 (NRL) 17 January 1995
66  *
67  * NRL grants permission for redistribution and use in source and binary
68  * forms, with or without modification, of the software and documentation
69  * created at NRL provided that the following conditions are met:
70  *
71  * 1. Redistributions of source code must retain the above copyright
72  *    notice, this list of conditions and the following disclaimer.
73  * 2. Redistributions in binary form must reproduce the above copyright
74  *    notice, this list of conditions and the following disclaimer in the
75  *    documentation and/or other materials provided with the distribution.
76  * 3. All advertising materials mentioning features or use of this software
77  *    must display the following acknowledgements:
78  *	This product includes software developed by the University of
79  *	California, Berkeley and its contributors.
80  *	This product includes software developed at the Information
81  *	Technology Division, US Naval Research Laboratory.
82  * 4. Neither the name of the NRL nor the names of its contributors
83  *    may be used to endorse or promote products derived from this software
84  *    without specific prior written permission.
85  *
86  * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
87  * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
88  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
89  * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL NRL OR
90  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
91  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
92  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
93  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
94  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
95  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
96  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97  *
98  * The views and conclusions contained in the software and documentation
99  * are those of the authors and should not be interpreted as representing
100  * official policies, either expressed or implied, of the US Naval
101  * Research Laboratory (NRL).
102  */
103 
104 #include <sys/param.h>
105 #include <sys/systm.h>
106 #include <sys/mbuf.h>
107 #include <sys/socket.h>
108 #include <sys/socketvar.h>
109 #include <sys/timeout.h>
110 #include <sys/domain.h>
111 #include <sys/protosw.h>
112 #include <sys/ioctl.h>
113 #include <sys/kernel.h>
114 #include <sys/queue.h>
115 #include <sys/pool.h>
116 #include <sys/atomic.h>
117 
118 #include <net/if.h>
119 #include <net/if_var.h>
120 #include <net/if_dl.h>
121 #include <net/route.h>
122 
123 #include <netinet/in.h>
124 #include <netinet/ip_var.h>
125 #include <netinet/in_var.h>
126 
127 #ifdef INET6
128 #include <netinet/ip6.h>
129 #include <netinet6/ip6_var.h>
130 #include <netinet6/in6_var.h>
131 #endif
132 
133 #ifdef MPLS
134 #include <netmpls/mpls.h>
135 #endif
136 
137 #ifdef BFD
138 #include <net/bfd.h>
139 #endif
140 
/*
 * Round ``a'' up to the next multiple of sizeof(long); zero rounds up to
 * sizeof(long) so an empty sockaddr still occupies one slot.  The argument
 * is fully parenthesized so expression arguments (e.g. ROUNDUP(x ? 4 : 8))
 * bind correctly instead of being swallowed by the conditional operator.
 */
#define ROUNDUP(a) ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
142 
143 /* Give some jitter to hash, to avoid synchronization between routers. */
144 static uint32_t		rt_hashjitter;
145 
146 extern unsigned int	rtmap_limit;
147 
148 struct cpumem *		rtcounters;
149 int			rttrash;	/* routes not in table but not freed */
150 int			ifatrash;	/* ifas not in ifp list but not free */
151 
152 struct pool		rtentry_pool;	/* pool for rtentry structures */
153 struct pool		rttimer_pool;	/* pool for rttimer structures */
154 
155 void	rt_timer_init(void);
156 int	rt_setgwroute(struct rtentry *, u_int);
157 void	rt_putgwroute(struct rtentry *);
158 int	rtflushclone1(struct rtentry *, void *, u_int);
159 void	rtflushclone(unsigned int, struct rtentry *);
160 int	rt_ifa_purge_walker(struct rtentry *, void *, unsigned int);
161 struct rtentry *rt_match(struct sockaddr *, uint32_t *, int, unsigned int);
162 struct sockaddr *rt_plentosa(sa_family_t, int, struct sockaddr_in6 *);
163 
164 #ifdef DDB
165 void	db_print_sa(struct sockaddr *);
166 void	db_print_ifa(struct ifaddr *);
167 int	db_show_rtentry(struct rtentry *, void *, unsigned int);
168 #endif
169 
#define	LABELID_MAX	50000	/* highest route label id ever handed out */

/*
 * Interned route label: maps a human-readable name to the 16-bit id
 * stored in rtentries.  Entries live on the global ``rt_labels'' list
 * and are reference counted through rtl_ref.
 */
struct rt_label {
	TAILQ_ENTRY(rt_label)	rtl_entry;	/* link on rt_labels list */
	char			rtl_name[RTLABEL_LEN];
	u_int16_t		rtl_id;		/* id referenced by rtentries */
	int			rtl_ref;	/* number of referencing routes */
};

TAILQ_HEAD(rt_labels, rt_label)	rt_labels = TAILQ_HEAD_INITIALIZER(rt_labels);
180 
/*
 * One-time initialization of the routing layer: statistics counters,
 * the rtentry allocation pool, the multipath hash jitter and, when
 * compiled in, the BFD subsystem.
 */
void
route_init(void)
{
	/* Per-CPU routing statistics, read via rtstat_inc(). */
	rtcounters = counters_alloc(rts_ncounters);

	pool_init(&rtentry_pool, sizeof(struct rtentry), 0, IPL_SOFTNET, 0,
	    "rtentry", NULL);

	/* 0 means "uninitialized" to rt_hash(), so loop until non-zero. */
	while (rt_hashjitter == 0)
		rt_hashjitter = arc4random();

#ifdef BFD
	bfdinit();
#endif
}
196 
197 /*
198  * Returns 1 if the (cached) ``rt'' entry is still valid, 0 otherwise.
199  */
200 int
201 rtisvalid(struct rtentry *rt)
202 {
203 	if (rt == NULL)
204 		return (0);
205 
206 	if (!ISSET(rt->rt_flags, RTF_UP))
207 		return (0);
208 
209 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
210 		KASSERT(rt->rt_gwroute != NULL);
211 		KASSERT(!ISSET(rt->rt_gwroute->rt_flags, RTF_GATEWAY));
212 		if (!ISSET(rt->rt_gwroute->rt_flags, RTF_UP))
213 			return (0);
214 	}
215 
216 	return (1);
217 }
218 
/*
 * Do the actual lookup for rtalloc(9), do not use directly!
 *
 * Return the best matching entry for the destination ``dst''.
 *
 * "RT_RESOLVE" means that a corresponding L2 entry should
 *   be added to the routing table and resolved (via ARP or
 *   NDP), if it does not exist.
 *
 * Returns a referenced entry (caller must rtfree()) or NULL when the
 * destination is unreachable.
 */
struct rtentry *
rt_match(struct sockaddr *dst, uint32_t *src, int flags, unsigned int tableid)
{
	struct rtentry		*rt0, *rt = NULL;
	int			 error = 0;

	NET_ASSERT_LOCKED();

	rt = rtable_match(tableid, dst, src);
	if (rt != NULL) {
		if ((rt->rt_flags & RTF_CLONING) && ISSET(flags, RT_RESOLVE)) {
			struct rt_addrinfo	 info;

			/* Remember the cloning parent; freed on success. */
			rt0 = rt;

			memset(&info, 0, sizeof(info));
			info.rti_info[RTAX_DST] = dst;

			KERNEL_LOCK();
			/*
			 * The priority of cloned route should be different
			 * to avoid conflict with /32 cloning routes.
			 *
			 * It should also be higher to let the ARP layer find
			 * cloned routes instead of the cloning one.
			 */
			error = rtrequest(RTM_RESOLVE, &info,
			    rt->rt_priority - 1, &rt, tableid);
			if (error) {
				/* Keep returning the cloning parent. */
				rtm_miss(RTM_MISS, &info, 0, RTP_NONE, 0,
				    error, tableid);
			} else {
				/* Inform listeners of the new route */
				rtm_send(rt, RTM_ADD, 0, tableid);
				rtfree(rt0);
			}
			KERNEL_UNLOCK();
		}
		rt->rt_use++;
	} else
		rtstat_inc(rts_unreach);
	return (rt);
}
271 
272 /*
273  * Originated from bridge_hash() in if_bridge.c
274  */
275 #define mix(a, b, c) do {						\
276 	a -= b; a -= c; a ^= (c >> 13);					\
277 	b -= c; b -= a; b ^= (a << 8);					\
278 	c -= a; c -= b; c ^= (b >> 13);					\
279 	a -= b; a -= c; a ^= (c >> 12);					\
280 	b -= c; b -= a; b ^= (a << 16);					\
281 	c -= a; c -= b; c ^= (b >> 5);					\
282 	a -= b; a -= c; a ^= (c >> 3);					\
283 	b -= c; b -= a; b ^= (a << 10);					\
284 	c -= a; c -= b; c ^= (b >> 15);					\
285 } while (0)
286 
/*
 * Hash ``dst'' (plus optional source ``src'') into a 16-bit value used
 * to choose among RTF_MPATH routes.  Returns -1 when multipath does not
 * apply: no source given, invalid route, route not RTF_MPATH, or the
 * per-protocol multipath sysctl is off.  rt_hashjitter decorrelates the
 * hash between routers.
 */
int
rt_hash(struct rtentry *rt, struct sockaddr *dst, uint32_t *src)
{
	uint32_t a, b, c;

	if (src == NULL || !rtisvalid(rt) || !ISSET(rt->rt_flags, RTF_MPATH))
		return (-1);

	/* Golden-ratio initializer, as in the original Jenkins hash. */
	a = b = 0x9e3779b9;
	c = rt_hashjitter;

	switch (dst->sa_family) {
	case AF_INET:
	    {
		struct sockaddr_in *sin;

		if (!ipmultipath)
			return (-1);

		sin = satosin(dst);
		a += sin->sin_addr.s_addr;
		/*
		 * NOTE(review): src is known non-NULL here (checked above);
		 * the ternaries below are redundant but harmless.
		 */
		b += (src != NULL) ? src[0] : 0;
		mix(a, b, c);
		break;
	    }
#ifdef INET6
	case AF_INET6:
	    {
		struct sockaddr_in6 *sin6;

		if (!ip6_multipath)
			return (-1);

		/* Fold all four 32-bit words of both addresses in. */
		sin6 = satosin6(dst);
		a += sin6->sin6_addr.s6_addr32[0];
		b += sin6->sin6_addr.s6_addr32[2];
		c += (src != NULL) ? src[0] : 0;
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[1];
		b += sin6->sin6_addr.s6_addr32[3];
		c += (src != NULL) ? src[1] : 0;
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[2];
		b += sin6->sin6_addr.s6_addr32[1];
		c += (src != NULL) ? src[2] : 0;
		mix(a, b, c);
		a += sin6->sin6_addr.s6_addr32[3];
		b += sin6->sin6_addr.s6_addr32[0];
		c += (src != NULL) ? src[3] : 0;
		mix(a, b, c);
		break;
	    }
#endif /* INET6 */
	}

	return (c & 0xffff);
}
344 
/*
 * Allocate a route, potentially using multipath to select the peer.
 *
 * Thin wrapper around rt_match() that always asks for L2 resolution
 * (RT_RESOLVE); ``src'' feeds the multipath hash.
 */
struct rtentry *
rtalloc_mpath(struct sockaddr *dst, uint32_t *src, unsigned int rtableid)
{
	return (rt_match(dst, src, RT_RESOLVE, rtableid));
}
353 
/*
 * Look in the routing table for the best matching entry for
 * ``dst''.
 *
 * If a route with a gateway is found and its next hop is no
 * longer valid, try to cache it.
 *
 * Thin wrapper around rt_match() without multipath source selection.
 */
struct rtentry *
rtalloc(struct sockaddr *dst, int flags, unsigned int rtableid)
{
	return (rt_match(dst, NULL, flags, rtableid));
}
366 
/*
 * Cache the route entry corresponding to a reachable next hop in
 * the gateway entry ``rt''.
 *
 * Returns 0 on success, ENOENT when no next hop route exists,
 * EHOSTUNREACH when it is on another interface, ENETUNREACH when
 * the next hop is itself a cloning or gateway route.
 */
int
rt_setgwroute(struct rtentry *rt, u_int rtableid)
{
	struct rtentry *nhrt;

	NET_ASSERT_LOCKED();

	KASSERT(ISSET(rt->rt_flags, RTF_GATEWAY));

	/* If we cannot find a valid next hop bail. */
	nhrt = rt_match(rt->rt_gateway, NULL, RT_RESOLVE, rtable_l2(rtableid));
	if (nhrt == NULL)
		return (ENOENT);

	/* Next hop entry must be on the same interface. */
	if (nhrt->rt_ifidx != rt->rt_ifidx) {
		rtfree(nhrt);
		return (EHOSTUNREACH);
	}

	/*
	 * Next hop must be reachable, this also prevents rtentry
	 * loops for example when rt->rt_gwroute points to rt.
	 */
	if (ISSET(nhrt->rt_flags, RTF_CLONING|RTF_GATEWAY)) {
		rtfree(nhrt);
		return (ENETUNREACH);
	}

	/* Next hop is valid so remove possible old cache. */
	rt_putgwroute(rt);
	KASSERT(rt->rt_gwroute == NULL);

	/*
	 * If the MTU of next hop is 0, this will reset the MTU of the
	 * route to run PMTUD again from scratch.
	 */
	if (!ISSET(rt->rt_locks, RTV_MTU) && (rt->rt_mtu > nhrt->rt_mtu))
		rt->rt_mtu = nhrt->rt_mtu;

	/*
	 * To avoid reference counting problems when writing link-layer
	 * addresses in an outgoing packet, we ensure that the lifetime
	 * of a cached entry is greater than the biggest lifetime of the
	 * gateway entries pointing to it.
	 */
	nhrt->rt_flags |= RTF_CACHED;
	nhrt->rt_cachecnt++;

	/* Transfer the reference obtained from rt_match() to the cache. */
	rt->rt_gwroute = nhrt;

	return (0);
}
424 
/*
 * Invalidate the cached route entry of the gateway entry ``rt''.
 *
 * Drops the RTF_CACHED accounting on the next hop and releases the
 * reference held via rt_gwroute.  No-op for non-gateway entries or
 * when nothing is cached.
 */
void
rt_putgwroute(struct rtentry *rt)
{
	struct rtentry *nhrt = rt->rt_gwroute;

	NET_ASSERT_LOCKED();

	if (!ISSET(rt->rt_flags, RTF_GATEWAY) || nhrt == NULL)
		return;

	KASSERT(ISSET(nhrt->rt_flags, RTF_CACHED));
	KASSERT(nhrt->rt_cachecnt > 0);

	/* Clear RTF_CACHED once the last cache reference goes away. */
	--nhrt->rt_cachecnt;
	if (nhrt->rt_cachecnt == 0)
		nhrt->rt_flags &= ~RTF_CACHED;

	rtfree(rt->rt_gwroute);
	rt->rt_gwroute = NULL;
}
448 
/*
 * Take a reference on ``rt''; released with rtfree().
 */
void
rtref(struct rtentry *rt)
{
	atomic_inc_int(&rt->rt_refcnt);
}
454 
/*
 * Release a reference on ``rt'' and destroy the entry when the last
 * reference goes away.  NULL is accepted and ignored.
 */
void
rtfree(struct rtentry *rt)
{
	int		 refcnt;

	if (rt == NULL)
		return;

	refcnt = (int)atomic_dec_int_nv(&rt->rt_refcnt);
	if (refcnt <= 0) {
		/* Must already be out of the table (see rtrequest_delete()). */
		KASSERT(!ISSET(rt->rt_flags, RTF_UP));
		KASSERT(!RT_ROOT(rt));
		atomic_dec_int(&rttrash);
		if (refcnt < 0) {
			/* Refcount underflow: complain and leak, don't crash. */
			printf("rtfree: %p not freed (neg refs)\n", rt);
			return;
		}

		/* Teardown of timers, ifa, label and sockaddrs needs the lock. */
		KERNEL_LOCK();
		rt_timer_remove_all(rt);
		ifafree(rt->rt_ifa);
		rtlabel_unref(rt->rt_labelid);
#ifdef MPLS
		if (rt->rt_flags & RTF_MPLS)
			free(rt->rt_llinfo, M_TEMP, sizeof(struct rt_mpls));
#endif
		free(rt->rt_gateway, M_RTABLE, ROUNDUP(rt->rt_gateway->sa_len));
		free(rt_key(rt), M_RTABLE, rt_key(rt)->sa_len);
		KERNEL_UNLOCK();

		pool_put(&rtentry_pool, rt);
	}
}
488 
/*
 * Drop a reference on ``ifa'' and free it once the count reaches zero.
 * Panics on NULL: every caller must hold a valid reference.
 */
void
ifafree(struct ifaddr *ifa)
{
	if (ifa == NULL)
		panic("ifafree");
	if (ifa->ifa_refcnt == 0) {
		ifatrash--;
		/* Size 0: the allocation size is not tracked here. */
		free(ifa, M_IFADDR, 0);
	} else
		ifa->ifa_refcnt--;
}
500 
501 /*
502  * Force a routing table entry to the specified
503  * destination to go through the given gateway.
504  * Normally called as a result of a routing redirect
505  * message from the network layer.
506  */
507 void
508 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
509     struct sockaddr *src, struct rtentry **rtp, unsigned int rdomain)
510 {
511 	struct rtentry		*rt;
512 	int			 error = 0;
513 	enum rtstat_counters	 stat = rts_ncounters;
514 	struct rt_addrinfo	 info;
515 	struct ifaddr		*ifa;
516 	unsigned int		 ifidx = 0;
517 	int			 flags = RTF_GATEWAY|RTF_HOST;
518 	uint8_t			 prio = RTP_NONE;
519 
520 	NET_ASSERT_LOCKED();
521 
522 	/* verify the gateway is directly reachable */
523 	rt = rtalloc(gateway, 0, rdomain);
524 	if (!rtisvalid(rt) || ISSET(rt->rt_flags, RTF_GATEWAY)) {
525 		rtfree(rt);
526 		error = ENETUNREACH;
527 		goto out;
528 	}
529 	ifidx = rt->rt_ifidx;
530 	ifa = rt->rt_ifa;
531 	rtfree(rt);
532 	rt = NULL;
533 
534 	rt = rtable_lookup(rdomain, dst, NULL, NULL, RTP_ANY);
535 	/*
536 	 * If the redirect isn't from our current router for this dst,
537 	 * it's either old or wrong.  If it redirects us to ourselves,
538 	 * we have a routing loop, perhaps as a result of an interface
539 	 * going down recently.
540 	 */
541 #define	equal(a1, a2) \
542 	((a1)->sa_len == (a2)->sa_len && \
543 	 bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0)
544 	if (rt != NULL && (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa))
545 		error = EINVAL;
546 	else if (ifa_ifwithaddr(gateway, rdomain) != NULL ||
547 	    (gateway->sa_family = AF_INET &&
548 	    in_broadcast(satosin(gateway)->sin_addr, rdomain)))
549 		error = EHOSTUNREACH;
550 	if (error)
551 		goto done;
552 	/*
553 	 * Create a new entry if we just got back a wildcard entry
554 	 * or the lookup failed.  This is necessary for hosts
555 	 * which use routing redirects generated by smart gateways
556 	 * to dynamically build the routing tables.
557 	 */
558 	if (rt == NULL)
559 		goto create;
560 	/*
561 	 * Don't listen to the redirect if it's
562 	 * for a route to an interface.
563 	 */
564 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
565 		if (!ISSET(rt->rt_flags, RTF_HOST)) {
566 			/*
567 			 * Changing from route to net => route to host.
568 			 * Create new route, rather than smashing route to net.
569 			 */
570 create:
571 			rtfree(rt);
572 			flags |= RTF_DYNAMIC;
573 			bzero(&info, sizeof(info));
574 			info.rti_info[RTAX_DST] = dst;
575 			info.rti_info[RTAX_GATEWAY] = gateway;
576 			info.rti_ifa = ifa;
577 			info.rti_flags = flags;
578 			rt = NULL;
579 			error = rtrequest(RTM_ADD, &info, RTP_DEFAULT, &rt,
580 			    rdomain);
581 			if (error == 0) {
582 				flags = rt->rt_flags;
583 				prio = rt->rt_priority;
584 			}
585 			stat = rts_dynamic;
586 		} else {
587 			/*
588 			 * Smash the current notion of the gateway to
589 			 * this destination.  Should check about netmask!!!
590 			 */
591 			rt->rt_flags |= RTF_MODIFIED;
592 			flags |= RTF_MODIFIED;
593 			prio = rt->rt_priority;
594 			stat = rts_newgateway;
595 			rt_setgate(rt, gateway, rdomain);
596 		}
597 	} else
598 		error = EHOSTUNREACH;
599 done:
600 	if (rt) {
601 		if (rtp && !error)
602 			*rtp = rt;
603 		else
604 			rtfree(rt);
605 	}
606 out:
607 	if (error)
608 		rtstat_inc(rts_badredirect);
609 	else if (stat != rts_ncounters)
610 		rtstat_inc(stat);
611 	bzero((caddr_t)&info, sizeof(info));
612 	info.rti_info[RTAX_DST] = dst;
613 	info.rti_info[RTAX_GATEWAY] = gateway;
614 	info.rti_info[RTAX_AUTHOR] = src;
615 	KERNEL_LOCK();
616 	rtm_miss(RTM_REDIRECT, &info, flags, prio, ifidx, error, rdomain);
617 	KERNEL_UNLOCK();
618 }
619 
/*
 * Delete a route and generate a message
 *
 * ``rt'' must belong to ``ifp''.  The caller's reference is consumed on
 * success (the route is freed here); the rtrequest_delete() error is
 * returned either way.
 */
int
rtdeletemsg(struct rtentry *rt, struct ifnet *ifp, u_int tableid)
{
	int			error;
	struct rt_addrinfo	info;
	struct sockaddr_in6	sa_mask;

	KASSERT(rt->rt_ifidx == ifp->if_index);

	/*
	 * Request the new route so that the entry is not actually
	 * deleted.  That will allow the information being reported to
	 * be accurate (and consistent with route_output()).
	 */
	memset(&info, 0, sizeof(info));
	info.rti_info[RTAX_DST] = rt_key(rt);
	info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
	if (!ISSET(rt->rt_flags, RTF_HOST))
		info.rti_info[RTAX_NETMASK] = rt_plen2mask(rt, &sa_mask);
	error = rtrequest_delete(&info, rt->rt_priority, ifp, &rt, tableid);
	KERNEL_LOCK();
	rtm_send(rt, RTM_DELETE, error, tableid);
	KERNEL_UNLOCK();
	if (error == 0)
		rtfree(rt);
	return (error);
}
650 
651 static inline int
652 rtequal(struct rtentry *a, struct rtentry *b)
653 {
654 	if (a == b)
655 		return 1;
656 
657 	if (memcmp(rt_key(a), rt_key(b), rt_key(a)->sa_len) == 0 &&
658 	    rt_plen(a) == rt_plen(b))
659 		return 1;
660 	else
661 		return 0;
662 }
663 
/*
 * rtable_walk() callback for rtflushclone(): delete ``rt'' if it is a
 * clone of ``arg''.  Returns EAGAIN after a deletion so the walk
 * restarts, 0 otherwise.
 */
int
rtflushclone1(struct rtentry *rt, void *arg, u_int id)
{
	struct rtentry *parent = arg;
	struct ifnet *ifp;
	int error;

	ifp = if_get(rt->rt_ifidx);

	/*
	 * This happens when an interface with a RTF_CLONING route is
	 * being detached.  In this case it's safe to bail because all
	 * the routes are being purged by rt_ifa_purge().
	 */
	if (ifp == NULL)
	        return 0;

	if (ISSET(rt->rt_flags, RTF_CLONED) && rtequal(rt->rt_parent, parent)) {
	        error = rtdeletemsg(rt, ifp, id);
	        if (error == 0)
			error = EAGAIN;
	} else
		error = 0;

	if_put(ifp);
	return error;
}
691 
/*
 * Remove every route cloned from ``parent'' in table ``rtableid''.
 * ``parent'' must be an RTF_CLONING route.
 */
void
rtflushclone(unsigned int rtableid, struct rtentry *parent)
{

#ifdef DIAGNOSTIC
	if (!parent || (parent->rt_flags & RTF_CLONING) == 0)
		panic("rtflushclone: called with a non-cloning route");
#endif
	rtable_walk(rtableid, rt_key(parent)->sa_family, rtflushclone1, parent);
}
702 
/*
 * Remove a route from table ``tableid''.  When ``ifp'' is non-NULL the
 * matched entry must belong to that interface.  On success the entry is
 * handed back through ``ret_nrt'' (still referenced, caller frees) or
 * freed here when ret_nrt is NULL.
 */
int
rtrequest_delete(struct rt_addrinfo *info, u_int8_t prio, struct ifnet *ifp,
    struct rtentry **ret_nrt, u_int tableid)
{
	struct rtentry	*rt;
	int		 error;

	NET_ASSERT_LOCKED();

	if (!rtable_exists(tableid))
		return (EAFNOSUPPORT);
	rt = rtable_lookup(tableid, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY], prio);
	if (rt == NULL)
		return (ESRCH);

	/* Make sure that's the route the caller want to delete. */
	if (ifp != NULL && ifp->if_index != rt->rt_ifidx) {
		rtfree(rt);
		return (ESRCH);
	}

#ifdef BFD
	if (ISSET(rt->rt_flags, RTF_BFD))
		bfdclear(rt);
#endif

	error = rtable_delete(tableid, info->rti_info[RTAX_DST],
	    info->rti_info[RTAX_NETMASK], rt);
	if (error != 0) {
		rtfree(rt);
		return (ESRCH);
	}

	/* Release next hop cache before flushing cloned entries. */
	rt_putgwroute(rt);

	/* Clean up any cloned children. */
	if (ISSET(rt->rt_flags, RTF_CLONING))
		rtflushclone(tableid, rt);

	rtfree(rt->rt_parent);
	rt->rt_parent = NULL;

	rt->rt_flags &= ~RTF_UP;

	/*
	 * NOTE(review): ifp may legitimately be NULL here (it is only
	 * checked above when non-NULL), in which case this KASSERT and
	 * the if_rtrequest() call dereference NULL -- confirm callers
	 * always pass a valid ifp.
	 */
	KASSERT(ifp->if_index == rt->rt_ifidx);
	ifp->if_rtrequest(ifp, RTM_DELETE, rt);

	atomic_inc_int(&rttrash);

	if (ret_nrt != NULL)
		*ret_nrt = rt;
	else
		rtfree(rt);

	return (0);
}
761 
/*
 * Add (RTM_ADD) or clone (RTM_RESOLVE) a route described by ``info''
 * in table ``tableid''.  RTM_DELETE is rejected; use rtrequest_delete().
 *
 * For RTM_RESOLVE, *ret_nrt must be the RTF_CLONING parent on entry.
 * On success the new entry is returned referenced through ``ret_nrt''
 * (or freed here when ret_nrt is NULL).  Returns 0 or an errno.
 */
int
rtrequest(int req, struct rt_addrinfo *info, u_int8_t prio,
    struct rtentry **ret_nrt, u_int tableid)
{
	struct ifnet		*ifp;
	struct rtentry		*rt, *crt;
	struct ifaddr		*ifa;
	struct sockaddr		*ndst;
	struct sockaddr_rtlabel	*sa_rl, sa_rl2;
	struct sockaddr_dl	 sa_dl = { sizeof(sa_dl), AF_LINK };
	int			 dlen, error;
#ifdef MPLS
	struct sockaddr_mpls	*sa_mpls;
#endif

	NET_ASSERT_LOCKED();

	if (!rtable_exists(tableid))
		return (EAFNOSUPPORT);
	/* Host routes carry no netmask. */
	if (info->rti_flags & RTF_HOST)
		info->rti_info[RTAX_NETMASK] = NULL;
	switch (req) {
	case RTM_DELETE:
		return (EINVAL);

	case RTM_RESOLVE:
		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
			return (EINVAL);
		if ((rt->rt_flags & RTF_CLONING) == 0)
			return (EINVAL);
		KASSERT(rt->rt_ifa->ifa_ifp != NULL);
		/* Inherit ifa, flags and label from the cloning parent. */
		info->rti_ifa = rt->rt_ifa;
		info->rti_flags = rt->rt_flags | (RTF_CLONED|RTF_HOST);
		info->rti_flags &= ~(RTF_CLONING|RTF_CONNECTED|RTF_STATIC);
		info->rti_info[RTAX_GATEWAY] = sdltosa(&sa_dl);
		info->rti_info[RTAX_LABEL] =
		    rtlabel_id2sa(rt->rt_labelid, &sa_rl2);
		/* FALLTHROUGH */

	case RTM_ADD:
		if (info->rti_ifa == NULL)
			return (EINVAL);
		ifa = info->rti_ifa;
		ifp = ifa->ifa_ifp;
		if (prio == 0)
			prio = ifp->if_priority + RTP_STATIC;

		/* Private, masked copy of the destination key. */
		dlen = info->rti_info[RTAX_DST]->sa_len;
		ndst = malloc(dlen, M_RTABLE, M_NOWAIT);
		if (ndst == NULL)
			return (ENOBUFS);

		if (info->rti_info[RTAX_NETMASK] != NULL)
			rt_maskedcopy(info->rti_info[RTAX_DST], ndst,
			    info->rti_info[RTAX_NETMASK]);
		else
			memcpy(ndst, info->rti_info[RTAX_DST], dlen);

		rt = pool_get(&rtentry_pool, PR_NOWAIT | PR_ZERO);
		if (rt == NULL) {
			free(ndst, M_RTABLE, dlen);
			return (ENOBUFS);
		}

		rt->rt_refcnt = 1;
		rt->rt_flags = info->rti_flags | RTF_UP;
		rt->rt_priority = prio;	/* init routing priority */
		LIST_INIT(&rt->rt_timer);

		/* Check the link state if the table supports it. */
		if (rtable_mpath_capable(tableid, ndst->sa_family) &&
		    !ISSET(rt->rt_flags, RTF_LOCAL) &&
		    (!LINK_STATE_IS_UP(ifp->if_link_state) ||
		    !ISSET(ifp->if_flags, IFF_UP))) {
			rt->rt_flags &= ~RTF_UP;
			rt->rt_priority |= RTP_DOWN;
		}

		if (info->rti_info[RTAX_LABEL] != NULL) {
			sa_rl = (struct sockaddr_rtlabel *)
			    info->rti_info[RTAX_LABEL];
			rt->rt_labelid = rtlabel_name2id(sa_rl->sr_label);
		}

#ifdef MPLS
		/* We have to allocate additional space for MPLS infos */
		if (info->rti_flags & RTF_MPLS &&
		    (info->rti_info[RTAX_SRC] != NULL ||
		    info->rti_info[RTAX_DST]->sa_family == AF_MPLS)) {
			struct rt_mpls *rt_mpls;

			sa_mpls = (struct sockaddr_mpls *)
			    info->rti_info[RTAX_SRC];

			rt->rt_llinfo = malloc(sizeof(struct rt_mpls),
			    M_TEMP, M_NOWAIT|M_ZERO);

			if (rt->rt_llinfo == NULL) {
				free(ndst, M_RTABLE, dlen);
				pool_put(&rtentry_pool, rt);
				return (ENOMEM);
			}

			rt_mpls = (struct rt_mpls *)rt->rt_llinfo;

			if (sa_mpls != NULL)
				rt_mpls->mpls_label = sa_mpls->smpls_label;

			rt_mpls->mpls_operation = info->rti_mpls;

			/* XXX: set experimental bits */

			rt->rt_flags |= RTF_MPLS;
		} else
			rt->rt_flags &= ~RTF_MPLS;
#endif

		ifa->ifa_refcnt++;
		rt->rt_ifa = ifa;
		rt->rt_ifidx = ifp->if_index;
		/*
		 * Copy metrics and a back pointer from the cloned
		 * route's parent.
		 */
		if (ISSET(rt->rt_flags, RTF_CLONED)) {
			rtref(*ret_nrt);
			rt->rt_parent = *ret_nrt;
			rt->rt_rmx = (*ret_nrt)->rt_rmx;
		}

		/*
		 * We must set rt->rt_gateway before adding ``rt'' to
		 * the routing table because the radix MPATH code use
		 * it to (re)order routes.
		 */
		if ((error = rt_setgate(rt, info->rti_info[RTAX_GATEWAY],
		    tableid))) {
			/* Undo everything acquired above. */
			ifafree(ifa);
			rtfree(rt->rt_parent);
			rt_putgwroute(rt);
			free(rt->rt_gateway, M_RTABLE, 0);
			free(ndst, M_RTABLE, dlen);
			pool_put(&rtentry_pool, rt);
			return (error);
		}

		error = rtable_insert(tableid, ndst,
		    info->rti_info[RTAX_NETMASK], info->rti_info[RTAX_GATEWAY],
		    rt->rt_priority, rt);
		if (error != 0 &&
		    (crt = rtable_match(tableid, ndst, NULL)) != NULL) {
			/* overwrite cloned route */
			if (ISSET(crt->rt_flags, RTF_CLONED)) {
				struct ifnet *cifp;

				cifp = if_get(crt->rt_ifidx);
				KASSERT(cifp != NULL);
				rtdeletemsg(crt, cifp, tableid);
				if_put(cifp);

				error = rtable_insert(tableid, ndst,
				    info->rti_info[RTAX_NETMASK],
				    info->rti_info[RTAX_GATEWAY],
				    rt->rt_priority, rt);
			}
			rtfree(crt);
		}
		if (error != 0) {
			/* Undo everything acquired above. */
			ifafree(ifa);
			rtfree(rt->rt_parent);
			rt_putgwroute(rt);
			free(rt->rt_gateway, M_RTABLE, 0);
			free(ndst, M_RTABLE, dlen);
			pool_put(&rtentry_pool, rt);
			return (EEXIST);
		}
		/* Let the interface layer set up L2 state (e.g. ARP, NDP). */
		ifp->if_rtrequest(ifp, req, rt);

		if_group_routechange(info->rti_info[RTAX_DST],
			info->rti_info[RTAX_NETMASK]);

		if (ret_nrt != NULL)
			*ret_nrt = rt;
		else
			rtfree(rt);
		break;
	}

	return (0);
}
952 
/*
 * Set (or replace) the gateway address of ``rt''.  Reallocates the
 * rt_gateway sockaddr when the new address needs more room, then, for
 * RTF_GATEWAY entries, (re)caches the next hop via rt_setgwroute().
 * Returns 0 or an errno.
 */
int
rt_setgate(struct rtentry *rt, struct sockaddr *gate, u_int rtableid)
{
	int glen = ROUNDUP(gate->sa_len);
	struct sockaddr *sa;

	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
		sa = malloc(glen, M_RTABLE, M_NOWAIT);
		if (sa == NULL)
			return (ENOBUFS);
		/* Old gateway (if any) is replaced only after malloc succeeds. */
		free(rt->rt_gateway, M_RTABLE, 0);
		rt->rt_gateway = sa;
	}
	memmove(rt->rt_gateway, gate, glen);

	if (ISSET(rt->rt_flags, RTF_GATEWAY))
		return (rt_setgwroute(rt, rtableid));

	return (0);
}
973 
974 /*
975  * Return the route entry containing the next hop link-layer
976  * address corresponding to ``rt''.
977  */
978 struct rtentry *
979 rt_getll(struct rtentry *rt)
980 {
981 	if (ISSET(rt->rt_flags, RTF_GATEWAY)) {
982 		KASSERT(rt->rt_gwroute != NULL);
983 		return (rt->rt_gwroute);
984 	}
985 
986 	return (rt);
987 }
988 
989 void
990 rt_maskedcopy(struct sockaddr *src, struct sockaddr *dst,
991     struct sockaddr *netmask)
992 {
993 	u_char	*cp1 = (u_char *)src;
994 	u_char	*cp2 = (u_char *)dst;
995 	u_char	*cp3 = (u_char *)netmask;
996 	u_char	*cplim = cp2 + *cp3;
997 	u_char	*cplim2 = cp2 + *cp1;
998 
999 	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1000 	cp3 += 2;
1001 	if (cplim > cplim2)
1002 		cplim = cplim2;
1003 	while (cp2 < cplim)
1004 		*cp2++ = *cp1++ & *cp3++;
1005 	if (cp2 < cplim2)
1006 		bzero((caddr_t)cp2, (unsigned)(cplim2 - cp2));
1007 }
1008 
/*
 * Install a route for address ``ifa'' on its interface: local,
 * broadcast, connected or link-layer route depending on ``flags''.
 * The route priority is derived from the interface priority unless the
 * flags select RTP_LOCAL or RTP_CONNECTED.  Returns rtrequest() errno.
 */
int
rt_ifa_add(struct ifaddr *ifa, int flags, struct sockaddr *dst)
{
	struct ifnet		*ifp = ifa->ifa_ifp;
	struct rtentry		*rt;
	struct sockaddr_rtlabel	 sa_rl;
	struct rt_addrinfo	 info;
	unsigned int		 rtableid = ifp->if_rdomain;
	uint8_t			 prio = ifp->if_priority + RTP_STATIC;
	int			 error;

	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags | RTF_MPATH;
	info.rti_info[RTAX_DST] = dst;
	/* Link-layer routes point at the interface's own lladdr. */
	if (flags & RTF_LLINFO)
		info.rti_info[RTAX_GATEWAY] = sdltosa(ifp->if_sadl);
	else
		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);

#ifdef MPLS
	if ((flags & RTF_MPLS) == RTF_MPLS)
		info.rti_mpls = MPLS_OP_POP;
#endif /* MPLS */

	if ((flags & RTF_HOST) == 0)
		info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;

	if (flags & (RTF_LOCAL|RTF_BROADCAST))
		prio = RTP_LOCAL;

	if (flags & RTF_CONNECTED)
		prio = ifp->if_priority + RTP_CONNECTED;

	error = rtrequest(RTM_ADD, &info, prio, &rt, rtableid);
	if (error == 0) {
		/*
		 * A local route is created for every address configured
		 * on an interface, so use this information to notify
		 * userland that a new address has been added.
		 */
		if (flags & RTF_LOCAL)
			rtm_addr(rt, RTM_NEWADDR, ifa);
		rtm_send(rt, RTM_ADD, 0, rtableid);
		rtfree(rt);
	}
	return (error);
}
1058 
/*
 * Delete the route for address ``dst'' through the interface address
 * ``ifa'' from the routing table of the interface's rdomain, and
 * notify userland via the routing socket.
 * Returns 0 on success, ENOBUFS if no mbuf could be allocated for
 * the masked destination, or an errno from rtrequest_delete().
 */
int
rt_ifa_del(struct ifaddr *ifa, int flags, struct sockaddr *dst)
{
	struct ifnet		*ifp = ifa->ifa_ifp;
	struct rtentry		*rt;
	struct mbuf		*m = NULL;
	struct sockaddr		*deldst;
	struct rt_addrinfo	 info;
	struct sockaddr_rtlabel	 sa_rl;
	unsigned int		 rtableid = ifp->if_rdomain;
	uint8_t			 prio = ifp->if_priority + RTP_STATIC;
	int			 error;

#ifdef MPLS
	if ((flags & RTF_MPLS) == RTF_MPLS)
		/* MPLS routes only exist in rdomain 0 */
		rtableid = 0;
#endif /* MPLS */

	/*
	 * For network routes, look up the masked destination so the
	 * exact entry inserted by rt_ifa_add() is matched.  The masked
	 * copy lives in a temporary mbuf freed below.
	 */
	if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) {
		m = m_get(M_DONTWAIT, MT_SONAME);
		if (m == NULL)
			return (ENOBUFS);
		deldst = mtod(m, struct sockaddr *);
		rt_maskedcopy(dst, deldst, ifa->ifa_netmask);
		dst = deldst;
	}

	memset(&info, 0, sizeof(info));
	info.rti_ifa = ifa;
	info.rti_flags = flags;
	info.rti_info[RTAX_DST] = dst;
	if ((flags & RTF_LLINFO) == 0)
		info.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
	info.rti_info[RTAX_LABEL] = rtlabel_id2sa(ifp->if_rtlabelid, &sa_rl);

	if ((flags & RTF_HOST) == 0)
		info.rti_info[RTAX_NETMASK] = ifa->ifa_netmask;

	/* RTF_CONNECTED takes precedence over RTF_LOCAL/RTF_BROADCAST. */
	if (flags & (RTF_LOCAL|RTF_BROADCAST))
		prio = RTP_LOCAL;

	if (flags & RTF_CONNECTED)
		prio = ifp->if_priority + RTP_CONNECTED;

	error = rtrequest_delete(&info, prio, ifp, &rt, rtableid);
	if (error == 0) {
		rtm_send(rt, RTM_DELETE, 0, rtableid);
		/* Mirror rt_ifa_add(): local routes track addresses. */
		if (flags & RTF_LOCAL)
			rtm_addr(rt, RTM_DELADDR, ifa);
		rtfree(rt);
	}
	m_free(m);

	return (error);
}
1115 
1116 /*
1117  * Add ifa's address as a local rtentry.
1118  */
1119 int
1120 rt_ifa_addlocal(struct ifaddr *ifa)
1121 {
1122 	struct rtentry *rt;
1123 	u_int flags = RTF_HOST|RTF_LOCAL;
1124 	int error = 0;
1125 
1126 	/*
1127 	 * If the configured address correspond to the magical "any"
1128 	 * address do not add a local route entry because that might
1129 	 * corrupt the routing tree which uses this value for the
1130 	 * default routes.
1131 	 */
1132 	switch (ifa->ifa_addr->sa_family) {
1133 	case AF_INET:
1134 		if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY)
1135 			return (0);
1136 		break;
1137 #ifdef INET6
1138 	case AF_INET6:
1139 		if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr,
1140 		    &in6addr_any))
1141 			return (0);
1142 		break;
1143 #endif
1144 	default:
1145 		break;
1146 	}
1147 
1148 	if (!ISSET(ifa->ifa_ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT)))
1149 		flags |= RTF_LLINFO;
1150 
1151 	/* If there is no local entry, allocate one. */
1152 	rt = rtalloc(ifa->ifa_addr, 0, ifa->ifa_ifp->if_rdomain);
1153 	if (rt == NULL || ISSET(rt->rt_flags, flags) != flags)
1154 		error = rt_ifa_add(ifa, flags, ifa->ifa_addr);
1155 	rtfree(rt);
1156 
1157 	return (error);
1158 }
1159 
1160 /*
 * Remove the local rtentry of ifa's address if it exists.
1162  */
1163 int
1164 rt_ifa_dellocal(struct ifaddr *ifa)
1165 {
1166 	struct rtentry *rt;
1167 	u_int flags = RTF_HOST|RTF_LOCAL;
1168 	int error = 0;
1169 
1170 	/*
1171 	 * We do not add local routes for such address, so do not bother
1172 	 * removing them.
1173 	 */
1174 	switch (ifa->ifa_addr->sa_family) {
1175 	case AF_INET:
1176 		if (satosin(ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY)
1177 			return (0);
1178 		break;
1179 #ifdef INET6
1180 	case AF_INET6:
1181 		if (IN6_ARE_ADDR_EQUAL(&satosin6(ifa->ifa_addr)->sin6_addr,
1182 		    &in6addr_any))
1183 			return (0);
1184 		break;
1185 #endif
1186 	default:
1187 		break;
1188 	}
1189 
1190 	if (!ISSET(ifa->ifa_ifp->if_flags, (IFF_LOOPBACK|IFF_POINTOPOINT)))
1191 		flags |= RTF_LLINFO;
1192 
1193 	/*
1194 	 * Before deleting, check if a corresponding local host
1195 	 * route surely exists.  With this check, we can avoid to
1196 	 * delete an interface direct route whose destination is same
1197 	 * as the address being removed.  This can happen when removing
1198 	 * a subnet-router anycast address on an interface attached
1199 	 * to a shared medium.
1200 	 */
1201 	rt = rtalloc(ifa->ifa_addr, 0, ifa->ifa_ifp->if_rdomain);
1202 	if (rt != NULL && ISSET(rt->rt_flags, flags) == flags)
1203 		error = rt_ifa_del(ifa, flags, ifa->ifa_addr);
1204 	rtfree(rt);
1205 
1206 	return (error);
1207 }
1208 
1209 /*
 * Remove all routes attached to ``ifa''.
1211  */
1212 void
1213 rt_ifa_purge(struct ifaddr *ifa)
1214 {
1215 	struct ifnet		*ifp = ifa->ifa_ifp;
1216 	unsigned int		 rtableid;
1217 	int			 i;
1218 
1219 	KASSERT(ifp != NULL);
1220 
1221 	for (rtableid = 0; rtableid < rtmap_limit; rtableid++) {
1222 		/* skip rtables that are not in the rdomain of the ifp */
1223 		if (rtable_l2(rtableid) != ifp->if_rdomain)
1224 			continue;
1225 		for (i = 1; i <= AF_MAX; i++) {
1226 			rtable_walk(rtableid, i, rt_ifa_purge_walker, ifa);
1227 		}
1228 	}
1229 }
1230 
1231 int
1232 rt_ifa_purge_walker(struct rtentry *rt, void *vifa, unsigned int rtableid)
1233 {
1234 	struct ifaddr		*ifa = vifa;
1235 	struct ifnet		*ifp = ifa->ifa_ifp;
1236 	int			 error;
1237 
1238 	if (rt->rt_ifa != ifa)
1239 		return (0);
1240 
1241 	if ((error = rtdeletemsg(rt, ifp, rtableid))) {
1242 		return (error);
1243 	}
1244 
1245 	return (EAGAIN);
1246 
1247 }
1248 
1249 /*
1250  * Route timer routines.  These routes allow functions to be called
1251  * for various routes at any time.  This is useful in supporting
1252  * path MTU discovery and redirect route deletion.
1253  *
1254  * This is similar to some BSDI internal functions, but it provides
1255  * for multiple queues for efficiency's sake...
1256  */
1257 
/* List of all active timer queues, scanned by rt_timer_timer(). */
LIST_HEAD(, rttimer_queue)	rttimer_queue_head;
/* Set once the pool and the periodic timeout have been set up. */
static int			rt_init_done = 0;

/*
 * Run the action of an expired timer ``r'': invoke the registered
 * callback if there is one, otherwise delete the route by default.
 */
#define RTTIMER_CALLOUT(r)	{					\
	if (r->rtt_func != NULL) {					\
		(*r->rtt_func)(r->rtt_rt, r);				\
	} else {							\
		struct ifnet *ifp;					\
									\
		ifp = if_get(r->rtt_rt->rt_ifidx);			\
		if (ifp != NULL) 					\
			rtdeletemsg(r->rtt_rt, ifp, r->rtt_tableid);	\
		if_put(ifp);						\
	}								\
}
1273 
1274 /*
1275  * Some subtle order problems with domain initialization mean that
1276  * we cannot count on this being run from rt_init before various
1277  * protocol initializations are done.  Therefore, we make sure
1278  * that this is run when the first queue is added...
1279  */
1280 
/*
 * One-time initialization of the route timer machinery: create the
 * rttimer pool and arm the periodic scan.  Called lazily from
 * rt_timer_queue_create() because of domain init ordering (above).
 */
void
rt_timer_init(void)
{
	static struct timeout	rt_timer_timeout;

	/* Must only ever run once. */
	if (rt_init_done)
		panic("rt_timer_init: already initialized");

	pool_init(&rttimer_pool, sizeof(struct rttimer), 0, IPL_SOFTNET, 0,
	    "rttmr", NULL);

	LIST_INIT(&rttimer_queue_head);
	/* rt_timer_timer() re-arms itself every second from here on. */
	timeout_set_proc(&rt_timer_timeout, rt_timer_timer, &rt_timer_timeout);
	timeout_add_sec(&rt_timer_timeout, 1);
	rt_init_done = 1;
}
1297 
1298 struct rttimer_queue *
1299 rt_timer_queue_create(u_int timeout)
1300 {
1301 	struct rttimer_queue	*rtq;
1302 
1303 	if (rt_init_done == 0)
1304 		rt_timer_init();
1305 
1306 	if ((rtq = malloc(sizeof(*rtq), M_RTABLE, M_NOWAIT|M_ZERO)) == NULL)
1307 		return (NULL);
1308 
1309 	rtq->rtq_timeout = timeout;
1310 	rtq->rtq_count = 0;
1311 	TAILQ_INIT(&rtq->rtq_head);
1312 	LIST_INSERT_HEAD(&rttimer_queue_head, rtq, rtq_link);
1313 
1314 	return (rtq);
1315 }
1316 
/*
 * Change the timeout of ``rtq''.  Since expiry is evaluated as
 * rtt_time + rtq_timeout at scan time (see rt_timer_timer()), this
 * affects timers already on the queue as well as future ones.
 */
void
rt_timer_queue_change(struct rttimer_queue *rtq, long timeout)
{
	rtq->rtq_timeout = timeout;
}
1322 
/*
 * Tear down ``rtq'': fire the action of every remaining timer,
 * release the timers, then unlink and free the queue itself.
 */
void
rt_timer_queue_destroy(struct rttimer_queue *rtq)
{
	struct rttimer	*r;

	NET_ASSERT_LOCKED();

	while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL) {
		/* Unlink from both the route's list and the queue. */
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
		RTTIMER_CALLOUT(r);
		pool_put(&rttimer_pool, r);
		/* rtq_count should mirror the list length exactly. */
		if (rtq->rtq_count > 0)
			rtq->rtq_count--;
		else
			printf("rt_timer_queue_destroy: rtq_count reached 0\n");
	}

	LIST_REMOVE(rtq, rtq_link);
	free(rtq, M_RTABLE, sizeof(*rtq));
}
1344 
/*
 * Return the number of timers currently linked on ``rtq''.
 */
unsigned long
rt_timer_queue_count(struct rttimer_queue *rtq)
{
	return (rtq->rtq_count);
}
1350 
/*
 * Remove and free all timers attached to ``rt'' without running
 * their actions (used when the route itself goes away).
 */
void
rt_timer_remove_all(struct rtentry *rt)
{
	struct rttimer	*r;

	while ((r = LIST_FIRST(&rt->rt_timer)) != NULL) {
		LIST_REMOVE(r, rtt_link);
		TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
		/* Keep the owning queue's count in sync. */
		if (r->rtt_queue->rtq_count > 0)
			r->rtt_queue->rtq_count--;
		else
			printf("rt_timer_remove_all: rtq_count reached 0\n");
		pool_put(&rttimer_pool, r);
	}
}
1366 
1367 int
1368 rt_timer_add(struct rtentry *rt, void (*func)(struct rtentry *,
1369     struct rttimer *), struct rttimer_queue *queue, u_int rtableid)
1370 {
1371 	struct rttimer	*r;
1372 	long		 current_time;
1373 
1374 	current_time = time_uptime;
1375 	rt->rt_expire = time_uptime + queue->rtq_timeout;
1376 
1377 	/*
1378 	 * If there's already a timer with this action, destroy it before
1379 	 * we add a new one.
1380 	 */
1381 	LIST_FOREACH(r, &rt->rt_timer, rtt_link) {
1382 		if (r->rtt_func == func) {
1383 			LIST_REMOVE(r, rtt_link);
1384 			TAILQ_REMOVE(&r->rtt_queue->rtq_head, r, rtt_next);
1385 			if (r->rtt_queue->rtq_count > 0)
1386 				r->rtt_queue->rtq_count--;
1387 			else
1388 				printf("rt_timer_add: rtq_count reached 0\n");
1389 			pool_put(&rttimer_pool, r);
1390 			break;  /* only one per list, so we can quit... */
1391 		}
1392 	}
1393 
1394 	r = pool_get(&rttimer_pool, PR_NOWAIT | PR_ZERO);
1395 	if (r == NULL)
1396 		return (ENOBUFS);
1397 
1398 	r->rtt_rt = rt;
1399 	r->rtt_time = current_time;
1400 	r->rtt_func = func;
1401 	r->rtt_queue = queue;
1402 	r->rtt_tableid = rtableid;
1403 	LIST_INSERT_HEAD(&rt->rt_timer, r, rtt_link);
1404 	TAILQ_INSERT_TAIL(&queue->rtq_head, r, rtt_next);
1405 	r->rtt_queue->rtq_count++;
1406 
1407 	return (0);
1408 }
1409 
/*
 * Periodic scan, re-arming itself every second: run and reclaim all
 * timers whose queue timeout has elapsed.
 */
void
rt_timer_timer(void *arg)
{
	struct timeout		*to = (struct timeout *)arg;
	struct rttimer_queue	*rtq;
	struct rttimer		*r;
	long			 current_time;

	current_time = time_uptime;

	NET_LOCK();
	LIST_FOREACH(rtq, &rttimer_queue_head, rtq_link) {
		/*
		 * Entries are appended at the tail, so each queue is
		 * ordered by rtt_time and the scan can stop at the
		 * first unexpired entry.
		 */
		while ((r = TAILQ_FIRST(&rtq->rtq_head)) != NULL &&
		    (r->rtt_time + rtq->rtq_timeout) < current_time) {
			LIST_REMOVE(r, rtt_link);
			TAILQ_REMOVE(&rtq->rtq_head, r, rtt_next);
			RTTIMER_CALLOUT(r);
			pool_put(&rttimer_pool, r);
			if (rtq->rtq_count > 0)
				rtq->rtq_count--;
			else
				printf("rt_timer_timer: rtq_count reached 0\n");
		}
	}
	NET_UNLOCK();

	timeout_add_sec(to, 1);
}
1438 
/*
 * Return the numeric id of route label ``name'', creating the label
 * if it does not exist yet, and take a reference on it.  Returns 0
 * (no label) for an empty name, on allocation failure, or when all
 * label ids are in use.
 */
u_int16_t
rtlabel_name2id(char *name)
{
	struct rt_label		*label, *p;
	u_int16_t		 new_id = 1;

	if (!name[0])
		return (0);

	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
		if (strcmp(name, label->rtl_name) == 0) {
			label->rtl_ref++;
			return (label->rtl_id);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */
	/* The list is kept sorted by id, so a gap means a free id. */
	TAILQ_FOREACH(p, &rt_labels, rtl_entry) {
		if (p->rtl_id != new_id)
			break;
		new_id = p->rtl_id + 1;
	}
	if (new_id > LABELID_MAX)
		return (0);

	label = malloc(sizeof(*label), M_RTABLE, M_NOWAIT|M_ZERO);
	if (label == NULL)
		return (0);
	strlcpy(label->rtl_name, name, sizeof(label->rtl_name));
	label->rtl_id = new_id;
	/* First reference, held by the caller. */
	label->rtl_ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, label, rtl_entry);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(&rt_labels, label, rtl_entry);

	return (label->rtl_id);
}
1481 
1482 const char *
1483 rtlabel_id2name(u_int16_t id)
1484 {
1485 	struct rt_label	*label;
1486 
1487 	TAILQ_FOREACH(label, &rt_labels, rtl_entry)
1488 		if (label->rtl_id == id)
1489 			return (label->rtl_name);
1490 
1491 	return (NULL);
1492 }
1493 
1494 struct sockaddr *
1495 rtlabel_id2sa(u_int16_t labelid, struct sockaddr_rtlabel *sa_rl)
1496 {
1497 	const char	*label;
1498 
1499 	if (labelid == 0 || (label = rtlabel_id2name(labelid)) == NULL)
1500 		return (NULL);
1501 
1502 	bzero(sa_rl, sizeof(*sa_rl));
1503 	sa_rl->sr_len = sizeof(*sa_rl);
1504 	sa_rl->sr_family = AF_UNSPEC;
1505 	strlcpy(sa_rl->sr_label, label, sizeof(sa_rl->sr_label));
1506 
1507 	return ((struct sockaddr *)sa_rl);
1508 }
1509 
1510 void
1511 rtlabel_unref(u_int16_t id)
1512 {
1513 	struct rt_label	*p, *next;
1514 
1515 	if (id == 0)
1516 		return;
1517 
1518 	TAILQ_FOREACH_SAFE(p, &rt_labels, rtl_entry, next) {
1519 		if (id == p->rtl_id) {
1520 			if (--p->rtl_ref == 0) {
1521 				TAILQ_REMOVE(&rt_labels, p, rtl_entry);
1522 				free(p, M_RTABLE, sizeof(*p));
1523 			}
1524 			break;
1525 		}
1526 	}
1527 }
1528 
1529 void
1530 rt_if_track(struct ifnet *ifp)
1531 {
1532 	int i;
1533 	u_int tid;
1534 
1535 	for (tid = 0; tid < rtmap_limit; tid++) {
1536 		/* skip rtables that are not in the rdomain of the ifp */
1537 		if (rtable_l2(tid) != ifp->if_rdomain)
1538 			continue;
1539 		for (i = 1; i <= AF_MAX; i++) {
1540 			if (!rtable_mpath_capable(tid, i))
1541 				continue;
1542 
1543 			rtable_walk(tid, i, rt_if_linkstate_change, ifp);
1544 		}
1545 	}
1546 }
1547 
/*
 * Walker used by rt_if_track(): bring routes of interface ``arg''
 * up or down (by toggling RTF_UP and reprioritizing them) to follow
 * the interface's link state.  Returns 0 to continue the walk,
 * EAGAIN after deleting a route, or an errno on failure.
 */
int
rt_if_linkstate_change(struct rtentry *rt, void *arg, u_int id)
{
	struct ifnet *ifp = arg;
	struct sockaddr_in6 sa_mask;
	int error;

	/* Only consider routes attached to this interface. */
	if (rt->rt_ifidx != ifp->if_index)
		return (0);

	/* Local routes are always usable. */
	if (rt->rt_flags & RTF_LOCAL) {
		rt->rt_flags |= RTF_UP;
		return (0);
	}

	if (LINK_STATE_IS_UP(ifp->if_link_state) && ifp->if_flags & IFF_UP) {
		/* Already up, nothing to do. */
		if (ISSET(rt->rt_flags, RTF_UP))
			return (0);

		/* bring route up */
		rt->rt_flags |= RTF_UP;
		error = rtable_mpath_reprio(id, rt_key(rt),
		    rt_plen2mask(rt, &sa_mask), rt->rt_priority & RTP_MASK, rt);
	} else {
		/*
		 * Remove redirected and cloned routes (mainly ARP)
		 * from down interfaces so we have a chance to get
		 * new routes from a better source.
		 */
		if (ISSET(rt->rt_flags, RTF_CLONED|RTF_DYNAMIC) &&
		    !ISSET(rt->rt_flags, RTF_CACHED|RTF_BFD)) {
			if ((error = rtdeletemsg(rt, ifp, id)))
				return (error);
			/* The tree changed, restart the walk. */
			return (EAGAIN);
		}

		/* Already down, nothing to do. */
		if (!ISSET(rt->rt_flags, RTF_UP))
			return (0);

		/* take route down */
		rt->rt_flags &= ~RTF_UP;
		error = rtable_mpath_reprio(id, rt_key(rt),
		    rt_plen2mask(rt, &sa_mask), rt->rt_priority | RTP_DOWN, rt);
	}
	if_group_routechange(rt_key(rt), rt_plen2mask(rt, &sa_mask));

	return (error);
}
1597 
1598 struct sockaddr *
1599 rt_plentosa(sa_family_t af, int plen, struct sockaddr_in6 *sa_mask)
1600 {
1601 	struct sockaddr_in	*sin = (struct sockaddr_in *)sa_mask;
1602 #ifdef INET6
1603 	struct sockaddr_in6	*sin6 = (struct sockaddr_in6 *)sa_mask;
1604 #endif
1605 
1606 	KASSERT(plen >= 0 || plen == -1);
1607 
1608 	if (plen == -1)
1609 		return (NULL);
1610 
1611 	memset(sa_mask, 0, sizeof(*sa_mask));
1612 
1613 	switch (af) {
1614 	case AF_INET:
1615 		sin->sin_family = AF_INET;
1616 		sin->sin_len = sizeof(struct sockaddr_in);
1617 		in_prefixlen2mask(&sin->sin_addr, plen);
1618 		break;
1619 #ifdef INET6
1620 	case AF_INET6:
1621 		sin6->sin6_family = AF_INET6;
1622 		sin6->sin6_len = sizeof(struct sockaddr_in6);
1623 		in6_prefixlen2mask(&sin6->sin6_addr, plen);
1624 		break;
1625 #endif /* INET6 */
1626 	default:
1627 		return (NULL);
1628 	}
1629 
1630 	return ((struct sockaddr *)sa_mask);
1631 }
1632 
/*
 * Build in ``sa_mask'' the netmask sockaddr of ``rt'' derived from
 * its prefix length; NULL when no netmask applies.
 */
struct sockaddr *
rt_plen2mask(struct rtentry *rt, struct sockaddr_in6 *sa_mask)
{
	return (rt_plentosa(rt_key(rt)->sa_family, rt_plen(rt), sa_mask));
}
1638 
1639 #ifdef DDB
1640 #include <machine/db_machdep.h>
1641 #include <ddb/db_output.h>
1642 
1643 void
1644 db_print_sa(struct sockaddr *sa)
1645 {
1646 	int len;
1647 	u_char *p;
1648 
1649 	if (sa == NULL) {
1650 		db_printf("[NULL]");
1651 		return;
1652 	}
1653 
1654 	p = (u_char *)sa;
1655 	len = sa->sa_len;
1656 	db_printf("[");
1657 	while (len > 0) {
1658 		db_printf("%d", *p);
1659 		p++;
1660 		len--;
1661 		if (len)
1662 			db_printf(",");
1663 	}
1664 	db_printf("]\n");
1665 }
1666 
/*
 * Dump the addresses, flags and counters of an interface address.
 */
void
db_print_ifa(struct ifaddr *ifa)
{
	if (ifa == NULL)
		return;
	db_printf("  ifa_addr=");
	db_print_sa(ifa->ifa_addr);
	db_printf("  ifa_dsta=");
	db_print_sa(ifa->ifa_dstaddr);
	db_printf("  ifa_mask=");
	db_print_sa(ifa->ifa_netmask);
	db_printf("  flags=0x%x, refcnt=%d, metric=%d\n",
	    ifa->ifa_flags, ifa->ifa_refcnt, ifa->ifa_metric);
}
1681 
1682 /*
 * Function to pass to rtable_walk().
1684  * Return non-zero error to abort walk.
1685  */
/*
 * Walker callback that dumps one route entry of table ``id''.
 * Always returns 0 so the walk visits every entry.
 */
int
db_show_rtentry(struct rtentry *rt, void *w, unsigned int id)
{
	db_printf("rtentry=%p", rt);

	db_printf(" flags=0x%x refcnt=%d use=%llu expire=%lld rtableid=%u\n",
	    rt->rt_flags, rt->rt_refcnt, rt->rt_use, rt->rt_expire, id);

	db_printf(" key="); db_print_sa(rt_key(rt));
	db_printf(" plen=%d", rt_plen(rt));
	db_printf(" gw="); db_print_sa(rt->rt_gateway);
	db_printf(" ifidx=%u ", rt->rt_ifidx);
	db_printf(" ifa=%p\n", rt->rt_ifa);
	db_print_ifa(rt->rt_ifa);

	db_printf(" gwroute=%p llinfo=%p\n", rt->rt_gwroute, rt->rt_llinfo);
	return (0);
}
1704 
1705 /*
1706  * Function to print all the route trees.
1707  * Use this from ddb:  "call db_show_arptab"
1708  */
/*
 * Dump the AF_INET routing tree of table 0.
 * Use this from ddb:  "call db_show_arptab"
 */
int
db_show_arptab(void)
{
	db_printf("Route tree for AF_INET\n");
	rtable_walk(0, AF_INET, db_show_rtentry, NULL);
	return (0);
}
1716 #endif /* DDB */
1717