xref: /dflybsd-src/sys/net/route.c (revision 8bb2400d5e4f21e6ed6fe870515c2d9a1cdfb5c7)
1 /*
2  * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Jeffrey M. Hsu.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of The DragonFly Project nor the names of its
16  *    contributors may be used to endorse or promote products derived
17  *    from this software without specific, prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
23  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1980, 1986, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. All advertising materials mentioning features or use of this software
46  *    must display the following acknowledgement:
47  *	This product includes software developed by the University of
48  *	California, Berkeley and its contributors.
49  * 4. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)route.c	8.3 (Berkeley) 1/9/95
66  * $FreeBSD: src/sys/net/route.c,v 1.59.2.10 2003/01/17 08:04:00 ru Exp $
67  */
68 
69 #include "opt_inet.h"
70 #include "opt_mpls.h"
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/socket.h>
77 #include <sys/domain.h>
78 #include <sys/kernel.h>
79 #include <sys/sysctl.h>
80 #include <sys/globaldata.h>
81 #include <sys/thread.h>
82 
83 #include <net/if.h>
84 #include <net/route.h>
85 #include <net/netisr.h>
86 
87 #include <netinet/in.h>
88 #include <net/ip_mroute/ip_mroute.h>
89 
90 #include <sys/thread2.h>
91 #include <sys/msgport2.h>
92 #include <net/netmsg2.h>
93 
94 #ifdef MPLS
95 #include <netproto/mpls/mpls.h>
96 #endif
97 
98 static struct rtstatistics rtstatistics_percpu[MAXCPU];
99 #ifdef SMP
100 #define rtstat	rtstatistics_percpu[mycpuid]
101 #else
102 #define rtstat	rtstatistics_percpu[0]
103 #endif
104 
105 struct radix_node_head *rt_tables[MAXCPU][AF_MAX+1];
106 struct lwkt_port *rt_ports[MAXCPU];
107 
108 static void	rt_maskedcopy (struct sockaddr *, struct sockaddr *,
109 			       struct sockaddr *);
110 static void rtable_init(void);
111 static void rtable_service_loop(void *dummy);
112 static void rtinit_rtrequest_callback(int, int, struct rt_addrinfo *,
113 				      struct rtentry *, void *);
114 
115 #ifdef SMP
116 static void rtredirect_msghandler(netmsg_t msg);
117 static void rtrequest1_msghandler(netmsg_t msg);
118 #endif
119 static void rtsearch_msghandler(netmsg_t msg);
120 static void rtmask_add_msghandler(netmsg_t msg);
121 
122 static int rt_setshims(struct rtentry *, struct sockaddr **);
123 
124 SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW, 0, "Routing");
125 
126 #ifdef ROUTE_DEBUG
127 static int route_debug = 1;
128 SYSCTL_INT(_net_route, OID_AUTO, route_debug, CTLFLAG_RW,
129            &route_debug, 0, "");
130 #endif
131 
132 int route_assert_owner_access = 1;
133 SYSCTL_INT(_net_route, OID_AUTO, assert_owner_access, CTLFLAG_RW,
134            &route_assert_owner_access, 0, "");
135 
136 u_long route_kmalloc_limit = 0;
137 TUNABLE_ULONG("net.route.kmalloc_limit", &route_kmalloc_limit);
138 
139 /*
140  * Initialize the route table(s) for protocol domains and
141  * create a helper thread which will be responsible for updating
142  * route table entries on each cpu.
143  */
144 void
145 route_init(void)
146 {
147 	int cpu;
148 	thread_t rtd;
149 
150 	for (cpu = 0; cpu < ncpus; ++cpu)
151 		bzero(&rtstatistics_percpu[cpu], sizeof(struct rtstatistics));
152 	rn_init();      /* initialize all zeroes, all ones, mask table */
153 	rtable_init();	/* call dom_rtattach() on each cpu */
154 
155 	for (cpu = 0; cpu < ncpus; cpu++) {
156 		lwkt_create(rtable_service_loop, NULL, &rtd, NULL,
157 			    0, cpu, "rtable_cpu %d", cpu);
158 		rt_ports[cpu] = &rtd->td_msgport;
159 	}
160 
161 	if (route_kmalloc_limit)
162 		kmalloc_raise_limit(M_RTABLE, route_kmalloc_limit);
163 }
164 
165 static void
166 rtable_init_oncpu(netmsg_t msg)
167 {
168 	struct domain *dom;
169 	int cpu = mycpuid;
170 
171 	SLIST_FOREACH(dom, &domains, dom_next) {
172 		if (dom->dom_rtattach) {
173 			dom->dom_rtattach(
174 				(void **)&rt_tables[cpu][dom->dom_family],
175 			        dom->dom_rtoffset);
176 		}
177 	}
178 	ifnet_forwardmsg(&msg->lmsg, cpu + 1);
179 }
180 
181 static void
182 rtable_init(void)
183 {
184 	struct netmsg_base msg;
185 
186 	netmsg_init(&msg, NULL, &curthread->td_msgport, 0, rtable_init_oncpu);
187 	ifnet_domsg(&msg.lmsg, 0);
188 }
189 
190 /*
191  * Our per-cpu table management protocol thread.  All route table operations
192  * are sequentially chained through all cpus starting at cpu #0 in order to
193  * maintain duplicate route tables on each cpu.  Having a separate route
194  * table management thread allows the protocol and interrupt threads to
195  * issue route table changes.
196  */
197 static void
198 rtable_service_loop(void *dummy __unused)
199 {
200 	netmsg_base_t msg;
201 	thread_t td = curthread;
202 
203 	while ((msg = lwkt_waitport(&td->td_msgport, 0)) != NULL) {
204 		msg->nm_dispatch((netmsg_t)msg);
205 	}
206 }
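/*
 * Illustrative note on the chaining pattern used below: an originating
 * thread typically sends a netmsg to cpu0's table thread with
 * lwkt_domsg(rtable_portfn(0), ...); the per-cpu handler applies the
 * operation to the local table and then either forwards the same message
 * to rtable_portfn(mycpuid + 1) with lwkt_forwardmsg() or, on the last
 * cpu, replies to the originator with lwkt_replymsg().  See
 * rtrequest1_msghandler() and rtsearch_msghandler() for examples.
 */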
207 
208 /*
209  * Routing statistics.
210  */
211 #ifdef SMP
212 static int
213 sysctl_rtstatistics(SYSCTL_HANDLER_ARGS)
214 {
215 	int cpu, error = 0;
216 
217 	for (cpu = 0; cpu < ncpus; ++cpu) {
218 		if ((error = SYSCTL_OUT(req, &rtstatistics_percpu[cpu],
219 					sizeof(struct rtstatistics))))
220 				break;
221 		if ((error = SYSCTL_IN(req, &rtstatistics_percpu[cpu],
222 					sizeof(struct rtstatistics))))
223 				break;
224 	}
225 
226 	return (error);
227 }
228 SYSCTL_PROC(_net_route, OID_AUTO, stats, (CTLTYPE_OPAQUE|CTLFLAG_RW),
229 	0, 0, sysctl_rtstatistics, "S,rtstatistics", "Routing statistics");
230 #else
231 SYSCTL_STRUCT(_net_route, OID_AUTO, stats, CTLFLAG_RW, &rtstat, rtstatistics,
232 "Routing statistics");
233 #endif
234 
235 /*
236  * Packet routing routines.
237  */
238 
239 /*
240  * Look up and fill in the "ro_rt" rtentry field in a route structure given
241  * an address in the "ro_dst" field.  Always send a report on a miss and
242  * always clone routes.
243  */
244 void
245 rtalloc(struct route *ro)
246 {
247 	rtalloc_ign(ro, 0UL);
248 }
249 
250 /*
251  * Look up and fill in the "ro_rt" rtentry field in a route structure given
252  * an address in the "ro_dst" field.  Always send a report on a miss and
253  * optionally clone routes when RTF_CLONING or RTF_PRCLONING are not being
254  * ignored.
255  */
256 void
257 rtalloc_ign(struct route *ro, u_long ignoreflags)
258 {
259 	if (ro->ro_rt != NULL) {
260 		if (ro->ro_rt->rt_ifp != NULL && ro->ro_rt->rt_flags & RTF_UP)
261 			return;
262 		rtfree(ro->ro_rt);
263 		ro->ro_rt = NULL;
264 	}
265 	ro->ro_rt = _rtlookup(&ro->ro_dst, RTL_REPORTMSG, ignoreflags);
266 }
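/*
 * Illustrative usage sketch (not part of this file; the local names and
 * the address below are hypothetical): a typical caller embeds a struct
 * route, fills in ro_dst and lets rtalloc() cache the result in ro_rt.
 *
 *	struct route ro;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&ro.ro_dst;
 *
 *	bzero(&ro, sizeof(ro));
 *	sin->sin_len = sizeof(struct sockaddr_in);
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	rtalloc(&ro);
 *	if (ro.ro_rt != NULL) {
 *		... use ro.ro_rt ...
 *		RTFREE(ro.ro_rt);
 *	}
 */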
267 
268 /*
269  * Look up the route that matches the given "dst" address.
270  *
271  * Route lookup can have the side-effect of creating and returning
272  * a cloned route instead when "dst" matches a cloning route and the
273  * RTF_CLONING and RTF_PRCLONING flags are not being ignored.
274  *
275  * Any route returned has its reference count incremented.
276  */
277 struct rtentry *
278 _rtlookup(struct sockaddr *dst, boolean_t generate_report, u_long ignore)
279 {
280 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
281 	struct rtentry *rt;
282 
283 	if (rnh == NULL)
284 		goto unreach;
285 
286 	/*
287 	 * Look up route in the radix tree.
288 	 */
289 	rt = (struct rtentry *) rnh->rnh_matchaddr((char *)dst, rnh);
290 	if (rt == NULL)
291 		goto unreach;
292 
293 	/*
294 	 * Handle cloning routes.
295 	 */
296 	if ((rt->rt_flags & ~ignore & (RTF_CLONING | RTF_PRCLONING)) != 0) {
297 		struct rtentry *clonedroute;
298 		int error;
299 
300 		clonedroute = rt;	/* copy in/copy out parameter */
301 		error = rtrequest(RTM_RESOLVE, dst, NULL, NULL, 0,
302 				  &clonedroute);	/* clone the route */
303 		if (error != 0) {	/* cloning failed */
304 			if (generate_report)
305 				rt_dstmsg(RTM_MISS, dst, error);
306 			rt->rt_refcnt++;
307 			return (rt);	/* return the uncloned route */
308 		}
309 		if (generate_report) {
310 			if (clonedroute->rt_flags & RTF_XRESOLVE)
311 				rt_dstmsg(RTM_RESOLVE, dst, 0);
312 			else
313 				rt_rtmsg(RTM_ADD, clonedroute,
314 					 clonedroute->rt_ifp, 0);
315 		}
316 		return (clonedroute);	/* return cloned route */
317 	}
318 
319 	/*
320 	 * Increment the reference count of the matched route and return.
321 	 */
322 	rt->rt_refcnt++;
323 	return (rt);
324 
325 unreach:
326 	rtstat.rts_unreach++;
327 	if (generate_report)
328 		rt_dstmsg(RTM_MISS, dst, 0);
329 	return (NULL);
330 }
331 
332 void
333 rtfree(struct rtentry *rt)
334 {
335 	if (rt->rt_cpuid == mycpuid)
336 		rtfree_oncpu(rt);
337 	else
338 		rtfree_remote(rt);
339 }
340 
341 void
342 rtfree_oncpu(struct rtentry *rt)
343 {
344 	KKASSERT(rt->rt_cpuid == mycpuid);
345 	KASSERT(rt->rt_refcnt > 0, ("rtfree: rt_refcnt %ld", rt->rt_refcnt));
346 
347 	--rt->rt_refcnt;
348 	if (rt->rt_refcnt == 0) {
349 		struct radix_node_head *rnh =
350 		    rt_tables[mycpuid][rt_key(rt)->sa_family];
351 
352 		if (rnh->rnh_close)
353 			rnh->rnh_close((struct radix_node *)rt, rnh);
354 		if (!(rt->rt_flags & RTF_UP)) {
355 			/* deallocate route */
356 			if (rt->rt_ifa != NULL)
357 				IFAFREE(rt->rt_ifa);
358 			if (rt->rt_parent != NULL)
359 				RTFREE(rt->rt_parent);	/* recursive call! */
360 			Free(rt_key(rt));
361 			Free(rt);
362 		}
363 	}
364 }
365 
366 static void
367 rtfree_remote_dispatch(netmsg_t msg)
368 {
369 	struct lwkt_msg *lmsg = &msg->lmsg;
370 	struct rtentry *rt = lmsg->u.ms_resultp;
371 
372 	rtfree_oncpu(rt);
373 	lwkt_replymsg(lmsg, 0);
374 }
375 
376 void
377 rtfree_remote(struct rtentry *rt)
378 {
379 	struct netmsg_base msg;
380 	struct lwkt_msg *lmsg;
381 
382 	KKASSERT(rt->rt_cpuid != mycpuid);
383 
384 	if (route_assert_owner_access) {
385 		panic("rt remote free rt_cpuid %d, mycpuid %d",
386 		      rt->rt_cpuid, mycpuid);
387 	} else {
388 		kprintf("rt remote free rt_cpuid %d, mycpuid %d\n",
389 			rt->rt_cpuid, mycpuid);
390 		print_backtrace(-1);
391 	}
392 
393 	netmsg_init(&msg, NULL, &curthread->td_msgport,
394 		    0, rtfree_remote_dispatch);
395 	lmsg = &msg.lmsg;
396 	lmsg->u.ms_resultp = rt;
397 
398 	lwkt_domsg(rtable_portfn(rt->rt_cpuid), lmsg, 0);
399 }
400 
401 static int
402 rtredirect_oncpu(struct sockaddr *dst, struct sockaddr *gateway,
403 		 struct sockaddr *netmask, int flags, struct sockaddr *src)
404 {
405 	struct rtentry *rt = NULL;
406 	struct rt_addrinfo rtinfo;
407 	struct ifaddr *ifa;
408 	u_long *stat = NULL;
409 	int error;
410 
411 	/* verify the gateway is directly reachable */
412 	if ((ifa = ifa_ifwithnet(gateway)) == NULL) {
413 		error = ENETUNREACH;
414 		goto out;
415 	}
416 
417 	/*
418 	 * If the redirect isn't from our current router for this destination,
419 	 * it's either old or wrong.
420 	 */
421 	if (!(flags & RTF_DONE) &&		/* XXX JH */
422 	    (rt = rtpurelookup(dst)) != NULL &&
423 	    (!sa_equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) {
424 		error = EINVAL;
425 		goto done;
426 	}
427 
428 	/*
429 	 * If it redirects us to ourselves, we have a routing loop,
430 	 * perhaps as a result of an interface going down recently.
431 	 */
432 	if (ifa_ifwithaddr(gateway)) {
433 		error = EHOSTUNREACH;
434 		goto done;
435 	}
436 
437 	/*
438 	 * Create a new entry if the lookup failed or if we got back
439 	 * a wildcard entry for the default route.  This is necessary
440 	 * for hosts which use routing redirects generated by smart
441 	 * gateways to dynamically build the routing tables.
442 	 */
443 	if (rt == NULL)
444 		goto create;
445 	if (rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2) {
446 		rtfree(rt);
447 		goto create;
448 	}
449 
450 	/* Ignore redirects for directly connected hosts. */
451 	if (!(rt->rt_flags & RTF_GATEWAY)) {
452 		error = EHOSTUNREACH;
453 		goto done;
454 	}
455 
456 	if (!(rt->rt_flags & RTF_HOST) && (flags & RTF_HOST)) {
457 		/*
458 		 * Changing from a network route to a host route.
459 		 * Create a new host route rather than smashing the
460 		 * network route.
461 		 */
462 create:
463 		flags |= RTF_GATEWAY | RTF_DYNAMIC;
464 		bzero(&rtinfo, sizeof(struct rt_addrinfo));
465 		rtinfo.rti_info[RTAX_DST] = dst;
466 		rtinfo.rti_info[RTAX_GATEWAY] = gateway;
467 		rtinfo.rti_info[RTAX_NETMASK] = netmask;
468 		rtinfo.rti_flags = flags;
469 		rtinfo.rti_ifa = ifa;
470 		rt = NULL;	/* copy-in/copy-out parameter */
471 		error = rtrequest1(RTM_ADD, &rtinfo, &rt);
472 		if (rt != NULL)
473 			flags = rt->rt_flags;
474 		stat = &rtstat.rts_dynamic;
475 	} else {
476 		/*
477 		 * Smash the current notion of the gateway to this destination.
478 		 * Should check about netmask!!!
479 		 */
480 		rt->rt_flags |= RTF_MODIFIED;
481 		flags |= RTF_MODIFIED;
482 
483 		/* We only need to report rtmsg on CPU0 */
484 		rt_setgate(rt, rt_key(rt), gateway,
485 			   mycpuid == 0 ? RTL_REPORTMSG : RTL_DONTREPORT);
486 		error = 0;
487 		stat = &rtstat.rts_newgateway;
488 	}
489 
490 done:
491 	if (rt != NULL)
492 		rtfree(rt);
493 out:
494 	if (error != 0)
495 		rtstat.rts_badredirect++;
496 	else if (stat != NULL)
497 		(*stat)++;
498 
499 	return error;
500 }
501 
502 #ifdef SMP
503 
504 struct netmsg_rtredirect {
505 	struct netmsg_base base;
506 	struct sockaddr *dst;
507 	struct sockaddr *gateway;
508 	struct sockaddr *netmask;
509 	int		flags;
510 	struct sockaddr *src;
511 };
512 
513 #endif
514 
515 /*
516  * Force a routing table entry to the specified
517  * destination to go through the given gateway.
518  * Normally called as a result of a routing redirect
519  * message from the network layer.
520  *
521  * N.B.: must be called at splnet
522  */
523 void
524 rtredirect(struct sockaddr *dst, struct sockaddr *gateway,
525 	   struct sockaddr *netmask, int flags, struct sockaddr *src)
526 {
527 	struct rt_addrinfo rtinfo;
528 	int error;
529 #ifdef SMP
530 	struct netmsg_rtredirect msg;
531 
532 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
533 		    0, rtredirect_msghandler);
534 	msg.dst = dst;
535 	msg.gateway = gateway;
536 	msg.netmask = netmask;
537 	msg.flags = flags;
538 	msg.src = src;
539 	error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
540 #else
541 	error = rtredirect_oncpu(dst, gateway, netmask, flags, src);
542 #endif
543 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
544 	rtinfo.rti_info[RTAX_DST] = dst;
545 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
546 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
547 	rtinfo.rti_info[RTAX_AUTHOR] = src;
548 	rt_missmsg(RTM_REDIRECT, &rtinfo, flags, error);
549 }
550 
551 #ifdef SMP
552 
553 static void
554 rtredirect_msghandler(netmsg_t msg)
555 {
556 	struct netmsg_rtredirect *rmsg = (void *)msg;
557 	int nextcpu;
558 
559 	rtredirect_oncpu(rmsg->dst, rmsg->gateway, rmsg->netmask,
560 			 rmsg->flags, rmsg->src);
561 	nextcpu = mycpuid + 1;
562 	if (nextcpu < ncpus)
563 		lwkt_forwardmsg(rtable_portfn(nextcpu), &msg->lmsg);
564 	else
565 		lwkt_replymsg(&msg->lmsg, 0);
566 }
567 
568 #endif
569 
570 /*
571  * Routing table ioctl interface.
572  */
573 int
574 rtioctl(u_long req, caddr_t data, struct ucred *cred)
575 {
576 #ifdef INET
577 	/* Multicast goop, grrr... */
578 	return mrt_ioctl ? mrt_ioctl(req, data) : EOPNOTSUPP;
579 #else
580 	return ENXIO;
581 #endif
582 }
583 
584 struct ifaddr *
585 ifa_ifwithroute(int flags, struct sockaddr *dst, struct sockaddr *gateway)
586 {
587 	struct ifaddr *ifa;
588 
589 	if (!(flags & RTF_GATEWAY)) {
590 		/*
591 		 * If we are adding a route to an interface,
592 		 * and the interface is a point-to-point link,
593 		 * we should search for the destination
594 		 * as our clue to the interface.  Otherwise
595 		 * we can use the local address.
596 		 */
597 		ifa = NULL;
598 		if (flags & RTF_HOST) {
599 			ifa = ifa_ifwithdstaddr(dst);
600 		}
601 		if (ifa == NULL)
602 			ifa = ifa_ifwithaddr(gateway);
603 	} else {
604 		/*
605 		 * If we are adding a route to a remote net
606 		 * or host, the gateway may still be on the
607 		 * other end of a point-to-point link.
608 		 */
609 		ifa = ifa_ifwithdstaddr(gateway);
610 	}
611 	if (ifa == NULL)
612 		ifa = ifa_ifwithnet(gateway);
613 	if (ifa == NULL) {
614 		struct rtentry *rt;
615 
616 		rt = rtpurelookup(gateway);
617 		if (rt == NULL)
618 			return (NULL);
619 		rt->rt_refcnt--;
620 		if ((ifa = rt->rt_ifa) == NULL)
621 			return (NULL);
622 	}
623 	if (ifa->ifa_addr->sa_family != dst->sa_family) {
624 		struct ifaddr *oldifa = ifa;
625 
626 		ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp);
627 		if (ifa == NULL)
628 			ifa = oldifa;
629 	}
630 	return (ifa);
631 }
632 
633 static int rt_fixdelete (struct radix_node *, void *);
634 static int rt_fixchange (struct radix_node *, void *);
635 
636 struct rtfc_arg {
637 	struct rtentry *rt0;
638 	struct radix_node_head *rnh;
639 };
640 
641 /*
642  * Set rtinfo->rti_ifa and rtinfo->rti_ifp.
643  */
644 int
645 rt_getifa(struct rt_addrinfo *rtinfo)
646 {
647 	struct sockaddr *gateway = rtinfo->rti_info[RTAX_GATEWAY];
648 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
649 	struct sockaddr *ifaaddr = rtinfo->rti_info[RTAX_IFA];
650 	int flags = rtinfo->rti_flags;
651 
652 	/*
653 	 * ifp may be specified by sockaddr_dl
654 	 * when protocol address is ambiguous.
655 	 */
656 	if (rtinfo->rti_ifp == NULL) {
657 		struct sockaddr *ifpaddr;
658 
659 		ifpaddr = rtinfo->rti_info[RTAX_IFP];
660 		if (ifpaddr != NULL && ifpaddr->sa_family == AF_LINK) {
661 			struct ifaddr *ifa;
662 
663 			ifa = ifa_ifwithnet(ifpaddr);
664 			if (ifa != NULL)
665 				rtinfo->rti_ifp = ifa->ifa_ifp;
666 		}
667 	}
668 
669 	if (rtinfo->rti_ifa == NULL && ifaaddr != NULL)
670 		rtinfo->rti_ifa = ifa_ifwithaddr(ifaaddr);
671 	if (rtinfo->rti_ifa == NULL) {
672 		struct sockaddr *sa;
673 
674 		sa = ifaaddr != NULL ? ifaaddr :
675 		    (gateway != NULL ? gateway : dst);
676 		if (sa != NULL && rtinfo->rti_ifp != NULL)
677 			rtinfo->rti_ifa = ifaof_ifpforaddr(sa, rtinfo->rti_ifp);
678 		else if (dst != NULL && gateway != NULL)
679 			rtinfo->rti_ifa = ifa_ifwithroute(flags, dst, gateway);
680 		else if (sa != NULL)
681 			rtinfo->rti_ifa = ifa_ifwithroute(flags, sa, sa);
682 	}
683 	if (rtinfo->rti_ifa == NULL)
684 		return (ENETUNREACH);
685 
686 	if (rtinfo->rti_ifp == NULL)
687 		rtinfo->rti_ifp = rtinfo->rti_ifa->ifa_ifp;
688 	return (0);
689 }
690 
691 /*
692  * Do the appropriate manipulation of the routing tree (add, delete,
693  * resolve) given all the bits of info needed.
694  */
695 int
696 rtrequest(
697 	int req,
698 	struct sockaddr *dst,
699 	struct sockaddr *gateway,
700 	struct sockaddr *netmask,
701 	int flags,
702 	struct rtentry **ret_nrt)
703 {
704 	struct rt_addrinfo rtinfo;
705 
706 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
707 	rtinfo.rti_info[RTAX_DST] = dst;
708 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
709 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
710 	rtinfo.rti_flags = flags;
711 	return rtrequest1(req, &rtinfo, ret_nrt);
712 }
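/*
 * Illustrative sketch (the sockaddrs "dst" and "gw" are hypothetical):
 * adding a static host route to the local cpu's table would look roughly
 * like this; most callers use rtrequest1_global()/rtrequest_global()
 * instead so that every cpu's table is updated.
 *
 *	struct rtentry *rt = NULL;
 *	int error;
 *
 *	error = rtrequest(RTM_ADD, dst, gw, NULL,
 *			  RTF_HOST | RTF_GATEWAY | RTF_STATIC, &rt);
 *	if (error == 0 && rt != NULL)
 *		--rt->rt_refcnt;	(drop the extra ref returned in *ret_nrt)
 */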
713 
714 int
715 rtrequest_global(
716 	int req,
717 	struct sockaddr *dst,
718 	struct sockaddr *gateway,
719 	struct sockaddr *netmask,
720 	int flags)
721 {
722 	struct rt_addrinfo rtinfo;
723 
724 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
725 	rtinfo.rti_info[RTAX_DST] = dst;
726 	rtinfo.rti_info[RTAX_GATEWAY] = gateway;
727 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
728 	rtinfo.rti_flags = flags;
729 	return rtrequest1_global(req, &rtinfo, NULL, NULL);
730 }
731 
732 #ifdef SMP
733 
734 struct netmsg_rtq {
735 	struct netmsg_base	base;
736 	int			req;
737 	struct rt_addrinfo	*rtinfo;
738 	rtrequest1_callback_func_t callback;
739 	void			*arg;
740 };
741 
742 #endif
743 
744 int
745 rtrequest1_global(int req, struct rt_addrinfo *rtinfo,
746 		  rtrequest1_callback_func_t callback, void *arg)
747 {
748 	int error;
749 #ifdef SMP
750 	struct netmsg_rtq msg;
751 
752 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
753 		    0, rtrequest1_msghandler);
754 	msg.base.lmsg.ms_error = -1;
755 	msg.req = req;
756 	msg.rtinfo = rtinfo;
757 	msg.callback = callback;
758 	msg.arg = arg;
759 	error = lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
760 #else
761 	struct rtentry *rt = NULL;
762 
763 	error = rtrequest1(req, rtinfo, &rt);
764 	if (rt)
765 		--rt->rt_refcnt;
766 	if (callback)
767 		callback(req, error, rtinfo, rt, arg);
768 #endif
769 	return (error);
770 }
771 
772 /*
773  * Handle a route table request on the current cpu.  Since the route tables
774  * are supposed to be identical on each cpu, an error occurring later in the
775  * message chain is considered system-fatal.
776  */
777 #ifdef SMP
778 
779 static void
780 rtrequest1_msghandler(netmsg_t msg)
781 {
782 	struct netmsg_rtq *rmsg = (void *)msg;
783 	struct rt_addrinfo rtinfo;
784 	struct rtentry *rt = NULL;
785 	int nextcpu;
786 	int error;
787 
788 	/*
789 	 * Copy the rtinfo.  We need to make sure that the original
790 	 * rtinfo, which is setup by the caller, in the netmsg will
791 	 * _not_ be changed; else the next CPU on the netmsg forwarding
792 	 * path will see a different rtinfo than what this CPU has seen.
793 	 */
794 	rtinfo = *rmsg->rtinfo;
795 
796 	error = rtrequest1(rmsg->req, &rtinfo, &rt);
797 	if (rt)
798 		--rt->rt_refcnt;
799 	if (rmsg->callback)
800 		rmsg->callback(rmsg->req, error, &rtinfo, rt, rmsg->arg);
801 
802 	/*
803 	 * RTM_DELETE's are propagated even if an error occurs, since a
804 	 * cloned route might be undergoing deletion and cloned routes
805 	 * are not necessarily replicated.  An overall error is returned
806 	 * only if no cpus have the route in question.
807 	 */
808 	if (rmsg->base.lmsg.ms_error < 0 || error == 0)
809 		rmsg->base.lmsg.ms_error = error;
810 
811 	nextcpu = mycpuid + 1;
812 	if (error && rmsg->req != RTM_DELETE) {
813 		if (mycpuid != 0) {
814 			panic("rtrequest1_msghandler: rtrequest table "
815 			      "error was cpu%d, err %d\n", mycpuid, error);
816 		}
817 		lwkt_replymsg(&rmsg->base.lmsg, error);
818 	} else if (nextcpu < ncpus) {
819 		lwkt_forwardmsg(rtable_portfn(nextcpu), &rmsg->base.lmsg);
820 	} else {
821 		lwkt_replymsg(&rmsg->base.lmsg, rmsg->base.lmsg.ms_error);
822 	}
823 }
824 
825 #endif
826 
827 int
828 rtrequest1(int req, struct rt_addrinfo *rtinfo, struct rtentry **ret_nrt)
829 {
830 	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
831 	struct rtentry *rt;
832 	struct radix_node *rn;
833 	struct radix_node_head *rnh;
834 	struct ifaddr *ifa;
835 	struct sockaddr *ndst;
836 	boolean_t reportmsg;
837 	int error = 0;
838 
839 #define gotoerr(x) { error = x; goto bad; }
840 
841 #ifdef ROUTE_DEBUG
842 	if (route_debug)
843 		rt_addrinfo_print(req, rtinfo);
844 #endif
845 
846 	crit_enter();
847 	/*
848 	 * Find the correct routing tree to use for this Address Family
849 	 */
850 	if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL)
851 		gotoerr(EAFNOSUPPORT);
852 
853 	/*
854 	 * If we are adding a host route then we don't want to put
855 	 * a netmask in the tree, nor do we want to clone it.
856 	 */
857 	if (rtinfo->rti_flags & RTF_HOST) {
858 		rtinfo->rti_info[RTAX_NETMASK] = NULL;
859 		rtinfo->rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
860 	}
861 
862 	switch (req) {
863 	case RTM_DELETE:
864 		/* Remove the item from the tree. */
865 		rn = rnh->rnh_deladdr((char *)rtinfo->rti_info[RTAX_DST],
866 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
867 				      rnh);
868 		if (rn == NULL)
869 			gotoerr(ESRCH);
870 		KASSERT(!(rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)),
871 			("rnh_deladdr returned flags 0x%x", rn->rn_flags));
872 		rt = (struct rtentry *)rn;
873 
874 		/* ref to prevent a deletion race */
875 		++rt->rt_refcnt;
876 
877 		/* Free any routes cloned from this one. */
878 		if ((rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) &&
879 		    rt_mask(rt) != NULL) {
880 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
881 					       (char *)rt_mask(rt),
882 					       rt_fixdelete, rt);
883 		}
884 
885 		if (rt->rt_gwroute != NULL) {
886 			RTFREE(rt->rt_gwroute);
887 			rt->rt_gwroute = NULL;
888 		}
889 
890 		/*
891 		 * NB: RTF_UP must be set during the search above,
892 		 * because we might delete the last ref, causing
893 		 * rt to get freed prematurely.
894 		 */
895 		rt->rt_flags &= ~RTF_UP;
896 
897 #ifdef ROUTE_DEBUG
898 		if (route_debug)
899 			rt_print(rtinfo, rt);
900 #endif
901 
902 		/* Give the protocol a chance to keep things in sync. */
903 		if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest)
904 			ifa->ifa_rtrequest(RTM_DELETE, rt, rtinfo);
905 
906 		/*
907 		 * If the caller wants it, then it can have it,
908 		 * but it's up to it to free the rtentry as we won't be
909 		 * doing it.
910 		 */
911 		KASSERT(rt->rt_refcnt >= 0,
912 			("rtrequest1(DELETE): refcnt %ld", rt->rt_refcnt));
913 		if (ret_nrt != NULL) {
914 			/* leave ref intact for return */
915 			*ret_nrt = rt;
916 		} else {
917 			/* deref / attempt to destroy */
918 			rtfree(rt);
919 		}
920 		break;
921 
922 	case RTM_RESOLVE:
923 		if (ret_nrt == NULL || (rt = *ret_nrt) == NULL)
924 			gotoerr(EINVAL);
925 		ifa = rt->rt_ifa;
926 		rtinfo->rti_flags =
927 		    rt->rt_flags & ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC);
928 		rtinfo->rti_flags |= RTF_WASCLONED;
929 		rtinfo->rti_info[RTAX_GATEWAY] = rt->rt_gateway;
930 		if ((rtinfo->rti_info[RTAX_NETMASK] = rt->rt_genmask) == NULL)
931 			rtinfo->rti_flags |= RTF_HOST;
932 		rtinfo->rti_info[RTAX_MPLS1] = rt->rt_shim[0];
933 		rtinfo->rti_info[RTAX_MPLS2] = rt->rt_shim[1];
934 		rtinfo->rti_info[RTAX_MPLS3] = rt->rt_shim[2];
935 		goto makeroute;
936 
937 	case RTM_ADD:
938 		KASSERT(!(rtinfo->rti_flags & RTF_GATEWAY) ||
939 			rtinfo->rti_info[RTAX_GATEWAY] != NULL,
940 		    ("rtrequest: GATEWAY but no gateway"));
941 
942 		if (rtinfo->rti_ifa == NULL && (error = rt_getifa(rtinfo)))
943 			gotoerr(error);
944 		ifa = rtinfo->rti_ifa;
945 makeroute:
946 		R_Malloc(rt, struct rtentry *, sizeof(struct rtentry));
947 		if (rt == NULL) {
948 			if (req == RTM_ADD) {
949 				kprintf("rtrequest1: alloc rtentry failed on "
950 				    "cpu%d\n", mycpuid);
951 			}
952 			gotoerr(ENOBUFS);
953 		}
954 		bzero(rt, sizeof(struct rtentry));
955 		rt->rt_flags = RTF_UP | rtinfo->rti_flags;
956 		rt->rt_cpuid = mycpuid;
957 
958 		if (mycpuid != 0 && req == RTM_ADD) {
959 			/* For RTM_ADD, we have already sent rtmsg on CPU0. */
960 			reportmsg = RTL_DONTREPORT;
961 		} else {
962 			/*
963 			 * For RTM_ADD, we only send rtmsg on CPU0.
964 			 * For RTM_RESOLVE, we always send rtmsg. XXX
965 			 */
966 			reportmsg = RTL_REPORTMSG;
967 		}
968 		error = rt_setgate(rt, dst, rtinfo->rti_info[RTAX_GATEWAY],
969 				   reportmsg);
970 		if (error != 0) {
971 			Free(rt);
972 			gotoerr(error);
973 		}
974 
975 		ndst = rt_key(rt);
976 		if (rtinfo->rti_info[RTAX_NETMASK] != NULL)
977 			rt_maskedcopy(dst, ndst,
978 				      rtinfo->rti_info[RTAX_NETMASK]);
979 		else
980 			bcopy(dst, ndst, dst->sa_len);
981 
982 		if (rtinfo->rti_info[RTAX_MPLS1] != NULL)
983 			rt_setshims(rt, rtinfo->rti_info);
984 
985 		/*
986 		 * Note that we now have a reference to the ifa.
987 		 * This moved from below so that rnh->rnh_addaddr() can
988 		 * examine the ifa and ifa->ifa_ifp if it so desires.
989 		 */
990 		IFAREF(ifa);
991 		rt->rt_ifa = ifa;
992 		rt->rt_ifp = ifa->ifa_ifp;
993 		/* XXX mtu manipulation will be done in rnh_addaddr -- itojun */
994 
995 		rn = rnh->rnh_addaddr((char *)ndst,
996 				      (char *)rtinfo->rti_info[RTAX_NETMASK],
997 				      rnh, rt->rt_nodes);
998 		if (rn == NULL) {
999 			struct rtentry *oldrt;
1000 
1001 			/*
1002 			 * We already have one of these in the tree.
1003 			 * We do a special hack: if the old route was
1004 			 * cloned, then we blow it away and try
1005 			 * re-inserting the new one.
1006 			 */
1007 			oldrt = rtpurelookup(ndst);
1008 			if (oldrt != NULL) {
1009 				--oldrt->rt_refcnt;
1010 				if (oldrt->rt_flags & RTF_WASCLONED) {
1011 					rtrequest(RTM_DELETE, rt_key(oldrt),
1012 						  oldrt->rt_gateway,
1013 						  rt_mask(oldrt),
1014 						  oldrt->rt_flags, NULL);
1015 					rn = rnh->rnh_addaddr((char *)ndst,
1016 					    (char *)
1017 						rtinfo->rti_info[RTAX_NETMASK],
1018 					    rnh, rt->rt_nodes);
1019 				}
1020 			}
1021 		}
1022 
1023 		/*
1024 		 * If it still failed to go into the tree,
1025 		 * then un-make it (this should be a function).
1026 		 */
1027 		if (rn == NULL) {
1028 			if (rt->rt_gwroute != NULL)
1029 				rtfree(rt->rt_gwroute);
1030 			IFAFREE(ifa);
1031 			Free(rt_key(rt));
1032 			Free(rt);
1033 			gotoerr(EEXIST);
1034 		}
1035 
1036 		/*
1037 		 * If we got here from RESOLVE, then we are cloning
1038 		 * so clone the rest, and note that we
1039 		 * are a clone (and increment the parent's references)
1040 		 */
1041 		if (req == RTM_RESOLVE) {
1042 			rt->rt_rmx = (*ret_nrt)->rt_rmx;    /* copy metrics */
1043 			rt->rt_rmx.rmx_pksent = 0;  /* reset packet counter */
1044 			if ((*ret_nrt)->rt_flags &
1045 				       (RTF_CLONING | RTF_PRCLONING)) {
1046 				rt->rt_parent = *ret_nrt;
1047 				(*ret_nrt)->rt_refcnt++;
1048 			}
1049 		}
1050 
1051 		/*
1052 		 * If this protocol has something to add to this entry,
1053 		 * allow it to do so as well.
1054 		 */
1055 		if (ifa->ifa_rtrequest != NULL)
1056 			ifa->ifa_rtrequest(req, rt, rtinfo);
1057 
1058 		/*
1059 		 * We repeat the same procedure from rt_setgate() here because
1060 		 * it has no effect when called from there: at that point the
1061 		 * node hasn't been added to the tree yet.
1062 		 */
1063 		if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) &&
1064 		    rt_mask(rt) != NULL) {
1065 			struct rtfc_arg arg = { rt, rnh };
1066 
1067 			rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1068 					       (char *)rt_mask(rt),
1069 					       rt_fixchange, &arg);
1070 		}
1071 
1072 #ifdef ROUTE_DEBUG
1073 		if (route_debug)
1074 			rt_print(rtinfo, rt);
1075 #endif
1076 		/*
1077 		 * Return the resulting rtentry,
1078 		 * increasing the number of references by one.
1079 		 */
1080 		if (ret_nrt != NULL) {
1081 			rt->rt_refcnt++;
1082 			*ret_nrt = rt;
1083 		}
1084 		break;
1085 	default:
1086 		error = EOPNOTSUPP;
1087 	}
1088 bad:
1089 #ifdef ROUTE_DEBUG
1090 	if (route_debug) {
1091 		if (error)
1092 			kprintf("rti %p failed error %d\n", rtinfo, error);
1093 		else
1094 			kprintf("rti %p succeeded\n", rtinfo);
1095 	}
1096 #endif
1097 	crit_exit();
1098 	return (error);
1099 }
1100 
1101 /*
1102  * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family''
1103  * (i.e., the routes related to it by the operation of cloning).  This
1104  * routine is iterated over all potential former-child-routes by way of
1105  * rnh->rnh_walktree_from() above, and those that actually are children of
1106  * the late parent (passed in as VP here) are themselves deleted.
1107  */
1108 static int
1109 rt_fixdelete(struct radix_node *rn, void *vp)
1110 {
1111 	struct rtentry *rt = (struct rtentry *)rn;
1112 	struct rtentry *rt0 = vp;
1113 
1114 	if (rt->rt_parent == rt0 &&
1115 	    !(rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1116 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1117 				 rt->rt_flags, NULL);
1118 	}
1119 	return 0;
1120 }
1121 
1122 /*
1123  * This routine is called from rt_setgate() to do the analogous thing for
1124  * adds and changes.  There is the added complication in this case of a
1125  * middle insert; i.e., insertion of a new network route between an older
1126  * network route and (cloned) host routes.  For this reason, a simple check
1127  * of rt->rt_parent is insufficient; each candidate route must be tested
1128  * against the (mask, value) of the new route (passed as before in vp)
1129  * to see if the new route matches it.
1130  *
1131  * XXX - it may be possible to do fixdelete() for changes and reserve this
1132  * routine just for adds.  I'm not sure why I thought it was necessary to do
1133  * changes this way.
1134  */
1135 #ifdef DEBUG
1136 static int rtfcdebug = 0;
1137 #endif
1138 
1139 static int
1140 rt_fixchange(struct radix_node *rn, void *vp)
1141 {
1142 	struct rtentry *rt = (struct rtentry *)rn;
1143 	struct rtfc_arg *ap = vp;
1144 	struct rtentry *rt0 = ap->rt0;
1145 	struct radix_node_head *rnh = ap->rnh;
1146 	u_char *xk1, *xm1, *xk2, *xmp;
1147 	int i, len, mlen;
1148 
1149 #ifdef DEBUG
1150 	if (rtfcdebug)
1151 		kprintf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0);
1152 #endif
1153 
1154 	if (rt->rt_parent == NULL ||
1155 	    (rt->rt_flags & (RTF_PINNED | RTF_CLONING | RTF_PRCLONING))) {
1156 #ifdef DEBUG
1157 		if (rtfcdebug) kprintf("no parent, pinned or cloning\n");
1158 #endif
1159 		return 0;
1160 	}
1161 
1162 	if (rt->rt_parent == rt0) {
1163 #ifdef DEBUG
1164 		if (rtfcdebug) kprintf("parent match\n");
1165 #endif
1166 		return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1167 				 rt->rt_flags, NULL);
1168 	}
1169 
1170 	/*
1171 	 * There probably is a function somewhere which does this...
1172 	 * if not, there should be.
1173 	 */
1174 	len = imin(rt_key(rt0)->sa_len, rt_key(rt)->sa_len);
1175 
1176 	xk1 = (u_char *)rt_key(rt0);
1177 	xm1 = (u_char *)rt_mask(rt0);
1178 	xk2 = (u_char *)rt_key(rt);
1179 
1180 	/* avoid applying a less specific route */
1181 	xmp = (u_char *)rt_mask(rt->rt_parent);
1182 	mlen = rt_key(rt->rt_parent)->sa_len;
1183 	if (mlen > rt_key(rt0)->sa_len) {
1184 #ifdef DEBUG
1185 		if (rtfcdebug)
1186 			kprintf("rt_fixchange: inserting a less "
1187 			       "specific route\n");
1188 #endif
1189 		return 0;
1190 	}
1191 	for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
1192 		if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
1193 #ifdef DEBUG
1194 			if (rtfcdebug)
1195 				kprintf("rt_fixchange: inserting a less "
1196 				       "specific route\n");
1197 #endif
1198 			return 0;
1199 		}
1200 	}
1201 
1202 	for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
1203 		if ((xk2[i] & xm1[i]) != xk1[i]) {
1204 #ifdef DEBUG
1205 			if (rtfcdebug) kprintf("no match\n");
1206 #endif
1207 			return 0;
1208 		}
1209 	}
1210 
1211 	/*
1212 	 * OK, this node is a clone, and matches the node currently being
1213 	 * changed/added under the node's mask.  So, get rid of it.
1214 	 */
1215 #ifdef DEBUG
1216 	if (rtfcdebug) kprintf("deleting\n");
1217 #endif
1218 	return rtrequest(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
1219 			 rt->rt_flags, NULL);
1220 }
1221 
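/*
 * ROUNDUP() rounds a sockaddr length up to the next multiple of
 * sizeof(long), mapping 0 to sizeof(long).  For example, assuming
 * sizeof(long) == 8: ROUNDUP(16) == 16, ROUNDUP(18) == 24 and
 * ROUNDUP(0) == 8.
 */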
1222 #define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
1223 
1224 int
1225 rt_setgate(struct rtentry *rt0, struct sockaddr *dst, struct sockaddr *gate,
1226 	   boolean_t generate_report)
1227 {
1228 	char *space, *oldspace;
1229 	int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len);
1230 	struct rtentry *rt = rt0;
1231 	struct radix_node_head *rnh = rt_tables[mycpuid][dst->sa_family];
1232 
1233 	/*
1234 	 * A host route with the destination equal to the gateway
1235 	 * will interfere with keeping LLINFO in the routing
1236 	 * table, so disallow it.
1237 	 */
1238 	if (((rt0->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) ==
1239 			      (RTF_HOST | RTF_GATEWAY)) &&
1240 	    dst->sa_len == gate->sa_len &&
1241 	    sa_equal(dst, gate)) {
1242 		/*
1243 		 * The route might already exist if this is an RTM_CHANGE
1244 		 * or a routing redirect, so try to delete it.
1245 		 */
1246 		if (rt_key(rt0) != NULL)
1247 			rtrequest(RTM_DELETE, rt_key(rt0), rt0->rt_gateway,
1248 				  rt_mask(rt0), rt0->rt_flags, NULL);
1249 		return EADDRNOTAVAIL;
1250 	}
1251 
1252 	/*
1253 	 * Both dst and gateway are stored in the same malloc'ed chunk
1254 	 * (If I ever get my hands on....)
1255 	 * If we need to malloc a new chunk, then keep the old one around
1256 	 * until we don't need it any more.
1257 	 */
1258 	if (rt->rt_gateway == NULL || glen > ROUNDUP(rt->rt_gateway->sa_len)) {
1259 		oldspace = (char *)rt_key(rt);
1260 		R_Malloc(space, char *, dlen + glen);
1261 		if (space == NULL)
1262 			return ENOBUFS;
1263 		rt->rt_nodes->rn_key = space;
1264 	} else {
1265 		space = (char *)rt_key(rt);	/* Just use the old space. */
1266 		oldspace = NULL;
1267 	}
1268 
1269 	/* Set the gateway value. */
1270 	rt->rt_gateway = (struct sockaddr *)(space + dlen);
1271 	bcopy(gate, rt->rt_gateway, glen);
1272 
1273 	if (oldspace != NULL) {
1274 		/*
1275 		 * If we allocated a new chunk, preserve the original dst.
1276 		 * This way, rt_setgate() really just sets the gate
1277 		 * and leaves the dst field alone.
1278 		 */
1279 		bcopy(dst, space, dlen);
1280 		Free(oldspace);
1281 	}
1282 
1283 	/*
1284 	 * If there is already a gwroute, it's now almost definitely wrong
1285 	 * so drop it.
1286 	 */
1287 	if (rt->rt_gwroute != NULL) {
1288 		RTFREE(rt->rt_gwroute);
1289 		rt->rt_gwroute = NULL;
1290 	}
1291 	if (rt->rt_flags & RTF_GATEWAY) {
1292 		/*
1293 		 * Cloning loop avoidance: In the presence of
1294 		 * protocol-cloning and bad configuration, it is
1295 		 * possible to get stuck in bottomless mutual recursion
1296 		 * (rtrequest rt_setgate rtlookup).  We avoid this
1297 		 * by not allowing protocol-cloning to operate for
1298 		 * gateways (which is probably the correct choice
1299 		 * anyway), and avoid the resulting reference loops
1300 		 * by disallowing any route to run through itself as
1301 		 * a gateway.  This is obviously mandatory when we
1302 		 * get rt->rt_output().
1303 		 *
1304 		 * This breaks TTCP for hosts outside the gateway!  XXX JH
1305 		 */
1306 		rt->rt_gwroute = _rtlookup(gate, generate_report,
1307 					   RTF_PRCLONING);
1308 		if (rt->rt_gwroute == rt) {
1309 			rt->rt_gwroute = NULL;
1310 			--rt->rt_refcnt;
1311 			return EDQUOT; /* failure */
1312 		}
1313 	}
1314 
1315 	/*
1316 	 * This isn't going to do anything useful for host routes, so
1317 	 * don't bother.  Also make sure we have a reasonable mask
1318 	 * (we don't yet have one during adds).
1319 	 */
1320 	if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) {
1321 		struct rtfc_arg arg = { rt, rnh };
1322 
1323 		rnh->rnh_walktree_from(rnh, (char *)rt_key(rt),
1324 				       (char *)rt_mask(rt),
1325 				       rt_fixchange, &arg);
1326 	}
1327 
1328 	return 0;
1329 }
1330 
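/*
 * Copy 'src' into 'dst', ANDing each address byte with the corresponding
 * byte of 'netmask' and zero-filling whatever remains of 'dst'.  The
 * sa_len and sa_family bytes are copied unmasked.  For example
 * (illustrative addresses), masking 192.168.5.77 with 255.255.255.0
 * leaves 192.168.5.0 in the destination's address bytes.
 */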
1331 static void
1332 rt_maskedcopy(
1333 	struct sockaddr *src,
1334 	struct sockaddr *dst,
1335 	struct sockaddr *netmask)
1336 {
1337 	u_char *cp1 = (u_char *)src;
1338 	u_char *cp2 = (u_char *)dst;
1339 	u_char *cp3 = (u_char *)netmask;
1340 	u_char *cplim = cp2 + *cp3;
1341 	u_char *cplim2 = cp2 + *cp1;
1342 
1343 	*cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */
1344 	cp3 += 2;
1345 	if (cplim > cplim2)
1346 		cplim = cplim2;
1347 	while (cp2 < cplim)
1348 		*cp2++ = *cp1++ & *cp3++;
1349 	if (cp2 < cplim2)
1350 		bzero(cp2, cplim2 - cp2);
1351 }
1352 
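/*
 * Resolve 'dst'/'rt0' to the rtentry actually used for link-level output:
 * re-lookup the destination if rt0 is no longer RTF_UP, follow rt_gwroute
 * for RTF_GATEWAY routes (revalidating it if necessary) and fail for
 * unexpired RTF_REJECT routes.  The resulting route is returned in *drt.
 */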
1353 int
1354 rt_llroute(struct sockaddr *dst, struct rtentry *rt0, struct rtentry **drt)
1355 {
1356 	struct rtentry *up_rt, *rt;
1357 
1358 	if (!(rt0->rt_flags & RTF_UP)) {
1359 		up_rt = rtlookup(dst);
1360 		if (up_rt == NULL)
1361 			return (EHOSTUNREACH);
1362 		up_rt->rt_refcnt--;
1363 	} else
1364 		up_rt = rt0;
1365 	if (up_rt->rt_flags & RTF_GATEWAY) {
1366 		if (up_rt->rt_gwroute == NULL) {
1367 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1368 			if (up_rt->rt_gwroute == NULL)
1369 				return (EHOSTUNREACH);
1370 		} else if (!(up_rt->rt_gwroute->rt_flags & RTF_UP)) {
1371 			rtfree(up_rt->rt_gwroute);
1372 			up_rt->rt_gwroute = rtlookup(up_rt->rt_gateway);
1373 			if (up_rt->rt_gwroute == NULL)
1374 				return (EHOSTUNREACH);
1375 		}
1376 		rt = up_rt->rt_gwroute;
1377 	} else
1378 		rt = up_rt;
1379 	if (rt->rt_flags & RTF_REJECT &&
1380 	    (rt->rt_rmx.rmx_expire == 0 ||		/* rt doesn't expire */
1381 	     time_second < rt->rt_rmx.rmx_expire))	/* rt not expired */
1382 		return (rt->rt_flags & RTF_HOST ?  EHOSTDOWN : EHOSTUNREACH);
1383 	*drt = rt;
1384 	return 0;
1385 }
1386 
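/*
 * Copy up to three MPLS shim sockaddrs (RTAX_MPLS1..RTAX_MPLS3) from the
 * request into freshly allocated storage hung off rt->rt_shim[].
 */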
1387 static int
1388 rt_setshims(struct rtentry *rt, struct sockaddr **rt_shim)
{
1389 	int i;
1390 
1391 	for (i = 0; i < 3; i++) {
1392 		struct sockaddr *shim = rt_shim[RTAX_MPLS1 + i];
1393 		int shimlen;
1394 
1395 		if (shim == NULL)
1396 			break;
1397 
1398 		shimlen = ROUNDUP(shim->sa_len);
1399 		R_Malloc(rt->rt_shim[i], struct sockaddr *, shimlen);
1400 		bcopy(shim, rt->rt_shim[i], shimlen);
1401 	}
1402 
1403 	return 0;
1404 }
1405 
1406 #ifdef ROUTE_DEBUG
1407 
1408 /*
1409  * Print out a route table entry
1410  */
1411 void
1412 rt_print(struct rt_addrinfo *rtinfo, struct rtentry *rn)
1413 {
1414 	kprintf("rti %p cpu %d route %p flags %08lx: ",
1415 		rtinfo, mycpuid, rn, rn->rt_flags);
1416 	sockaddr_print(rt_key(rn));
1417 	kprintf(" mask ");
1418 	sockaddr_print(rt_mask(rn));
1419 	kprintf(" gw ");
1420 	sockaddr_print(rn->rt_gateway);
1421 	kprintf(" ifc \"%s\"", rn->rt_ifp ? rn->rt_ifp->if_dname : "?");
1422 	kprintf(" ifa %p\n", rn->rt_ifa);
1423 }
1424 
1425 void
1426 rt_addrinfo_print(int cmd, struct rt_addrinfo *rti)
1427 {
1428 	int didit = 0;
1429 	int i;
1430 
1431 #ifdef ROUTE_DEBUG
1432 	if (cmd == RTM_DELETE && route_debug > 1)
1433 		print_backtrace(-1);
1434 #endif
1435 
1436 	switch(cmd) {
1437 	case RTM_ADD:
1438 		kprintf("ADD ");
1439 		break;
1440 	case RTM_RESOLVE:
1441 		kprintf("RES ");
1442 		break;
1443 	case RTM_DELETE:
1444 		kprintf("DEL ");
1445 		break;
1446 	default:
1447 		kprintf("C%02d ", cmd);
1448 		break;
1449 	}
1450 	kprintf("rti %p cpu %d ", rti, mycpuid);
1451 	for (i = 0; i < rti->rti_addrs; ++i) {
1452 		if (rti->rti_info[i] == NULL)
1453 			continue;
1454 		if (didit)
1455 			kprintf(" ,");
1456 		switch(i) {
1457 		case RTAX_DST:
1458 			kprintf("(DST ");
1459 			break;
1460 		case RTAX_GATEWAY:
1461 			kprintf("(GWY ");
1462 			break;
1463 		case RTAX_NETMASK:
1464 			kprintf("(MSK ");
1465 			break;
1466 		case RTAX_GENMASK:
1467 			kprintf("(GEN ");
1468 			break;
1469 		case RTAX_IFP:
1470 			kprintf("(IFP ");
1471 			break;
1472 		case RTAX_IFA:
1473 			kprintf("(IFA ");
1474 			break;
1475 		case RTAX_AUTHOR:
1476 			kprintf("(AUT ");
1477 			break;
1478 		case RTAX_BRD:
1479 			kprintf("(BRD ");
1480 			break;
1481 		default:
1482 			kprintf("(?%02d ", i);
1483 			break;
1484 		}
1485 		sockaddr_print(rti->rti_info[i]);
1486 		kprintf(")");
1487 		didit = 1;
1488 	}
1489 	kprintf("\n");
1490 }
1491 
1492 void
1493 sockaddr_print(struct sockaddr *sa)
1494 {
1495 	struct sockaddr_in *sa4;
1496 	struct sockaddr_in6 *sa6;
1497 	int len;
1498 	int i;
1499 
1500 	if (sa == NULL) {
1501 		kprintf("NULL");
1502 		return;
1503 	}
1504 
1505 	len = sa->sa_len - offsetof(struct sockaddr, sa_data[0]);
1506 
1507 	switch(sa->sa_family) {
1508 	case AF_INET:
1509 	case AF_INET6:
1510 	default:
1511 		switch(sa->sa_family) {
1512 		case AF_INET:
1513 			sa4 = (struct sockaddr_in *)sa;
1514 			kprintf("INET %d %d.%d.%d.%d",
1515 				ntohs(sa4->sin_port),
1516 				(ntohl(sa4->sin_addr.s_addr) >> 24) & 255,
1517 				(ntohl(sa4->sin_addr.s_addr) >> 16) & 255,
1518 				(ntohl(sa4->sin_addr.s_addr) >> 8) & 255,
1519 				(ntohl(sa4->sin_addr.s_addr) >> 0) & 255
1520 			);
1521 			break;
1522 		case AF_INET6:
1523 			sa6 = (struct sockaddr_in6 *)sa;
1524 			kprintf("INET6 %d %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
1525 				ntohs(sa6->sin6_port),
1526 				sa6->sin6_addr.s6_addr16[0],
1527 				sa6->sin6_addr.s6_addr16[1],
1528 				sa6->sin6_addr.s6_addr16[2],
1529 				sa6->sin6_addr.s6_addr16[3],
1530 				sa6->sin6_addr.s6_addr16[4],
1531 				sa6->sin6_addr.s6_addr16[5],
1532 				sa6->sin6_addr.s6_addr16[6],
1533 				sa6->sin6_addr.s6_addr16[7]
1534 			);
1535 			break;
1536 		default:
1537 			kprintf("AF%d ", sa->sa_family);
1538 			while (len > 0 && sa->sa_data[len-1] == 0)
1539 				--len;
1540 
1541 			for (i = 0; i < len; ++i) {
1542 				if (i)
1543 					kprintf(".");
1544 				kprintf("%d", (unsigned char)sa->sa_data[i]);
1545 			}
1546 			break;
1547 		}
1548 	}
1549 }
1550 
1551 #endif
1552 
1553 /*
1554  * Set up a routing table entry, normally for an interface.
1555  */
1556 int
1557 rtinit(struct ifaddr *ifa, int cmd, int flags)
1558 {
1559 	struct sockaddr *dst, *deldst, *netmask;
1560 	struct mbuf *m = NULL;
1561 	struct radix_node_head *rnh;
1562 	struct radix_node *rn;
1563 	struct rt_addrinfo rtinfo;
1564 	int error;
1565 
1566 	if (flags & RTF_HOST) {
1567 		dst = ifa->ifa_dstaddr;
1568 		netmask = NULL;
1569 	} else {
1570 		dst = ifa->ifa_addr;
1571 		netmask = ifa->ifa_netmask;
1572 	}
1573 	/*
1574 	 * If it's a delete, check that if it exists, it's on the correct
1575 	 * interface or we might scrub a route to another ifa which would
1576 	 * be confusing at best and possibly worse.
1577 	 */
1578 	if (cmd == RTM_DELETE) {
1579 		/*
1580 		 * It's a delete, so it should already exist..
1581 		 * If it's a net, mask off the host bits
1582 		 * (Assuming we have a mask)
1583 		 */
1584 		if (netmask != NULL) {
1585 			m = m_get(MB_DONTWAIT, MT_SONAME);
1586 			if (m == NULL)
1587 				return (ENOBUFS);
1588 			mbuftrackid(m, 34);
1589 			deldst = mtod(m, struct sockaddr *);
1590 			rt_maskedcopy(dst, deldst, netmask);
1591 			dst = deldst;
1592 		}
1593 		/*
1594 		 * Look up an rtentry that is in the routing tree and
1595 		 * contains the correct info.
1596 		 */
1597 		if ((rnh = rt_tables[mycpuid][dst->sa_family]) == NULL ||
1598 		    (rn = rnh->rnh_lookup((char *)dst,
1599 					  (char *)netmask, rnh)) == NULL ||
1600 		    ((struct rtentry *)rn)->rt_ifa != ifa ||
1601 		    !sa_equal((struct sockaddr *)rn->rn_key, dst)) {
1602 			if (m != NULL)
1603 				m_free(m);
1604 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1605 		}
1606 		/* XXX */
1607 #if 0
1608 		else {
1609 			/*
1610 			 * One would think that as we are deleting, and we know
1611 			 * it doesn't exist, we could just return at this point
1612 			 * with an "ELSE" clause, but apparently not..
1613 			 */
1614 			return (flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
1615 		}
1616 #endif
1617 	}
1618 	/*
1619 	 * Do the actual request
1620 	 */
1621 	bzero(&rtinfo, sizeof(struct rt_addrinfo));
1622 	rtinfo.rti_info[RTAX_DST] = dst;
1623 	rtinfo.rti_info[RTAX_GATEWAY] = ifa->ifa_addr;
1624 	rtinfo.rti_info[RTAX_NETMASK] = netmask;
1625 	rtinfo.rti_flags = flags | ifa->ifa_flags;
1626 	rtinfo.rti_ifa = ifa;
1627 	error = rtrequest1_global(cmd, &rtinfo, rtinit_rtrequest_callback, ifa);
1628 	if (m != NULL)
1629 		m_free(m);
1630 	return (error);
1631 }
1632 
1633 static void
1634 rtinit_rtrequest_callback(int cmd, int error,
1635 			  struct rt_addrinfo *rtinfo, struct rtentry *rt,
1636 			  void *arg)
1637 {
1638 	struct ifaddr *ifa = arg;
1639 
1640 	if (error == 0 && rt) {
1641 		if (mycpuid == 0) {
1642 			++rt->rt_refcnt;
1643 			rt_newaddrmsg(cmd, ifa, error, rt);
1644 			--rt->rt_refcnt;
1645 		}
1646 		if (cmd == RTM_DELETE) {
1647 			if (rt->rt_refcnt == 0) {
1648 				++rt->rt_refcnt;
1649 				rtfree(rt);
1650 			}
1651 		}
1652 	}
1653 }
1654 
1655 struct netmsg_rts {
1656 	struct netmsg_base	base;
1657 	int			req;
1658 	struct rt_addrinfo	*rtinfo;
1659 	rtsearch_callback_func_t callback;
1660 	void			*arg;
1661 	boolean_t		exact_match;
1662 	int			found_cnt;
1663 };
1664 
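/*
 * Look up the route described by 'rtinfo' on every cpu, starting at cpu0,
 * and invoke 'callback' on each cpu where it is found.  With exact_match
 * a host lookup only accepts a host route and a network lookup only a
 * network route.  ESRCH is returned if no cpu had the route.
 */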
1665 int
1666 rtsearch_global(int req, struct rt_addrinfo *rtinfo,
1667 		rtsearch_callback_func_t callback, void *arg,
1668 		boolean_t exact_match)
1669 {
1670 	struct netmsg_rts msg;
1671 
1672 	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
1673 		    0, rtsearch_msghandler);
1674 	msg.req = req;
1675 	msg.rtinfo = rtinfo;
1676 	msg.callback = callback;
1677 	msg.arg = arg;
1678 	msg.exact_match = exact_match;
1679 	msg.found_cnt = 0;
1680 	return lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);
1681 }
1682 
1683 static void
1684 rtsearch_msghandler(netmsg_t msg)
1685 {
1686 	struct netmsg_rts *rmsg = (void *)msg;
1687 	struct rt_addrinfo rtinfo;
1688 	struct radix_node_head *rnh;
1689 	struct rtentry *rt;
1690 	int nextcpu, error;
1691 
1692 	/*
1693 	 * Copy the rtinfo.  We need to make sure that the original
1694 	 * rtinfo, which is setup by the caller, in the netmsg will
1695 	 * _not_ be changed; else the next CPU on the netmsg forwarding
1696 	 * path will see a different rtinfo than what this CPU has seen.
1697 	 */
1698 	rtinfo = *rmsg->rtinfo;
1699 
1700 	/*
1701 	 * Find the correct routing tree to use for this Address Family
1702 	 */
1703 	if ((rnh = rt_tables[mycpuid][rtinfo.rti_dst->sa_family]) == NULL) {
1704 		if (mycpuid != 0)
1705 			panic("partially initialized routing tables");
1706 		lwkt_replymsg(&rmsg->base.lmsg, EAFNOSUPPORT);
1707 		return;
1708 	}
1709 
1710 	/*
1711 	 * Correct rtinfo for the host route searching.
1712 	 */
1713 	if (rtinfo.rti_flags & RTF_HOST) {
1714 		rtinfo.rti_netmask = NULL;
1715 		rtinfo.rti_flags &= ~(RTF_CLONING | RTF_PRCLONING);
1716 	}
1717 
1718 	rt = (struct rtentry *)
1719 	     rnh->rnh_lookup((char *)rtinfo.rti_dst,
1720 			     (char *)rtinfo.rti_netmask, rnh);
1721 
1722 	/*
1723 	 * If we are asked to do the "exact match", we need to make sure
1724 	 * that host route searching got a host route while a network
1725 	 * route searching got a network route.
1726 	 */
1727 	if (rt != NULL && rmsg->exact_match &&
1728 	    ((rt->rt_flags ^ rtinfo.rti_flags) & RTF_HOST))
1729 		rt = NULL;
1730 
1731 	if (rt == NULL) {
1732 		/*
1733 		 * No matching routes have been found, don't count this
1734 		 * as a critical error (here, we set 'error' to 0), just
1735 		 * keep moving on, since at least prcloned routes are not
1736 		 * duplicated onto each CPU.
1737 		 */
1738 		error = 0;
1739 	} else {
1740 		rmsg->found_cnt++;
1741 
1742 		rt->rt_refcnt++;
1743 		error = rmsg->callback(rmsg->req, &rtinfo, rt, rmsg->arg,
1744 				      rmsg->found_cnt);
1745 		rt->rt_refcnt--;
1746 
1747 		if (error == EJUSTRETURN) {
1748 			lwkt_replymsg(&rmsg->base.lmsg, 0);
1749 			return;
1750 		}
1751 	}
1752 
1753 	nextcpu = mycpuid + 1;
1754 	if (error) {
1755 		KKASSERT(rmsg->found_cnt > 0);
1756 
1757 		/*
1758 		 * In the following cases an unrecoverable error has
1759 		 * not occurred:
1760 		 * o  Request is RTM_GET
1761 		 * o  The first time that we find the route, but the
1762 		 *    modification fails.
1763 		 */
1764 		if (rmsg->req != RTM_GET && rmsg->found_cnt > 1) {
1765 			panic("rtsearch_msghandler: unrecoverable error "
1766 			      "cpu %d", mycpuid);
1767 		}
1768 		lwkt_replymsg(&rmsg->base.lmsg, error);
1769 	} else if (nextcpu < ncpus) {
1770 		lwkt_forwardmsg(rtable_portfn(nextcpu), &rmsg->base.lmsg);
1771 	} else {
1772 		if (rmsg->found_cnt == 0) {
1773 			/* The requested route was never seen ... */
1774 			error = ESRCH;
1775 		}
1776 		lwkt_replymsg(&rmsg->base.lmsg, error);
1777 	}
1778 }
1779 
1780 int
1781 rtmask_add_global(struct sockaddr *mask)
1782 {
1783 	struct netmsg_base msg;
1784 
1785 	netmsg_init(&msg, NULL, &curthread->td_msgport,
1786 		    0, rtmask_add_msghandler);
1787 	msg.lmsg.u.ms_resultp = mask;
1788 
1789 	return lwkt_domsg(rtable_portfn(0), &msg.lmsg, 0);
1790 }
1791 
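/*
 * Look up 'mask' in this cpu's netmask radix tree via rn_addmask(),
 * possibly inserting it depending on 'search', and return the canonical
 * copy stored in the tree, or NULL if it could not be found/added or
 * does not actually match the supplied mask.
 */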
1792 struct sockaddr *
1793 _rtmask_lookup(struct sockaddr *mask, boolean_t search)
1794 {
1795 	struct radix_node *n;
1796 
1797 #define	clen(s)	(*(u_char *)(s))
1798 	n = rn_addmask((char *)mask, search, 1, rn_cpumaskhead(mycpuid));
1799 	if (n != NULL &&
1800 	    mask->sa_len >= clen(n->rn_key) &&
1801 	    bcmp((char *)mask + 1,
1802 		 (char *)n->rn_key + 1, clen(n->rn_key) - 1) == 0) {
1803 		return (struct sockaddr *)n->rn_key;
1804 	} else {
1805 		return NULL;
1806 	}
1807 #undef clen
1808 }
1809 
1810 static void
1811 rtmask_add_msghandler(netmsg_t msg)
1812 {
1813 	struct lwkt_msg *lmsg = &msg->lmsg;
1814 	struct sockaddr *mask = lmsg->u.ms_resultp;
1815 	int error = 0, nextcpu;
1816 
1817 	if (rtmask_lookup(mask) == NULL)
1818 		error = ENOBUFS;
1819 
1820 	nextcpu = mycpuid + 1;
1821 	if (!error && nextcpu < ncpus)
1822 		lwkt_forwardmsg(rtable_portfn(nextcpu), lmsg);
1823 	else
1824 		lwkt_replymsg(lmsg, error);
1825 }
1826 
1827 /* This must be before ip6_init2(), which is now SI_ORDER_MIDDLE */
1828 SYSINIT(route, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, route_init, 0);
1829