/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/in_rmx.c,v 1.37.2.3 2002/08/09 14:49:23 ru Exp $
 * $DragonFly: src/sys/netinet/in_rmx.c,v 1.14 2006/04/11 06:59:34 dillon Exp $
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel
 *     memory indefinitely.  See in_rtqtimo() below for the exact
 *     mechanism.
 */

#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#ifdef CARP
#include <net/if_types.h>
#endif
#include <net/netmsg2.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

#define RTPRF_EXPIRING  RTF_PROTO3      /* set on routes we manage */

static struct callout in_rtqtimo_ch[MAXCPU];
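
/*
 * Sketch of the life cycle managed by the handlers below (a summary of
 * this file's mechanism, not additional behavior):
 *
 *  1. in_addroute() marks unicast non-host routes RTF_PRCLONING, so a
 *     lookup for a specific destination clones a host route from them
 *     (the clone carries RTF_WASCLONED).
 *  2. in_matchroute() clears RTPRF_EXPIRING and rmx_expire when such a
 *     route gains its first reference, cancelling any pending timeout.
 *  3. in_closeroute() runs on last reference drop and stamps the route
 *     RTPRF_EXPIRING with rmx_expire = time_second + rtq_reallyold.
 *  4. in_rtqtimo() periodically sweeps the tree via in_rtqkill() and
 *     deletes the routes whose expiration time has passed.
 */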

/*
 * Do what we need to do when inserting a route.
 */
static struct radix_node *
in_addroute(char *key, char *mask, struct radix_node_head *head,
            struct radix_node *treenodes)
{
        struct rtentry *rt = (struct rtentry *)treenodes;
        struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
        struct radix_node *ret;
        struct in_ifaddr_container *iac;
        struct in_ifaddr *ia;

        /*
         * For IP, mark routes to multicast addresses as such, because
         * it's easy to do and might be useful (but this is much more
         * dubious since it's so easy to inspect the address).
         *
         * For IP, all unicast non-host routes are automatically cloning.
         */
        if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
                rt->rt_flags |= RTF_MULTICAST;

        if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
                rt->rt_flags |= RTF_PRCLONING;

        /*
         * For host routes, we make sure that RTF_BROADCAST
         * is set for anything that looks like a broadcast address.
         * This way, we can avoid an expensive call to in_broadcast()
         * in ip_output() most of the time (because the route passed
         * to ip_output() is almost always a host route).
         *
         * For local routes we set RTF_LOCAL allowing various shortcuts.
         *
         * A cloned network route will point to one of several possible
         * addresses if an interface has aliases and must be repointed
         * back to the correct address or arp_rtrequest() will not properly
         * detect the local ip.
         */
        if (rt->rt_flags & RTF_HOST) {
                if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
                        rt->rt_flags |= RTF_BROADCAST;
                } else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
                           sin->sin_addr.s_addr) {
                        rt->rt_flags |= RTF_LOCAL;
                } else {
                        LIST_FOREACH(iac, INADDR_HASH(sin->sin_addr.s_addr),
                                     ia_hash) {
                                ia = iac->ia;
                                if (sin->sin_addr.s_addr ==
                                    ia->ia_addr.sin_addr.s_addr) {
                                        rt->rt_flags |= RTF_LOCAL;
                                        IFAREF(&ia->ia_ifa);
                                        IFAFREE(rt->rt_ifa);
                                        rt->rt_ifa = &ia->ia_ifa;
                                        rt->rt_ifp = rt->rt_ifa->ifa_ifp;
                                        break;
                                }
                        }
                }
        }

        /*
         * If the route already carries an MTU and it is not locked,
         * track the MTU of the outgoing interface.
         */
        if (rt->rt_rmx.rmx_mtu != 0 && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
            rt->rt_ifp != NULL)
                rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

        ret = rn_addroute(key, mask, head, treenodes);
        if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
                struct rtentry *oldrt;

                /*
                 * We are trying to add a host route, but can't.
                 * Find out if it is because of an ARP entry and
                 * delete it if so.
                 */
                oldrt = rtpurelookup((struct sockaddr *)sin);
                if (oldrt != NULL) {
                        --oldrt->rt_refcnt;
                        if ((oldrt->rt_flags & RTF_LLINFO) &&
                            (oldrt->rt_flags & RTF_HOST) &&
                            oldrt->rt_gateway &&
                            oldrt->rt_gateway->sa_family == AF_LINK) {
                                rtrequest(RTM_DELETE, rt_key(oldrt),
                                          oldrt->rt_gateway, rt_mask(oldrt),
                                          oldrt->rt_flags, NULL);
                                ret = rn_addroute(key, mask, head, treenodes);
                        }
                }
        }

        /*
         * If the new route has been created successfully, and it is
         * not a multicast/broadcast or cloned route, then we will
         * have to flush the ipflow.  Otherwise, we may end up using
         * the wrong route.
         */
        if (ret != NULL &&
            (rt->rt_flags &
             (RTF_MULTICAST | RTF_BROADCAST | RTF_WASCLONED)) == 0) {
                ipflow_flush_oncpu();
        }
        return ret;
}
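
/*
 * Example of the flag assignments above, with hypothetical addresses:
 * given 192.168.1.5/24 configured on an interface, a gateway route such
 * as "route add -net 10.2.0.0 -netmask 255.255.0.0 192.168.1.1" is
 * neither host, cloning, nor multicast, so it gets RTF_PRCLONING and
 * TCP connections through it clone per-destination host routes.  A host
 * route for 192.168.1.255 is tagged RTF_BROADCAST and one for
 * 192.168.1.5 itself RTF_LOCAL, letting ip_output() skip the
 * in_broadcast() call on the fast path.
 */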

/*
 * This code is the inverse of in_closeroute: on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matchroute(char *key, struct radix_node_head *head)
{
        struct radix_node *rn = rn_match(key, head);
        struct rtentry *rt = (struct rtentry *)rn;

        if (rt != NULL && rt->rt_refcnt == 0) { /* this is first reference */
                if (rt->rt_flags & RTPRF_EXPIRING) {
                        rt->rt_flags &= ~RTPRF_EXPIRING;
                        rt->rt_rmx.rmx_expire = 0;
                }
        }
        return rn;
}

static int rtq_reallyold = 60*60;       /* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on cloned routes");

static int rtq_minreallyold = 10;  /* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto cloned routes");

static int rtq_toomany = 128;      /* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0, "Upper limit on cloned routes");
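
/*
 * The three knobs above are runtime-tunable via sysctl(8), for example
 * (values purely illustrative):
 *
 *      sysctl net.inet.ip.rtexpire=1800        # age cloned routes out faster
 *      sysctl net.inet.ip.rtminexpire=10       # floor for the auto-adjustment
 *      sysctl net.inet.ip.rtmaxcache=256       # tolerate more cached routes
 *
 * Setting net.inet.ip.rtexpire to 0 makes in_closeroute() below delete a
 * cloned route immediately on last reference drop instead of timing it out.
 */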

/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
 */
static void
in_closeroute(struct radix_node *rn, struct radix_node_head *head)
{
        struct rtentry *rt = (struct rtentry *)rn;

        if (!(rt->rt_flags & RTF_UP))
                return;         /* prophylactic measures */

        if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
                return;

        if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_EXPIRING)) != RTF_WASCLONED)
                return;

        /*
         * As requested by David Greenman:
         * If rtq_reallyold is 0, just delete the route without
         * waiting for a timeout cycle to kill it.
         */
        if (rtq_reallyold != 0) {
                rt->rt_flags |= RTPRF_EXPIRING;
                rt->rt_rmx.rmx_expire = time_second + rtq_reallyold;
        } else {
                /*
                 * Remove route from the radix tree, but defer deallocation
                 * until we return to rtfree().
                 */
                rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt),
                          rt->rt_flags, &rt);
        }
}

struct rtqk_arg {
        struct radix_node_head *rnh;
        int draining;
        int killed;
        int found;
        int updating;
        time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even
 * when the timeout has not expired yet.  When updating, this makes sure
 * that nothing has a timeout longer than the current value of
 * rtq_reallyold.
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
        struct rtqk_arg *ap = rock;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;

        if (rt->rt_flags & RTPRF_EXPIRING) {
                ap->found++;
                if (ap->draining || rt->rt_rmx.rmx_expire <= time_second) {
                        if (rt->rt_refcnt > 0)
                                panic("rtqkill route really not free");

                        err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
                                        rt_mask(rt), rt->rt_flags, NULL);
                        if (err)
                                log(LOG_WARNING, "in_rtqkill: error %d\n", err);
                        else
                                ap->killed++;
                } else {
                        if (ap->updating &&
                            (rt->rt_rmx.rmx_expire - time_second >
                             rtq_reallyold)) {
                                rt->rt_rmx.rmx_expire = time_second +
                                    rtq_reallyold;
                        }
                        ap->nextstop = lmin(ap->nextstop,
                                            rt->rt_rmx.rmx_expire);
                }
        }

        return 0;
}

#define RTQ_TIMEOUT     60*10   /* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

static void
in_rtqtimo(void *rock)
{
        struct radix_node_head *rnh = rock;
        struct rtqk_arg arg;
        struct timeval atv;
        static time_t last_adjusted_timeout = 0;

        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = time_second + rtq_timeout;
        arg.draining = arg.updating = 0;
        crit_enter();
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
        crit_exit();

        /*
         * Attempt to be somewhat dynamic about this:
         * If there are ``too many'' routes sitting around taking up space,
         * then crank down the timeout, and see if we can't make some more
         * go away.  However, we make sure that we will never adjust more
         * than once in rtq_timeout seconds, to keep from cranking down too
         * hard.
         */
        if ((arg.found - arg.killed > rtq_toomany) &&
            (time_second - last_adjusted_timeout >= rtq_timeout) &&
            rtq_reallyold > rtq_minreallyold) {
                rtq_reallyold = 2*rtq_reallyold / 3;
                if (rtq_reallyold < rtq_minreallyold) {
                        rtq_reallyold = rtq_minreallyold;
                }

                last_adjusted_timeout = time_second;
#ifdef DIAGNOSTIC
                log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
                    rtq_reallyold);
#endif
                arg.found = arg.killed = 0;
                arg.updating = 1;
                crit_enter();
                rnh->rnh_walktree(rnh, in_rtqkill, &arg);
                crit_exit();
        }

        atv.tv_usec = 0;
        atv.tv_sec = arg.nextstop - time_second;
        callout_reset(&in_rtqtimo_ch[mycpuid], tvtohz_high(&atv), in_rtqtimo,
                      rock);
}
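
/*
 * Worked example of the back-off above, using the default settings: with
 * rtq_reallyold = 3600 and rtq_toomany = 128, a sweep that finds 200
 * expiring routes and kills none has 200 - 0 > 128, so the lifetime drops
 * to 2*3600/3 = 2400 seconds and the tree is rewalked with arg.updating
 * set to clamp existing expiration times.  Under sustained pressure later
 * sweeps (at most one adjustment per rtq_timeout = 600 seconds) cut it to
 * 1600, then 1066, and so on, but never below rtq_minreallyold (10).
 */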

void
in_rtqdrain(void)
{
        struct radix_node_head *rnh = rt_tables[mycpuid][AF_INET];
        struct rtqk_arg arg;

        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = 0;
        arg.draining = 1;
        arg.updating = 0;
        crit_enter();
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);
        crit_exit();
}

/*
 * Initialize our routing tree.
 */
int
in_inithead(void **head, int off)
{
        struct radix_node_head *rnh;

        if (!rn_inithead(head, rn_cpumaskhead(mycpuid), off))
                return 0;

        if (head != (void **)&rt_tables[mycpuid][AF_INET])  /* BOGUS! */
                return 1;       /* only do this for the real routing table */

        rnh = *head;
        rnh->rnh_addaddr = in_addroute;
        rnh->rnh_matchaddr = in_matchroute;
        rnh->rnh_close = in_closeroute;
        callout_init(&in_rtqtimo_ch[mycpuid]);
        in_rtqtimo(rnh);        /* kick off timeout first time */
        return 1;
}

/*
 * This zaps old routes when the interface goes down or the interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 *
 * in_ifadown() is typically called when an interface is being brought
 * down.  We must iterate through all per-cpu route tables and clean
 * them up.
 */
struct in_ifadown_arg {
        struct radix_node_head *rnh;
        struct ifaddr *ifa;
        int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
        struct in_ifadown_arg *ap = xap;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;

        if (rt->rt_ifa == ap->ifa &&
            (ap->del || !(rt->rt_flags & RTF_STATIC))) {
                /*
                 * We need to disable the automatic prune that happens
                 * in this case in rtrequest() because it will blow
                 * away the pointers that rn_walktree() needs in order
                 * to continue our descent.  We will end up deleting all
                 * the routes that rtrequest() would have in any case,
                 * so that behavior is not needed there.
                 */
                rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
                err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
                                rt_mask(rt), rt->rt_flags, NULL);
                if (err)
                        log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
        }
        return 0;
}

struct netmsg_ifadown {
        struct netmsg_base base;
        struct ifaddr *ifa;
        int del;
};

static void
in_ifadown_dispatch(netmsg_t msg)
{
        struct netmsg_ifadown *rmsg = (void *)msg;
        struct radix_node_head *rnh;
        struct ifaddr *ifa = rmsg->ifa;
        struct in_ifadown_arg arg;
        int nextcpu, cpu;

        cpu = mycpuid;

        arg.rnh = rnh = rt_tables[cpu][AF_INET];
        arg.ifa = ifa;
        arg.del = rmsg->del;
        rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
        ifa->ifa_flags &= ~IFA_ROUTE;

        nextcpu = cpu + 1;
        if (nextcpu < ncpus)
                lwkt_forwardmsg(rtable_portfn(nextcpu), &rmsg->base.lmsg);
        else
                lwkt_replymsg(&rmsg->base.lmsg, 0);
}

int
in_ifadown_force(struct ifaddr *ifa, int delete)
{
        struct netmsg_ifadown msg;

        if (ifa->ifa_addr->sa_family != AF_INET)
                return 1;

        /*
         * XXX individual requests are not independently chained,
         * which means that the per-cpu route tables will not be
         * consistent in the middle of the operation.  If routes
         * related to the interface are manipulated while we are
         * doing this the inconsistency could trigger a panic.
         */
        netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0,
                    in_ifadown_dispatch);
        msg.ifa = ifa;
        msg.del = delete;
        KASSERT(&curthread->td_msgport != rtable_portfn(0),
                ("in_ifadown in rtable thread"));
        lwkt_domsg(rtable_portfn(0), &msg.base.lmsg, 0);

        return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
#ifdef CARP
        if (ifa->ifa_ifp->if_type == IFT_CARP)
                return 0;
#endif
        return in_ifadown_force(ifa, delete);
}
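
/*
 * Message flow of in_ifadown_force() on a hypothetical four-cpu system:
 * the request is sent to cpu0's rtable thread with lwkt_domsg(), which
 * blocks the caller.  Each in_ifadown_dispatch() invocation walks its
 * own cpu's AF_INET tree and forwards the same message to the next cpu's
 * rtable port; the last cpu replies instead, waking the caller.  The
 * tables are therefore cleaned sequentially, which is why the XXX
 * comment above warns about transient inconsistency between them.
 */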