/*
 * Copyright 1994, 1995 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/in_rmx.c,v 1.37.2.3 2002/08/09 14:49:23 ru Exp $
 * $DragonFly: src/sys/netinet/in_rmx.c,v 1.14 2006/04/11 06:59:34 dillon Exp $
 */

/*
 * This code does two things necessary for the enhanced TCP metrics to
 * function in a useful manner:
 *  1) It marks all non-host routes as `cloning', thus ensuring that
 *     every actual reference to such a route actually gets turned
 *     into a reference to a host route to the specific destination
 *     requested.
 *  2) When such routes lose all their references, it arranges for them
 *     to be deleted in some random collection of circumstances, so that
 *     a large quantity of stale routing data is not kept in kernel
 *     memory indefinitely.  See in_rtqtimo() below for the exact
 *     mechanism.
 */

#include "opt_carp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#ifdef CARP
#include <net/if_types.h>
#endif
#include <net/netmsg2.h>
#include <net/netisr2.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_flow.h>

#define RTPRF_EXPIRING  RTF_PROTO3      /* set on routes we manage */

/*
 * Per-cpu route expiration state; one instance per cpu, cache-line
 * aligned to avoid false sharing.
 */
struct in_rtq_pcpu {
        struct radix_node_head  *rnh;

        struct callout          timo_ch;
        struct netmsg_base      timo_nmsg;

        time_t                  lastdrain;
        int                     draining;
        struct netmsg_base      drain_nmsg;
} __cachealign;

static void     in_rtqtimo(void *);

static struct in_rtq_pcpu       in_rtq_pcpu[MAXCPU];

/*
 * Do what we need to do when inserting a route.
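 *
 * in_addroute() is installed as the rnh_addaddr hook of the per-cpu
 * AF_INET radix tree (see in_inithead() below), so it wraps every
 * rn_addroute() call for this protocol: it fixes up RTF_MULTICAST,
 * RTF_PRCLONING, RTF_BROADCAST and RTF_LOCAL, retries a failed host
 * route insertion after evicting a conflicting ARP entry, and flushes
 * the ipflow cache when a new unicast route goes in.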
 */
static struct radix_node *
in_addroute(const void *key, const void *mask, struct radix_node_head *head,
            struct radix_node *nodes)
{
        struct rtentry *rt = (struct rtentry *)nodes;
        struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt);
        struct radix_node *ret;
        struct in_ifaddr_container *iac;
        struct in_ifaddr *ia;

        /*
         * For IP, mark routes to multicast addresses as such, because
         * it's easy to do and might be useful (but this is much more
         * dubious since it's so easy to inspect the address).
         *
         * For IP, all unicast non-host routes are automatically cloning.
         */
        if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
                rt->rt_flags |= RTF_MULTICAST;

        if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
                rt->rt_flags |= RTF_PRCLONING;

        /*
         * Try to set RTF_BROADCAST or RTF_LOCAL for a host route.
         *
         * Skip this process if the host route already has RTF_LOCAL set,
         * for example by ifa_maintain_loopback_route().
         *
         * For host routes, we make sure that RTF_BROADCAST is set for
         * anything that looks like a broadcast address.  This way, we can
         * avoid an expensive call to in_broadcast() in ip_output() most of
         * the time (because the route passed to ip_output() is almost
         * always a host route).
         *
         * For local routes, we set RTF_LOCAL to allow various shortcuts.
         *
         * A cloned network route will point to one of several possible
         * addresses if an interface has aliases and must be repointed back
         * to the correct address or arp_rtrequest() will not properly
         * detect the local IP.
         */
        if ((rt->rt_flags & (RTF_HOST | RTF_LOCAL)) == RTF_HOST) {
                if (in_broadcast(sin->sin_addr, rt->rt_ifp)) {
                        rt->rt_flags |= RTF_BROADCAST;
                } else if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr ==
                           sin->sin_addr.s_addr) {
                        rt->rt_flags |= RTF_LOCAL;
                } else {
                        LIST_FOREACH(iac, INADDR_HASH(sin->sin_addr.s_addr),
                                     ia_hash) {
                                ia = iac->ia;
                                if (sin->sin_addr.s_addr ==
                                    ia->ia_addr.sin_addr.s_addr) {
                                        rt->rt_flags |= RTF_LOCAL;
                                        IFAREF(&ia->ia_ifa);
                                        IFAFREE(rt->rt_ifa);
                                        rt->rt_ifa = &ia->ia_ifa;
                                        rt->rt_ifp = rt->rt_ifa->ifa_ifp;
                                        break;
                                }
                        }
                }
        }

        /* Inherit the interface MTU unless the MTU metric is locked. */
        if (rt->rt_rmx.rmx_mtu == 0 && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
            rt->rt_ifp != NULL)
                rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;

        ret = rn_addroute(key, mask, head, nodes);
        if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
                struct rtentry *oldrt;

                /*
                 * We are trying to add a host route, but can't.
                 * Find out if it is because of an ARP entry and
                 * delete it if so.
                 */
                oldrt = rtpurelookup((struct sockaddr *)sin);
                if (oldrt != NULL) {
                        --oldrt->rt_refcnt;
                        if ((oldrt->rt_flags & RTF_LLINFO) &&
                            (oldrt->rt_flags & RTF_HOST) &&
                            oldrt->rt_gateway &&
                            oldrt->rt_gateway->sa_family == AF_LINK) {
                                rtrequest(RTM_DELETE, rt_key(oldrt),
                                          oldrt->rt_gateway, rt_mask(oldrt),
                                          oldrt->rt_flags, NULL);
                                ret = rn_addroute(key, mask, head, nodes);
                        }
                }
        }

        /*
         * If the new route has been created successfully, and it is
         * not a multicast/broadcast or cloned route, then we will
         * have to flush the ipflow.  Otherwise, we may end up using
         * the wrong route.
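         *
         * (ipflow_flush_oncpu() discards this cpu's ip_flow
         * fast-forwarding entries, so subsequent packets redo a full
         * route lookup and pick up the route we just inserted.)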
         */
        if (ret != NULL &&
            (rt->rt_flags &
             (RTF_MULTICAST | RTF_BROADCAST | RTF_WASCLONED)) == 0)
                ipflow_flush_oncpu();
        return ret;
}

/*
 * This code is the inverse of in_closeroute(): on first reference, if we
 * were managing the route, stop doing so and set the expiration timer
 * back off again.
 */
static struct radix_node *
in_matchroute(const void *key, struct radix_node_head *head)
{
        struct radix_node *rn = rn_match(key, head);
        struct rtentry *rt = (struct rtentry *)rn;

        if (rt != NULL && rt->rt_refcnt == 0) { /* this is first reference */
                if (rt->rt_flags & RTPRF_EXPIRING) {
                        rt->rt_flags &= ~RTPRF_EXPIRING;
                        rt->rt_rmx.rmx_expire = 0;
                }
        }
        return rn;
}

static int rtq_reallyold = 60*60;       /* one hour is ``really old'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, CTLFLAG_RW,
    &rtq_reallyold, 0,
    "Default expiration time on cloned routes");

static int rtq_minreallyold = 10;   /* never automatically crank down to less */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, CTLFLAG_RW,
    &rtq_minreallyold, 0,
    "Minimum time to attempt to hold onto cloned routes");

static int rtq_toomany = 128;           /* 128 cached routes is ``too many'' */
SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, CTLFLAG_RW,
    &rtq_toomany, 0,
    "Upper limit on cloned routes");

/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
 */
static void
in_closeroute(struct radix_node *rn, struct radix_node_head *head)
{
        struct rtentry *rt = (struct rtentry *)rn;

        if (!(rt->rt_flags & RTF_UP))
                return;         /* prophylactic measures */

        if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
                return;

        if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_EXPIRING)) !=
            RTF_WASCLONED)
                return;

        /*
         * As requested by David Greenman:
         * If rtq_reallyold is 0, just delete the route without
         * waiting for a timeout cycle to kill it.
         */
        if (rtq_reallyold != 0) {
                rt->rt_flags |= RTPRF_EXPIRING;
                rt->rt_rmx.rmx_expire = time_uptime + rtq_reallyold;
        } else {
                /*
                 * Remove the route from the radix tree, but defer
                 * deallocation until we return to rtfree().
                 */
                rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt),
                          rt->rt_flags, &rt);
        }
}

struct rtqk_arg {
        struct radix_node_head *rnh;
        int draining;
        int killed;
        int found;
        int updating;
        time_t nextstop;
};

/*
 * Get rid of old routes.  When draining, this deletes everything, even
 * when the timeout is not expired yet.  When updating, this makes sure
 * that nothing has a timeout longer than the current value of
 * rtq_reallyold.
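 *
 * This is an rnh_walktree() callback: it is invoked once per radix-tree
 * leaf with 'rock' pointing at the caller's rtqk_arg, and it must
 * return 0 for the walk to continue.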
 */
static int
in_rtqkill(struct radix_node *rn, void *rock)
{
        struct rtqk_arg *ap = rock;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;

        if (rt->rt_flags & RTPRF_EXPIRING) {
                ap->found++;
                if (ap->draining || rt->rt_rmx.rmx_expire <= time_uptime) {
                        if (rt->rt_refcnt > 0)
                                panic("rtqkill route really not free");

                        err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
                                        rt_mask(rt), rt->rt_flags, NULL);
                        if (err)
                                log(LOG_WARNING, "in_rtqkill: error %d\n", err);
                        else
                                ap->killed++;
                } else {
                        if (ap->updating &&
                            (int)(rt->rt_rmx.rmx_expire - time_uptime) >
                            rtq_reallyold) {
                                rt->rt_rmx.rmx_expire = time_uptime +
                                    rtq_reallyold;
                        }
                        ap->nextstop = lmin(ap->nextstop,
                                            rt->rt_rmx.rmx_expire);
                }
        }

        return 0;
}

#define RTQ_TIMEOUT     60*10   /* run no less than once every ten minutes */
static int rtq_timeout = RTQ_TIMEOUT;

/*
 * NOTE:
 * 'last_adjusted_timeout' and 'rtq_reallyold' are _not_ read-only and
 * could be changed by all CPUs.  However, they are changed at such a low
 * frequency that we can ignore the cache-thrashing issue and treat them
 * as read-mostly.
 */
static void
in_rtqtimo_dispatch(netmsg_t nmsg)
{
        struct rtqk_arg arg;
        struct timeval atv;
        static time_t last_adjusted_timeout = 0;
        struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[mycpuid];
        struct radix_node_head *rnh = pcpu->rnh;

        ASSERT_NETISR_NCPUS(mycpuid);

        /* Reply ASAP */
        crit_enter();
        lwkt_replymsg(&nmsg->lmsg, 0);
        crit_exit();

        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = time_uptime + rtq_timeout;
        arg.draining = arg.updating = 0;
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);

        /*
         * Attempt to be somewhat dynamic about this:
         * If there are ``too many'' routes sitting around taking up space,
         * then crank down the timeout, and see if we can't make some more
         * go away.  However, we make sure that we will never adjust more
         * than once in rtq_timeout seconds, to keep from cranking down too
         * hard.
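         *
         * Each adjustment scales rtq_reallyold by 2/3, clamped at
         * rtq_minreallyold; with the defaults that walks 3600 -> 2400 ->
         * 1600 -> 1066 -> ... down toward 10 for as long as the table
         * stays over rtq_toomany.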
         */
        if ((arg.found - arg.killed > rtq_toomany) &&
            (int)(time_uptime - last_adjusted_timeout) >= rtq_timeout &&
            rtq_reallyold > rtq_minreallyold) {
                rtq_reallyold = 2*rtq_reallyold / 3;
                if (rtq_reallyold < rtq_minreallyold)
                        rtq_reallyold = rtq_minreallyold;

                last_adjusted_timeout = time_uptime;
#ifdef DIAGNOSTIC
                log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n",
                    rtq_reallyold);
#endif
                arg.found = arg.killed = 0;
                arg.updating = 1;
                rnh->rnh_walktree(rnh, in_rtqkill, &arg);
        }

        atv.tv_usec = 0;
        atv.tv_sec = arg.nextstop - time_uptime;
        if ((int)atv.tv_sec < 1) {              /* time shift safety */
                atv.tv_sec = 1;
                arg.nextstop = time_uptime + atv.tv_sec;
        }
        if ((int)atv.tv_sec > rtq_timeout) {    /* time shift safety */
                atv.tv_sec = rtq_timeout;
                arg.nextstop = time_uptime + atv.tv_sec;
        }
        callout_reset(&pcpu->timo_ch, tvtohz_high(&atv), in_rtqtimo, NULL);
}

static void
in_rtqtimo(void *arg __unused)
{
        int cpuid = mycpuid;
        struct lwkt_msg *lmsg = &in_rtq_pcpu[cpuid].timo_nmsg.lmsg;

        crit_enter();
        if (lmsg->ms_flags & MSGF_DONE)
                lwkt_sendmsg_oncpu(netisr_cpuport(cpuid), lmsg);
        crit_exit();
}

static void
in_rtqdrain_oncpu(struct in_rtq_pcpu *pcpu)
{
        struct radix_node_head *rnh = rt_tables[mycpuid][AF_INET];
        struct rtqk_arg arg;

        ASSERT_NETISR_NCPUS(mycpuid);

        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = 0;
        arg.draining = 1;
        arg.updating = 0;
        rnh->rnh_walktree(rnh, in_rtqkill, &arg);

        pcpu->lastdrain = time_uptime;
}

static void
in_rtqdrain_dispatch(netmsg_t nmsg)
{
        struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[mycpuid];

        /* Reply ASAP */
        crit_enter();
        lwkt_replymsg(&nmsg->lmsg, 0);
        crit_exit();

        in_rtqdrain_oncpu(pcpu);
        pcpu->draining = 0;
}

static void
in_rtqdrain_ipi(void *arg __unused)
{
        int cpu = mycpuid;
        struct lwkt_msg *msg = &in_rtq_pcpu[cpu].drain_nmsg.lmsg;

        crit_enter();
        if (msg->ms_flags & MSGF_DONE)
                lwkt_sendmsg_oncpu(netisr_cpuport(cpu), msg);
        crit_exit();
}

void
in_rtqdrain(void)
{
        cpumask_t mask;
        int cpu;

        CPUMASK_ASSBMASK(mask, netisr_ncpus);
        CPUMASK_ANDMASK(mask, smp_active_mask);

        cpu = mycpuid;
        if (IN_NETISR_NCPUS(cpu)) {
                in_rtqdrain_oncpu(&in_rtq_pcpu[cpu]);
                CPUMASK_NANDBIT(mask, cpu);
        }

        for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
                struct in_rtq_pcpu *pcpu = &in_rtq_pcpu[cpu];

                if (!CPUMASK_TESTBIT(mask, cpu))
                        continue;

                if (pcpu->draining || pcpu->lastdrain == time_uptime) {
                        /* Just drained or is draining; skip this cpu. */
                        CPUMASK_NANDBIT(mask, cpu);
                        continue;
                }
                pcpu->draining = 1;
        }

        if (CPUMASK_TESTNZERO(mask))
                lwkt_send_ipiq_mask(mask, in_rtqdrain_ipi, NULL);
}

/*
 * Initialize our routing tree.
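 *
 * Called for each per-cpu AF_INET radix tree; besides creating the
 * tree, this installs our add/match/close hooks and arms the per-cpu
 * expiration callout.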
 */
int
in_inithead(void **head, int off)
{
        struct radix_node_head *rnh;
        struct in_rtq_pcpu *pcpu;
        int cpuid = mycpuid;

        rnh = *head;
        KKASSERT(rnh == rt_tables[cpuid][AF_INET]);

        if (!rn_inithead(&rnh, rn_cpumaskhead(cpuid), off))
                return 0;

        *head = rnh;
        rnh->rnh_addaddr = in_addroute;
        rnh->rnh_matchaddr = in_matchroute;
        rnh->rnh_close = in_closeroute;

        pcpu = &in_rtq_pcpu[cpuid];
        pcpu->rnh = rnh;
        callout_init_mp(&pcpu->timo_ch);
        netmsg_init(&pcpu->timo_nmsg, NULL, &netisr_adone_rport,
            MSGF_PRIORITY, in_rtqtimo_dispatch);
        netmsg_init(&pcpu->drain_nmsg, NULL, &netisr_adone_rport,
            MSGF_PRIORITY, in_rtqdrain_dispatch);

        in_rtqtimo(NULL);       /* kick off timeout first time */
        return 1;
}

/*
 * This zaps old routes when the interface goes down or an interface
 * address is deleted.  In the latter case, it deletes static routes
 * that point to this address.  If we don't do this, we may end up
 * using the old address in the future.  The ones we always want to
 * get rid of are things like ARP entries, since the user might down
 * the interface, walk over to a completely different network, and
 * plug back in.
 *
 * in_ifadown() is typically called when an interface is being brought
 * down.  We must iterate through all per-cpu route tables and clean
 * them up.
 */
struct in_ifadown_arg {
        struct radix_node_head *rnh;
        struct ifaddr *ifa;
        int del;
};

static int
in_ifadownkill(struct radix_node *rn, void *xap)
{
        struct in_ifadown_arg *ap = xap;
        struct rtentry *rt = (struct rtentry *)rn;
        int err;

        if (rt->rt_ifa == ap->ifa &&
            (ap->del || !(rt->rt_flags & RTF_STATIC))) {
                /*
                 * We need to disable the automatic prune that happens
                 * in this case in rtrequest() because it will blow
                 * away the pointers that rn_walktree() needs in order
                 * to continue our descent.  We will end up deleting all
                 * the routes that rtrequest() would have in any case,
                 * so that behavior is not needed there.
                 */
                rt->rt_flags &= ~(RTF_CLONING | RTF_PRCLONING);
                err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
                                rt_mask(rt), rt->rt_flags, NULL);
                if (err)
                        log(LOG_WARNING, "in_ifadownkill: error %d\n", err);
        }
        return 0;
}

struct netmsg_ifadown {
        struct netmsg_base base;
        struct ifaddr *ifa;
        int del;
};

static void
in_ifadown_dispatch(netmsg_t msg)
{
        struct netmsg_ifadown *rmsg = (void *)msg;
        struct radix_node_head *rnh;
        struct ifaddr *ifa = rmsg->ifa;
        struct in_ifadown_arg arg;
        int cpu;

        cpu = mycpuid;
        ASSERT_NETISR_NCPUS(cpu);

        arg.rnh = rnh = rt_tables[cpu][AF_INET];
        arg.ifa = ifa;
        arg.del = rmsg->del;
        rnh->rnh_walktree(rnh, in_ifadownkill, &arg);
        ifa->ifa_flags &= ~IFA_ROUTE;

        /* Forward the request on to the next netisr cpu's route table. */
        netisr_forwardmsg(&msg->base, cpu + 1);
}

int
in_ifadown_force(struct ifaddr *ifa, int delete)
{
        struct netmsg_ifadown msg;

        if (ifa->ifa_addr->sa_family != AF_INET)
                return 1;

        /*
         * XXX individual requests are not independently chained,
         * which means that the per-cpu route tables will not be
         * consistent in the middle of the operation.  If routes
         * related to the interface are manipulated while we are
         * doing this, the inconsistency could trigger a panic.
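         *
         * Instead, the deletions are serialized through the netisrs:
         * netisr_domsg_global() runs in_ifadown_dispatch() on each
         * netisr cpu in turn (each dispatch forwards the message to
         * the next cpu), so the per-cpu tables are cleaned one after
         * another rather than atomically.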
         */
        netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
            in_ifadown_dispatch);
        msg.ifa = ifa;
        msg.del = delete;
        netisr_domsg_global(&msg.base);

        return 0;
}

int
in_ifadown(struct ifaddr *ifa, int delete)
{
#ifdef CARP
        if (ifa->ifa_ifp->if_type == IFT_CARP)
                return 0;
#endif
        return in_ifadown_force(ifa, delete);
}