/*	$NetBSD: ip_flow.c,v 1.60 2012/01/19 13:13:48 liamjfoy Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.60 2012/01/19 13:13:48 liamjfoy Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */

struct ipflow {
	LIST_ENTRY(ipflow) ipf_list;	/* next in active list */
	LIST_ENTRY(ipflow) ipf_hash;	/* next ipflow in bucket */
	struct in_addr ipf_dst;		/* destination address */
	struct in_addr ipf_src;		/* source address */
	uint8_t ipf_tos;		/* type-of-service */
	struct route ipf_ro;		/* associated route entry */
	u_long ipf_uses;		/* number of uses in this period */
	u_long ipf_last_uses;		/* number of uses in last period */
	u_long ipf_dropped;		/* ENOBUFS returned by if_output */
	u_long ipf_errors;		/* other errors returned by if_output */
	u_int ipf_timer;		/* lifetime timer */
};

#define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */

static struct pool ipflow_pool;

LIST_HEAD(ipflowhead, ipflow);

#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)

static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define	IPFLOW_INSERT(bucket, ipf) \
do { \
	LIST_INSERT_HEAD((bucket), (ipf), ipf_hash); \
	LIST_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(ipf) \
do { \
	LIST_REMOVE((ipf), ipf_hash); \
	LIST_REMOVE((ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
int ip_maxflows = IPFLOW_MAX;
int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize-1);
}

static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	hash = ipflow_hash(ip);

	LIST_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

int
ipflow_init(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	LIST_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		LIST_INIT(&ipflowtable[i]);

	return 0;
}

int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt;
	const struct sockaddr *dst;
	int error;
	int iplen;

	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		return 0;

	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		return 0;

	/*
	 * IP header with no options and valid version and length.
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		return 0;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		return 0;

	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
		((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		return (0);

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0)
			return (0);
		break;
	}

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0)
		return 0;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		return 0;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adjusting the checksum works on a CPU of either
	 * endianness.  If htons() is inlined, all the arithmetic is folded;
	 * otherwise the htons()s are combined by CSE due to the const
	 * attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	KERNEL_LOCK(1, NULL);
	if ((error = (*rt->rt_ifp->if_output)(rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	KERNEL_UNLOCK_ONE(NULL);
	return 1;
}

static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
		rt->rt_use += ipf->ipf_uses;

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{
	int s;
	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	s = splnet();
	IPFLOW_REMOVE(ipf);
	splx(s);
	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	s = splnet();
	pool_put(&ipflow_pool, ipf);
	splx(s);
}

static struct ipflow *
ipflow_reap(bool just_one)
{
	while (just_one || ipflow_inuse > ip_maxflows) {
		struct ipflow *ipf, *maybe_ipf = NULL;
		int s;

		ipf = LIST_FIRST(&ipflowlist);
		while (ipf != NULL) {
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			if (rtcache_validate(&ipf->ipf_ro) == NULL)
				goto done;
			/*
			 * Choose the one that's been least recently
			 * used or has had the fewest uses in the
			 * last 1.5 intervals.
			 */
			if (maybe_ipf == NULL ||
			    ipf->ipf_timer < maybe_ipf->ipf_timer ||
			    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
			     ipf->ipf_last_uses + ipf->ipf_uses <
			     maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses))
				maybe_ipf = ipf;
			ipf = LIST_NEXT(ipf, ipf_list);
		}
		ipf = maybe_ipf;
	done:
		/*
		 * Remove the entry from the flow table.
		 */
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		if (just_one)
			return ipf;
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

void
ipflow_prune(void)
{

	(void) ipflow_reap(false);
}

void
ipflow_slowtimo(void)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}

void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;
	int s;

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		return;
	/*
	 * See if an existing flow struct exists.  If so, remove it from its
	 * list and free the old route.  If not, try to allocate a new one
	 * (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			s = splnet();
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			splx(s);
			if (ipf == NULL)
				return;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	s = splnet();
	IPFLOW_INSERT(&ipflowtable[hash], ipf);
	splx(s);
}

int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int s, error;

	error = 0;
	s = splnet();
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_init(new_size);
	splx(s);

	return error;
}
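
The TTL/checksum adjustment in ipflow_fastforward() above is the classic incremental (RFC 1141-style) update: decrementing the TTL lowers one 16-bit header word by IPTTLDEC << 8, so the one's-complement checksum is raised by the same amount, with the comparison against ~htons(IPTTLDEC << 8) supplying the end-around carry. The following is a minimal, standalone userland sketch (not part of ip_flow.c) that mirrors that arithmetic and checks it against a checksum recomputed from scratch. The sample header bytes, the cksum() helper, and the local IPTTLDEC definition of 1 are illustrative assumptions, not kernel code.

/*
 * Sketch: incremental IPv4 header-checksum update after a TTL decrement,
 * mirroring the arithmetic used by ipflow_fastforward().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define	IPTTLDEC	1	/* assumed value for this sketch */

/* Internet checksum over a buffer of even length, read as native words. */
static uint16_t
cksum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	uint16_t word;

	while (len > 1) {
		memcpy(&word, buf, sizeof(word));
		sum += word;
		buf += 2;
		len -= 2;
	}
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int
main(void)
{
	/* A minimal 20-byte IPv4 header, checksum field initially zero. */
	uint8_t hdr[20] = {
		0x45, 0x00, 0x00, 0x54,	/* ver/hl, tos, total length */
		0x12, 0x34, 0x40, 0x00,	/* id, flags/fragment offset */
		0x40, 0x06, 0x00, 0x00,	/* ttl=64, proto=TCP, checksum */
		0xc0, 0xa8, 0x01, 0x01,	/* source 192.168.1.1 */
		0xc0, 0xa8, 0x01, 0x02,	/* destination 192.168.1.2 */
	};
	uint16_t sum, want;

	/* Fill in a correct checksum first. */
	sum = cksum(hdr, sizeof(hdr));
	memcpy(&hdr[10], &sum, sizeof(sum));

	/* Decrement the TTL as the fast-forward path does. */
	hdr[8] -= IPTTLDEC;

	/*
	 * Incremental update: the TTL is the high byte of the fifth header
	 * word, so that word just dropped by IPTTLDEC << 8 and the stored
	 * checksum must grow by the same amount; the comparison handles the
	 * one's-complement end-around carry, as in ipflow_fastforward().
	 */
	memcpy(&sum, &hdr[10], sizeof(sum));
	if (sum >= (uint16_t)~htons(IPTTLDEC << 8))
		sum -= ~htons(IPTTLDEC << 8);
	else
		sum += htons(IPTTLDEC << 8);

	/* Recompute the checksum from scratch and confirm the two agree. */
	hdr[10] = hdr[11] = 0;
	want = cksum(hdr, sizeof(hdr));
	printf("incremental 0x%04x, recomputed 0x%04x -> %s\n",
	    sum, want, sum == want ? "match" : "MISMATCH");
	return 0;
}

Because the stored checksum and the htons()-built constant are both handled in the same byte order, the same code gives the matching result on big- and little-endian machines, which is the point of the "either endianness" remark in the kernel comment.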