/*	$NetBSD: ip_flow.c,v 1.48 2007/08/20 19:42:34 dyoung Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
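
/*
 * IPv4 fast-forwarding flow cache.
 *
 * Recently forwarded flows, keyed on source address, destination address
 * and type-of-service, are cached so that subsequent packets of the same
 * flow can be sent by ipflow_fastforward() without the full ip_input()/
 * ip_forward() processing.  ipflow_create() installs entries,
 * ipflow_slowtimo() ages them, and ipflow_reap() reclaims them when the
 * ip_maxflows limit is reached.
 */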

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.48 2007/08/20 19:42:34 dyoung Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c.
 */

POOL_INIT(ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl", NULL,
    IPL_NET);

LIST_HEAD(ipflowhead, ipflow);

#define IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)

static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define IPFLOW_INSERT(bucket, ipf) \
do { \
	LIST_INSERT_HEAD((bucket), (ipf), ipf_hash); \
	LIST_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define IPFLOW_REMOVE(ipf) \
do { \
	LIST_REMOVE((ipf), ipf_hash); \
	LIST_REMOVE((ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define IPFLOW_MAX		256
#endif
int ip_maxflows = IPFLOW_MAX;
int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

/*
 * Fold the source and destination addresses and the TOS byte into a
 * bucket index, IPFLOW_HASHBITS bits per step.  (Note: the idx == 0
 * step shifts ip_dst right by 32 bits, which C leaves undefined for a
 * 32-bit operand.)
 */
static size_t
ipflow_hash(struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize - 1);
}

static struct ipflow *
ipflow_lookup(struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	hash = ipflow_hash(ip);

	LIST_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

int
ipflow_init(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	LIST_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		LIST_INIT(&ipflowtable[i]);

	return 0;
}
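
/*
 * Attempt to forward the packet on the fast path.  Returns 1 if the
 * packet was consumed (sent, or dropped at the output queue), 0 if the
 * caller must take the normal ip_input()/ip_forward() path.  The fast
 * path is declined unless forwarding is enabled, the packet matches a
 * cached flow, its header checksum verifies, the cached route and
 * interface are still up, it fits the interface MTU, and its TTL can be
 * decremented without expiring.
 */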
int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip, ip_store;
	struct ipflow *ipf;
	struct rtentry *rt;
	const struct sockaddr *dst;
	int error;
	int iplen;

	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		return 0;

	/*
	 * Was packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		return 0;

	/*
	 * IP header with no options and valid version and length.
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		return 0;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		return 0;

	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
		((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		return (0);

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0)
			return (0);
		break;
	}

	/*
	 * Route and interface still up?
	 */
	if (rtcache_down(&ipf->ipf_ro) || (rt = ipf->ipf_ro.ro_rt) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0)
		return 0;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		return 0;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adding the checksum works on either endian CPU.
	 * If htons() is inlined, all the arithmetic is folded; otherwise
	 * the htons()s are combined by CSE due to the const attribute.
	 * Writing x for htons(IPTTLDEC << 8), the branch below performs
	 * the one's-complement end-around carry, since in 16 bits
	 * ip_sum - ~x == ip_sum + x - 0xffff.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);

	/*
	 * Done modifying the header; copy it back, if necessary.
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  ENOBUFS from the output routine
	 * is counted as a drop; any other error as an output error.
	 */
	ipf->ipf_uses++;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	if ((error = (*rt->rt_ifp->if_output)(rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	return 1;
}
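
/*
 * Fold a dying flow's pending statistics into its cached route and the
 * global ipstat counters so that nothing is lost when the flow is freed,
 * reaped or recycled.
 */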
static void
ipflow_addstats(struct ipflow *ipf)
{
	if (!rtcache_down(&ipf->ipf_ro) && ipf->ipf_ro.ro_rt != NULL)
		ipf->ipf_ro.ro_rt->rt_use += ipf->ipf_uses;
	ipstat.ips_cantforward += ipf->ipf_errors + ipf->ipf_dropped;
	ipstat.ips_total += ipf->ipf_uses;
	ipstat.ips_forward += ipf->ipf_uses;
	ipstat.ips_fastforward += ipf->ipf_uses;
}

static void
ipflow_free(struct ipflow *ipf)
{
	int s;
	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	s = splnet();
	IPFLOW_REMOVE(ipf);
	splx(s);
	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	s = splnet();
	pool_put(&ipflow_pool, ipf);
	splx(s);
}

struct ipflow *
ipflow_reap(int just_one)
{
	while (just_one || ipflow_inuse > ip_maxflows) {
		struct ipflow *ipf, *maybe_ipf = NULL;
		int s;

		ipf = LIST_FIRST(&ipflowlist);
		while (ipf != NULL) {
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			if (rtcache_down(&ipf->ipf_ro) ||
			    ipf->ipf_ro.ro_rt == NULL)
				goto done;
			/*
			 * Otherwise, choose the one that's been least
			 * recently used or has had the fewest uses in
			 * the last 1.5 intervals.
			 */
			if (maybe_ipf == NULL ||
			    ipf->ipf_timer < maybe_ipf->ipf_timer ||
			    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
			     ipf->ipf_last_uses + ipf->ipf_uses <
			         maybe_ipf->ipf_last_uses +
			         maybe_ipf->ipf_uses))
				maybe_ipf = ipf;
			ipf = LIST_NEXT(ipf, ipf_list);
		}
		ipf = maybe_ipf;
	done:
		/*
		 * Remove the entry from the flow table.
		 */
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		if (just_one)
			return ipf;
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

void
ipflow_slowtimo(void)
{
	struct ipflow *ipf, *next_ipf;

	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    rtcache_down(&ipf->ipf_ro) || ipf->ipf_ro.ro_rt == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			ipf->ipf_ro.ro_rt->rt_use += ipf->ipf_uses;
			ipstat.ips_total += ipf->ipf_uses;
			ipstat.ips_forward += ipf->ipf_uses;
			ipstat.ips_fastforward += ipf->ipf_uses;
			ipf->ipf_uses = 0;
		}
	}
}
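
/*
 * Create or refresh the cache entry for this packet's flow using the
 * supplied route, so that later packets with the same source,
 * destination and TOS can take the fast path.
 */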
void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	struct ip *const ip = mtod(m, struct ip *);
	struct ipflow *ipf;
	size_t hash;
	int s;

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		return;
	/*
	 * See if an existing flow exists.  If so, remove it from its
	 * list and free the old route.  If not, try to allocate a new
	 * one from the pool (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(1);
		} else {
			s = splnet();
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			splx(s);
			if (ipf == NULL)
				return;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);
	ipf->ipf_start = time_uptime;
	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	s = splnet();
	IPFLOW_INSERT(&ipflowtable[hash], ipf);
	splx(s);
}

int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int s, error;

	error = 0;
	s = splnet();
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_init(new_size);
	splx(s);

	return error;
}