/*	$NetBSD: ip6_flow.c,v 1.42 2021/02/19 14:52:00 christos Exp $	*/

/*
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Liam J. Foy
 * <liamjfoy@netbsd.org> and Matt Thomas <matt@netbsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The IPv6 version was developed by Liam J. Foy.  The original source
 * existed in IPv4 form, developed by Matt Thomas.  Thanks to Joerg
 * Sonnenberger, Matt Thomas and Christos Zoulas.
 *
 * Thanks to Liverpool John Moores University, especially Dr. David
 * Llewellyn-Jones for providing resources (to test) and Professor
 * Madjid Merabti.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip6_flow.c,v 1.42 2021/02/19 14:52:00 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6_private.h>

/*
 * IPv6 Fast Forward caches/hashes flows from one source to destination.
 *
 * Upon a successful forward, IPv6FF caches and hashes details such as the
 * route and the source and destination addresses.  Once another packet is
 * received matching that source and destination, the packet is forwarded
 * straight to if_output using the cached details.
 *
 * Example:
 *	ether/fddi_input -> ip6flow_fastforward -> if_output
 */

static struct pool ip6flow_pool;

TAILQ_HEAD(ip6flowhead, ip6flow);
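
/*
 * Each ip6flow entry is linked on two lists at once: the per-bucket
 * hash chain (ip6f_hash, rooted in ip6flowtable[]) used for lookups,
 * and the global ip6flowlist (ip6f_list) used for aging and reaping.
 * The IP6FLOW_INSERT/IP6FLOW_REMOVE macros below keep the two in sync.
 */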

/*
 * We could use the IPv4 defines (IPFLOW_HASHBITS) but we'll
 * use our own (possibly for future expansion).
 */
#define IP6FLOW_TIMER			(5 * PR_SLOWHZ)
#define IP6FLOW_DEFAULT_HASHSIZE	(1 << IP6FLOW_HASHBITS)

/*
 * ip6_flow.c internal lock.
 * If we used softnet_lock, it would cause a recursive lock.
 *
 * This is a tentative workaround.
 * We should make it scalable somehow in the future.
 */
static kmutex_t ip6flow_lock __cacheline_aligned;
static struct ip6flowhead *ip6flowtable = NULL;
static struct ip6flowhead ip6flowlist;
static int ip6flow_inuse __cacheline_aligned;

static void ip6flow_slowtimo_work(struct work *, void *);
static struct workqueue *ip6flow_slowtimo_wq;
static struct work ip6flow_slowtimo_wk;

static int sysctl_net_inet6_ip6_hashsize(SYSCTLFN_PROTO);
static int sysctl_net_inet6_ip6_maxflows(SYSCTLFN_PROTO);
static void ip6flow_sysctl_init(struct sysctllog **);

/*
 * Insert an ip6flow into the list.
 */
#define IP6FLOW_INSERT(hashidx, ip6f) \
do { \
	(ip6f)->ip6f_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ip6flowtable[(hashidx)], (ip6f), ip6f_hash); \
	TAILQ_INSERT_HEAD(&ip6flowlist, (ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)

/*
 * Remove an ip6flow from the list.
 */
#define IP6FLOW_REMOVE(hashidx, ip6f) \
do { \
	TAILQ_REMOVE(&ip6flowtable[(hashidx)], (ip6f), ip6f_hash); \
	TAILQ_REMOVE(&ip6flowlist, (ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IP6FLOW_DEFAULT
#define IP6FLOW_DEFAULT		256
#endif

int ip6_maxflows = IP6FLOW_DEFAULT;
int ip6_hashsize = IP6FLOW_DEFAULT_HASHSIZE;

/*
 * Calculate the hash table position.
 */
static size_t
ip6flow_hash(const struct ip6_hdr *ip6)
{
	size_t hash;
	uint32_t dst_sum, src_sum;
	size_t idx;

	src_sum = ip6->ip6_src.s6_addr32[0] + ip6->ip6_src.s6_addr32[1]
	    + ip6->ip6_src.s6_addr32[2] + ip6->ip6_src.s6_addr32[3];
	dst_sum = ip6->ip6_dst.s6_addr32[0] + ip6->ip6_dst.s6_addr32[1]
	    + ip6->ip6_dst.s6_addr32[2] + ip6->ip6_dst.s6_addr32[3];

	hash = ip6->ip6_flow;

	for (idx = 0; idx < 32; idx += IP6FLOW_HASHBITS)
		hash += (dst_sum >> (32 - idx)) + (src_sum >> idx);

	return hash & (ip6_hashsize - 1);
}

/*
 * Check to see if a flow already exists - if so, return it.
 */
static struct ip6flow *
ip6flow_lookup(const struct ip6_hdr *ip6)
{
	size_t hash;
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	hash = ip6flow_hash(ip6);

	TAILQ_FOREACH(ip6f, &ip6flowtable[hash], ip6f_hash) {
		if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6f->ip6f_dst)
		    && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6f->ip6f_src)
		    && ip6f->ip6f_flow == ip6->ip6_flow) {
			/* A cached flow has been found. */
			return ip6f;
		}
	}

	return NULL;
}

void
ip6flow_poolinit(void)
{

	pool_init(&ip6flow_pool, sizeof(struct ip6flow), 0, 0, 0, "ip6flowpl",
	    NULL, IPL_NET);
}
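
/*
 * An illustration of ip6flow_hash() above, assuming IP6FLOW_HASHBITS
 * is 6 (analogous to the IPv4 IPFLOW_HASHBITS, giving a 64-bucket
 * default table): the four 32-bit words of the source and destination
 * addresses are summed, both sums are folded into the flow label in
 * IP6FLOW_HASHBITS-sized shift steps, and the result is masked with
 * (ip6_hashsize - 1).  That mask is also why ip6_hashsize must be a
 * power of two, which sysctl_net_inet6_ip6_hashsize() enforces.
 */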

/*
 * Allocate memory and initialise lists.  This function is called
 * from ip6_init, and thereafter to resize the hash table.
 * If a newly sized table cannot be malloc'ed, we just continue
 * to use the old one.
 */
static int
ip6flow_init_locked(int table_size)
{
	struct ip6flowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ip6flow_lock));

	new_table = (struct ip6flowhead *)malloc(sizeof(struct ip6flowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ip6flowtable != NULL)
		free(ip6flowtable, M_RTABLE);

	ip6flowtable = new_table;
	ip6_hashsize = table_size;

	TAILQ_INIT(&ip6flowlist);
	for (i = 0; i < ip6_hashsize; i++)
		TAILQ_INIT(&ip6flowtable[i]);

	return 0;
}

int
ip6flow_init(int table_size)
{
	int ret, error;

	error = workqueue_create(&ip6flow_slowtimo_wq, "ip6flow",
	    ip6flow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ip6flow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ip6flow_lock);
	ret = ip6flow_init_locked(table_size);
	mutex_exit(&ip6flow_lock);
	ip6flow_sysctl_init(NULL);

	return ret;
}

/*
 * IPv6 Fast Forward routine.  Attempt to forward the packet -
 * if any problems are found, return to the main IPv6 input
 * routine to deal with them.
 */
int
ip6flow_fastforward(struct mbuf **mp)
{
	struct ip6flow *ip6f;
	struct ip6_hdr *ip6;
	struct rtentry *rt = NULL;
	struct mbuf *m;
	const struct sockaddr *dst;
	int error;
	int ret = 0;

	mutex_enter(&ip6flow_lock);

	/*
	 * Are we forwarding packets and have flows?
	 */
	if (!ip6_forwarding || ip6flow_inuse == 0)
		goto out;

	m = *mp;
	/*
	 * At least the size of an IPv6 header?
	 */
	if (m->m_len < sizeof(struct ip6_hdr))
		goto out;
	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

	if (ACCESSIBLE_POINTER(mtod(m, const void *), struct ip6_hdr) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			ret = 1;
			goto out;
		}
		*mp = m;
	}

	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		/* Bad version. */
		goto out;
	}

	/*
	 * If we have a hop-by-hop extension we must process it.
	 * We just leave this up to ip6_input to deal with.
	 */
	if (ip6->ip6_nxt == IPPROTO_HOPOPTS)
		goto out;

	/*
	 * Attempt to find a flow.
	 */
	if ((ip6f = ip6flow_lookup(ip6)) == NULL) {
		/* No flow found. */
		goto out;
	}

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & RTF_BLACKHOLE) != 0)
		goto out_unref;

	/*
	 * Packet size greater than MTU?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu) {
		/* Return to the main IPv6 input function. */
		goto out_unref;
	}
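
	/*
	 * The csum_flags cleared below are receive-side results set by
	 * the driver; since the packet bypasses ip6_input() from here
	 * on, clearing them keeps stale flags from being mistaken for
	 * transmit checksum-offload requests at if_output time.
	 */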
	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	if (ip6->ip6_hlim <= IPV6_HLIMDEC)
		goto out_unref;

	/* Decrement the hop limit (same as TTL). */
	ip6->ip6_hlim -= IPV6_HLIMDEC;

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ip6f->ip6f_ro);

	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	ip6f->ip6f_uses++;

#if 0
	/*
	 * We use FIFO cache replacement instead of LRU, the same as
	 * ip_flow.c.
	 */
	/*
	 * Move to the head of ip6flowlist (LRU).  ip6flowtable does
	 * not care about LRU.
	 */
	TAILQ_REMOVE(&ip6flowlist, ip6f, ip6f_list);
	TAILQ_INSERT_HEAD(&ip6flowlist, ip6f, ip6f_list);
#endif

	/* Send it on its way - straight to the interface output routine. */
	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		ip6f->ip6f_dropped++;
	} else {
		ip6f->ip6f_forwarded++;
	}
	ret = 1;
 out_unref:
	rtcache_unref(rt, &ip6f->ip6f_ro);
 out:
	mutex_exit(&ip6flow_lock);
	return ret;
}

/*
 * Add the IPv6 flow statistics to the main IPv6 statistics.
 */
static void
ip6flow_addstats_rt(struct rtentry *rt, struct ip6flow *ip6f)
{
	uint64_t *ip6s;

	if (rt != NULL)
		rt->rt_use += ip6f->ip6f_uses;
	ip6s = IP6_STAT_GETREF();
	ip6s[IP6_STAT_FASTFORWARDFLOWS] = ip6flow_inuse;
	ip6s[IP6_STAT_CANTFORWARD] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_ODROPPED] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_TOTAL] += ip6f->ip6f_uses;
	ip6s[IP6_STAT_FORWARD] += ip6f->ip6f_forwarded;
	ip6s[IP6_STAT_FASTFORWARD] += ip6f->ip6f_forwarded;
	IP6_STAT_PUTREF();
}

static void
ip6flow_addstats(struct ip6flow *ip6f)
{
	struct rtentry *rt;

	rt = rtcache_validate(&ip6f->ip6f_ro);
	ip6flow_addstats_rt(rt, ip6f);
	rtcache_unref(rt, &ip6f->ip6f_ro);
}

/*
 * Add statistics and free the flow.
 */
static void
ip6flow_free(struct ip6flow *ip6f)
{

	KASSERT(mutex_owned(&ip6flow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

	ip6flow_inuse--;
	ip6flow_addstats(ip6f);
	rtcache_free(&ip6f->ip6f_ro);
	pool_put(&ip6flow_pool, ip6f);
}
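
/*
 * Reap flows while holding ip6flow_lock.  Two modes: with just_one set
 * (the fast path, when the cache is full) the TAILQ_LAST entry is
 * evicted and handed back to the caller for reuse; otherwise (the
 * sysctl path) flows are freed - stale-route entries first, then the
 * least recently used ones - until ip6flow_inuse drops to ip6_maxflows.
 */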
static struct ip6flow *
ip6flow_reap_locked(int just_one)
{
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	/*
	 * This case must remove one ip6flow.  It is used in the fast
	 * path (packet processing path), so simply remove the
	 * TAILQ_LAST one.
	 */
	if (just_one) {
		ip6f = TAILQ_LAST(&ip6flowlist, ip6flowhead);
		KASSERT(ip6f != NULL);

		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		ip6flow_addstats(ip6f);
		rtcache_free(&ip6f->ip6f_ro);
		return ip6f;
	}

	/*
	 * This case is used in the slow path (sysctl).
	 * First remove any ip6flow whose rtcache is no longer valid;
	 * failing that, remove the ip6flow judged least recently used
	 * by comparing timers and last_uses.
	 */
	while (ip6flow_inuse > ip6_maxflows) {
		struct ip6flow *maybe_ip6f = TAILQ_LAST(&ip6flowlist, ip6flowhead);

		TAILQ_FOREACH(ip6f, &ip6flowlist, ip6f_list) {
			struct rtentry *rt;
			/*
			 * If this no longer points to a valid route -
			 * reclaim it.
			 */
			if ((rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL)
				goto done;
			rtcache_unref(rt, &ip6f->ip6f_ro);
			/*
			 * Choose the one that's been least recently
			 * used or has had the fewest uses in the
			 * last 1.5 intervals.
			 */
			if (ip6f->ip6f_timer < maybe_ip6f->ip6f_timer
			    || ((ip6f->ip6f_timer == maybe_ip6f->ip6f_timer)
			    && (ip6f->ip6f_last_uses + ip6f->ip6f_uses
			    < maybe_ip6f->ip6f_last_uses + maybe_ip6f->ip6f_uses)))
				maybe_ip6f = ip6f;
		}
		ip6f = maybe_ip6f;
 done:
		/*
		 * Remove the entry from the flow table.
		 */
		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		rtcache_free(&ip6f->ip6f_ro);
		ip6flow_inuse--;
		ip6flow_addstats(ip6f);
		pool_put(&ip6flow_pool, ip6f);
	}
	return NULL;
}

/*
 * Reap one or more flows - ip6flow_reap may remove
 * multiple flows if net.inet6.ip6.maxflows is reduced.
 */
struct ip6flow *
ip6flow_reap(int just_one)
{
	struct ip6flow *ip6f;

	mutex_enter(&ip6flow_lock);
	ip6f = ip6flow_reap_locked(just_one);
	mutex_exit(&ip6flow_lock);
	return ip6f;
}

static unsigned int ip6flow_work_enqueued = 0;

static void
ip6flow_slowtimo_work(struct work *wk, void *arg)
{
	struct ip6flow *ip6f, *next_ip6f;

	/* We can allow enqueuing another work at this point. */
	atomic_swap_uint(&ip6flow_work_enqueued, 0);

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
	mutex_enter(&ip6flow_lock);

	for (ip6f = TAILQ_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		struct rtentry *rt = NULL;
		next_ip6f = TAILQ_NEXT(ip6f, ip6f_list);
		if (PRT_SLOW_ISEXPIRED(ip6f->ip6f_timer) ||
		    (rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL) {
			ip6flow_free(ip6f);
		} else {
			ip6f->ip6f_last_uses = ip6f->ip6f_uses;
			ip6flow_addstats_rt(rt, ip6f);
			ip6f->ip6f_uses = 0;
			ip6f->ip6f_dropped = 0;
			ip6f->ip6f_forwarded = 0;
			rtcache_unref(rt, &ip6f->ip6f_ro);
		}
	}

	mutex_exit(&ip6flow_lock);
	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

void
ip6flow_slowtimo(void)
{

	/* Avoid enqueuing another work when one is already enqueued. */
	if (atomic_swap_uint(&ip6flow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ip6flow_slowtimo_wq, &ip6flow_slowtimo_wk, NULL);
}
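
/*
 * Life cycle of a flow: created (or refreshed) by ip6flow_create()
 * after the slow path successfully forwards a packet, used by
 * ip6flow_fastforward() for subsequent packets of the same flow, and
 * freed by ip6flow_slowtimo() once its timer expires or its cached
 * route becomes invalid (or earlier, via reaping or invalidation).
 */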

/*
 * We have successfully forwarded a packet using the normal
 * IPv6 stack.  Now create/update a flow.
 */
void
ip6flow_create(struct route *ro, struct mbuf *m)
{
	const struct ip6_hdr *ip6;
	struct ip6flow *ip6f;
	size_t hash;

	ip6 = mtod(m, const struct ip6_hdr *);

	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	mutex_enter(&ip6flow_lock);

	/*
	 * If IPv6 Fast Forward is disabled, don't create a flow.
	 * It can be disabled by setting net.inet6.ip6.maxflows to 0.
	 *
	 * Don't create a flow for ICMPv6 messages.
	 */
	if (ip6_maxflows == 0 || ip6->ip6_nxt == IPPROTO_IPV6_ICMP)
		goto out;

	/*
	 * See if an existing flow exists.  If so:
	 *	- Remove the flow
	 *	- Add flow statistics
	 *	- Free the route
	 *	- Reset statistics
	 *
	 * If a flow doesn't exist, allocate a new one if
	 * ip6_maxflows hasn't reached its limit.  If it has
	 * been reached, reap some flows.
	 */
	ip6f = ip6flow_lookup(ip6);
	if (ip6f == NULL) {
		if (ip6flow_inuse >= ip6_maxflows) {
			ip6f = ip6flow_reap_locked(1);
		} else {
			ip6f = pool_get(&ip6flow_pool, PR_NOWAIT);
			if (ip6f == NULL)
				goto out;
			ip6flow_inuse++;
		}
		memset(ip6f, 0, sizeof(*ip6f));
	} else {
		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		ip6flow_addstats(ip6f);
		rtcache_free(&ip6f->ip6f_ro);
		ip6f->ip6f_uses = 0;
		ip6f->ip6f_last_uses = 0;
		ip6f->ip6f_dropped = 0;
		ip6f->ip6f_forwarded = 0;
	}

	/*
	 * Fill in the updated/new details.
	 */
	rtcache_copy(&ip6f->ip6f_ro, ro);
	ip6f->ip6f_dst = ip6->ip6_dst;
	ip6f->ip6f_src = ip6->ip6_src;
	ip6f->ip6f_flow = ip6->ip6_flow;
	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ip6flow_hash(ip6);
	IP6FLOW_INSERT(hash, ip6f);

 out:
	mutex_exit(&ip6flow_lock);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

/*
 * Invalidate/remove all flows - if new_size is positive we
 * resize the hash table.
 */
int
ip6flow_invalidate_all(int new_size)
{
	struct ip6flow *ip6f, *next_ip6f;
	int error;

	error = 0;

	mutex_enter(&ip6flow_lock);

	for (ip6f = TAILQ_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		next_ip6f = TAILQ_NEXT(ip6f, ip6f_list);
		ip6flow_free(ip6f);
	}

	if (new_size)
		error = ip6flow_init_locked(new_size);

	mutex_exit(&ip6flow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet6.ip6.maxflows.  Since
 * the value may have been reduced, call ip6flow_reap().
 */
static int
sysctl_net_inet6_ip6_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();

	ip6flow_reap(0);

	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

	return (0);
}

static int
sysctl_net_inet6_ip6_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip6_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc().
		 */
		SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
		error = ip6flow_invalidate_all(tmp);
		SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
	} else {
		/*
		 * EINVAL if not a power of 2.
		 */
		error = EINVAL;
	}

	return error;
}

static void
ip6flow_sysctl_init(struct sysctllog **clog)
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "inet6",
		SYSCTL_DESCR("PF_INET6 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET6, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ip6",
		SYSCTL_DESCR("IPv6 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxflows",
		SYSCTL_DESCR("Number of flows for fast forwarding (IPv6)"),
		sysctl_net_inet6_ip6_maxflows, 0, &ip6_maxflows, 0,
		CTL_NET, PF_INET6, IPPROTO_IPV6,
		CTL_CREATE, CTL_EOL);
CTLTYPE_INT, "hashsize", 736 SYSCTL_DESCR("Size of hash table for fast forwarding (IPv6)"), 737 sysctl_net_inet6_ip6_hashsize, 0, &ip6_hashsize, 0, 738 CTL_NET, PF_INET6, IPPROTO_IPV6, 739 CTL_CREATE, CTL_EOL); 740 } 741
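
/*
 * Example usage of the knobs registered above (illustrative only):
 *
 *	sysctl -w net.inet6.ip6.maxflows=1024	# cache up to 1024 flows
 *	sysctl -w net.inet6.ip6.maxflows=0	# disable fast forwarding
 *	sysctl -w net.inet6.ip6.hashsize=128	# must be a power of two
 *
 * Reducing maxflows reaps excess flows immediately; changing hashsize
 * invalidates all flows and rebuilds the hash table.
 */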