/*	$NetBSD: ip6_flow.c,v 1.33 2016/12/08 05:16:34 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Liam J. Foy
 * <liamjfoy@netbsd.org> and Matt Thomas <matt@netbsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * The IPv6 version was developed by Liam J. Foy. The original IPv4
 * version was developed by Matt Thomas. Thanks to Joerg Sonnenberger,
 * Matt Thomas and Christos Zoulas.
 *
 * Thanks to Liverpool John Moores University, especially Dr. David
 * Llewellyn-Jones for providing resources (to test) and Professor
 * Madjid Merabti.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip6_flow.c,v 1.33 2016/12/08 05:16:34 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6_private.h>

/*
 * IPv6 Fast Forward caches/hashes flows from one source to destination.
 *
 * Upon a successful forward, IPv6FF caches and hashes details such as the
 * route, source and destination. Once another packet matching the source
 * and destination is received, it is forwarded straight to if_output
 * using the cached details.
 *
 * Example:
 * ether/fddi_input -> ip6flow_fastforward -> if_output
 */
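/*
 * Lifecycle overview (as implemented below): a flow is created by
 * ip6flow_create() once the normal stack has successfully forwarded a
 * packet, matched by ip6flow_fastforward() for subsequent packets,
 * aged by ip6flow_slowtimo(), and reclaimed by ip6flow_reap() or when
 * net.inet6.ip6.maxflows is exceeded.
 */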

static struct pool ip6flow_pool;

TAILQ_HEAD(ip6flowhead, ip6flow);

/*
 * We could use IPv4 defines (IPFLOW_HASHBITS) but we'll
 * use our own (possibly for future expansion).
 */
#define	IP6FLOW_TIMER		(5 * PR_SLOWHZ)
#define	IP6FLOW_DEFAULT_HASHSIZE	(1 << IP6FLOW_HASHBITS)

/*
 * ip6_flow.c internal lock.
 * Using softnet_lock here would cause a recursive lock.
 *
 * This is a tentative workaround.
 * We should make it scalable somehow in the future.
 */
static kmutex_t ip6flow_lock;
static struct ip6flowhead *ip6flowtable = NULL;
static struct ip6flowhead ip6flowlist;
static int ip6flow_inuse;

static void ip6flow_slowtimo_work(struct work *, void *);
static struct workqueue	*ip6flow_slowtimo_wq;
static struct work	ip6flow_slowtimo_wk;

static int sysctl_net_inet6_ip6_hashsize(SYSCTLFN_PROTO);
static int sysctl_net_inet6_ip6_maxflows(SYSCTLFN_PROTO);
static void ip6flow_sysctl_init(struct sysctllog **);

/*
 * Insert an ip6flow into the list.
 */
#define	IP6FLOW_INSERT(hashidx, ip6f) \
do { \
	(ip6f)->ip6f_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ip6flowtable[(hashidx)], (ip6f), ip6f_hash); \
	TAILQ_INSERT_HEAD(&ip6flowlist, (ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)

/*
 * Remove an ip6flow from the list.
 */
#define	IP6FLOW_REMOVE(hashidx, ip6f) \
do { \
	TAILQ_REMOVE(&ip6flowtable[(hashidx)], (ip6f), ip6f_hash); \
	TAILQ_REMOVE(&ip6flowlist, (ip6f), ip6f_list); \
} while (/*CONSTCOND*/ 0)
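
/*
 * Each ip6flow is linked on two lists at once: the per-bucket hash
 * chain (via ip6f_hash) used for lookups, and the global ip6flowlist
 * (via ip6f_list) walked by the timer and the reaper. The macros
 * above keep the two in sync; entries must always be inserted into
 * and removed from both together.
 */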

#ifndef IP6FLOW_DEFAULT
#define	IP6FLOW_DEFAULT		256
#endif

int ip6_maxflows = IP6FLOW_DEFAULT;
int ip6_hashsize = IP6FLOW_DEFAULT_HASHSIZE;

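/*
 * A rough sketch of the hashing scheme below: both 128-bit addresses
 * are folded into 32-bit sums, which are then mixed into the flow
 * word in IP6FLOW_HASHBITS-sized steps. Assuming IP6FLOW_HASHBITS is
 * 6 (the value is defined elsewhere; 6 is used here purely for
 * illustration), the loop takes idx = 0, 6, 12, 18, 24, 30 and
 * accumulates src_sum >> idx together with dst_sum >> (32 - idx).
 * Note that the idx == 0 step shifts a 32-bit value right by 32,
 * which is strictly undefined behaviour in C; on common targets the
 * hardware truncates the shift count and the term degenerates to
 * dst_sum itself.
 */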
/*
 * Calculate hash table position.
 */
static size_t
ip6flow_hash(const struct ip6_hdr *ip6)
{
	size_t hash;
	uint32_t dst_sum, src_sum;
	size_t idx;

	src_sum = ip6->ip6_src.s6_addr32[0] + ip6->ip6_src.s6_addr32[1]
	    + ip6->ip6_src.s6_addr32[2] + ip6->ip6_src.s6_addr32[3];
	dst_sum = ip6->ip6_dst.s6_addr32[0] + ip6->ip6_dst.s6_addr32[1]
	    + ip6->ip6_dst.s6_addr32[2] + ip6->ip6_dst.s6_addr32[3];

	hash = ip6->ip6_flow;

	for (idx = 0; idx < 32; idx += IP6FLOW_HASHBITS)
		hash += (dst_sum >> (32 - idx)) + (src_sum >> idx);

	return hash & (ip6_hashsize - 1);
}

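/*
 * A flow is keyed on the source address, the destination address and
 * ip6f_flow, which holds the first 32-bit word of the IPv6 header
 * (version, traffic class and flow label) verbatim as stored by
 * ip6flow_create().
 */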
/*
 * Check to see if a flow already exists - if so return it.
 */
static struct ip6flow *
ip6flow_lookup(const struct ip6_hdr *ip6)
{
	size_t hash;
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	hash = ip6flow_hash(ip6);

	TAILQ_FOREACH(ip6f, &ip6flowtable[hash], ip6f_hash) {
		if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6f->ip6f_dst)
		    && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6f->ip6f_src)
		    && ip6f->ip6f_flow == ip6->ip6_flow) {
			/* A cached flow has been found. */
			return ip6f;
		}
	}

	return NULL;
}

void
ip6flow_poolinit(void)
{

	pool_init(&ip6flow_pool, sizeof(struct ip6flow), 0, 0, 0, "ip6flowpl",
			NULL, IPL_NET);
}

/*
 * Allocate memory and initialise lists. This function is called
 * from ip6_init and thereafter to resize the hash table.
 * If a newly sized table cannot be malloc'ed we just continue
 * to use the old one.
 */
static int
ip6flow_init_locked(int table_size)
{
	struct ip6flowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ip6flow_lock));

	new_table = (struct ip6flowhead *)malloc(sizeof(struct ip6flowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ip6flowtable != NULL)
		free(ip6flowtable, M_RTABLE);

	ip6flowtable = new_table;
	ip6_hashsize = table_size;

	TAILQ_INIT(&ip6flowlist);
	for (i = 0; i < ip6_hashsize; i++)
		TAILQ_INIT(&ip6flowtable[i]);

	return 0;
}

int
ip6flow_init(int table_size)
{
	int ret, error;

	error = workqueue_create(&ip6flow_slowtimo_wq, "ip6flow_slowtimo",
	    ip6flow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ip6flow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ip6flow_lock);
	ret = ip6flow_init_locked(table_size);
	mutex_exit(&ip6flow_lock);
	ip6flow_sysctl_init(NULL);

	return ret;
}

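/*
 * ip6flow_fastforward() returns 0 (punting the packet back to the
 * normal input path) whenever any precondition fails: forwarding is
 * disabled or no flows are cached, the mbuf is too short or arrived
 * as a link-level broadcast/multicast, the header cannot be aligned,
 * the version is wrong, a hop-by-hop options header is present, no
 * matching flow is cached, the cached route or its interface is no
 * longer usable, the packet exceeds the interface MTU, or the hop
 * limit would expire.
 */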
/*
 * IPv6 Fast Forward routine. Attempt to forward the packet -
 * if any problems are found return to the main IPv6 input
 * routine to deal with it.
 */
int
ip6flow_fastforward(struct mbuf **mp)
{
	struct ip6flow *ip6f;
	struct ip6_hdr *ip6;
	struct rtentry *rt = NULL;
	struct mbuf *m;
	const struct sockaddr *dst;
	int error;
	int ret = 0;

	mutex_enter(&ip6flow_lock);

	/*
	 * Are we forwarding packets and have flows?
	 */
	if (!ip6_forwarding || ip6flow_inuse == 0)
		goto out;

	m = *mp;
	/*
	 * At least size of IPv6 Header?
	 */
	if (m->m_len < sizeof(struct ip6_hdr))
		goto out;
	/*
	 * Was packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

	if (IP6_HDR_ALIGNED_P(mtod(m, const void *)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
				(max_linkhdr + 3) & ~3)) == NULL) {
			goto out;
		}
		*mp = m;
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			goto out;
		}
		*mp = m;
	}

	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		/* Bad version. */
		goto out;
	}

	/*
	 * If we have a hop-by-hop extension we must process it.
	 * We just leave this up to ip6_input to deal with.
	 */
	if (ip6->ip6_nxt == IPPROTO_HOPOPTS)
		goto out;

	/*
	 * Attempt to find a flow.
	 */
	if ((ip6f = ip6flow_lookup(ip6)) == NULL) {
		/* No flow found. */
		goto out;
	}

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & RTF_BLACKHOLE) != 0)
		goto out_unref;

	/*
	 * Packet size greater than MTU?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu) {
		/* Return to main IPv6 input function. */
		goto out_unref;
	}

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	if (ip6->ip6_hlim <= IPV6_HLIMDEC)
		goto out_unref;

	/* Decrement hop limit (same as TTL) */
	ip6->ip6_hlim -= IPV6_HLIMDEC;

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ip6f->ip6f_ro);

	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	ip6f->ip6f_uses++;

#if 0
	/*
	 * We use FIFO cache replacement instead of LRU, the same as
	 * ip_flow.c.
	 */
	/*
	 * Move to head (LRU) of ip6flowlist; ip6flowtable does not
	 * maintain LRU order.
	 */
	TAILQ_REMOVE(&ip6flowlist, ip6f, ip6f_list);
	TAILQ_INSERT_HEAD(&ip6flowlist, ip6f, ip6f_list);
#endif

	/* Send on its way - straight to the interface output routine. */
	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		ip6f->ip6f_dropped++;
	} else {
		ip6f->ip6f_forwarded++;
	}
	ret = 1;
out_unref:
	rtcache_unref(rt, &ip6f->ip6f_ro);
out:
	mutex_exit(&ip6flow_lock);
	return ret;
}

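/*
 * Accounting note: flow counters are folded into the global IPv6
 * statistics when a flow is freed or reaped and on every slow-timo
 * tick. Dropped packets are charged to both IP6_STAT_CANTFORWARD and
 * IP6_STAT_ODROPPED; cached uses count toward IP6_STAT_TOTAL, and
 * successfully forwarded packets toward IP6_STAT_FORWARD and
 * IP6_STAT_FASTFORWARD.
 */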
/*
 * Add the IPv6 flow statistics to the main IPv6 statistics.
 */
static void
ip6flow_addstats_rt(struct rtentry *rt, struct ip6flow *ip6f)
{
	uint64_t *ip6s;

	if (rt != NULL)
		rt->rt_use += ip6f->ip6f_uses;
	ip6s = IP6_STAT_GETREF();
	ip6s[IP6_STAT_FASTFORWARDFLOWS] = ip6flow_inuse;
	ip6s[IP6_STAT_CANTFORWARD] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_ODROPPED] += ip6f->ip6f_dropped;
	ip6s[IP6_STAT_TOTAL] += ip6f->ip6f_uses;
	ip6s[IP6_STAT_FORWARD] += ip6f->ip6f_forwarded;
	ip6s[IP6_STAT_FASTFORWARD] += ip6f->ip6f_forwarded;
	IP6_STAT_PUTREF();
}

static void
ip6flow_addstats(struct ip6flow *ip6f)
{
	struct rtentry *rt;

	rt = rtcache_validate(&ip6f->ip6f_ro);
	ip6flow_addstats_rt(rt, ip6f);
	rtcache_unref(rt, &ip6f->ip6f_ro);
}

/*
 * Add statistics and free the flow.
 */
static void
ip6flow_free(struct ip6flow *ip6f)
{

	KASSERT(mutex_owned(&ip6flow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

	ip6flow_inuse--;
	ip6flow_addstats(ip6f);
	rtcache_free(&ip6f->ip6f_ro);
	pool_put(&ip6flow_pool, ip6f);
}

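/*
 * Reaping strategy: with just_one set (the fast path) the entry at
 * the tail of ip6flowlist is taken in FIFO order. In the full reap
 * (the sysctl path) entries whose cached route is no longer valid are
 * reclaimed first; failing that, the entry with the smallest timer,
 * with ties broken by the fewest uses over the recent intervals, is
 * evicted until ip6flow_inuse drops to ip6_maxflows.
 */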
static struct ip6flow *
ip6flow_reap_locked(int just_one)
{
	struct ip6flow *ip6f;

	KASSERT(mutex_owned(&ip6flow_lock));

	/*
	 * This case must remove one ip6flow. Furthermore, it is used in
	 * the fast path (packet processing path), so simply remove the
	 * TAILQ_LAST one.
	 */
	if (just_one) {
		ip6f = TAILQ_LAST(&ip6flowlist, ip6flowhead);
		KASSERT(ip6f != NULL);

		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		ip6flow_addstats(ip6f);
		rtcache_free(&ip6f->ip6f_ro);
		return ip6f;
	}

	/*
	 * This case is used in the slow path (sysctl).
	 * First remove any ip6flow whose rtcache is invalid, then remove
	 * the TAILQ_LAST ip6flow if it is known to be the least recently
	 * used by comparing last_uses.
	 */
	while (ip6flow_inuse > ip6_maxflows) {
		struct ip6flow *maybe_ip6f = TAILQ_LAST(&ip6flowlist, ip6flowhead);

		TAILQ_FOREACH(ip6f, &ip6flowlist, ip6f_list) {
			struct rtentry *rt;
			/*
			 * If this no longer points to a valid route -
			 * reclaim it.
			 */
			if ((rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL)
				goto done;
			rtcache_unref(rt, &ip6f->ip6f_ro);
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (ip6f->ip6f_timer < maybe_ip6f->ip6f_timer
			    || ((ip6f->ip6f_timer == maybe_ip6f->ip6f_timer)
				&& (ip6f->ip6f_last_uses + ip6f->ip6f_uses
				    < maybe_ip6f->ip6f_last_uses + maybe_ip6f->ip6f_uses)))
				maybe_ip6f = ip6f;
		}
		ip6f = maybe_ip6f;
	    done:
		/*
		 * Remove the entry from the flow table.
		 */
		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		rtcache_free(&ip6f->ip6f_ro);
		ip6flow_inuse--;
		ip6flow_addstats(ip6f);
		pool_put(&ip6flow_pool, ip6f);
	}
	return NULL;
}

/*
 * Reap one or more flows - ip6flow_reap may remove
 * multiple flows if net.inet6.ip6.maxflows is reduced.
 */
struct ip6flow *
ip6flow_reap(int just_one)
{
	struct ip6flow *ip6f;

	mutex_enter(&ip6flow_lock);
	ip6f = ip6flow_reap_locked(just_one);
	mutex_exit(&ip6flow_lock);
	return ip6f;
}

static unsigned int ip6flow_work_enqueued = 0;
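
/*
 * ip6flow_work_enqueued is a one-shot gate: ip6flow_slowtimo()
 * atomically swaps it to 1 and only enqueues the work if it was
 * previously 0, while ip6flow_slowtimo_work() swaps it back to 0 on
 * entry. At most one instance of the work is therefore queued at any
 * time.
 */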

void
ip6flow_slowtimo_work(struct work *wk, void *arg)
{
	struct ip6flow *ip6f, *next_ip6f;

	/* We can allow another work item to be enqueued at this point */
	atomic_swap_uint(&ip6flow_work_enqueued, 0);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ip6flow_lock);

	for (ip6f = TAILQ_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		struct rtentry *rt = NULL;
		next_ip6f = TAILQ_NEXT(ip6f, ip6f_list);
		if (PRT_SLOW_ISEXPIRED(ip6f->ip6f_timer) ||
		    (rt = rtcache_validate(&ip6f->ip6f_ro)) == NULL) {
			ip6flow_free(ip6f);
		} else {
			ip6f->ip6f_last_uses = ip6f->ip6f_uses;
			ip6flow_addstats_rt(rt, ip6f);
			ip6f->ip6f_uses = 0;
			ip6f->ip6f_dropped = 0;
			ip6f->ip6f_forwarded = 0;
		}
		rtcache_unref(rt, &ip6f->ip6f_ro);
	}

	mutex_exit(&ip6flow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif
}

void
ip6flow_slowtimo(void)
{

	/* Avoid enqueuing another work item when one is already enqueued */
	if (atomic_swap_uint(&ip6flow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ip6flow_slowtimo_wq, &ip6flow_slowtimo_wk, NULL);
}

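/*
 * A sketch of the expected call site (hypothetical, for illustration
 * only; the actual caller lives in the forwarding path): after
 * ip6_forward() has successfully transmitted a packet using route
 * *ro, it would call
 *
 *	ip6flow_create(ro, m);
 *
 * Note that ip6flow_create() reads the header via mtod(), so the
 * IPv6 header must be contiguous in the first mbuf.
 */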
/*
 * We have successfully forwarded a packet using the normal
 * IPv6 stack. Now create/update a flow.
 */
void
ip6flow_create(struct route *ro, struct mbuf *m)
{
	const struct ip6_hdr *ip6;
	struct ip6flow *ip6f;
	size_t hash;

	ip6 = mtod(m, const struct ip6_hdr *);

#ifndef NET_MPSAFE
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ip6flow_lock);

	/*
	 * If IPv6 Fast Forward is disabled, don't create a flow.
	 * It can be disabled by setting net.inet6.ip6.maxflows to 0.
	 *
	 * Don't create a flow for ICMPv6 messages.
	 */
	if (ip6_maxflows == 0 || ip6->ip6_nxt == IPPROTO_IPV6_ICMP)
		goto out;

	/*
	 * See if a flow already exists.  If so:
	 *	- Remove the flow
	 *	- Add flow statistics
	 *	- Free the route
	 *	- Reset statistics
	 *
	 * If a flow doesn't exist, allocate a new one provided
	 * ip6flow_inuse hasn't reached ip6_maxflows. If it has
	 * been reached, reap a flow.
	 */
	ip6f = ip6flow_lookup(ip6);
	if (ip6f == NULL) {
		if (ip6flow_inuse >= ip6_maxflows) {
			ip6f = ip6flow_reap_locked(1);
		} else {
			ip6f = pool_get(&ip6flow_pool, PR_NOWAIT);
			if (ip6f == NULL)
				goto out;
			ip6flow_inuse++;
		}
		memset(ip6f, 0, sizeof(*ip6f));
	} else {
		IP6FLOW_REMOVE(ip6f->ip6f_hashidx, ip6f);

		ip6flow_addstats(ip6f);
		rtcache_free(&ip6f->ip6f_ro);
		ip6f->ip6f_uses = 0;
		ip6f->ip6f_last_uses = 0;
		ip6f->ip6f_dropped = 0;
		ip6f->ip6f_forwarded = 0;
	}

	/*
	 * Fill in the updated/new details.
	 */
	rtcache_copy(&ip6f->ip6f_ro, ro);
	ip6f->ip6f_dst = ip6->ip6_dst;
	ip6f->ip6f_src = ip6->ip6_src;
	ip6f->ip6f_flow = ip6->ip6_flow;
	PRT_SLOW_ARM(ip6f->ip6f_timer, IP6FLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ip6flow_hash(ip6);
	IP6FLOW_INSERT(hash, ip6f);

 out:
	mutex_exit(&ip6flow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
#endif
}

/*
 * Invalidate/remove all flows - if new_size is positive we
 * resize the hash table.
 */
int
ip6flow_invalidate_all(int new_size)
{
	struct ip6flow *ip6f, *next_ip6f;
	int error;

	error = 0;

	mutex_enter(&ip6flow_lock);

	for (ip6f = TAILQ_FIRST(&ip6flowlist); ip6f != NULL; ip6f = next_ip6f) {
		next_ip6f = TAILQ_NEXT(ip6f, ip6f_list);
		ip6flow_free(ip6f);
	}

	if (new_size)
		error = ip6flow_init_locked(new_size);

	mutex_exit(&ip6flow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet6.ip6.maxflows. Since
 * the value may have been reduced, call ip6flow_reap().
 */
static int
sysctl_net_inet6_ip6_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif

	ip6flow_reap(0);

#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif

	return (0);
}

static int
sysctl_net_inet6_ip6_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip6_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc().
		 */
#ifndef NET_MPSAFE
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);
#endif
		error = ip6flow_invalidate_all(tmp);
#ifndef NET_MPSAFE
		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);
#endif
	} else {
		/*
		 * EINVAL if not a power of 2.
		 */
		error = EINVAL;
	}

	return error;
}

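/*
 * The handlers above back the net.inet6.ip6.maxflows and
 * net.inet6.ip6.hashsize nodes created below. Example usage (a
 * sketch; the numeric node ids are assigned dynamically via
 * CTL_CREATE):
 *
 *	sysctl -w net.inet6.ip6.maxflows=1024
 *	sysctl -w net.inet6.ip6.hashsize=128	(must be a nonzero power of 2)
 *
 * Setting maxflows to 0 disables the creation of new flows entirely;
 * changing hashsize invalidates all currently cached flows.
 */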
static void
ip6flow_sysctl_init(struct sysctllog **clog)
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet6",
		       SYSCTL_DESCR("PF_INET6 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET6, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ip6",
		       SYSCTL_DESCR("IPv6 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET6, IPPROTO_IPV6, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			CTLTYPE_INT, "maxflows",
			SYSCTL_DESCR("Number of flows for fast forwarding (IPv6)"),
			sysctl_net_inet6_ip6_maxflows, 0, &ip6_maxflows, 0,
			CTL_NET, PF_INET6, IPPROTO_IPV6,
			CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			CTLTYPE_INT, "hashsize",
			SYSCTL_DESCR("Size of hash table for fast forwarding (IPv6)"),
			sysctl_net_inet6_ip6_hashsize, 0, &ip6_hashsize, 0,
			CTL_NET, PF_INET6, IPPROTO_IPV6,
			CTL_CREATE, CTL_EOL);
}