/*	$NetBSD: ip_flow.c,v 1.78 2016/12/08 05:16:33 ozaki-r Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.78 2016/12/08 05:16:33 ozaki-r Exp $");
34 
35 #ifdef _KERNEL_OPT
36 #include "opt_net_mpsafe.h"
37 #endif
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/domain.h>
44 #include <sys/protosw.h>
45 #include <sys/socket.h>
46 #include <sys/socketvar.h>
47 #include <sys/errno.h>
48 #include <sys/time.h>
49 #include <sys/kernel.h>
50 #include <sys/pool.h>
51 #include <sys/sysctl.h>
52 #include <sys/workqueue.h>
53 #include <sys/atomic.h>
54 
55 #include <net/if.h>
56 #include <net/if_dl.h>
57 #include <net/route.h>
58 #include <net/pfil.h>
59 
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/ip.h>
63 #include <netinet/in_pcb.h>
64 #include <netinet/in_var.h>
65 #include <netinet/ip_var.h>
66 #include <netinet/ip_private.h>
67 
68 /*
69  * Similar code is very well commented in netinet6/ip6_flow.c
70  */
71 
72 #define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */
73 
74 static struct pool ipflow_pool;
75 
76 TAILQ_HEAD(ipflowhead, ipflow);
77 
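/*
 * Flow entries time out after five seconds of disuse: PR_SLOWHZ is
 * the number of slow-timeout ticks per second.
 */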
#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)

/*
 * ip_flow.c internal lock.
 * Using softnet_lock here would cause a recursive lock.
 *
 * This is a tentative workaround.
 * We should make it scalable somehow in the future.
 */
static kmutex_t ipflow_lock;
static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

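/*
 * Each active flow is linked twice: into its bucket in ipflowtable
 * (via ipf_hash) for lookup, and into the global ipflowlist (via
 * ipf_list), which the reaper and the slow timeout walk.
 */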
#define	IPFLOW_INSERT(hashidx, ipf) \
do { \
	(ipf)->ipf_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(hashidx, ipf) \
do { \
	TAILQ_REMOVE(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_REMOVE(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static struct ipflow *ipflow_reap(bool);
static void ipflow_sysctl_init(struct sysctllog **);

static void ipflow_slowtimo_work(struct work *, void *);
static struct workqueue	*ipflow_slowtimo_wq;
static struct work	ipflow_slowtimo_wk;

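/*
 * Fold the TOS byte and the source/destination addresses into a bucket
 * index.  Both addresses are folded by right-shifting in
 * IPFLOW_HASHBITS-sized steps; because IPFLOW_HASHBITS is deliberately
 * not a multiple of 8, the partial sums straddle octet boundaries, so
 * byte-aligned address patterns still spread across buckets.
 */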
static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize - 1);
}

static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	hash = ipflow_hash(ip);

	TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

static int
ipflow_reinit(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ipflow_lock));

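	/*
	 * Note that this replaces the bucket array and resets the list
	 * heads without freeing individual flows; callers are expected
	 * to have emptied the table first, as ipflow_invalidate_all()
	 * does.
	 */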
	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	TAILQ_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		TAILQ_INIT(&ipflowtable[i]);

	return 0;
}

void
ipflow_init(void)
{
	int error;

	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ipflow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ipflow_lock);
	(void)ipflow_reinit(ip_hashsize);
	mutex_exit(&ipflow_lock);
	ipflow_sysctl_init(NULL);
}

int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt = NULL;
	const struct sockaddr *dst;
	int error;
	int iplen;
	struct ifnet *ifp;
	int s;
	int ret = 0;

	mutex_enter(&ipflow_lock);
	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		goto out;

	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

	/*
	 * IP header with no options and valid version and length
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		goto out;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		goto out;

	ifp = m_get_rcvif(m, &s);
	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
		((ifp->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		m_put_rcvif(ifp, &s);
		goto out_unref;

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0) {
			m_put_rcvif(ifp, &s);
			goto out_unref;
		}
		break;
	}
	m_put_rcvif(ifp, &s);

	/*
	 * Route and interface still up?
	 */
	rt = rtcache_validate(&ipf->ipf_ro);
	if (rt == NULL || (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & (RTF_BLACKHOLE | RTF_BROADCAST)) != 0)
		goto out_unref;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		goto out_unref;
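
	/*
	 * Oversized or expiring packets are left to the normal
	 * ip_forward() path, which can fragment the packet or generate
	 * the appropriate ICMP error; this fast path cannot.
	 */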

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out, so we can forward this packet.
	 * Modify the TTL and incrementally update the checksum.
	 *
	 * This method of updating the checksum works regardless of CPU
	 * endianness.  If htons() is inlined, all the arithmetic is
	 * folded; otherwise the htons()s are combined by CSE due to the
	 * const attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);
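
	/*
	 * Illustration (with x = htons(IPTTLDEC << 8), the amount by
	 * which the TTL/protocol word decreases): the RFC 1141-style
	 * update adds x to ip_sum in one's complement arithmetic.  If
	 * ip_sum + x would carry out of 16 bits (i.e. ip_sum >= ~x), the
	 * end-around carry is folded back in, since
	 * ip_sum + x - 0xFFFF == ip_sum - ~x.  For example, taking
	 * illustrative values ip_sum = 0xFFF0 and x = 0x0100: ~x is
	 * 0xFEFF, so ip_sum becomes 0xFFF0 - 0xFEFF = 0x00F1, the same
	 * as 0xFFF0 + 0x0100 with the carry wrapped around.
	 */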

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;

#if 0
	/*
	 * Sorting the list is too heavy for the fast path (the packet
	 * processing path): it costs about 10% in performance.  So we do
	 * not keep the list in LRU order and use FIFO cache replacement
	 * instead of LRU.
	 */
	/* Move to the head of ipflowlist (LRU); ipflowtable does not
	 * maintain LRU order. */
	TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
	TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
#endif

	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	ret = 1;
out_unref:
	rtcache_unref(rt, &ipf->ipf_ro);
out:
	mutex_exit(&ipflow_lock);
	return ret;
}

static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	rt = rtcache_validate(&ipf->ipf_ro);
	if (rt != NULL) {
		rt->rt_use += ipf->ipf_uses;
		rtcache_unref(rt, &ipf->ipf_ro);
	}

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	pool_put(&ipflow_pool, ipf);
}

static struct ipflow *
ipflow_reap(bool just_one)
{
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * This case must remove exactly one ipflow, and it is used in
	 * the fast path (the packet processing path), so simply remove
	 * the TAILQ_LAST entry.
	 */
	if (just_one) {
		ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
		KASSERT(ipf != NULL);

		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		return ipf;
	}

	/*
	 * This case is used in the slow path (sysctl).  First reclaim
	 * any ipflow whose rtcache is no longer valid; failing that,
	 * remove the TAILQ_LAST ipflow, unless comparing timers and
	 * last_uses shows another entry to be less recently used.
	 */
	while (ipflow_inuse > ip_maxflows) {
		struct ipflow *maybe_ipf = TAILQ_LAST(&ipflowlist, ipflowhead);

		TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
			struct rtentry *rt;
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			rt = rtcache_validate(&ipf->ipf_ro);
			if (rt == NULL)
				goto done;
			rtcache_unref(rt, &ipf->ipf_ro);
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (ipf->ipf_timer < maybe_ipf->ipf_timer
			    || ((ipf->ipf_timer == maybe_ipf->ipf_timer)
				&& (ipf->ipf_last_uses + ipf->ipf_uses
				    < maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses)))
				maybe_ipf = ipf;
		}
		ipf = maybe_ipf;
	    done:
		/*
		 * Remove the entry from the flow table.
		 */
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

static unsigned int ipflow_work_enqueued = 0;

static void
ipflow_slowtimo_work(struct work *wk, void *arg)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	/* We can allow enqueuing another work item at this point */
	atomic_swap_uint(&ipflow_work_enqueued, 0);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);
	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			rtcache_unref(rt, &ipf->ipf_ro);
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif
}

void
ipflow_slowtimo(void)
{

	/* Avoid enqueuing another work item when one is already pending */
	if (atomic_swap_uint(&ipflow_work_enqueued, 1) == 1)
		return;
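
	/*
	 * The worker swaps the flag back to zero before it begins its
	 * scan, so a tick that fires while a scan is in progress
	 * enqueues a fresh pass rather than being lost.
	 */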
	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
}

void
ipflow_create(struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;

#ifndef NET_MPSAFE
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		goto out;

	/*
	 * See if an existing flow struct exists.  If so, remove it from
	 * its list and free the old route.  If not, try to allocate a
	 * new one (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			if (ipf == NULL)
				goto out;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	IPFLOW_INSERT(hash, ipf);

 out:
	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
#endif
}

int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int error;

	error = 0;

	mutex_enter(&ipflow_lock);

	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_reinit(new_size);

	mutex_exit(&ipflow_lock);

	return error;
}

/*
 * sysctl helper routine for net.inet.ip.maxflows.
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

#ifndef NET_MPSAFE
	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
#endif
	mutex_enter(&ipflow_lock);

	ipflow_reap(false);

	mutex_exit(&ipflow_lock);
#ifndef NET_MPSAFE
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
#endif

	return (0);
}

static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

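	/*
	 * A power of two has exactly one bit set, so tmp & (tmp - 1) is
	 * zero precisely for powers of two (and for zero itself, which
	 * the second test rejects).  E.g. 64 & 63 == 0, but 48 & 47 == 32.
	 */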
	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc()
		 */
#ifndef NET_MPSAFE
		mutex_enter(softnet_lock);
		KERNEL_LOCK(1, NULL);
#endif
		error = ipflow_invalidate_all(tmp);
#ifndef NET_MPSAFE
		KERNEL_UNLOCK_ONE(NULL);
		mutex_exit(softnet_lock);
#endif
	} else {
		/*
		 * EINVAL if not a power of 2
		 */
		error = EINVAL;
	}

	return error;
}

static void
ipflow_sysctl_init(struct sysctllog **clog)
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet",
		       SYSCTL_DESCR("PF_INET related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ip",
		       SYSCTL_DESCR("IPv4 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxflows",
		       SYSCTL_DESCR("Number of flows for fast forwarding"),
		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFLOWS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "hashsize",
		       SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
		       sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       CTL_CREATE, CTL_EOL);
}