/*	$NetBSD: ip_flow.c,v 1.86 2024/06/29 12:59:08 riastradh Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.86 2024/06/29 12:59:08 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */

#define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */

static struct pool ipflow_pool;

TAILQ_HEAD(ipflowhead, ipflow);

#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)

/*
 * ip_flow.c internal lock.
 * Using softnet_lock here would cause recursive locking.
 *
 * This is a tentative workaround.
 * We should make it scalable somehow in the future.
 */
static kmutex_t ipflow_lock;
static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define	IPFLOW_INSERT(hashidx, ipf) \
do { \
	(ipf)->ipf_hashidx = (hashidx); \
	TAILQ_INSERT_HEAD(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(hashidx, ipf) \
do { \
	TAILQ_REMOVE(&ipflowtable[(hashidx)], (ipf), ipf_hash); \
	TAILQ_REMOVE(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)
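
/*
 * Each flow is linked on two lists at once: the per-bucket hash chain
 * (ipflowtable[hashidx], via ipf_hash), used by ipflow_lookup(), and the
 * global ipflowlist (via ipf_list), walked by the timeout and reap code.
 * The macros above keep the two memberships in sync: a flow is always
 * inserted into, and removed from, both lists together.
 */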

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
static int ip_maxflows = IPFLOW_MAX;
static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

static struct ipflow *ipflow_reap(bool);
static void ipflow_sysctl_init(struct sysctllog **);

static void ipflow_slowtimo_work(struct work *, void *);
static struct workqueue	*ipflow_slowtimo_wq;
static struct work	ipflow_slowtimo_wk;

static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize-1);
}
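
/*
 * Example: with IPFLOW_HASHBITS == 6 the loop above visits idx = 0, 6,
 * 12, 18, 24 and 30, folding the source address right-shifted by idx and
 * the destination address right-shifted by (32 - idx) into the sum, so
 * every address bit can influence the low-order bits kept by the final
 * (ip_hashsize - 1) mask.  Presumably IPFLOW_HASHBITS must not be a
 * multiple of 8 so the folding stride never lines up with the byte
 * boundaries of the addresses.
 */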

static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	hash = ipflow_hash(ip);

	TAILQ_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

static int
ipflow_reinit(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	KASSERT(mutex_owned(&ipflow_lock));

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	TAILQ_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		TAILQ_INIT(&ipflowtable[i]);

	return 0;
}
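
/*
 * Note that ipflow_reinit() installs a fresh, empty table and resets
 * ipflowlist without migrating or freeing existing entries, so callers
 * that may hold live flows must free them first; ipflow_invalidate_all()
 * does exactly that, and ipflow_init() calls ipflow_reinit() before any
 * flow exists.
 */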

void
ipflow_init(void)
{
	int error;

	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
	if (error != 0)
		panic("%s: workqueue_create failed (%d)\n", __func__, error);

	mutex_init(&ipflow_lock, MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(&ipflow_lock);
	(void)ipflow_reinit(ip_hashsize);
	mutex_exit(&ipflow_lock);
	ipflow_sysctl_init(NULL);
}

int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt = NULL;
	const struct sockaddr *dst;
	int error;
	int iplen;
	struct ifnet *ifp;
	int s;
	int ret = 0;

	mutex_enter(&ipflow_lock);
	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		goto out;

	/*
	 * Was the packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		goto out;

	/*
	 * IP header with no options and valid version and length
	 */
	ip = mtod(m, struct ip *);
	if (!ACCESSIBLE_POINTER(ip, struct ip)) {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		goto out;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		goto out;

	ifp = m_get_rcvif(m, &s);
	if (__predict_false(ifp == NULL))
		goto out_unref;
	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
		((ifp->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		m_put_rcvif(ifp, &s);
		goto out_unref;

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0) {
			m_put_rcvif(ifp, &s);
			goto out_unref;
		}
		break;
	}
	m_put_rcvif(ifp, &s);

	/*
	 * Route and interface still up?
	 */
	rt = rtcache_validate(&ipf->ipf_ro);
	if (rt == NULL || (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
	    (rt->rt_flags & (RTF_BLACKHOLE | RTF_BROADCAST)) != 0)
		goto out_unref;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		goto out_unref;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of updating the checksum works on a CPU of either
	 * endianness.  If htons() is inlined, all the arithmetic is
	 * folded; otherwise the htons()s are combined by CSE due to the
	 * const attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);
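
	/*
	 * Worked example, assuming IPTTLDEC == 1 (its usual value): the
	 * TTL sits in the high byte of a big-endian 16-bit word, so the
	 * decrement lowers that word by 0x0100 and the one's-complement
	 * checksum must grow by htons(0x0100).  Subtracting the
	 * complement, as above, equals adding htons(0x0100) plus one
	 * modulo 2^16 -- the addition with its end-around carry folded
	 * back in -- and is taken when a plain add would wrap past
	 * 0xffff.
	 */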

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (!ACCESSIBLE_POINTER(mtod(m, void *), struct ip))
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;

#if 0
	/*
	 * Sorting the list is too heavy for the fast path (packet
	 * processing path); it degrades performance by about 10%.  So we
	 * do not sort ipflowtable, and use FIFO cache replacement instead
	 * of LRU.
	 */
	/* Move to head (LRU) of ipflowlist.  ipflowtable does not care about LRU. */
	TAILQ_REMOVE(&ipflowlist, ipf, ipf_list);
	TAILQ_INSERT_HEAD(&ipflowlist, ipf, ipf_list);
#endif

	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	ret = 1;
out_unref:
	rtcache_unref(rt, &ipf->ipf_ro);
out:
	mutex_exit(&ipflow_lock);
	return ret;
}
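
/*
 * Note that ipflow_fastforward() returns nonzero whenever it takes
 * ownership of the packet, even if if_output_lock() fails (the failure
 * is only accounted in ipf_dropped/ipf_errors); a zero return means the
 * packet was not touched and the caller must continue with the normal
 * input path.
 */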

static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	net_stat_ref_t ips;

	rt = rtcache_validate(&ipf->ipf_ro);
	if (rt != NULL) {
		rt->rt_use += ipf->ipf_uses;
		rtcache_unref(rt, &ipf->ipf_ro);
	}

	ips = IP_STAT_GETREF();
	_NET_STATADD_REF(ips, IP_STAT_CANTFORWARD,
	    ipf->ipf_errors + ipf->ipf_dropped);
	_NET_STATADD_REF(ips, IP_STAT_TOTAL, ipf->ipf_uses);
	_NET_STATADD_REF(ips, IP_STAT_FORWARD, ipf->ipf_uses);
	_NET_STATADD_REF(ips, IP_STAT_FASTFORWARD, ipf->ipf_uses);
	IP_STAT_PUTREF();
}

static void
ipflow_free(struct ipflow *ipf)
{

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	pool_put(&ipflow_pool, ipf);
}

static struct ipflow *
ipflow_reap(bool just_one)
{
	struct ipflow *ipf;

	KASSERT(mutex_owned(&ipflow_lock));

	/*
	 * In this case we must remove exactly one ipflow, and this path
	 * is used in the fast path (packet processing path), so simply
	 * remove the TAILQ_LAST entry.
	 */
	if (just_one) {
		ipf = TAILQ_LAST(&ipflowlist, ipflowhead);
		KASSERT(ipf != NULL);

		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		return ipf;
	}

	/*
	 * This case is used in the slow path (sysctl).
	 * First reclaim any ipflow whose rtcache is no longer valid;
	 * otherwise remove the TAILQ_LAST ipflow, provided comparing
	 * timers and last_uses confirms it is least recently used.
	 */
	while (ipflow_inuse > ip_maxflows) {
		struct ipflow *maybe_ipf = TAILQ_LAST(&ipflowlist, ipflowhead);

		TAILQ_FOREACH(ipf, &ipflowlist, ipf_list) {
			struct rtentry *rt;
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			rt = rtcache_validate(&ipf->ipf_ro);
			if (rt == NULL)
				goto done;
			rtcache_unref(rt, &ipf->ipf_ro);
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (ipf->ipf_timer < maybe_ipf->ipf_timer
			    || ((ipf->ipf_timer == maybe_ipf->ipf_timer)
				&& (ipf->ipf_last_uses + ipf->ipf_uses
				    < maybe_ipf->ipf_last_uses + maybe_ipf->ipf_uses)))
				maybe_ipf = ipf;
		}
		ipf = maybe_ipf;
	    done:
		/*
		 * Remove the entry from the flow table.
		 */
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}
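
/*
 * In the just_one case the evicted flow is returned still allocated,
 * with its statistics flushed and its cached route freed, so that
 * ipflow_create() can reuse the structure directly instead of going
 * back to the pool.  In the bulk case every reaped flow is returned to
 * the pool and NULL is returned.
 */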

static unsigned int ipflow_work_enqueued = 0;

static void
ipflow_slowtimo_work(struct work *wk, void *arg)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	net_stat_ref_t ips;

	/* We can allow enqueuing another work at this point */
	atomic_swap_uint(&ipflow_work_enqueued, 0);

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
	mutex_enter(&ipflow_lock);
	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			rtcache_unref(rt, &ipf->ipf_ro);
			ips = IP_STAT_GETREF();
			_NET_STATADD_REF(ips, IP_STAT_TOTAL, ipf->ipf_uses);
			_NET_STATADD_REF(ips, IP_STAT_FORWARD, ipf->ipf_uses);
			_NET_STATADD_REF(ips, IP_STAT_FASTFORWARD,
			    ipf->ipf_uses);
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	mutex_exit(&ipflow_lock);
	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

void
ipflow_slowtimo(void)
{

	/* Avoid enqueuing another work when one is already enqueued */
	if (atomic_swap_uint(&ipflow_work_enqueued, 1) == 1)
		return;

	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
}
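
/*
 * The ipflow_work_enqueued flag works because atomic_swap_uint() returns
 * the previous value: the first caller of ipflow_slowtimo() swaps 0 -> 1
 * and enqueues the work, while any concurrent caller sees 1 and backs
 * off.  The worker clears the flag again before taking ipflow_lock, so a
 * tick arriving during the sweep enqueues a fresh pass rather than being
 * lost.
 */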

void
ipflow_create(struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;

	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	mutex_enter(&ipflow_lock);

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		goto out;

	/*
	 * See if an existing flow struct exists.  If so, remove it from
	 * its list and free the old route.  If not, try to allocate a
	 * new one (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			if (ipf == NULL)
				goto out;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		IPFLOW_REMOVE(ipf->ipf_hashidx, ipf);

		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	IPFLOW_INSERT(hash, ipf);

 out:
	mutex_exit(&ipflow_lock);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
}

int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int error;

	error = 0;

	mutex_enter(&ipflow_lock);

	for (ipf = TAILQ_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = TAILQ_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_reinit(new_size);

	mutex_exit(&ipflow_lock);

	return error;
}
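
/*
 * Passing new_size == 0 simply flushes every flow without touching the
 * hash table; a nonzero new_size additionally reallocates the table, as
 * the hashsize sysctl handler below does after its power-of-two check.
 */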

/*
 * sysctl helper routine for net.inet.ip.maxflows.
 */
static int
sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
{
	int error;

	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
	if (error || newp == NULL)
		return (error);

	SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
	mutex_enter(&ipflow_lock);

	ipflow_reap(false);

	mutex_exit(&ipflow_lock);
	SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

	return (0);
}

static int
sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
{
	int error, tmp;
	struct sysctlnode node;

	node = *rnode;
	tmp = ip_hashsize;
	node.sysctl_data = &tmp;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
		/*
		 * Can only fail due to malloc()
		 */
		SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
		error = ipflow_invalidate_all(tmp);
		SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
	} else {
		/*
		 * EINVAL if not a power of 2
		 */
		error = EINVAL;
	}

	return error;
}

static void
ipflow_sysctl_init(struct sysctllog **clog)
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet",
		       SYSCTL_DESCR("PF_INET related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ip",
		       SYSCTL_DESCR("IPv4 related settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxflows",
		       SYSCTL_DESCR("Number of flows for fast forwarding"),
		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       IPCTL_MAXFLOWS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "hashsize",
		       SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
		       sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
		       CTL_NET, PF_INET, IPPROTO_IP,
		       CTL_CREATE, CTL_EOL);
}