xref: /netbsd-src/sys/netinet/ip_flow.c (revision fdd524d4ccd2bb0c6f67401e938dabf773eb0372)
1 /*	$NetBSD: ip_flow.c,v 1.73 2016/07/11 07:37:00 ozaki-r Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.73 2016/07/11 07:37:00 ozaki-r Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/mbuf.h>
39 #include <sys/domain.h>
40 #include <sys/protosw.h>
41 #include <sys/socket.h>
42 #include <sys/socketvar.h>
43 #include <sys/errno.h>
44 #include <sys/time.h>
45 #include <sys/kernel.h>
46 #include <sys/pool.h>
47 #include <sys/sysctl.h>
48 #include <sys/workqueue.h>
49 
50 #include <net/if.h>
51 #include <net/if_dl.h>
52 #include <net/route.h>
53 #include <net/pfil.h>
54 
55 #include <netinet/in.h>
56 #include <netinet/in_systm.h>
57 #include <netinet/ip.h>
58 #include <netinet/in_pcb.h>
59 #include <netinet/in_var.h>
60 #include <netinet/ip_var.h>
61 #include <netinet/ip_private.h>
62 
63 /*
64  * Similar code is very well commented in netinet6/ip6_flow.c
65  */
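/*
 * Rough overview: ipflow_create() is meant to be called from the
 * forwarding path to record the (src, dst, tos) triple and cached route
 * of a packet that has just been forwarded; ipflow_fastforward() is
 * intended to be called early in the IP input path and, on a cache hit,
 * forwards the packet directly from the cached flow, bypassing the full
 * ip_input()/ip_forward() processing; ipflow_slowtimo() is driven by the
 * protocol slow timeout and expires idle flows.
 */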
66 
67 #define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */
68 
69 static struct pool ipflow_pool;
70 
71 LIST_HEAD(ipflowhead, ipflow);
72 
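/*
 * Lifetime of an idle flow entry, in units of the protocol slow timeout
 * (PR_SLOWHZ ticks per second), i.e. roughly five seconds.
 */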
73 #define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
74 #define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)
75 
76 /*
77  * ip_flow.c internal lock.
78  * If we used softnet_lock, it would cause a recursive lock.
79  *
80  * This is a tentative workaround.
81  * We should make it scalable somehow in the future.
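 *
 * Lock order when both locks are held (see ipflow_slowtimo_work() and
 * the sysctl handlers): softnet_lock is taken before ipflow_lock.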
82  */
83 static kmutex_t ipflow_lock;
84 static struct ipflowhead *ipflowtable = NULL;
85 static struct ipflowhead ipflowlist;
86 static int ipflow_inuse;
87 
88 #define	IPFLOW_INSERT(bucket, ipf) \
89 do { \
90 	LIST_INSERT_HEAD((bucket), (ipf), ipf_hash); \
91 	LIST_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
92 } while (/*CONSTCOND*/ 0)
93 
94 #define	IPFLOW_REMOVE(ipf) \
95 do { \
96 	LIST_REMOVE((ipf), ipf_hash); \
97 	LIST_REMOVE((ipf), ipf_list); \
98 } while (/*CONSTCOND*/ 0)
99 
100 #ifndef IPFLOW_MAX
101 #define	IPFLOW_MAX		256
102 #endif
103 static int ip_maxflows = IPFLOW_MAX;
104 static int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;
105 
106 static struct ipflow *ipflow_reap(bool);
107 static void ipflow_sysctl_init(struct sysctllog **);
108 
109 static void ipflow_slowtimo_work(struct work *, void *);
110 static struct workqueue	*ipflow_slowtimo_wq;
111 static struct work	ipflow_slowtimo_wk;
112 
113 static size_t
114 ipflow_hash(const struct ip *ip)
115 {
116 	size_t hash = ip->ip_tos;
117 	size_t idx;
118 
119 	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
120 		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
121 		    (ip->ip_src.s_addr >> idx);
122 	}
123 
124 	return hash & (ip_hashsize-1);
125 }
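/*
 * Informal note on ipflow_hash() above: the loop folds the source address
 * into the sum at shifts of 0, 6, 12, 18, 24 and 30 bits and the
 * destination address at the complementary (32 - idx) shifts.  Because
 * IPFLOW_HASHBITS is not a multiple of 8, the folds do not all line up on
 * octet boundaries, so a change in any single octet of either address
 * perturbs the low-order bits that select the bucket.  (Strictly speaking,
 * the idx == 0 iteration shifts the 32-bit destination address right by
 * 32, which C leaves undefined.)
 */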
126 
127 static struct ipflow *
128 ipflow_lookup(const struct ip *ip)
129 {
130 	size_t hash;
131 	struct ipflow *ipf;
132 
133 	KASSERT(mutex_owned(&ipflow_lock));
134 
135 	hash = ipflow_hash(ip);
136 
137 	LIST_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
138 		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
139 		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
140 		    && ip->ip_tos == ipf->ipf_tos)
141 			break;
142 	}
143 	return ipf;
144 }
145 
146 void
147 ipflow_poolinit(void)
148 {
149 
150 	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
151 	    NULL, IPL_NET);
152 }
153 
154 static int
155 ipflow_reinit(int table_size)
156 {
157 	struct ipflowhead *new_table;
158 	size_t i;
159 
160 	KASSERT(mutex_owned(&ipflow_lock));
161 
162 	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
163 	    table_size, M_RTABLE, M_NOWAIT);
164 
165 	if (new_table == NULL)
166 		return 1;
167 
168 	if (ipflowtable != NULL)
169 		free(ipflowtable, M_RTABLE);
170 
171 	ipflowtable = new_table;
172 	ip_hashsize = table_size;
173 
174 	LIST_INIT(&ipflowlist);
175 	for (i = 0; i < ip_hashsize; i++)
176 		LIST_INIT(&ipflowtable[i]);
177 
178 	return 0;
179 }
180 
181 void
182 ipflow_init(void)
183 {
184 	int error;
185 
186 	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
187 	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
188 	if (error != 0)
189 		panic("%s: workqueue_create failed (%d)\n", __func__, error);
190 
191 	mutex_init(&ipflow_lock, MUTEX_DEFAULT, IPL_NONE);
192 
193 	mutex_enter(&ipflow_lock);
194 	(void)ipflow_reinit(ip_hashsize);
195 	mutex_exit(&ipflow_lock);
196 	ipflow_sysctl_init(NULL);
197 }
198 
199 int
200 ipflow_fastforward(struct mbuf *m)
201 {
202 	struct ip *ip;
203 	struct ip ip_store;
204 	struct ipflow *ipf;
205 	struct rtentry *rt;
206 	const struct sockaddr *dst;
207 	int error;
208 	int iplen;
209 	struct ifnet *ifp;
210 	int s;
211 	int ret = 0;
212 
213 	mutex_enter(&ipflow_lock);
214 	/*
215 	 * Are we forwarding packets?  Big enough for an IP header?
216 	 */
217 	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
218 		goto out;
219 
220 	/*
221 	 * Was the packet received as a link-level multicast or broadcast?
222 	 * If so, don't try to fast-forward it.
223 	 */
224 	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
225 		goto out;
226 
227 	/*
228 	 * IP header with no options and a valid version and length
229 	 */
230 	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
231 		ip = mtod(m, struct ip *);
232 	else {
233 		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
234 		ip = &ip_store;
235 	}
236 	iplen = ntohs(ip->ip_len);
237 	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
238 	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
239 		goto out;
240 	/*
241 	 * Find a flow.
242 	 */
243 	if ((ipf = ipflow_lookup(ip)) == NULL)
244 		goto out;
245 
246 	ifp = m_get_rcvif(m, &s);
247 	/*
248 	 * Verify the IP header checksum.
249 	 */
250 	switch (m->m_pkthdr.csum_flags &
251 		((ifp->if_csum_flags_rx & M_CSUM_IPv4) |
252 		 M_CSUM_IPv4_BAD)) {
253 	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
254 		m_put_rcvif(ifp, &s);
255 		goto out;
256 
257 	case M_CSUM_IPv4:
258 		/* Checksum was okay. */
259 		break;
260 
261 	default:
262 		/* Must compute it ourselves. */
263 		if (in_cksum(m, sizeof(struct ip)) != 0) {
264 			m_put_rcvif(ifp, &s);
265 			goto out;
266 		}
267 		break;
268 	}
269 	m_put_rcvif(ifp, &s);
270 
271 	/*
272 	 * Route and interface still up?
273 	 */
274 	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
275 	    (rt->rt_ifp->if_flags & IFF_UP) == 0 ||
276 	    (rt->rt_flags & (RTF_BLACKHOLE | RTF_BROADCAST)) != 0)
277 		goto out;
278 
279 	/*
280 	 * Packet size OK?  TTL?
281 	 */
282 	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
283 		goto out;
284 
285 	/*
286 	 * Clear any in-bound checksum flags for this packet.
287 	 */
288 	m->m_pkthdr.csum_flags = 0;
289 
290 	/*
291 	 * Everything checks out and so we can forward this packet.
292 	 * Modify the TTL and incrementally change the checksum.
293 	 *
294 	 * This method of updating the checksum works on CPUs of either endianness.
295 	 * If htons() is inlined, all the arithmetic is folded; otherwise
296 	 * the htons()s are combined by CSE due to the const attribute.
297 	 *
298 	 * Don't bother using HW checksumming here -- the incremental
299 	 * update is pretty fast.
300 	 */
301 	ip->ip_ttl -= IPTTLDEC;
302 	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
303 		ip->ip_sum -= ~htons(IPTTLDEC << 8);
304 	else
305 		ip->ip_sum += htons(IPTTLDEC << 8);
306 
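	/*
	 * Informal sketch of the incremental update above: the TTL is the
	 * high-order byte of a 16-bit header word, so decrementing it by
	 * IPTTLDEC (normally 1) lowers that word by IPTTLDEC << 8, i.e.
	 * 0x0100 in network byte order.  The one's-complement checksum must
	 * then grow by the same amount using one's-complement (end-around
	 * carry) addition; comparing against ~htons(IPTTLDEC << 8) detects
	 * the overflow case and folds the carry back in (cf. RFC 1141,
	 * "Incremental Updating of the Internet Checksum").
	 */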
307 	/*
308 	 * Done modifying the header; copy it back, if necessary.
309 	 *
310 	 * XXX Use m_copyback_cow(9) here? --dyoung
311 	 */
312 	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
313 		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));
314 
315 	/*
316 	 * Trim the packet in case it's too long.
317 	 */
318 	if (m->m_pkthdr.len > iplen) {
319 		if (m->m_len == m->m_pkthdr.len) {
320 			m->m_len = iplen;
321 			m->m_pkthdr.len = iplen;
322 		} else
323 			m_adj(m, iplen - m->m_pkthdr.len);
324 	}
325 
326 	/*
327 	 * Send the packet on its way.  All we can get back is ENOBUFS.
328 	 */
329 	ipf->ipf_uses++;
330 	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);
331 
332 	if (rt->rt_flags & RTF_GATEWAY)
333 		dst = rt->rt_gateway;
334 	else
335 		dst = rtcache_getdst(&ipf->ipf_ro);
336 
337 	if ((error = if_output_lock(rt->rt_ifp, rt->rt_ifp, m, dst, rt)) != 0) {
338 		if (error == ENOBUFS)
339 			ipf->ipf_dropped++;
340 		else
341 			ipf->ipf_errors++;
342 	}
343 	ret = 1;
344  out:
345 	mutex_exit(&ipflow_lock);
346 	return ret;
347 }
348 
349 static void
350 ipflow_addstats(struct ipflow *ipf)
351 {
352 	struct rtentry *rt;
353 	uint64_t *ips;
354 
355 	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
356 		rt->rt_use += ipf->ipf_uses;
357 
358 	ips = IP_STAT_GETREF();
359 	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
360 	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
361 	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
362 	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
363 	IP_STAT_PUTREF();
364 }
365 
366 static void
367 ipflow_free(struct ipflow *ipf)
368 {
369 
370 	KASSERT(mutex_owned(&ipflow_lock));
371 
372 	/*
373 	 * Remove the flow from the hash table (at elevated IPL).
374 	 * Once it's off the list, we can deal with it at normal
375 	 * network IPL.
376 	 */
377 	IPFLOW_REMOVE(ipf);
378 
379 	ipflow_addstats(ipf);
380 	rtcache_free(&ipf->ipf_ro);
381 	ipflow_inuse--;
382 	pool_put(&ipflow_pool, ipf);
383 }
384 
385 static struct ipflow *
386 ipflow_reap(bool just_one)
387 {
388 
389 	KASSERT(mutex_owned(&ipflow_lock));
390 
391 	while (just_one || ipflow_inuse > ip_maxflows) {
392 		struct ipflow *ipf, *maybe_ipf = NULL;
393 
394 		ipf = LIST_FIRST(&ipflowlist);
395 		while (ipf != NULL) {
396 			/*
397 			 * If this no longer points to a valid route,
398 			 * reclaim it.
399 			 */
400 			if (rtcache_validate(&ipf->ipf_ro) == NULL)
401 				goto done;
402 			/*
403 			 * Choose the one that's been least recently
404 			 * used or has had the fewest uses in the
405 			 * last 1.5 intervals.
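			 * ("1.5 intervals": ipf_last_uses covers the previous
			 * full slow-timeout interval, ipf_uses the current,
			 * partially elapsed one.)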
406 			 */
407 			if (maybe_ipf == NULL ||
408 			    ipf->ipf_timer < maybe_ipf->ipf_timer ||
409 			    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
410 			     ipf->ipf_last_uses + ipf->ipf_uses <
411 			         maybe_ipf->ipf_last_uses +
412 			         maybe_ipf->ipf_uses))
413 				maybe_ipf = ipf;
414 			ipf = LIST_NEXT(ipf, ipf_list);
415 		}
416 		ipf = maybe_ipf;
417 	    done:
418 		/*
419 		 * Remove the entry from the flow table.
420 		 */
421 		IPFLOW_REMOVE(ipf);
422 
423 		ipflow_addstats(ipf);
424 		rtcache_free(&ipf->ipf_ro);
425 		if (just_one)
426 			return ipf;
427 		pool_put(&ipflow_pool, ipf);
428 		ipflow_inuse--;
429 	}
430 	return NULL;
431 }
432 
433 static bool ipflow_work_enqueued = false;
434 
435 static void
436 ipflow_slowtimo_work(struct work *wk, void *arg)
437 {
438 	struct rtentry *rt;
439 	struct ipflow *ipf, *next_ipf;
440 	uint64_t *ips;
441 
442 	mutex_enter(softnet_lock);
443 	mutex_enter(&ipflow_lock);
444 	KERNEL_LOCK(1, NULL);
445 	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
446 		next_ipf = LIST_NEXT(ipf, ipf_list);
447 		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
448 		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
449 			ipflow_free(ipf);
450 		} else {
451 			ipf->ipf_last_uses = ipf->ipf_uses;
452 			rt->rt_use += ipf->ipf_uses;
453 			ips = IP_STAT_GETREF();
454 			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
455 			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
456 			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
457 			IP_STAT_PUTREF();
458 			ipf->ipf_uses = 0;
459 		}
460 	}
461 	ipflow_work_enqueued = false;
462 	KERNEL_UNLOCK_ONE(NULL);
463 	mutex_exit(&ipflow_lock);
464 	mutex_exit(softnet_lock);
465 }
466 
467 void
468 ipflow_slowtimo(void)
469 {
470 
471 	/* Avoid enqueueing the work while a previous enqueue is still pending. */
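	/* (The same struct work must not be enqueued twice; see workqueue(9).) */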
472 	mutex_enter(&ipflow_lock);
473 	if (ipflow_work_enqueued) {
474 		mutex_exit(&ipflow_lock);
475 		return;
476 	}
477 	ipflow_work_enqueued = true;
478 	mutex_exit(&ipflow_lock);
479 
480 	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
481 }
482 
483 void
484 ipflow_create(const struct route *ro, struct mbuf *m)
485 {
486 	const struct ip *const ip = mtod(m, const struct ip *);
487 	struct ipflow *ipf;
488 	size_t hash;
489 
490 	mutex_enter(&ipflow_lock);
491 
492 	/*
493 	 * Don't create cache entries for ICMP messages.
494 	 */
495 	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP) {
496 		mutex_exit(&ipflow_lock);
497 		return;
498 	}
499 
500 	KERNEL_LOCK(1, NULL);
501 
502 	/*
503 	 * See if a flow struct already exists.  If so, remove it from its
504 	 * list and free the old route.  If not, try to allocate a new one
505 	 * from the pool (if we aren't at our limit).
506 	 */
507 	ipf = ipflow_lookup(ip);
508 	if (ipf == NULL) {
509 		if (ipflow_inuse >= ip_maxflows) {
510 			ipf = ipflow_reap(true);
511 		} else {
512 			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
513 			if (ipf == NULL)
514 				goto out;
515 			ipflow_inuse++;
516 		}
517 		memset(ipf, 0, sizeof(*ipf));
518 	} else {
519 		IPFLOW_REMOVE(ipf);
520 
521 		ipflow_addstats(ipf);
522 		rtcache_free(&ipf->ipf_ro);
523 		ipf->ipf_uses = ipf->ipf_last_uses = 0;
524 		ipf->ipf_errors = ipf->ipf_dropped = 0;
525 	}
526 
527 	/*
528 	 * Fill in the updated information.
529 	 */
530 	rtcache_copy(&ipf->ipf_ro, ro);
531 	ipf->ipf_dst = ip->ip_dst;
532 	ipf->ipf_src = ip->ip_src;
533 	ipf->ipf_tos = ip->ip_tos;
534 	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);
535 
536 	/*
537 	 * Insert into the appropriate bucket of the flow table.
538 	 */
539 	hash = ipflow_hash(ip);
540 	IPFLOW_INSERT(&ipflowtable[hash], ipf);
541 
542  out:
543 	KERNEL_UNLOCK_ONE(NULL);
544 	mutex_exit(&ipflow_lock);
545 }
546 
547 int
548 ipflow_invalidate_all(int new_size)
549 {
550 	struct ipflow *ipf, *next_ipf;
551 	int error;
552 
553 	error = 0;
554 
555 	mutex_enter(&ipflow_lock);
556 
557 	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
558 		next_ipf = LIST_NEXT(ipf, ipf_list);
559 		ipflow_free(ipf);
560 	}
561 
562 	if (new_size)
563 		error = ipflow_reinit(new_size);
564 
565 	mutex_exit(&ipflow_lock);
566 
567 	return error;
568 }
569 
570 #ifdef GATEWAY
571 /*
572  * sysctl helper routine for net.inet.ip.maxflows.
573  */
574 static int
575 sysctl_net_inet_ip_maxflows(SYSCTLFN_ARGS)
576 {
577 	int error;
578 
579 	error = sysctl_lookup(SYSCTLFN_CALL(rnode));
580 	if (error || newp == NULL)
581 		return (error);
582 
583 	mutex_enter(softnet_lock);
584 	mutex_enter(&ipflow_lock);
585 	KERNEL_LOCK(1, NULL);
586 
587 	ipflow_reap(false);
588 
589 	KERNEL_UNLOCK_ONE(NULL);
590 	mutex_exit(&ipflow_lock);
591 	mutex_exit(softnet_lock);
592 
593 	return (0);
594 }
595 
596 static int
597 sysctl_net_inet_ip_hashsize(SYSCTLFN_ARGS)
598 {
599 	int error, tmp;
600 	struct sysctlnode node;
601 
602 	node = *rnode;
603 	tmp = ip_hashsize;
604 	node.sysctl_data = &tmp;
605 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
606 	if (error || newp == NULL)
607 		return (error);
608 
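	/*
	 * (tmp & (tmp - 1)) == 0 holds exactly for powers of two and for
	 * zero (hence the additional tmp != 0 test): e.g. 64 & 63 == 0,
	 * while 48 & 47 == 32.
	 */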
609 	if ((tmp & (tmp - 1)) == 0 && tmp != 0) {
610 		/*
611 		 * Can only fail due to malloc()
612 		 */
613 		mutex_enter(softnet_lock);
614 		KERNEL_LOCK(1, NULL);
615 
616 		error = ipflow_invalidate_all(tmp);
617 
618 		KERNEL_UNLOCK_ONE(NULL);
619 		mutex_exit(softnet_lock);
620 
621 	} else {
622 		/*
623 		 * EINVAL if not a power of 2
624 		 */
625 		error = EINVAL;
626 	}
627 
628 	return error;
629 }
630 #endif /* GATEWAY */
631 
632 static void
633 ipflow_sysctl_init(struct sysctllog **clog)
634 {
635 	sysctl_createv(clog, 0, NULL, NULL,
636 		       CTLFLAG_PERMANENT,
637 		       CTLTYPE_NODE, "inet",
638 		       SYSCTL_DESCR("PF_INET related settings"),
639 		       NULL, 0, NULL, 0,
640 		       CTL_NET, PF_INET, CTL_EOL);
641 	sysctl_createv(clog, 0, NULL, NULL,
642 		       CTLFLAG_PERMANENT,
643 		       CTLTYPE_NODE, "ip",
644 		       SYSCTL_DESCR("IPv4 related settings"),
645 		       NULL, 0, NULL, 0,
646 		       CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);
647 
648 #ifdef GATEWAY
649 	sysctl_createv(clog, 0, NULL, NULL,
650 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
651 		       CTLTYPE_INT, "maxflows",
652 		       SYSCTL_DESCR("Number of flows for fast forwarding"),
653 		       sysctl_net_inet_ip_maxflows, 0, &ip_maxflows, 0,
654 		       CTL_NET, PF_INET, IPPROTO_IP,
655 		       IPCTL_MAXFLOWS, CTL_EOL);
656 	sysctl_createv(clog, 0, NULL, NULL,
657 			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
658 			CTLTYPE_INT, "hashsize",
659 			SYSCTL_DESCR("Size of hash table for fast forwarding (IPv4)"),
660 			sysctl_net_inet_ip_hashsize, 0, &ip_hashsize, 0,
661 			CTL_NET, PF_INET, IPPROTO_IP,
662 			CTL_CREATE, CTL_EOL);
663 #endif /* GATEWAY */
664 }
665