/*	$NetBSD: ip_flow.c,v 1.63 2014/04/01 13:11:44 pooka Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the 3am Software Foundry ("3am").  It was developed by Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.63 2014/04/01 13:11:44 pooka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/pfil.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>

/*
 * Similar code is very well commented in netinet6/ip6_flow.c
 */

#define	IPFLOW_HASHBITS		6	/* should not be a multiple of 8 */

static struct pool ipflow_pool;

LIST_HEAD(ipflowhead, ipflow);

#define	IPFLOW_TIMER		(5 * PR_SLOWHZ)
#define	IPFLOW_DEFAULT_HASHSIZE	(1 << IPFLOW_HASHBITS)

static struct ipflowhead *ipflowtable = NULL;
static struct ipflowhead ipflowlist;
static int ipflow_inuse;

#define	IPFLOW_INSERT(bucket, ipf) \
do { \
	LIST_INSERT_HEAD((bucket), (ipf), ipf_hash); \
	LIST_INSERT_HEAD(&ipflowlist, (ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#define	IPFLOW_REMOVE(ipf) \
do { \
	LIST_REMOVE((ipf), ipf_hash); \
	LIST_REMOVE((ipf), ipf_list); \
} while (/*CONSTCOND*/ 0)

#ifndef IPFLOW_MAX
#define	IPFLOW_MAX		256
#endif
int ip_maxflows = IPFLOW_MAX;
int ip_hashsize = IPFLOW_DEFAULT_HASHSIZE;

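/*
 * Compute the hash bucket index for a flow from the packet's
 * source address, destination address and TOS.
 */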
static size_t
ipflow_hash(const struct ip *ip)
{
	size_t hash = ip->ip_tos;
	size_t idx;

	for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) {
		hash += (ip->ip_dst.s_addr >> (32 - idx)) +
		    (ip->ip_src.s_addr >> idx);
	}

	return hash & (ip_hashsize-1);
}

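/*
 * Look up an existing flow that matches the packet's source address,
 * destination address and TOS; return NULL if none is found.
 */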
static struct ipflow *
ipflow_lookup(const struct ip *ip)
{
	size_t hash;
	struct ipflow *ipf;

	hash = ipflow_hash(ip);

	LIST_FOREACH(ipf, &ipflowtable[hash], ipf_hash) {
		if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr
		    && ip->ip_src.s_addr == ipf->ipf_src.s_addr
		    && ip->ip_tos == ipf->ipf_tos)
			break;
	}
	return ipf;
}

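/*
 * Initialize the pool from which flow entries are allocated.
 */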
void
ipflow_poolinit(void)
{

	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
	    NULL, IPL_NET);
}

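/*
 * (Re)allocate the flow hash table with table_size buckets and reset
 * the flow list.  Returns 0 on success, 1 if the table could not be
 * allocated.
 */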
int
ipflow_init(int table_size)
{
	struct ipflowhead *new_table;
	size_t i;

	new_table = (struct ipflowhead *)malloc(sizeof(struct ipflowhead) *
	    table_size, M_RTABLE, M_NOWAIT);

	if (new_table == NULL)
		return 1;

	if (ipflowtable != NULL)
		free(ipflowtable, M_RTABLE);

	ipflowtable = new_table;
	ip_hashsize = table_size;

	LIST_INIT(&ipflowlist);
	for (i = 0; i < ip_hashsize; i++)
		LIST_INIT(&ipflowtable[i]);

	return 0;
}

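/*
 * Try to forward the packet along a cached flow, bypassing the normal
 * ip_input()/ip_forward() path.  Returns 1 if the packet was handled
 * here, 0 if the caller should fall back to the slow path.
 */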
int
ipflow_fastforward(struct mbuf *m)
{
	struct ip *ip;
	struct ip ip_store;
	struct ipflow *ipf;
	struct rtentry *rt;
	const struct sockaddr *dst;
	int error;
	int iplen;

	/*
	 * Are we forwarding packets?  Big enough for an IP packet?
	 */
	if (!ipforwarding || ipflow_inuse == 0 || m->m_len < sizeof(struct ip))
		return 0;

	/*
	 * Was packet received as a link-level multicast or broadcast?
	 * If so, don't try to fast forward.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) != 0)
		return 0;

	/*
	 * IP header with no options and valid version and length
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, const void *)))
		ip = mtod(m, struct ip *);
	else {
		memcpy(&ip_store, mtod(m, const void *), sizeof(ip_store));
		ip = &ip_store;
	}
	iplen = ntohs(ip->ip_len);
	if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) ||
	    iplen < sizeof(struct ip) || iplen > m->m_pkthdr.len)
		return 0;
	/*
	 * Find a flow.
	 */
	if ((ipf = ipflow_lookup(ip)) == NULL)
		return 0;

	/*
	 * Verify the IP header checksum.
	 */
	switch (m->m_pkthdr.csum_flags &
		((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		return (0);

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		break;

	default:
		/* Must compute it ourselves. */
		if (in_cksum(m, sizeof(struct ip)) != 0)
			return (0);
		break;
	}

	/*
	 * Route and interface still up?
	 */
	if ((rt = rtcache_validate(&ipf->ipf_ro)) == NULL ||
	    (rt->rt_ifp->if_flags & IFF_UP) == 0)
		return 0;

	/*
	 * Packet size OK?  TTL?
	 */
	if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC)
		return 0;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	/*
	 * Everything checks out and so we can forward this packet.
	 * Modify the TTL and incrementally change the checksum.
	 *
	 * This method of adding the checksum works on either endian CPU.
	 * If htons() is inlined, all the arithmetic is folded; otherwise
	 * the htons()s are combined by CSE due to the const attribute.
	 *
	 * Don't bother using HW checksumming here -- the incremental
	 * update is pretty fast.
	 */
	ip->ip_ttl -= IPTTLDEC;
	if (ip->ip_sum >= (u_int16_t) ~htons(IPTTLDEC << 8))
		ip->ip_sum -= ~htons(IPTTLDEC << 8);
	else
		ip->ip_sum += htons(IPTTLDEC << 8);

	/*
	 * Done modifying the header; copy it back, if necessary.
	 *
	 * XXX Use m_copyback_cow(9) here? --dyoung
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0)
		memcpy(mtod(m, void *), &ip_store, sizeof(ip_store));

	/*
	 * Trim the packet in case it's too long.
	 */
	if (m->m_pkthdr.len > iplen) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = iplen;
			m->m_pkthdr.len = iplen;
		} else
			m_adj(m, iplen - m->m_pkthdr.len);
	}

	/*
	 * Send the packet on its way.  All we can get back is ENOBUFS.
	 */
	ipf->ipf_uses++;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	if (rt->rt_flags & RTF_GATEWAY)
		dst = rt->rt_gateway;
	else
		dst = rtcache_getdst(&ipf->ipf_ro);

	KERNEL_LOCK(1, NULL);
	if ((error = (*rt->rt_ifp->if_output)(rt->rt_ifp, m, dst, rt)) != 0) {
		if (error == ENOBUFS)
			ipf->ipf_dropped++;
		else
			ipf->ipf_errors++;
	}
	KERNEL_UNLOCK_ONE(NULL);
	return 1;
}

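/*
 * Fold this flow's accumulated counters into its cached route and the
 * global IP statistics.
 */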
static void
ipflow_addstats(struct ipflow *ipf)
{
	struct rtentry *rt;
	uint64_t *ips;

	if ((rt = rtcache_validate(&ipf->ipf_ro)) != NULL)
		rt->rt_use += ipf->ipf_uses;

	ips = IP_STAT_GETREF();
	ips[IP_STAT_CANTFORWARD] += ipf->ipf_errors + ipf->ipf_dropped;
	ips[IP_STAT_TOTAL] += ipf->ipf_uses;
	ips[IP_STAT_FORWARD] += ipf->ipf_uses;
	ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
	IP_STAT_PUTREF();
}

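/*
 * Tear down a single flow: unlink it, account its statistics, release
 * its cached route and return it to the pool.
 */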
static void
ipflow_free(struct ipflow *ipf)
{
	int s;
	/*
	 * Remove the flow from the hash table (at elevated IPL).
	 * Once it's off the list, we can deal with it at normal
	 * network IPL.
	 */
	s = splnet();
	IPFLOW_REMOVE(ipf);
	splx(s);
	ipflow_addstats(ipf);
	rtcache_free(&ipf->ipf_ro);
	ipflow_inuse--;
	s = splnet();
	pool_put(&ipflow_pool, ipf);
	splx(s);
}

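/*
 * Reclaim flow entries.  If just_one is true, unlink and return the
 * best victim (an entry whose route is no longer valid, otherwise the
 * least recently used one) without freeing it, so the caller can reuse
 * it; otherwise keep freeing victims until the number of flows drops
 * to ip_maxflows.
 */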
struct ipflow *
ipflow_reap(bool just_one)
{
	while (just_one || ipflow_inuse > ip_maxflows) {
		struct ipflow *ipf, *maybe_ipf = NULL;
		int s;

		ipf = LIST_FIRST(&ipflowlist);
		while (ipf != NULL) {
			/*
			 * If this no longer points to a valid route,
			 * reclaim it.
			 */
			if (rtcache_validate(&ipf->ipf_ro) == NULL)
				goto done;
			/*
			 * Choose the one that's been least recently
			 * used or has had the least uses in the
			 * last 1.5 intervals.
			 */
			if (maybe_ipf == NULL ||
			    ipf->ipf_timer < maybe_ipf->ipf_timer ||
			    (ipf->ipf_timer == maybe_ipf->ipf_timer &&
			     ipf->ipf_last_uses + ipf->ipf_uses <
			         maybe_ipf->ipf_last_uses +
			         maybe_ipf->ipf_uses))
				maybe_ipf = ipf;
			ipf = LIST_NEXT(ipf, ipf_list);
		}
		ipf = maybe_ipf;
	    done:
		/*
		 * Remove the entry from the flow table.
		 */
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		if (just_one)
			return ipf;
		pool_put(&ipflow_pool, ipf);
		ipflow_inuse--;
	}
	return NULL;
}

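/*
 * Slow timeout routine: expire flows whose timer has run out or whose
 * cached route is gone, and charge the per-interval use counts of the
 * surviving flows to the route and IP statistics.
 */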
void
ipflow_slowtimo(void)
{
	struct rtentry *rt;
	struct ipflow *ipf, *next_ipf;
	uint64_t *ips;

	mutex_enter(softnet_lock);
	KERNEL_LOCK(1, NULL);
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		if (PRT_SLOW_ISEXPIRED(ipf->ipf_timer) ||
		    (rt = rtcache_validate(&ipf->ipf_ro)) == NULL) {
			ipflow_free(ipf);
		} else {
			ipf->ipf_last_uses = ipf->ipf_uses;
			rt->rt_use += ipf->ipf_uses;
			ips = IP_STAT_GETREF();
			ips[IP_STAT_TOTAL] += ipf->ipf_uses;
			ips[IP_STAT_FORWARD] += ipf->ipf_uses;
			ips[IP_STAT_FASTFORWARD] += ipf->ipf_uses;
			IP_STAT_PUTREF();
			ipf->ipf_uses = 0;
		}
	}
	KERNEL_UNLOCK_ONE(NULL);
	mutex_exit(softnet_lock);
}

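/*
 * Create or refresh the flow entry for packet m, caching route ro so
 * that later packets in the same flow can be fast-forwarded.
 */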
void
ipflow_create(const struct route *ro, struct mbuf *m)
{
	const struct ip *const ip = mtod(m, const struct ip *);
	struct ipflow *ipf;
	size_t hash;
	int s;

	/*
	 * Don't create cache entries for ICMP messages.
	 */
	if (ip_maxflows == 0 || ip->ip_p == IPPROTO_ICMP)
		return;

	KERNEL_LOCK(1, NULL);

	/*
	 * See if an existing flow struct exists.  If so, remove it from its
	 * list and free the old route.  If not, try to malloc a new one
	 * (if we aren't at our limit).
	 */
	ipf = ipflow_lookup(ip);
	if (ipf == NULL) {
		if (ipflow_inuse >= ip_maxflows) {
			ipf = ipflow_reap(true);
		} else {
			s = splnet();
			ipf = pool_get(&ipflow_pool, PR_NOWAIT);
			splx(s);
			if (ipf == NULL)
				goto out;
			ipflow_inuse++;
		}
		memset(ipf, 0, sizeof(*ipf));
	} else {
		s = splnet();
		IPFLOW_REMOVE(ipf);
		splx(s);
		ipflow_addstats(ipf);
		rtcache_free(&ipf->ipf_ro);
		ipf->ipf_uses = ipf->ipf_last_uses = 0;
		ipf->ipf_errors = ipf->ipf_dropped = 0;
	}

	/*
	 * Fill in the updated information.
	 */
	rtcache_copy(&ipf->ipf_ro, ro);
	ipf->ipf_dst = ip->ip_dst;
	ipf->ipf_src = ip->ip_src;
	ipf->ipf_tos = ip->ip_tos;
	PRT_SLOW_ARM(ipf->ipf_timer, IPFLOW_TIMER);

	/*
	 * Insert into the appropriate bucket of the flow table.
	 */
	hash = ipflow_hash(ip);
	s = splnet();
	IPFLOW_INSERT(&ipflowtable[hash], ipf);
	splx(s);

 out:
	KERNEL_UNLOCK_ONE(NULL);
}

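/*
 * Throw away all cached flows and, if new_size is non-zero, resize
 * the hash table via ipflow_init().
 */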
int
ipflow_invalidate_all(int new_size)
{
	struct ipflow *ipf, *next_ipf;
	int s, error;

	error = 0;
	s = splnet();
	for (ipf = LIST_FIRST(&ipflowlist); ipf != NULL; ipf = next_ipf) {
		next_ipf = LIST_NEXT(ipf, ipf_list);
		ipflow_free(ipf);
	}

	if (new_size)
		error = ipflow_init(new_size);
	splx(s);

	return error;
}