/*-
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

static const char rcs_id[] =
    "@(#) $FreeBSD: src/sys/netgraph/netflow/netflow.c,v 1.29 2008/05/09 23:02:57 julian Exp $";

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/ng_netflow.h>

#define	NBUCKETS	(65536)		/* must be a power of 2 */

/* This hash is for TCP and UDP packets. */
#define	FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) ^		\
	port1 ^ htons(port2)) &			\
	(NBUCKETS - 1))

/* This hash is for all other IP packets. */
#define	ADDR_HASH(addr1, addr2)			\
	((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) &		\
	(NBUCKETS - 1))
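
/*
 * Illustrative sketch (not part of the module): how a flow record is
 * mapped to a bucket.  Because NBUCKETS is a power of 2, masking with
 * (NBUCKETS - 1) keeps the result in [0, NBUCKETS).  Assuming a filled
 * struct flow_rec r for a TCP packet:
 *
 *	uint32_t slot = FULL_HASH(r.r_src.s_addr, r.r_dst.s_addr,
 *	    r.r_sport, r.r_dport);
 *	struct flow_hash_entry *hsh = &priv->hash[slot];
 *
 * Addresses and ports are kept in network byte order throughout, so
 * no conversion is needed before hashing.
 */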

/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define	AGED(fle)	(time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define	ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magic number: statistically, the number of 4-packet flows is
 * an order of magnitude larger than that of 5-, 6-, 7-, ...-packet
 * flows.  Most UDP/ICMP scans are 1 packet (~90% of the flow cache).
 * TCP scans are 2 packets for a reachable host and 4 packets otherwise.
 */
#define	SMALL(fle)	(fle->f.packets <= 4)

/*
 * Cisco uses milliseconds for uptime.  Bad idea, since it overflows
 * every 48+ days.  But we do the same to stay compatible.  This macro
 * does a (wrapping) multiplication by 1000 using shifts and adds.
 */
#define	MILLIUPTIME(t)	(((t) << 9) +	/* 512 */	\
			 ((t) << 8) +	/* 256 */	\
			 ((t) << 7) +	/* 128 */	\
			 ((t) << 6) +	/* 64  */	\
			 ((t) << 5) +	/* 32  */	\
			 ((t) << 3))	/* 8   */
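
/*
 * Sanity check of the shift trick above (illustrative): the shift
 * amounts sum to 512 + 256 + 128 + 64 + 32 + 8 = 1000, so e.g.
 *
 *	MILLIUPTIME(3) == 3 * 1000 == 3000
 *
 * i.e. seconds of uptime become milliseconds, wrapping (as on Cisco)
 * once the product no longer fits in 32 bits.
 */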

MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, item_p, int flags);

/* Generate hash for a given flow record. */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}

/* This is a callback from uma(9), called on allocation. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p)arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used, 1);

	return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p)arg;

	atomic_subtract_32(&priv->info.nfinfo_used, 1);
}
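
/*
 * Illustrative sketch (not part of the module): the ctor/dtor pair
 * above implements a hard cap on the cache size.  The ctor rejects an
 * allocation once nfinfo_used reaches CACHESIZE, so a caller must
 * always be prepared for NULL:
 *
 *	struct flow_entry *fle;
 *
 *	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
 *	if (fle == NULL)
 *		return (ENOMEM);	-- cache full or out of memory
 *
 * The check-then-add in the ctor can race between two CPUs, but the
 * worst case appears to be a momentary overshoot by a few entries.
 */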

/*
 * Detach the current export datagram from priv, if there is one.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv)
{
	item_p	item = NULL;

	mtx_lock(&priv->export_mtx);
	if (priv->export_item != NULL) {
		item = priv->export_item;
		priv->export_item = NULL;
	}
	mtx_unlock(&priv->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
	}

	return (item);
}
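
/*
 * Illustrative usage of the dgram pool above (a sketch, not module
 * code): exporters detach the half-filled datagram, append records,
 * and give it back, so consecutive expiries share one packet:
 *
 *	item_p item = get_export_dgram(priv);
 *	if (item != NULL) {
 *		... append records with export_add() ...
 *		return_export_dgram(priv, item, NG_QUEUE);
 *	}
 *
 * This amortizes mbuf allocation and keeps export datagrams as full
 * as possible before they are sent downstream.
 */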

/*
 * Re-attach an incomplete datagram back to priv.
 * If another one is already there, send ours out incomplete.
 */
static void
return_export_dgram(priv_p priv, item_p item, int flags)
{
	/*
	 * It may happen on SMP that some other thread has already
	 * put its item there; in this case we bail out and send
	 * what we have to the collector.
	 */
	mtx_lock(&priv->export_mtx);
	if (priv->export_item == NULL) {
		priv->export_item = item;
		mtx_unlock(&priv->export_mtx);
	} else {
		mtx_unlock(&priv->export_mtx);
		export_send(priv, item, flags);
	}
}

/*
 * The flow is over.  Call export_add() and free it.  If the datagram
 * is full, call export_send().
 */
static __inline void
expire_flow(priv_p priv, item_p *item, struct flow_entry *fle, int flags)
{
	if (*item == NULL)
		*item = get_export_dgram(priv);
	if (*item == NULL) {
		atomic_add_32(&priv->info.nfinfo_export_failed, 1);
		uma_zfree_arg(priv->zone, fle, priv);
		return;
	}
	if (export_add(*item, fle) > 0) {
		export_send(priv, *item, flags);
		*item = NULL;
	}
	uma_zfree_arg(priv->zone, fle, priv);
}
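
/*
 * Illustrative expiry loop (a condensed sketch of what
 * ng_netflow_cache_flush() below does): item is threaded through
 * expire_flow() so that many flows from different buckets can share
 * one datagram:
 *
 *	item_p item = NULL;
 *
 *	TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
 *		TAILQ_REMOVE(&hsh->head, fle, fle_hash);
 *		expire_flow(priv, &item, fle, NG_QUEUE);
 *	}
 *	if (item != NULL)
 *		export_send(priv, item, NG_QUEUE);	-- flush leftovers
 */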

/* Get a snapshot of node statistics. */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
	/* XXX: atomic */
	memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}

/*
 * Insert a record into the defined slot.
 *
 * First we grab a free flow entry, then fill in all
 * the fields we can.
 *
 * TODO: consider dropping the hash mutex while filling in the datagram,
 * as was done in a previous version.  Need to test & profile
 * to be sure.
 */
static __inline int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow_entry *fle;
	struct sockaddr_in sin;
	struct rtentry *rt;

	mtx_assert(&hsh->mtx, MA_OWNED);

	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
	if (fle == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle is totally ours.  It is detached from all lists,
	 * so we can safely edit it.
	 */

	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
	fle->f.bytes = plen;
	fle->f.packets = 1;
	fle->f.tcp_flags = tcp_flags;

	fle->f.first = fle->f.last = time_uptime;

	/*
	 * First do a route table lookup on the destination address, so we
	 * can fill in out_ifx, dst_mask and next_hop (dst_as is left for a
	 * future release).
	 */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_dst;
	/* XXX MRT 0 as a default.. need the m here to get fib */
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, RTF_CLONING, 0);
	if (rt != NULL) {
		fle->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET)
			fle->f.next_hop =
			    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

		if (rt_mask(rt))
			fle->f.dst_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up.  We can't determine the mask :( */
			fle->f.dst_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Do a route lookup on the source address too, to fill in src_mask. */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_src;
	/* XXX MRT 0 as a default, revisit.  Need the mbuf for the fib. */
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, RTF_CLONING, 0);
	if (rt != NULL) {
		if (rt_mask(rt))
			fle->f.src_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up.  We can't determine the mask :( */
			fle->f.src_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

	return (0);
}
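
/*
 * Illustrative note on the mask computation above (a sketch, not
 * module code): a contiguous IPv4 netmask becomes a prefix length
 * simply by counting its set bits, e.g. for a /24 route:
 *
 *	uint32_t mask = htonl(0xffffff00);	-- 255.255.255.0
 *	uint8_t plen = bitcount32(mask);	-- == 24
 *
 * Popcount is byte-order independent, so no ntohl() is needed; this
 * does assume the mask is contiguous, which holds for routes
 * installed by the stack.
 */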


/*
 * Non-static functions called from ng_netflow.c
 */

/* Allocate memory and set up flow cache. */
int
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry	*hsh;
	int i;

	/* Initialize cache UMA zone. */
	priv->zone = uma_zcreate("NetFlow cache", sizeof(struct flow_entry),
	    uma_ctor_flow, uma_dtor_flow, NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);

	/* Allocate hash. */
	MALLOC(priv->hash, struct flow_hash_entry *,
	    NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	if (priv->hash == NULL) {
		uma_zdestroy(priv->zone);
		return (ENOMEM);
	}

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

	mtx_init(&priv->export_mtx, "export dgram lock", NULL, MTX_DEF);

	return (0);
}

/* Free all flow cache memory.  Called from the node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	item_p			item = NULL;
	int i;

	/*
	 * We are going to free probably billable data.
	 * Expire everything before freeing it.
	 * No locking is required since the callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
		}

	if (item != NULL)
		export_send(priv, item, NG_QUEUE);

	uma_zdestroy(priv->zone);

	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash)
		FREE(priv->hash, M_NETFLOW_HASH);

	mtx_destroy(&priv->export_mtx);
}

/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, struct ip *ip, iface_p iface,
	struct ifnet *ifp)
{
	register struct flow_entry	*fle, *fle1;
	struct flow_hash_entry		*hsh;
	struct flow_rec		r;
	item_p			item = NULL;
	int			hlen, plen;
	int			error = 0;
	uint8_t			tcp_flags = 0;

	/* Try to fill in flow_rec r. */
	bzero(&r, sizeof(r));

	/* Check version. */
	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	/* Verify minimum header length. */
	hlen = ip->ip_hl << 2;

	if (hlen < sizeof(struct ip))
		return (EINVAL);

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;

	/* Save packet length. */
	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	/* A configured in_ifx overrides the mbuf's. */
	if (iface->info.ifinfo_index == 0) {
		if (ifp != NULL)
			r.r_i_ifx = ifp->if_index;
	} else
		r.r_i_ifx = iface->info.ifinfo_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Subsequent fragments will be recorded simply as IP packets with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know it looks like a bug.  But I don't want to re-implement
	 * IP packet reassembly here.  Anyway, the (in)famous trafd works
	 * this way - and nobody has complained yet :)
	 */
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch (r.r_ip_p) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t)ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
			r.r_ports = *(uint32_t *)((caddr_t)ip + hlen);
			break;
		}
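
	/*
	 * Illustrative aside on the UDP case above (an assumption about
	 * the flow_rec layout in netflow.h): r_ports appears to overlay
	 * r_sport and r_dport in one union, so a single 32-bit load
	 * grabs both ports at once - the UDP header starts with
	 *
	 *	uint16_t uh_sport;	-- source port
	 *	uint16_t uh_dport;	-- destination port
	 *
	 * and both stay in network byte order, matching the TCP branch.
	 */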

	/* Update node statistics.  XXX: race... */
	priv->info.nfinfo_packets++;
	priv->info.nfinfo_bytes += plen;

	/* Find the hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash chain and find our entry.  If we encounter
	 * an entry that should be expired, purge it.  We search in
	 * reverse since the most active entries are kept at the tail and
	 * are thus found first, and most searches are done on the most
	 * active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */

		fle->f.bytes += plen;
		fle->f.packets++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire a flow actively:
		 * - it hit the active timeout
		 * - the TCP connection closed
		 * - its byte counter is about to overflow
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (UINT_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest; move it to the tail if it
			 * isn't there already.  The next search will
			 * then locate it faster.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, tcp_flags);

	mtx_unlock(&hsh->mtx);

	if (item != NULL)
		return_export_dgram(priv, item, NG_QUEUE);

	return (error);
}
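
/*
 * Illustrative note on the overflow guard above (a sketch, not module
 * code): NetFlow v5 octet counters are 32-bit, so a flow is exported
 * early once one more maximum-size packet could wrap the counter:
 *
 *	if (fle->f.bytes >= UINT_MAX - IF_MAXMTU)
 *		... expire now, before bytes + plen can exceed UINT_MAX ...
 *
 * A follow-up packet for the same 5-tuple then simply starts a fresh
 * flow entry via hash_insert().
 */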

/*
 * Return records from the cache to userland.
 *
 * TODO: matching a particular IP should be done in the kernel, here.
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
	struct flow_hash_entry *hsh;
	struct flow_entry *fle;
	struct ngnf_flows *data;
	int i;

	data = (struct ngnf_flows *)resp->data;
	data->last = 0;
	data->nentries = 0;

	/* Check if this is the first run. */
	if (last == 0) {
		hsh = priv->hash;
		i = 0;
	} else {
		if (last > NBUCKETS - 1)
			return (EINVAL);
		hsh = priv->hash + last;
		i = last;
	}

	/*
	 * We transfer no more than NREC_AT_ONCE records at a time; the
	 * rest will come in subsequent messages.
	 * We send the current hash index to userland, and userland should
	 * return it back to us.  Then we restart from that entry.
	 *
	 * The resulting cache snapshot is inaccurate for the
	 * following reasons:
	 *  - we skip locked hash entries
	 *  - we bail out if someone wants our entry
	 *  - we skip the rest of the entries when we hit NREC_AT_ONCE
	 */
	for (; i < NBUCKETS; hsh++, i++) {
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			bcopy(&fle->f, &(data->entries[data->nentries]),
			    sizeof(fle->f));
			data->nentries++;
			if (data->nentries == NREC_AT_ONCE) {
				mtx_unlock(&hsh->mtx);
				if (++i < NBUCKETS)
					data->last = i;
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	return (0);
}
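
/*
 * Illustrative userland paging loop (a sketch; the control message
 * name and the process() helper are assumptions for illustration):
 * the cookie handed back in data->last is fed into the next request
 * until the kernel returns last == 0:
 *
 *	uint32_t last = 0;
 *
 *	do {
 *		... send a show request carrying 'last', receive 'data' ...
 *		process(data->entries, data->nentries);
 *		last = data->last;
 *	} while (last != 0);
 *
 * Entries can be missed or duplicated across iterations; the snapshot
 * is advisory, as the comment above the scan loop explains.
 */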

/* We have a full datagram in the item.  Send it to the export hook. */
static int
export_send(priv_p priv, item_p item, int flags)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	int error = 0;

	/* Fill in the mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	   header->count + sizeof(struct netflow_v5_header);

	/* Fill in the export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs  = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->engine_type = 0;
	header->engine_id = 0;
	header->pad = 0;
	header->flow_seq = htonl(atomic_fetchadd_32(&priv->flow_seq,
	    header->count));
	header->count = htons(header->count);

	if (priv->export != NULL)
		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
	else
		NG_FREE_ITEM(item);

	return (error);
}
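
/*
 * Illustrative note on flow_seq (grounded in the header fill above):
 * NetFlow v5 sequence numbers count exported records, not datagrams.
 * atomic_fetchadd_32() returns the pre-increment value, so each
 * datagram carries the sequence number of its first record:
 *
 *	dgram N:	flow_seq = S,		count = C
 *	dgram N+1:	flow_seq = S + C,	...
 *
 * A collector can thus detect lost export datagrams by comparing the
 * expected and received sequence numbers.
 */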


/* Add an export record to the dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	rec = &dgram->r[header->count];
	header->count++;

	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in the export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx    = htons(fle->f.fle_i_ifx);
	rec->o_ifx    = htons(fle->f.fle_o_ifx);
	rec->packets  = htonl(fle->f.packets);
	rec->octets   = htonl(fle->f.bytes);
	rec->first    = htonl(MILLIUPTIME(fle->f.first));
	rec->last     = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port   = fle->f.r.r_sport;
	rec->d_port   = fle->f.r.r_dport;
	rec->flags    = fle->f.tcp_flags;
	rec->prot     = fle->f.r.r_ip_p;
	rec->tos      = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;

	/* Fields not supported. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1); /* end of datagram */
	else
		return (0);
}
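
/*
 * Illustrative sizing note (standard NetFlow v5 framing, assuming the
 * usual 24-byte header and 48-byte records from netflow.h): with
 * NETFLOW_V5_MAX_RECORDS == 30, a full datagram is
 *
 *	24 + 30 * 48 == 1464 bytes
 *
 * which fits comfortably in a single 1500-byte Ethernet payload once
 * IP and UDP headers (20 + 8 bytes) are added by the export path.
 */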

/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	priv_p			priv = (priv_p)arg;
	item_p			item = NULL;
	uint32_t		used;
	int			i;

	/*
	 * Go through the whole cache.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip buckets that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used);
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * The interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the predicted
			 * hash collision ratio is still small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;

			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, &item, fle, NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	if (item != NULL)
		return_export_dgram(priv, item, NG_NOFLAGS);

	/* Schedule the next expiry run. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}
719