xref: /openbsd-src/sys/net/if_pflow.c (revision 4c1e55dc91edd6e69ccc60ce855900fbc12cf34f)
1 /*	$OpenBSD: if_pflow.c,v 1.20 2012/04/11 17:42:53 mikeb Exp $	*/
2 
3 /*
4  * Copyright (c) 2011 Florian Obser <florian@narrans.de>
5  * Copyright (c) 2011 Sebastian Benoit <benoit-lists@fb12.de>
6  * Copyright (c) 2008 Henning Brauer <henning@openbsd.org>
7  * Copyright (c) 2008 Joerg Goltermann <jg@osn.de>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
18  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
19  * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/types.h>
23 #include <sys/malloc.h>
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/mbuf.h>
27 #include <sys/socket.h>
28 #include <sys/ioctl.h>
29 #include <sys/kernel.h>
30 #include <sys/proc.h>
31 #include <sys/sysctl.h>
32 #include <dev/rndvar.h>
33 
34 #include <net/if.h>
35 #include <net/if_types.h>
36 #include <net/bpf.h>
37 #include <net/route.h>
38 #include <netinet/in.h>
39 #include <netinet/if_ether.h>
40 #include <netinet/tcp.h>
41 
42 #ifdef INET
43 #include <netinet/in.h>
44 #include <netinet/in_var.h>
45 #include <netinet/in_systm.h>
46 #include <netinet/ip.h>
47 #include <netinet/ip_var.h>
48 #include <netinet/udp.h>
49 #include <netinet/udp_var.h>
50 #include <netinet/in_pcb.h>
51 #endif /* INET */
52 
53 #include <net/pfvar.h>
54 #include <net/if_pflow.h>
55 
56 #include "bpfilter.h"
57 #include "pflow.h"
58 
59 #define PFLOW_MINMTU	\
60     (sizeof(struct pflow_header) + sizeof(struct pflow_flow))
61 
62 #ifdef PFLOWDEBUG
63 #define DPRINTF(x)	do { printf x ; } while (0)
64 #else
65 #define DPRINTF(x)
66 #endif
67 
68 SLIST_HEAD(, pflow_softc) pflowif_list;
69 struct pflowstats	 pflowstats;
70 
71 void	pflowattach(int);
72 int	pflow_clone_create(struct if_clone *, int);
73 int	pflow_clone_destroy(struct ifnet *);
74 void	pflow_init_timeouts(struct pflow_softc *);
75 int	pflow_calc_mtu(struct pflow_softc *, int, int);
76 void	pflow_setmtu(struct pflow_softc *, int);
77 int	pflowoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
78 	    struct rtentry *);
79 int	pflowioctl(struct ifnet *, u_long, caddr_t);
80 void	pflowstart(struct ifnet *);
81 
82 struct mbuf	*pflow_get_mbuf(struct pflow_softc *, u_int16_t);
83 void	pflow_flush(struct pflow_softc *);
84 int	pflow_sendout_v5(struct pflow_softc *);
85 int	pflow_sendout_ipfix(struct pflow_softc *, sa_family_t);
86 int	pflow_sendout_ipfix_tmpl(struct pflow_softc *);
87 int	pflow_sendout_mbuf(struct pflow_softc *, struct mbuf *);
88 void	pflow_timeout(void *);
89 void	pflow_timeout6(void *);
90 void	pflow_timeout_tmpl(void *);
91 void	copy_flow_data(struct pflow_flow *, struct pflow_flow *,
92 	struct pf_state *, int, int);
93 void	copy_flow4_data(struct pflow_flow4 *, struct pflow_flow4 *,
94 	struct pf_state *, int, int);
95 void	copy_flow6_data(struct pflow_flow6 *, struct pflow_flow6 *,
96 	struct pf_state *, int, int);
97 int	pflow_pack_flow(struct pf_state *, struct pflow_softc *);
98 int	pflow_pack_flow_ipfix(struct pf_state *, struct pflow_softc *);
99 int	pflow_get_dynport(void);
100 int	export_pflow_if(struct pf_state*, struct pflow_softc *);
101 int	copy_flow_to_m(struct pflow_flow *flow, struct pflow_softc *sc);
102 int	copy_flow4_to_m(struct pflow_flow4 *flow, struct pflow_softc *sc);
103 int	copy_flow6_to_m(struct pflow_flow6 *flow, struct pflow_softc *sc);
104 
105 struct if_clone	pflow_cloner =
106     IF_CLONE_INITIALIZER("pflow", pflow_clone_create,
107     pflow_clone_destroy);
108 
109 /* from in_pcb.c */
110 extern int ipport_hifirstauto;
111 extern int ipport_hilastauto;
112 
113 /* from udp_usrreq.c */
114 extern int udpcksum;
115 
116 /* from kern/kern_clock.c; incremented each clock tick. */
117 extern int ticks;
118 
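/*
 * Pseudo-device attach routine: initialize the list of pflow interfaces
 * and register the "pflow" interface cloner.
 */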
119 void
120 pflowattach(int npflow)
121 {
122 	SLIST_INIT(&pflowif_list);
123 	if_clone_attach(&pflow_cloner);
124 }
125 
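/*
 * Create a pflow(4) instance: allocate and initialize the softc with its
 * defaults (no receiver configured yet, dynamically chosen UDP source port,
 * PFLOW_PROTO_DEFAULT), pre-build the NetFlow v9/IPFIX templates and attach
 * the interface.
 */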
126 int
127 pflow_clone_create(struct if_clone *ifc, int unit)
128 {
129 	struct ifnet		*ifp;
130 	struct pflow_softc	*pflowif;
131 
132 	if ((pflowif = malloc(sizeof(*pflowif),
133 	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
134 		return (ENOMEM);
135 
136 	pflowif->sc_imo.imo_membership = malloc(
137 	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
138 	    M_WAITOK|M_ZERO);
139 	pflowif->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
140 	pflowif->sc_receiver_ip.s_addr = 0;
141 	pflowif->sc_receiver_port = 0;
142 	pflowif->sc_sender_ip.s_addr = INADDR_ANY;
143 	pflowif->sc_sender_port = pflow_get_dynport();
144 	pflowif->sc_version = PFLOW_PROTO_DEFAULT;
145 	bzero(&pflowif->sc_tmpl, sizeof(pflowif->sc_tmpl));
146 	pflowif->sc_tmpl.set_header.set_id =
147 	    htons(pflowif->sc_version == PFLOW_PROTO_9 ?
148 	    PFLOW_V9_TMPL_SET_ID : PFLOW_V10_TMPL_SET_ID);
149 	pflowif->sc_tmpl.set_header.set_length =
150 	    htons(sizeof(struct pflow_tmpl));
151 
152 	/* v9/v10 IPv4 template */
153 	pflowif->sc_tmpl.ipv4_tmpl.h.tmpl_id = htons(PFLOW_TMPL_IPV4_ID);
154 	pflowif->sc_tmpl.ipv4_tmpl.h.field_count
155 	    = htons(PFLOW_TMPL_IPV4_FIELD_COUNT);
156 	pflowif->sc_tmpl.ipv4_tmpl.src_ip.field_id =
157 	    htons(PFIX_IE_sourceIPv4Address);
158 	pflowif->sc_tmpl.ipv4_tmpl.src_ip.len = htons(4);
159 	pflowif->sc_tmpl.ipv4_tmpl.dest_ip.field_id =
160 	    htons(PFIX_IE_destinationIPv4Address);
161 	pflowif->sc_tmpl.ipv4_tmpl.dest_ip.len = htons(4);
162 	pflowif->sc_tmpl.ipv4_tmpl.packets.field_id =
163 	    htons(PFIX_IE_packetDeltaCount);
164 	pflowif->sc_tmpl.ipv4_tmpl.packets.len = htons(8);
165 	pflowif->sc_tmpl.ipv4_tmpl.octets.field_id =
166 	    htons(PFIX_IE_octetDeltaCount);
167 	pflowif->sc_tmpl.ipv4_tmpl.octets.len = htons(8);
168 	pflowif->sc_tmpl.ipv4_tmpl.start.field_id =
169 	    htons(PFIX_IE_flowStartSysUpTime);
170 	pflowif->sc_tmpl.ipv4_tmpl.start.len = htons(4);
171 	pflowif->sc_tmpl.ipv4_tmpl.finish.field_id =
172 	    htons(PFIX_IE_flowEndSysUpTime);
173 	pflowif->sc_tmpl.ipv4_tmpl.finish.len = htons(4);
174 	pflowif->sc_tmpl.ipv4_tmpl.src_port.field_id =
175 	    htons(PFIX_IE_sourceTransportPort);
176 	pflowif->sc_tmpl.ipv4_tmpl.src_port.len = htons(2);
177 	pflowif->sc_tmpl.ipv4_tmpl.dest_port.field_id =
178 	    htons(PFIX_IE_destinationTransportPort);
179 	pflowif->sc_tmpl.ipv4_tmpl.dest_port.len = htons(2);
180 	pflowif->sc_tmpl.ipv4_tmpl.tos.field_id =
181 	    htons(PFIX_IE_ipClassOfService);
182 	pflowif->sc_tmpl.ipv4_tmpl.tos.len = htons(1);
183 	pflowif->sc_tmpl.ipv4_tmpl.protocol.field_id =
184 	    htons(PFIX_IE_protocolIdentifier);
185 	pflowif->sc_tmpl.ipv4_tmpl.protocol.len = htons(1);
186 
187 	/* v9/v10 IPv6 template */
188 	pflowif->sc_tmpl.ipv6_tmpl.h.tmpl_id = htons(PFLOW_TMPL_IPV6_ID);
189 	pflowif->sc_tmpl.ipv6_tmpl.h.field_count =
190 	    htons(PFLOW_TMPL_IPV6_FIELD_COUNT);
191 	pflowif->sc_tmpl.ipv6_tmpl.src_ip.field_id =
192 	    htons(PFIX_IE_sourceIPv6Address);
193 	pflowif->sc_tmpl.ipv6_tmpl.src_ip.len = htons(16);
194 	pflowif->sc_tmpl.ipv6_tmpl.dest_ip.field_id =
195 	    htons(PFIX_IE_destinationIPv6Address);
196 	pflowif->sc_tmpl.ipv6_tmpl.dest_ip.len = htons(16);
197 	pflowif->sc_tmpl.ipv6_tmpl.packets.field_id =
198 	    htons(PFIX_IE_packetDeltaCount);
199 	pflowif->sc_tmpl.ipv6_tmpl.packets.len = htons(8);
200 	pflowif->sc_tmpl.ipv6_tmpl.octets.field_id =
201 	    htons(PFIX_IE_octetDeltaCount);
202 	pflowif->sc_tmpl.ipv6_tmpl.octets.len = htons(8);
203 	pflowif->sc_tmpl.ipv6_tmpl.start.field_id =
204 	    htons(PFIX_IE_flowStartSysUpTime);
205 	pflowif->sc_tmpl.ipv6_tmpl.start.len = htons(4);
206 	pflowif->sc_tmpl.ipv6_tmpl.finish.field_id =
207 	    htons(PFIX_IE_flowEndSysUpTime);
208 	pflowif->sc_tmpl.ipv6_tmpl.finish.len = htons(4);
209 	pflowif->sc_tmpl.ipv6_tmpl.src_port.field_id =
210 	    htons(PFIX_IE_sourceTransportPort);
211 	pflowif->sc_tmpl.ipv6_tmpl.src_port.len = htons(2);
212 	pflowif->sc_tmpl.ipv6_tmpl.dest_port.field_id =
213 	    htons(PFIX_IE_destinationTransportPort);
214 	pflowif->sc_tmpl.ipv6_tmpl.dest_port.len = htons(2);
215 	pflowif->sc_tmpl.ipv6_tmpl.tos.field_id =
216 	    htons(PFIX_IE_ipClassOfService);
217 	pflowif->sc_tmpl.ipv6_tmpl.tos.len = htons(1);
218 	pflowif->sc_tmpl.ipv6_tmpl.protocol.field_id =
219 	    htons(PFIX_IE_protocolIdentifier);
220 	pflowif->sc_tmpl.ipv6_tmpl.protocol.len = htons(1);
221 
222 	ifp = &pflowif->sc_if;
223 	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pflow%d", unit);
224 	ifp->if_softc = pflowif;
225 	ifp->if_ioctl = pflowioctl;
226 	ifp->if_output = pflowoutput;
227 	ifp->if_start = pflowstart;
228 	ifp->if_type = IFT_PFLOW;
229 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
230 	ifp->if_hdrlen = PFLOW_HDRLEN;
231 	ifp->if_flags = IFF_UP;
232 	ifp->if_flags &= ~IFF_RUNNING;	/* not running, need receiver */
233 	pflow_setmtu(pflowif, ETHERMTU);
234 	pflow_init_timeouts(pflowif);
235 	if_attach(ifp);
236 	if_alloc_sadl(ifp);
237 
238 #if NBPFILTER > 0
239 	bpfattach(&pflowif->sc_if.if_bpf, ifp, DLT_RAW, 0);
240 #endif
241 
242 	/* Insert into list of pflows */
243 	SLIST_INSERT_HEAD(&pflowif_list, pflowif, sc_next);
244 	return (0);
245 }
246 
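/*
 * Destroy a pflow(4) instance: flush any pending export data, then detach
 * the interface and free its resources.
 */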
247 int
248 pflow_clone_destroy(struct ifnet *ifp)
249 {
250 	struct pflow_softc	*sc = ifp->if_softc;
251 	int			 s;
252 
253 	s = splnet();
254 	pflow_flush(sc);
255 	if_detach(ifp);
256 	SLIST_REMOVE(&pflowif_list, sc, pflow_softc, sc_next);
257 	free(sc->sc_imo.imo_membership, M_IPMOPTS);
258 	free(sc, M_DEVBUF);
259 	splx(s);
260 	return (0);
261 }
262 
263 /*
264  * Start output on the pflow interface.  Export packets are sent directly
 * with ip_output(), so anything queued here is simply drained and dropped.
265  */
266 void
267 pflowstart(struct ifnet *ifp)
268 {
269 	struct mbuf	*m;
270 	int		 s;
271 
272 	for (;;) {
273 		s = splnet();
274 		IF_DROP(&ifp->if_snd);
275 		IF_DEQUEUE(&ifp->if_snd, m);
276 		splx(s);
277 
278 		if (m == NULL)
279 			return;
280 		m_freem(m);
281 	}
282 }
283 
284 int
285 pflowoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
286 	struct rtentry *rt)
287 {
288 	m_freem(m);
289 	return (0);
290 }
291 
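/*
 * Interface ioctls.  SIOCGETPFLOW/SIOCSETPFLOW get and set the exporter
 * configuration (sender/receiver address, receiver port, protocol version);
 * IFF_RUNNING is only set once a receiver address and port are configured.
 */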
292 /* ARGSUSED */
293 int
294 pflowioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
295 {
296 	struct proc		*p = curproc;
297 	struct pflow_softc	*sc = ifp->if_softc;
298 	struct ifreq		*ifr = (struct ifreq *)data;
299 	struct pflowreq		 pflowr;
300 	int			 s, error;
301 
302 	switch (cmd) {
303 	case SIOCSIFADDR:
304 	case SIOCAIFADDR:
305 	case SIOCSIFDSTADDR:
306 	case SIOCSIFFLAGS:
307 		if ((ifp->if_flags & IFF_UP) &&
308 		    sc->sc_receiver_ip.s_addr != 0 &&
309 		    sc->sc_receiver_port != 0) {
310 			ifp->if_flags |= IFF_RUNNING;
311 			sc->sc_gcounter = pflowstats.pflow_flows;
312 			/* send templates on startup */
313 			if (sc->sc_version == PFLOW_PROTO_9
314 			    || sc->sc_version == PFLOW_PROTO_10) {
315 				s = splnet();
316 				pflow_sendout_ipfix_tmpl(sc);
317 				splx(s);
318 			}
319 		} else
320 			ifp->if_flags &= ~IFF_RUNNING;
321 		break;
322 	case SIOCSIFMTU:
323 		if (ifr->ifr_mtu < PFLOW_MINMTU)
324 			return (EINVAL);
325 		if (ifr->ifr_mtu > MCLBYTES)
326 			ifr->ifr_mtu = MCLBYTES;
327 		s = splnet();
328 		if (ifr->ifr_mtu < ifp->if_mtu)
329 			pflow_flush(sc);
330 		pflow_setmtu(sc, ifr->ifr_mtu);
331 		splx(s);
332 		break;
333 
334 	case SIOCGETPFLOW:
335 		bzero(&pflowr, sizeof(pflowr));
336 
337 		pflowr.sender_ip = sc->sc_sender_ip;
338 		pflowr.receiver_ip = sc->sc_receiver_ip;
339 		pflowr.receiver_port = sc->sc_receiver_port;
340 		pflowr.version = sc->sc_version;
341 
342 		if ((error = copyout(&pflowr, ifr->ifr_data,
343 		    sizeof(pflowr))))
344 			return (error);
345 		break;
346 
347 	case SIOCSETPFLOW:
348 		if ((error = suser(p, 0)) != 0)
349 			return (error);
350 		if ((error = copyin(ifr->ifr_data, &pflowr,
351 		    sizeof(pflowr))))
352 			return (error);
353 		if (pflowr.addrmask & PFLOW_MASK_VERSION) {
354 			switch (pflowr.version) {
355 			case PFLOW_PROTO_5:
356 			case PFLOW_PROTO_9:
357 			case PFLOW_PROTO_10:
358 				break;
359 			default:
360 				return (EINVAL);
361 			}
362 		}
363 		s = splnet();
364 
365 		pflow_flush(sc);
366 
367 		if (pflowr.addrmask & PFLOW_MASK_DSTIP)
368 			sc->sc_receiver_ip = pflowr.receiver_ip;
369 		if (pflowr.addrmask & PFLOW_MASK_DSTPRT)
370 			sc->sc_receiver_port = pflowr.receiver_port;
371 		if (pflowr.addrmask & PFLOW_MASK_SRCIP)
372 			sc->sc_sender_ip.s_addr = pflowr.sender_ip.s_addr;
373 		/* error check is above */
374 		if (pflowr.addrmask & PFLOW_MASK_VERSION)
375 			sc->sc_version = pflowr.version;
376 
377 		pflow_setmtu(sc, ETHERMTU);
378 		pflow_init_timeouts(sc);
379 
380 		if (sc->sc_version == PFLOW_PROTO_9
381 		    || sc->sc_version == PFLOW_PROTO_10) {
382 			sc->sc_tmpl.set_header.set_id =
383 			    htons(sc->sc_version == PFLOW_PROTO_9 ?
384 				PFLOW_V9_TMPL_SET_ID : PFLOW_V10_TMPL_SET_ID);
385 			pflow_sendout_ipfix_tmpl(sc);
386 		}
387 
388 		splx(s);
389 
390 		if ((ifp->if_flags & IFF_UP) &&
391 		    sc->sc_receiver_ip.s_addr != 0 &&
392 		    sc->sc_receiver_port != 0) {
393 			ifp->if_flags |= IFF_RUNNING;
394 			sc->sc_gcounter = pflowstats.pflow_flows;
395 		} else
396 			ifp->if_flags &= ~IFF_RUNNING;
397 
398 		break;
399 
400 	default:
401 		return (ENOTTY);
402 	}
403 	return (0);
404 }
405 
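/*
 * (Re)initialize the per-version timeouts: protocol 5 only needs the flow
 * export timeout, while v9/IPFIX additionally use a separate IPv6 data
 * timeout and a periodic template retransmission timeout.
 */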
406 void
407 pflow_init_timeouts(struct pflow_softc *sc)
408 {
409 	switch (sc->sc_version) {
410 	case PFLOW_PROTO_5:
411 		if (timeout_initialized(&sc->sc_tmo6))
412 			timeout_del(&sc->sc_tmo6);
413 		if (timeout_initialized(&sc->sc_tmo_tmpl))
414 			timeout_del(&sc->sc_tmo_tmpl);
415 		if (!timeout_initialized(&sc->sc_tmo))
416 			timeout_set(&sc->sc_tmo, pflow_timeout, sc);
417 		break;
418 	case PFLOW_PROTO_9:
419 	case PFLOW_PROTO_10:
420 		if (!timeout_initialized(&sc->sc_tmo_tmpl))
421 			timeout_set(&sc->sc_tmo_tmpl, pflow_timeout_tmpl, sc);
422 		if (!timeout_initialized(&sc->sc_tmo))
423 			timeout_set(&sc->sc_tmo, pflow_timeout, sc);
424 		if (!timeout_initialized(&sc->sc_tmo6))
425 			timeout_set(&sc->sc_tmo6, pflow_timeout6, sc);
426 
427 		timeout_add_sec(&sc->sc_tmo_tmpl, PFLOW_TMPL_TIMEOUT);
428 		break;
429 	default: /* NOTREACHED */
430 		break;
431 	}
432 }
433 
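/*
 * Compute how many IPv4 and IPv6 flow records fit into a datagram of the
 * given MTU and return the resulting interface MTU for v9/IPFIX mode.
 */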
434 int
435 pflow_calc_mtu(struct pflow_softc *sc, int mtu, int hdrsz)
436 {
437 	sc->sc_maxcount4 = (mtu - hdrsz -
438 	    sizeof(struct udpiphdr)) / sizeof(struct pflow_flow4);
439 	if (sc->sc_maxcount4 > PFLOW_MAXFLOWS)
440 		sc->sc_maxcount4 = PFLOW_MAXFLOWS;
441 	sc->sc_maxcount6 = (mtu - hdrsz -
442 	    sizeof(struct udpiphdr)) / sizeof(struct pflow_flow6);
443 	if (sc->sc_maxcount6 > PFLOW_MAXFLOWS)
444 		sc->sc_maxcount6 = PFLOW_MAXFLOWS;
445 
446 	return (hdrsz + sizeof(struct udpiphdr) +
447 	    MIN(sc->sc_maxcount4 * sizeof(struct pflow_flow4),
448 	    sc->sc_maxcount6 * sizeof(struct pflow_flow6)));
449 }
450 
451 void
452 pflow_setmtu(struct pflow_softc *sc, int mtu_req)
453 {
454 	int	mtu;
455 
456 	if (sc->sc_pflow_ifp && sc->sc_pflow_ifp->if_mtu < mtu_req)
457 		mtu = sc->sc_pflow_ifp->if_mtu;
458 	else
459 		mtu = mtu_req;
460 
461 	switch (sc->sc_version) {
462 	case PFLOW_PROTO_5:
463 		sc->sc_maxcount = (mtu - sizeof(struct pflow_header) -
464 		    sizeof(struct udpiphdr)) / sizeof(struct pflow_flow);
465 		if (sc->sc_maxcount > PFLOW_MAXFLOWS)
466 		    sc->sc_maxcount = PFLOW_MAXFLOWS;
467 		sc->sc_if.if_mtu = sizeof(struct pflow_header) +
468 		    sizeof(struct udpiphdr) +
469 		    sc->sc_maxcount * sizeof(struct pflow_flow);
470 		break;
471 	case PFLOW_PROTO_9:
472 		sc->sc_if.if_mtu =
473 		    pflow_calc_mtu(sc, mtu, sizeof(struct pflow_v9_header));
474 		break;
475 	case PFLOW_PROTO_10:
476 		sc->sc_if.if_mtu =
477 		    pflow_calc_mtu(sc, mtu, sizeof(struct pflow_v10_header));
478 		break;
479 	default: /* NOTREACHED */
480 		break;
481 	}
482 }
483 
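/*
 * Allocate a cluster mbuf for export data and prepend the version-specific
 * header: the NetFlow v5 header, or a set header for v9/IPFIX.  With sc set
 * to NULL an empty mbuf is returned (used for template packets).
 */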
484 struct mbuf *
485 pflow_get_mbuf(struct pflow_softc *sc, u_int16_t set_id)
486 {
487 	struct pflow_set_header	 set_hdr;
488 	struct pflow_header	 h;
489 	struct mbuf		*m;
490 
491 	MGETHDR(m, M_DONTWAIT, MT_DATA);
492 	if (m == NULL) {
493 		pflowstats.pflow_onomem++;
494 		return (NULL);
495 	}
496 
497 	MCLGET(m, M_DONTWAIT);
498 	if ((m->m_flags & M_EXT) == 0) {
499 		m_free(m);
500 		pflowstats.pflow_onomem++;
501 		return (NULL);
502 	}
503 
504 	m->m_len = m->m_pkthdr.len = 0;
505 	m->m_pkthdr.rcvif = NULL;
506 
507 	if (sc == NULL)		/* get only a new empty mbuf */
508 		return (m);
509 
510 	if (sc->sc_version == PFLOW_PROTO_5) {
511 		/* populate pflow_header */
512 		h.reserved1 = 0;
513 		h.reserved2 = 0;
514 		h.count = 0;
515 		h.version = htons(PFLOW_PROTO_5);
516 		h.flow_sequence = htonl(sc->sc_gcounter);
517 		h.engine_type = PFLOW_ENGINE_TYPE;
518 		h.engine_id = PFLOW_ENGINE_ID;
519 		m_copyback(m, 0, PFLOW_HDRLEN, &h, M_NOWAIT);
520 
521 		sc->sc_count = 0;
522 		timeout_add_sec(&sc->sc_tmo, PFLOW_TIMEOUT);
523 	} else {
524 		/* populate pflow_set_header */
525 		set_hdr.set_length = 0;
526 		set_hdr.set_id = htons(set_id);
527 		m_copyback(m, 0, PFLOW_SET_HDRLEN, &set_hdr, M_NOWAIT);
528 	}
529 
530 	return (m);
531 }
532 
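/*
 * Fill in a pair of NetFlow v5 records from a pf state, one per direction
 * of the connection; flow2 describes the reverse direction of flow1.
 */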
533 void
534 copy_flow_data(struct pflow_flow *flow1, struct pflow_flow *flow2,
535     struct pf_state *st, int src, int dst)
536 {
537 	struct pf_state_key	*sk = st->key[PF_SK_WIRE];
538 
539 	flow1->src_ip = flow2->dest_ip = sk->addr[src].v4.s_addr;
540 	flow1->src_port = flow2->dest_port = sk->port[src];
541 	flow1->dest_ip = flow2->src_ip = sk->addr[dst].v4.s_addr;
542 	flow1->dest_port = flow2->src_port = sk->port[dst];
543 
544 	flow1->dest_as = flow2->src_as =
545 	    flow1->src_as = flow2->dest_as = 0;
546 	flow1->if_index_out = flow2->if_index_in =
547 	    flow1->if_index_in = flow2->if_index_out = 0;
548 	flow1->dest_mask = flow2->src_mask =
549 	    flow1->src_mask = flow2->dest_mask = 0;
550 
551 	flow1->flow_packets = htonl(st->packets[0]);
552 	flow2->flow_packets = htonl(st->packets[1]);
553 	flow1->flow_octets = htonl(st->bytes[0]);
554 	flow2->flow_octets = htonl(st->bytes[1]);
555 
556 	flow1->flow_start = flow2->flow_start =
557 	    htonl(st->creation * 1000);
558 	flow1->flow_finish = flow2->flow_finish =
559 	    htonl((time_uptime - (st->rule.ptr->timeout[st->timeout] ?
560 	    st->rule.ptr->timeout[st->timeout] :
561 	    pf_default_rule.timeout[st->timeout])) * 1000);
562 	flow1->tcp_flags = flow2->tcp_flags = 0;
563 	flow1->protocol = flow2->protocol = sk->proto;
564 	flow1->tos = flow2->tos = st->rule.ptr->tos;
565 }
566 
567 void
568 copy_flow4_data(struct pflow_flow4 *flow1, struct pflow_flow4 *flow2,
569     struct pf_state *st, int src, int dst)
570 {
571 	struct pf_state_key	*sk = st->key[PF_SK_WIRE];
572 
573 	flow1->src_ip = flow2->dest_ip = sk->addr[src].v4.s_addr;
574 	flow1->src_port = flow2->dest_port = sk->port[src];
575 	flow1->dest_ip = flow2->src_ip = sk->addr[dst].v4.s_addr;
576 	flow1->dest_port = flow2->src_port = sk->port[dst];
577 
578 	flow1->flow_packets = htobe64(st->packets[0]);
579 	flow2->flow_packets = htobe64(st->packets[1]);
580 	flow1->flow_octets = htobe64(st->bytes[0]);
581 	flow2->flow_octets = htobe64(st->bytes[1]);
582 
583 	flow1->flow_start = flow2->flow_start =
584 	    htonl(st->creation * 1000);
585 	flow1->flow_finish = flow2->flow_finish =
586 	    htonl((time_uptime - (st->rule.ptr->timeout[st->timeout] ?
587 	    st->rule.ptr->timeout[st->timeout] :
588 	    pf_default_rule.timeout[st->timeout])) * 1000);
589 
590 	flow1->protocol = flow2->protocol = sk->proto;
591 	flow1->tos = flow2->tos = st->rule.ptr->tos;
592 }
593 
594 void
595 copy_flow6_data(struct pflow_flow6 *flow1, struct pflow_flow6 *flow2,
596     struct pf_state *st, int src, int dst)
597 {
598 	struct pf_state_key	*sk = st->key[PF_SK_WIRE];
599 	bcopy(&sk->addr[src].v6, &flow1->src_ip, sizeof(flow1->src_ip));
600 	bcopy(&sk->addr[src].v6, &flow2->dest_ip, sizeof(flow2->dest_ip));
601 	flow1->src_port = flow2->dest_port = sk->port[src];
602 	bcopy(&sk->addr[dst].v6, &flow1->dest_ip, sizeof(flow1->dest_ip));
603 	bcopy(&sk->addr[dst].v6, &flow2->src_ip, sizeof(flow2->src_ip));
604 	flow1->dest_port = flow2->src_port = sk->port[dst];
605 
606 	flow1->flow_packets = htobe64(st->packets[0]);
607 	flow2->flow_packets = htobe64(st->packets[1]);
608 	flow1->flow_octets = htobe64(st->bytes[0]);
609 	flow2->flow_octets = htobe64(st->bytes[1]);
610 
611 	flow1->flow_start = flow2->flow_start =
612 	    htonl(st->creation * 1000);
613 	flow1->flow_finish = flow2->flow_finish =
614 	    htonl((time_uptime - (st->rule.ptr->timeout[st->timeout] ?
615 	    st->rule.ptr->timeout[st->timeout] :
616 	    pf_default_rule.timeout[st->timeout])) * 1000);
617 
618 	flow1->protocol = flow2->protocol = sk->proto;
619 	flow1->tos = flow2->tos = st->rule.ptr->tos;
620 }
621 
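/*
 * Export a pf state: hand it to every pflow interface whose protocol
 * version can represent the state's address family (v5 is IPv4 only).
 */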
622 int
623 export_pflow(struct pf_state *st)
624 {
625 	struct pflow_softc	*sc = NULL;
626 	struct pf_state_key	*sk = st->key[PF_SK_WIRE];
627 
628 	SLIST_FOREACH(sc, &pflowif_list, sc_next) {
629 		switch (sc->sc_version) {
630 		case PFLOW_PROTO_5:
631 			if (sk->af == AF_INET)
632 				export_pflow_if(st, sc);
633 			break;
634 		case PFLOW_PROTO_9:
635 			/* ... fall through ... */
636 		case PFLOW_PROTO_10:
637 			if (sk->af == AF_INET || sk->af == AF_INET6)
638 				export_pflow_if(st, sc);
639 			break;
640 		default: /* NOTREACHED */
641 			break;
642 		}
643 	}
644 
645 	return (0);
646 }
647 
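/*
 * Export a single state on one interface.  The v5 byte counters are only
 * 32 bits wide, so states whose counters exceed PFLOW_MAXBYTES are split
 * into multiple records.
 */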
648 int
649 export_pflow_if(struct pf_state *st, struct pflow_softc *sc)
650 {
651 	struct pf_state		 pfs_copy;
652 	struct ifnet		*ifp = &sc->sc_if;
653 	u_int64_t		 bytes[2];
654 	int			 ret = 0;
655 
656 	if (!(ifp->if_flags & IFF_RUNNING))
657 		return (0);
658 
659 	if (sc->sc_version == PFLOW_PROTO_9 || sc->sc_version == PFLOW_PROTO_10)
660 		return (pflow_pack_flow_ipfix(st, sc));
661 
662 	/* PFLOW_PROTO_5 */
663 	if ((st->bytes[0] < (u_int64_t)PFLOW_MAXBYTES)
664 	    && (st->bytes[1] < (u_int64_t)PFLOW_MAXBYTES))
665 		return (pflow_pack_flow(st, sc));
666 
667 	/* flows > PFLOW_MAXBYTES need special handling */
668 	bcopy(st, &pfs_copy, sizeof(pfs_copy));
669 	bytes[0] = pfs_copy.bytes[0];
670 	bytes[1] = pfs_copy.bytes[1];
671 
672 	while (bytes[0] > PFLOW_MAXBYTES) {
673 		pfs_copy.bytes[0] = PFLOW_MAXBYTES;
674 		pfs_copy.bytes[1] = 0;
675 
676 		if ((ret = pflow_pack_flow(&pfs_copy, sc)) != 0)
677 			return (ret);
678 		if ((bytes[0] - PFLOW_MAXBYTES) > 0)
679 			bytes[0] -= PFLOW_MAXBYTES;
680 	}
681 
682 	while (bytes[1] > (u_int64_t)PFLOW_MAXBYTES) {
683 		pfs_copy.bytes[1] = PFLOW_MAXBYTES;
684 		pfs_copy.bytes[0] = 0;
685 
686 		if ((ret = pflow_pack_flow(&pfs_copy, sc)) != 0)
687 			return (ret);
688 		if ((bytes[1] - PFLOW_MAXBYTES) > 0)
689 			bytes[1] -= PFLOW_MAXBYTES;
690 	}
691 
692 	pfs_copy.bytes[0] = bytes[0];
693 	pfs_copy.bytes[1] = bytes[1];
694 
695 	return (pflow_pack_flow(&pfs_copy, sc));
696 }
697 
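/*
 * Append one v5 flow record to the pending export mbuf, allocating it first
 * if necessary, and send the packet out once sc_maxcount records have been
 * collected.
 */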
698 int
699 copy_flow_to_m(struct pflow_flow *flow, struct pflow_softc *sc)
700 {
701 	int		s, ret = 0;
702 
703 	s = splnet();
704 	if (sc->sc_mbuf == NULL) {
705 		if ((sc->sc_mbuf = pflow_get_mbuf(sc, 0)) == NULL) {
706 			splx(s);
707 			return (ENOBUFS);
708 		}
709 	}
710 	m_copyback(sc->sc_mbuf, PFLOW_HDRLEN +
711 	    (sc->sc_count * sizeof(struct pflow_flow)),
712 	    sizeof(struct pflow_flow), flow, M_NOWAIT);
713 
714 	if (pflowstats.pflow_flows == sc->sc_gcounter)
715 		pflowstats.pflow_flows++;
716 	sc->sc_gcounter++;
717 	sc->sc_count++;
718 
719 	if (sc->sc_count >= sc->sc_maxcount)
720 		ret = pflow_sendout_v5(sc);
721 
722 	splx(s);
723 	return (ret);
724 }
725 
726 int
727 copy_flow4_to_m(struct pflow_flow4 *flow, struct pflow_softc *sc)
728 {
729 	int		s, ret = 0;
730 
731 	s = splnet();
732 	if (sc->sc_mbuf == NULL) {
733 		if ((sc->sc_mbuf =
734 		    pflow_get_mbuf(sc, PFLOW_TMPL_IPV4_ID)) == NULL) {
735 			splx(s);
736 			return (ENOBUFS);
737 		}
738 		sc->sc_count4 = 0;
739 		timeout_add_sec(&sc->sc_tmo, PFLOW_TIMEOUT);
740 	}
741 	m_copyback(sc->sc_mbuf, PFLOW_SET_HDRLEN +
742 	    (sc->sc_count4 * sizeof(struct pflow_flow4)),
743 	    sizeof(struct pflow_flow4), flow, M_NOWAIT);
744 
745 	if (pflowstats.pflow_flows == sc->sc_gcounter)
746 		pflowstats.pflow_flows++;
747 	sc->sc_gcounter++;
748 	sc->sc_count4++;
749 
750 	if (sc->sc_count4 >= sc->sc_maxcount4)
751 		ret = pflow_sendout_ipfix(sc, AF_INET);
752 	splx(s);
753 	return (ret);
754 }
755 
756 int
757 copy_flow6_to_m(struct pflow_flow6 *flow, struct pflow_softc *sc)
758 {
759 	int		s, ret = 0;
760 
761 	s = splnet();
762 	if (sc->sc_mbuf6 == NULL) {
763 		if ((sc->sc_mbuf6 =
764 		    pflow_get_mbuf(sc, PFLOW_TMPL_IPV6_ID)) == NULL) {
765 			splx(s);
766 			return (ENOBUFS);
767 		}
768 		sc->sc_count6 = 0;
769 		timeout_add_sec(&sc->sc_tmo6, PFLOW_TIMEOUT);
770 	}
771 	m_copyback(sc->sc_mbuf6, PFLOW_SET_HDRLEN +
772 	    (sc->sc_count6 * sizeof(struct pflow_flow6)),
773 	    sizeof(struct pflow_flow6), flow, M_NOWAIT);
774 
775 	if (pflowstats.pflow_flows == sc->sc_gcounter)
776 		pflowstats.pflow_flows++;
777 	sc->sc_gcounter++;
778 	sc->sc_count6++;
779 
780 	if (sc->sc_count6 >= sc->sc_maxcount6)
781 		ret = pflow_sendout_ipfix(sc, AF_INET6);
782 
783 	splx(s);
784 	return (ret);
785 }
786 
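/*
 * A pf state carries packet and byte counters for both directions; emit a
 * v5 record for each direction that actually saw traffic.
 */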
787 int
788 pflow_pack_flow(struct pf_state *st, struct pflow_softc *sc)
789 {
790 	struct pflow_flow	 flow1;
791 	struct pflow_flow	 flow2;
792 	int			 ret = 0;
793 
794 	bzero(&flow1, sizeof(flow1));
795 	bzero(&flow2, sizeof(flow2));
796 
797 	if (st->direction == PF_OUT)
798 		copy_flow_data(&flow1, &flow2, st, 1, 0);
799 	else
800 		copy_flow_data(&flow1, &flow2, st, 0, 1);
801 
802 	if (st->bytes[0] != 0) /* first flow from state */
803 		ret = copy_flow_to_m(&flow1, sc);
804 
805 	if (st->bytes[1] != 0) /* second flow from state */
806 		ret = copy_flow_to_m(&flow2, sc);
807 
808 	return (ret);
809 }
810 
811 int
812 pflow_pack_flow_ipfix(struct pf_state *st, struct pflow_softc *sc)
813 {
814 	struct pf_state_key	*sk = st->key[PF_SK_WIRE];
815 	struct pflow_flow4	 flow4_1, flow4_2;
816 	struct pflow_flow6	 flow6_1, flow6_2;
817 	int			 ret = 0;
818 	if (sk->af == AF_INET) {
819 		bzero(&flow4_1, sizeof(flow4_1));
820 		bzero(&flow4_2, sizeof(flow4_2));
821 
822 		if (st->direction == PF_OUT)
823 			copy_flow4_data(&flow4_1, &flow4_2, st, 1, 0);
824 		else
825 			copy_flow4_data(&flow4_1, &flow4_2, st, 0, 1);
826 
827 		if (st->bytes[0] != 0) /* first flow from state */
828 			ret = copy_flow4_to_m(&flow4_1, sc);
829 
830 		if (st->bytes[1] != 0) /* second flow from state */
831 			ret = copy_flow4_to_m(&flow4_2, sc);
832 	} else if (sk->af == AF_INET6) {
833 		bzero(&flow6_1, sizeof(flow6_1));
834 		bzero(&flow6_2, sizeof(flow6_2));
835 
836 		if (st->direction == PF_OUT)
837 			copy_flow6_data(&flow6_1, &flow6_2, st, 1, 0);
838 		else
839 			copy_flow6_data(&flow6_1, &flow6_2, st, 0, 1);
840 
841 		if (st->bytes[0] != 0) /* first flow from state */
842 			ret = copy_flow6_to_m(&flow6_1, sc);
843 
844 		if (st->bytes[1] != 0) /* second flow from state */
845 			ret = copy_flow6_to_m(&flow6_2, sc);
846 	}
847 	return (ret);
848 }
849 
850 void
851 pflow_timeout(void *v)
852 {
853 	struct pflow_softc	*sc = v;
854 	int			 s;
855 
856 	s = splnet();
857 	switch (sc->sc_version) {
858 	case PFLOW_PROTO_5:
859 		pflow_sendout_v5(sc);
860 		break;
861 	case PFLOW_PROTO_9:
862 		/* ... fall through ... */
863 	case PFLOW_PROTO_10:
864 		pflow_sendout_ipfix(sc, AF_INET);
		break;
865 	default: /* NOTREACHED */
866 		break;
867 	}
868 	splx(s);
869 }
870 
871 void
872 pflow_timeout6(void *v)
873 {
874 	struct pflow_softc	*sc = v;
875 	int			 s;
876 
877 	s = splnet();
878 	pflow_sendout_ipfix(sc, AF_INET6);
879 	splx(s);
880 }
881 
882 void
883 pflow_timeout_tmpl(void *v)
884 {
885 	struct pflow_softc	*sc = v;
886 	int			 s;
887 
888 	s = splnet();
889 	pflow_sendout_ipfix_tmpl(sc);
890 	splx(s);
891 }
892 
893 /* This must be called in splnet() */
894 void
895 pflow_flush(struct pflow_softc *sc)
896 {
897 	switch (sc->sc_version) {
898 	case PFLOW_PROTO_5:
899 		pflow_sendout_v5(sc);
900 		break;
901 	case PFLOW_PROTO_9:
902 	case PFLOW_PROTO_10:
903 		pflow_sendout_ipfix(sc, AF_INET);
904 		pflow_sendout_ipfix(sc, AF_INET6);
905 		break;
906 	default: /* NOTREACHED */
907 		break;
908 	}
909 }
910 
911 
912 /* This must be called in splnet() */
913 int
914 pflow_sendout_v5(struct pflow_softc *sc)
915 {
916 	struct mbuf		*m = sc->sc_mbuf;
917 	struct pflow_header	*h;
918 	struct ifnet		*ifp = &sc->sc_if;
919 
920 	timeout_del(&sc->sc_tmo);
921 
922 	if (m == NULL)
923 		return (0);
924 
925 	sc->sc_mbuf = NULL;
926 	if (!(ifp->if_flags & IFF_RUNNING)) {
927 		m_freem(m);
928 		return (0);
929 	}
930 
931 	pflowstats.pflow_packets++;
932 	h = mtod(m, struct pflow_header *);
933 	h->count = htons(sc->sc_count);
934 
935 	/* populate pflow_header */
936 	h->uptime_ms = htonl(time_uptime * 1000);
937 	h->time_sec = htonl(time_second);
938 	h->time_nanosec = htonl(ticks);
939 
940 	return (pflow_sendout_mbuf(sc, m));
941 }
942 
943 /* This must be called in splnet() */
944 int
945 pflow_sendout_ipfix(struct pflow_softc *sc, sa_family_t af)
946 {
947 	struct mbuf			*m;
948 	struct pflow_v9_header		*h9;
949 	struct pflow_v10_header		*h10;
950 	struct pflow_set_header		*set_hdr;
951 	struct ifnet			*ifp = &sc->sc_if;
952 	int				 set_length;
953 
954 	switch (af) {
955 	case AF_INET:
956 		m = sc->sc_mbuf;
957 		timeout_del(&sc->sc_tmo);
958 		if (m == NULL)
959 			return (0);
960 		sc->sc_mbuf = NULL;
961 		break;
962 	case AF_INET6:
963 		m = sc->sc_mbuf6;
964 		timeout_del(&sc->sc_tmo6);
965 		if (m == NULL)
966 			return (0);
967 		sc->sc_mbuf6 = NULL;
968 		break;
969 	default: /* NOTREACHED */
970 		break;
971 	}
972 
973 	if (!(ifp->if_flags & IFF_RUNNING)) {
974 		m_freem(m);
975 		return (0);
976 	}
977 
978 	pflowstats.pflow_packets++;
979 	set_hdr = mtod(m, struct pflow_set_header *);
980 	switch (af) {
981 	case AF_INET:
982 		set_length = sizeof(struct pflow_set_header)
983 		    + sc->sc_count4 * sizeof(struct pflow_flow4);
984 		break;
985 	case AF_INET6:
986 		set_length = sizeof(struct pflow_set_header)
987 		    + sc->sc_count6 * sizeof(struct pflow_flow6);
988 		break;
989 	default: /* NOTREACHED */
990 		break;
991 	}
992 	set_hdr->set_length = htons(set_length);
993 
994 	switch (sc->sc_version) {
995 	case PFLOW_PROTO_9:
996 		/* populate pflow_header */
997 		M_PREPEND(m, sizeof(struct pflow_v9_header), M_DONTWAIT);
998 		if (m == NULL) {
999 			pflowstats.pflow_onomem++;
1000 			return (ENOBUFS);
1001 		}
1002 		h9 = mtod(m, struct pflow_v9_header *);
1003 		h9->version = htons(PFLOW_PROTO_9);
1004 		h9->count = htons(1);
1005 		h9->uptime_ms = htonl(time_uptime * 1000);
1006 		h9->time_sec = htonl(time_second);
1007 		/* XXX correct mod 2^32 semantics? */
1008 		h9->flow_sequence = htonl(sc->sc_gcounter);
1009 		h9->observation_dom = htonl(PFLOW_ENGINE_TYPE);
1010 		break;
1011 	case PFLOW_PROTO_10:
1012 		/* populate pflow_header */
1013 		M_PREPEND(m, sizeof(struct pflow_v10_header), M_DONTWAIT);
1014 		if (m == NULL) {
1015 			pflowstats.pflow_onomem++;
1016 			return (ENOBUFS);
1017 		}
1018 		h10 = mtod(m, struct pflow_v10_header *);
1019 		h10->version = htons(PFLOW_PROTO_10);
1020 		h10->length = htons(PFLOW_V10_HDRLEN + set_length);
1021 		h10->time_sec = htonl(time_second);
1022 		/* XXX correct mod 2^32 semantics? */
1023 		h10->flow_sequence = htonl(sc->sc_gcounter);
1024 		h10->observation_dom = htonl(PFLOW_ENGINE_TYPE);
1025 		break;
1026 	default: /* NOTREACHED */
1027 		break;
1028 	}
1029 	return (pflow_sendout_mbuf(sc, m));
1030 }
1031 
1032 /* This must be called in splnet() */
1033 int
1034 pflow_sendout_ipfix_tmpl(struct pflow_softc *sc)
1035 {
1036 	struct mbuf			*m;
1037 	struct pflow_v9_header		*h9;
1038 	struct pflow_v10_header		*h10;
1039 	struct ifnet			*ifp = &sc->sc_if;
1040 
1041 	timeout_del(&sc->sc_tmo_tmpl);
1042 
1043 	if (!(ifp->if_flags & IFF_RUNNING)) {
1044 		return (0);
1045 	}
1046 	m = pflow_get_mbuf(NULL, 0);
1047 	if (m == NULL)
1048 		return (0);
1049 	if (m_copyback(m, 0, sizeof(struct pflow_tmpl),
1050 	    &sc->sc_tmpl, M_NOWAIT)) {
1051 		m_freem(m);
1052 		return (0);
1053 	}
1054 	pflowstats.pflow_packets++;
1055 	switch (sc->sc_version) {
1056 	case PFLOW_PROTO_9:
1057 		/* populate pflow_header */
1058 		M_PREPEND(m, sizeof(struct pflow_v9_header), M_DONTWAIT);
1059 		if (m == NULL) {
1060 			pflowstats.pflow_onomem++;
1061 			return (ENOBUFS);
1062 		}
1063 		h9 = mtod(m, struct pflow_v9_header *);
1064 		h9->version = htons(PFLOW_PROTO_9);
1065 		h9->count = htons(1);
1066 		h9->uptime_ms = htonl(time_uptime * 1000);
1067 		h9->time_sec = htonl(time_second);
1068 		/* XXX correct mod 2^32 semantics? */
1069 		h9->flow_sequence = htonl(sc->sc_gcounter);
1070 		h9->observation_dom = htonl(PFLOW_ENGINE_TYPE);
1071 		break;
1072 	case PFLOW_PROTO_10:
1073 		/* populate pflow_header */
1074 		M_PREPEND(m, sizeof(struct pflow_v10_header), M_DONTWAIT);
1075 		if (m == NULL) {
1076 			pflowstats.pflow_onomem++;
1077 			return (ENOBUFS);
1078 		}
1079 		h10 = mtod(m, struct pflow_v10_header *);
1080 		h10->version = htons(PFLOW_PROTO_10);
1081 		h10->length = htons(PFLOW_V10_HDRLEN
1082 		    + sizeof(struct pflow_tmpl));
1083 		h10->time_sec = htonl(time_second);
1084 		/* XXX correct mod 2^32 semantics? */
1085 		h10->flow_sequence = htonl(sc->sc_gcounter);
1086 		h10->observation_dom = htonl(PFLOW_ENGINE_TYPE);
1087 		break;
1088 	default: /* NOTREACHED */
1089 		break;
1090 	}
1091 	timeout_add_sec(&sc->sc_tmo_tmpl, PFLOW_TMPL_TIMEOUT);
1092 	return (pflow_sendout_mbuf(sc, m));
1093 }
1094 
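/*
 * Prepend the UDP and IP headers, compute the UDP pseudo-header checksum if
 * checksumming is enabled, tap bpf(4) and hand the finished datagram to
 * ip_output().
 */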
1095 int
1096 pflow_sendout_mbuf(struct pflow_softc *sc, struct mbuf *m)
1097 {
1098 	struct udpiphdr	*ui;
1099 	u_int16_t	 len = m->m_pkthdr.len;
1100 	struct ifnet	*ifp = &sc->sc_if;
1101 	struct ip	*ip;
1102 	int		 err;
1103 
1104 	/* UDP Header*/
1105 	M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT);
1106 	if (m == NULL) {
1107 		pflowstats.pflow_onomem++;
1108 		return (ENOBUFS);
1109 	}
1110 
1111 	ui = mtod(m, struct udpiphdr *);
1112 	ui->ui_pr = IPPROTO_UDP;
1113 	ui->ui_src = sc->sc_sender_ip;
1114 	ui->ui_sport = sc->sc_sender_port;
1115 	ui->ui_dst = sc->sc_receiver_ip;
1116 	ui->ui_dport = sc->sc_receiver_port;
1117 	ui->ui_ulen = htons(sizeof(struct udphdr) + len);
1118 
1119 	ip = (struct ip *)ui;
1120 	ip->ip_v = IPVERSION;
1121 	ip->ip_hl = sizeof(struct ip) >> 2;
1122 	ip->ip_id = htons(ip_randomid());
1123 	ip->ip_off = htons(IP_DF);
1124 	ip->ip_tos = IPTOS_LOWDELAY;
1125 	ip->ip_ttl = IPDEFTTL;
1126 	ip->ip_len = htons(sizeof(struct udpiphdr) + len);
1127 
1128 	/*
1129 	 * Compute the pseudo-header checksum; defer further checksumming
1130 	 * until ip_output() or hardware (if it exists).
1131 	 */
1132 	if (udpcksum) {
1133 		m->m_pkthdr.csum_flags |= M_UDP_CSUM_OUT;
1134 		ui->ui_sum = in_cksum_phdr(ui->ui_src.s_addr,
1135 		    ui->ui_dst.s_addr, htons(len + sizeof(struct udphdr) +
1136 		    IPPROTO_UDP));
1137 	} else
1138 		ui->ui_sum = 0;
1139 
1140 #if NBPFILTER > 0
1141 	if (ifp->if_bpf) {
1142 		ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
1143 		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1144 	}
1145 #endif
1146 
1147 	sc->sc_if.if_opackets++;
1148 	sc->sc_if.if_obytes += m->m_pkthdr.len;
1149 
1150 	if ((err = ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL))) {
1151 		pflowstats.pflow_oerrors++;
1152 		sc->sc_if.if_oerrors++;
1153 	}
1154 	return (err);
1155 }
1156 
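/*
 * Pick a UDP source port from the high dynamic range
 * (ipport_hifirstauto..ipport_hilastauto), skipping ports rejected by
 * in_baddynamic().
 */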
1157 int
1158 pflow_get_dynport(void)
1159 {
1160 	u_int16_t	tmp, low, high, cut;
1161 
1162 	low = ipport_hifirstauto;     /* sysctl */
1163 	high = ipport_hilastauto;
1164 
1165 	cut = arc4random_uniform(1 + high - low) + low;
1166 
1167 	for (tmp = cut; tmp <= high; ++(tmp)) {
1168 		if (!in_baddynamic(tmp, IPPROTO_UDP))
1169 			return (htons(tmp));
1170 	}
1171 
1172 	for (tmp = cut - 1; tmp >= low; --(tmp)) {
1173 		if (!in_baddynamic(tmp, IPPROTO_UDP))
1174 			return (htons(tmp));
1175 	}
1176 
1177 	return (htons(ipport_hilastauto)); /* XXX */
1178 }
1179 
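/*
 * Handle the pflow sysctl node: NET_PFLOW_STATS exports the read-only
 * pflowstats structure.
 */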
1180 int
1181 pflow_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
1182     void *newp, size_t newlen)
1183 {
1184 	if (namelen != 1)
1185 		return (ENOTDIR);
1186 
1187 	switch (name[0]) {
1188 	case NET_PFLOW_STATS:
1189 		if (newp != NULL)
1190 			return (EPERM);
1191 		return (sysctl_struct(oldp, oldlenp, newp, newlen,
1192 		    &pflowstats, sizeof(pflowstats)));
1193 	default:
1194 		return (EOPNOTSUPP);
1195 	}
1196 	return (0);
1197 }
1198