xref: /netbsd-src/sys/dist/pf/net/if_pfsync.c (revision 7f21db1c0118155e0dd40b75182e30c589d9f63e)
1 /*	$NetBSD: if_pfsync.c,v 1.5 2010/01/23 01:17:23 minskim Exp $	*/
2 /*	$OpenBSD: if_pfsync.c,v 1.83 2007/06/26 14:44:12 mcbride Exp $	*/
3 
4 /*
5  * Copyright (c) 2002 Michael Shalayeff
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
21  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
25  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
26  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: if_pfsync.c,v 1.5 2010/01/23 01:17:23 minskim Exp $");
32 
33 #ifdef _KERNEL_OPT
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 #endif
37 
38 #include <sys/param.h>
39 #include <sys/proc.h>
40 #include <sys/systm.h>
41 #include <sys/time.h>
42 #include <sys/mbuf.h>
43 #include <sys/socket.h>
44 #include <sys/ioctl.h>
45 #include <sys/callout.h>
46 #include <sys/kernel.h>
47 
48 #include <net/if.h>
49 #include <net/if_types.h>
50 #include <net/route.h>
51 #include <net/bpf.h>
52 #include <netinet/in.h>
53 #ifndef __NetBSD__
54 #include <netinet/if_ether.h>
55 #else
56 #include <net/if_ether.h>
57 #endif /* __NetBSD__ */
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_seq.h>
60 
61 #ifdef	INET
62 #include <netinet/in_systm.h>
63 #include <netinet/in_var.h>
64 #include <netinet/ip.h>
65 #include <netinet/ip_var.h>
66 #endif
67 
68 #ifdef INET6
69 #include <netinet6/nd6.h>
70 #endif /* INET6 */
71 
72 #include "carp.h"
73 #if NCARP > 0
74 extern int carp_suppress_preempt;
75 #endif
76 
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 
80 #ifdef __NetBSD__
81 #include <sys/conf.h>
82 #include <sys/lwp.h>
83 #include <sys/kauth.h>
84 #include <sys/sysctl.h>
85 
86 #include <net/net_stats.h>
87 
88 percpu_t	*pfsyncstat_percpu;
89 
90 #define	PFSYNC_STATINC(x) _NET_STATINC(pfsyncstat_percpu, x)
91 #endif /* __NetBSD__ */
92 
93 #include "pfsync.h"
94 
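/*
 * Smallest MTU accepted for the pfsync interface: the pfsync header plus
 * one full (uncompressed) state record; enforced by the SIOCSIFMTU
 * handler in pfsyncioctl().
 */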
95 #define PFSYNC_MINMTU	\
96     (sizeof(struct pfsync_header) + sizeof(struct pf_state))
97 
98 #ifdef PFSYNCDEBUG
99 #define DPRINTF(x)    do { if (pfsyncdebug) printf x ; } while (0)
100 int pfsyncdebug;
101 #else
102 #define DPRINTF(x)
103 #endif
104 
105 extern int ifqmaxlen; /* XXX */
106 
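/*
 * Only one pfsync instance (pfsync0) can exist, so a single global softc
 * pointer is sufficient; see pfsync_clone_create().
 */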
107 struct pfsync_softc	*pfsyncif = NULL;
108 
109 void	pfsyncattach(int);
110 int	pfsync_clone_create(struct if_clone *, int);
111 int	pfsync_clone_destroy(struct ifnet *);
112 void	pfsync_setmtu(struct pfsync_softc *, int);
113 int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
114 	    struct pf_state_peer *);
115 int	pfsync_insert_net_state(struct pfsync_state *, u_int8_t);
116 void	pfsync_update_net_tdb(struct pfsync_tdb *);
117 int	pfsyncoutput(struct ifnet *, struct mbuf *, const struct sockaddr *,
118 	    struct rtentry *);
119 int	pfsyncioctl(struct ifnet *, u_long, void*);
120 void	pfsyncstart(struct ifnet *);
121 
122 struct mbuf *pfsync_get_mbuf(struct pfsync_softc *, u_int8_t, void **);
123 int	pfsync_request_update(struct pfsync_state_upd *, struct in_addr *);
124 int	pfsync_sendout(struct pfsync_softc *);
125 int	pfsync_tdb_sendout(struct pfsync_softc *);
126 int	pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
127 void	pfsync_timeout(void *);
128 void	pfsync_tdb_timeout(void *);
129 void	pfsync_send_bus(struct pfsync_softc *, u_int8_t);
130 void	pfsync_bulk_update(void *);
131 void	pfsync_bulkfail(void *);
132 
133 int	pfsync_sync_ok;
134 
135 struct if_clone	pfsync_cloner =
136     IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
137 
138 void
139 pfsyncattach(int npfsync)
140 {
141 	if_clone_attach(&pfsync_cloner);
142 
143 	pfsyncstat_percpu = percpu_alloc(sizeof(uint64_t) * PFSYNC_NSTATS);
144 }
145 
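/*
 * Create the pfsync0 interface: default the peer to the pfsync multicast
 * group, allow up to 128 merged updates per state, and set up the
 * flush and bulk-transfer callouts.
 */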
146 int
147 pfsync_clone_create(struct if_clone *ifc, int unit)
148 {
149 	struct ifnet *ifp;
150 
151 	if (unit != 0)
152 		return (EINVAL);
153 
154 	pfsync_sync_ok = 1;
155 	if ((pfsyncif = malloc(sizeof(*pfsyncif), M_DEVBUF, M_NOWAIT)) == NULL)
156 		return (ENOMEM);
157 	memset(pfsyncif, 0, sizeof(*pfsyncif));
158 	pfsyncif->sc_mbuf = NULL;
159 	pfsyncif->sc_mbuf_net = NULL;
160 	pfsyncif->sc_mbuf_tdb = NULL;
161 	pfsyncif->sc_statep.s = NULL;
162 	pfsyncif->sc_statep_net.s = NULL;
163 	pfsyncif->sc_statep_tdb.t = NULL;
164 	pfsyncif->sc_maxupdates = 128;
165 	pfsyncif->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
166 	pfsyncif->sc_sendaddr.s_addr = INADDR_PFSYNC_GROUP;
167 	pfsyncif->sc_ureq_received = 0;
168 	pfsyncif->sc_ureq_sent = 0;
169 	pfsyncif->sc_bulk_send_next = NULL;
170 	pfsyncif->sc_bulk_terminator = NULL;
171 	ifp = &pfsyncif->sc_if;
172 	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
173 	ifp->if_softc = pfsyncif;
174 	ifp->if_ioctl = pfsyncioctl;
175 	ifp->if_output = pfsyncoutput;
176 	ifp->if_start = pfsyncstart;
177 	ifp->if_type = IFT_PFSYNC;
178 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
179 	ifp->if_hdrlen = PFSYNC_HDRLEN;
180 	pfsync_setmtu(pfsyncif, ETHERMTU);
181 
182 	callout_init(&pfsyncif->sc_tmo, 0);
183 	callout_init(&pfsyncif->sc_tdb_tmo, 0);
184 	callout_init(&pfsyncif->sc_bulk_tmo, 0);
185 	callout_init(&pfsyncif->sc_bulkfail_tmo, 0);
186 	callout_setfunc(&pfsyncif->sc_tmo, pfsync_timeout, pfsyncif);
187 	callout_setfunc(&pfsyncif->sc_tdb_tmo, pfsync_tdb_timeout, pfsyncif);
188 	callout_setfunc(&pfsyncif->sc_bulk_tmo, pfsync_bulk_update, pfsyncif);
189 	callout_setfunc(&pfsyncif->sc_bulkfail_tmo, pfsync_bulkfail, pfsyncif);
190 
191 	if_attach(ifp);
192 	if_alloc_sadl(ifp);
193 
194 	bpf_ops->bpf_attach(&pfsyncif->sc_if, DLT_PFSYNC, PFSYNC_HDRLEN,
195 	    &pfsyncif->sc_if.if_bpf);
196 
197 	return (0);
198 }
199 
200 int
201 pfsync_clone_destroy(struct ifnet *ifp)
202 {
203 	bpf_ops->bpf_detach(ifp);
204 	if_detach(ifp);
205 	free(pfsyncif, M_DEVBUF);
206 	pfsyncif = NULL;
207 	return (0);
208 }
209 
210 /*
211  * Start output on the pfsync interface: queued packets are simply dropped and freed.
212  */
213 void
214 pfsyncstart(struct ifnet *ifp)
215 {
216 	struct mbuf *m;
217 	int s;
218 
219 	for (;;) {
220 		s = splnet();
221 		IF_DROP(&ifp->if_snd);
222 		IF_DEQUEUE(&ifp->if_snd, m);
223 		splx(s);
224 
225 		if (m == NULL)
226 			return;
227 		else
228 			m_freem(m);
229 	}
230 }
231 
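/*
 * Allocate scrub state for a state peer if the wire record indicates
 * scrubbing is active and we have not allocated it yet.
 */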
232 int
233 pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
234     struct pf_state_peer *d)
235 {
236 	if (s->scrub.scrub_flag && d->scrub == NULL) {
237 		d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
238 		if (d->scrub == NULL)
239 			return (ENOMEM);
240 		memset(d->scrub, 0, sizeof(*d->scrub));
241 	}
242 
243 	return (0);
244 }
245 
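/*
 * Create a local pf state from a state record received from a peer.
 * The rule association is only trusted when the peer's ruleset checksum
 * matched ours (chksum_flag); otherwise the state is bound to
 * pf_default_rule.
 */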
246 int
247 pfsync_insert_net_state(struct pfsync_state *sp, u_int8_t chksum_flag)
248 {
249 	struct pf_state	*st = NULL;
250 	struct pf_state_key *sk = NULL;
251 	struct pf_rule *r = NULL;
252 	struct pfi_kif	*kif;
253 
254 	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
255 		printf("pfsync_insert_net_state: invalid creator id:"
256 		    " %08x\n", ntohl(sp->creatorid));
257 		return (EINVAL);
258 	}
259 
260 	kif = pfi_kif_get(sp->ifname);
261 	if (kif == NULL) {
262 		if (pf_status.debug >= PF_DEBUG_MISC)
263 			printf("pfsync_insert_net_state: "
264 			    "unknown interface: %s\n", sp->ifname);
265 		/* skip this state */
266 		return (0);
267 	}
268 
269 	/*
270 	 * If the ruleset checksums match, it's safe to associate the state
271 	 * with the rule of that number.
272 	 */
273 	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && chksum_flag &&
274 	    ntohl(sp->rule) <
275 	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
276 		r = pf_main_ruleset.rules[
277 		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
278 	else
279 		r = &pf_default_rule;
280 
281 	if (!r->max_states || r->states < r->max_states)
282 		st = pool_get(&pf_state_pl, PR_NOWAIT);
283 	if (st == NULL) {
284 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
285 		return (ENOMEM);
286 	}
287 	memset(st, 0, sizeof(*st));
288 
289 	if ((sk = pf_alloc_state_key(st)) == NULL) {
290 		pool_put(&pf_state_pl, st);
291 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
292 		return (ENOMEM);
293 	}
294 
295 	/* allocate memory for scrub info */
296 	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
297 	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) {
298 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
299 		if (st->src.scrub)
300 			pool_put(&pf_state_scrub_pl, st->src.scrub);
301 		pool_put(&pf_state_pl, st);
302 		pool_put(&pf_state_key_pl, sk);
303 		return (ENOMEM);
304 	}
305 
306 	st->rule.ptr = r;
307 	/* XXX get pointers to nat_rule and anchor */
308 
309 	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
310 	r->states++;
311 
312 	/* fill in the rest of the state entry */
313 	pf_state_host_ntoh(&sp->lan, &sk->lan);
314 	pf_state_host_ntoh(&sp->gwy, &sk->gwy);
315 	pf_state_host_ntoh(&sp->ext, &sk->ext);
316 
317 	pf_state_peer_ntoh(&sp->src, &st->src);
318 	pf_state_peer_ntoh(&sp->dst, &st->dst);
319 
320 	memcpy(&st->rt_addr, &sp->rt_addr, sizeof(st->rt_addr));
321 	st->creation = time_second - ntohl(sp->creation);
322 	st->expire = ntohl(sp->expire) + time_second;
323 
324 	sk->af = sp->af;
325 	sk->proto = sp->proto;
326 	sk->direction = sp->direction;
327 	st->log = sp->log;
328 	st->timeout = sp->timeout;
329 	st->allow_opts = sp->allow_opts;
330 
331 	memcpy(&st->id, sp->id, sizeof(st->id));
332 	st->creatorid = sp->creatorid;
333 	st->sync_flags = PFSTATE_FROMSYNC;
334 
335 	if (pf_insert_state(kif, st)) {
336 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
337 		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
338 		r->states--;
339 		if (st->dst.scrub)
340 			pool_put(&pf_state_scrub_pl, st->dst.scrub);
341 		if (st->src.scrub)
342 			pool_put(&pf_state_scrub_pl, st->src.scrub);
343 		pool_put(&pf_state_pl, st);
344 		return (EINVAL);
345 	}
346 
347 	return (0);
348 }
349 
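/*
 * Handle an incoming pfsync packet.  The packet is only accepted if it
 * arrived on the configured sync interface with an IP TTL of
 * PFSYNC_DFLTTL and a matching header version; it is then dispatched on
 * the action code (insert, update, delete, clear, bulk transfer, ...).
 */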
350 void
351 pfsync_input(struct mbuf *m, ...)
352 {
353 	struct ip *ip = mtod(m, struct ip *);
354 	struct pfsync_header *ph;
355 	struct pfsync_softc *sc = pfsyncif;
356 	struct pf_state *st;
357 	struct pf_state_key *sk;
358 	struct pf_state_cmp id_key;
359 	struct pfsync_state *sp;
360 	struct pfsync_state_upd *up;
361 	struct pfsync_state_del *dp;
362 	struct pfsync_state_clr *cp;
363 	struct pfsync_state_upd_req *rup;
364 	struct pfsync_state_bus *bus;
365 #ifdef IPSEC
366 	struct pfsync_tdb *pt;
367 #endif
368 	struct in_addr src;
369 	struct mbuf *mp;
370 	int iplen, action, error, i, s, count, offp, sfail, stale = 0;
371 	u_int8_t chksum_flag = 0;
372 
373 	PFSYNC_STATINC(PFSYNC_STAT_IPACKETS);
374 
375 	/* verify that we have a sync interface configured */
376 	if (!sc || !sc->sc_sync_ifp || !pf_status.running)
377 		goto done;
378 
379 	/* verify that the packet came in on the right interface */
380 	if (sc->sc_sync_ifp != m->m_pkthdr.rcvif) {
381 		PFSYNC_STATINC(PFSYNC_STAT_BADIF);
382 		goto done;
383 	}
384 
385 	/* verify that the IP TTL is PFSYNC_DFLTTL (255) */
386 	if (ip->ip_ttl != PFSYNC_DFLTTL) {
387 		PFSYNC_STATINC(PFSYNC_STAT_BADTTL);
388 		goto done;
389 	}
390 
391 	iplen = ip->ip_hl << 2;
392 
393 	if (m->m_pkthdr.len < iplen + sizeof(*ph)) {
394 		PFSYNC_STATINC(PFSYNC_STAT_HDROPS);
395 		goto done;
396 	}
397 
398 	if (iplen + sizeof(*ph) > m->m_len) {
399 		if ((m = m_pullup(m, iplen + sizeof(*ph))) == NULL) {
400 			PFSYNC_STATINC(PFSYNC_STAT_HDROPS);
401 			goto done;
402 		}
403 		ip = mtod(m, struct ip *);
404 	}
405 	ph = (struct pfsync_header *)((char *)ip + iplen);
406 
407 	/* verify the version */
408 	if (ph->version != PFSYNC_VERSION) {
409 		PFSYNC_STATINC(PFSYNC_STAT_BADVER);
410 		goto done;
411 	}
412 
413 	action = ph->action;
414 	count = ph->count;
415 
416 	/* make sure it's a valid action code */
417 	if (action >= PFSYNC_ACT_MAX) {
418 		PFSYNC_STATINC(PFSYNC_STAT_BADACT);
419 		goto done;
420 	}
421 
422 	/* Cheaper to grab this now than having to mess with mbufs later */
423 	src = ip->ip_src;
424 
425 	if (!bcmp(&ph->pf_chksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
426 		chksum_flag++;
427 
428 	switch (action) {
429 	case PFSYNC_ACT_CLR: {
430 		struct pf_state *nexts;
431 		struct pf_state_key *nextsk;
432 		struct pfi_kif *kif;
433 		u_int32_t creatorid;
434 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
435 		    sizeof(*cp), &offp)) == NULL) {
436 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
437 			return;
438 		}
439 		cp = (struct pfsync_state_clr *)(mp->m_data + offp);
440 		creatorid = cp->creatorid;
441 
442 		s = splsoftnet();
443 		if (cp->ifname[0] == '\0') {
444 			for (st = RB_MIN(pf_state_tree_id, &tree_id);
445 			    st; st = nexts) {
446 				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
447 				if (st->creatorid == creatorid) {
448 					st->sync_flags |= PFSTATE_FROMSYNC;
449 					pf_unlink_state(st);
450 				}
451 			}
452 		} else {
453 			if ((kif = pfi_kif_get(cp->ifname)) == NULL) {
454 				splx(s);
455 				return;
456 			}
457 			for (sk = RB_MIN(pf_state_tree_lan_ext,
458 			    &pf_statetbl_lan_ext); sk; sk = nextsk) {
459 				nextsk = RB_NEXT(pf_state_tree_lan_ext,
460 				    &pf_statetbl_lan_ext, sk);
461 				TAILQ_FOREACH(st, &sk->states, next) {
462 					if (st->creatorid == creatorid) {
463 						st->sync_flags |=
464 						    PFSTATE_FROMSYNC;
465 						pf_unlink_state(st);
466 					}
467 				}
468 			}
469 		}
470 		splx(s);
471 
472 		break;
473 	}
474 	case PFSYNC_ACT_INS:
475 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
476 		    count * sizeof(*sp), &offp)) == NULL) {
477 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
478 			return;
479 		}
480 
481 		s = splsoftnet();
482 		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
483 		    i < count; i++, sp++) {
484 			/* check for invalid values */
485 			if (sp->timeout >= PFTM_MAX ||
486 			    sp->src.state > PF_TCPS_PROXY_DST ||
487 			    sp->dst.state > PF_TCPS_PROXY_DST ||
488 			    sp->direction > PF_OUT ||
489 			    (sp->af != AF_INET && sp->af != AF_INET6)) {
490 				if (pf_status.debug >= PF_DEBUG_MISC)
491 					printf("pfsync_insert: PFSYNC_ACT_INS: "
492 					    "invalid value\n");
493 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
494 				continue;
495 			}
496 
497 			if ((error = pfsync_insert_net_state(sp,
498 			    chksum_flag))) {
499 				if (error == ENOMEM) {
500 					splx(s);
501 					goto done;
502 				}
503 				continue;
504 			}
505 		}
506 		splx(s);
507 		break;
508 	case PFSYNC_ACT_UPD:
509 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
510 		    count * sizeof(*sp), &offp)) == NULL) {
511 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
512 			return;
513 		}
514 
515 		s = splsoftnet();
516 		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
517 		    i < count; i++, sp++) {
518 			int flags = PFSYNC_FLAG_STALE;
519 
520 			/* check for invalid values */
521 			if (sp->timeout >= PFTM_MAX ||
522 			    sp->src.state > PF_TCPS_PROXY_DST ||
523 			    sp->dst.state > PF_TCPS_PROXY_DST) {
524 				if (pf_status.debug >= PF_DEBUG_MISC)
525 					printf("pfsync_insert: PFSYNC_ACT_UPD: "
526 					    "invalid value\n");
527 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
528 				continue;
529 			}
530 
531 			memcpy(&id_key.id, sp->id, sizeof(id_key.id));
532 			id_key.creatorid = sp->creatorid;
533 
534 			st = pf_find_state_byid(&id_key);
535 			if (st == NULL) {
536 				/* insert the update */
537 				if (pfsync_insert_net_state(sp, chksum_flag)) {
538 					PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
539 				}
540 				continue;
541 			}
542 			sk = st->state_key;
543 			sfail = 0;
544 			if (sk->proto == IPPROTO_TCP) {
545 				/*
546 				 * The state should never go backwards except
547 				 * for syn-proxy states.  Neither should the
548 				 * sequence window slide backwards.
549 				 */
550 				if (st->src.state > sp->src.state &&
551 				    (st->src.state < PF_TCPS_PROXY_SRC ||
552 				    sp->src.state >= PF_TCPS_PROXY_SRC))
553 					sfail = 1;
554 				else if (SEQ_GT(st->src.seqlo,
555 				    ntohl(sp->src.seqlo)))
556 					sfail = 3;
557 				else if (st->dst.state > sp->dst.state) {
558 					/* There might still be useful
559 					 * information about the src state here,
560 					 * so import that part of the update,
561 					 * then "fail" so we send the updated
562 					 * state back to the peer who is missing
563 				 * what we know. */
564 					pf_state_peer_ntoh(&sp->src, &st->src);
565 					/* XXX do anything with timeouts? */
566 					sfail = 7;
567 					flags = 0;
568 				} else if (st->dst.state >= TCPS_SYN_SENT &&
569 				    SEQ_GT(st->dst.seqlo, ntohl(sp->dst.seqlo)))
570 					sfail = 4;
571 			} else {
572 				/*
573 			 * Non-TCP protocol state machines always go
574 			 * forwards.
575 				 */
576 				if (st->src.state > sp->src.state)
577 					sfail = 5;
578 				else if (st->dst.state > sp->dst.state)
579 					sfail = 6;
580 			}
581 			if (sfail) {
582 				if (pf_status.debug >= PF_DEBUG_MISC)
583 					printf("pfsync: %s stale update "
584 					    "(%d) id: %016" PRIu64
585 					    " creatorid: %08x\n",
586 					    (sfail < 7 ?  "ignoring"
587 					     : "partial"), sfail,
588 					    be64toh(st->id),
589 					    ntohl(st->creatorid));
590 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
591 
592 				if (!(sp->sync_flags & PFSTATE_STALE)) {
593 					/* we have a better state, send it */
594 					if (sc->sc_mbuf != NULL && !stale)
595 						pfsync_sendout(sc);
596 					stale++;
597 					if (!st->sync_flags)
598 						pfsync_pack_state(
599 						    PFSYNC_ACT_UPD, st, flags);
600 				}
601 				continue;
602 			}
603 	    		pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
604 			pf_state_peer_ntoh(&sp->src, &st->src);
605 			pf_state_peer_ntoh(&sp->dst, &st->dst);
606 			st->expire = ntohl(sp->expire) + time_second;
607 			st->timeout = sp->timeout;
608 		}
609 		if (stale && sc->sc_mbuf != NULL)
610 			pfsync_sendout(sc);
611 		splx(s);
612 		break;
613 	/*
614 	 * It's not strictly necessary for us to support the "uncompressed"
615 	 * delete action, but it's relatively simple and maintains consistency.
616 	 */
617 	case PFSYNC_ACT_DEL:
618 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
619 		    count * sizeof(*sp), &offp)) == NULL) {
620 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
621 			return;
622 		}
623 
624 		s = splsoftnet();
625 		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
626 		    i < count; i++, sp++) {
627 			memcpy(&id_key.id, sp->id, sizeof(id_key.id));
628 			id_key.creatorid = sp->creatorid;
629 
630 			st = pf_find_state_byid(&id_key);
631 			if (st == NULL) {
632 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
633 				continue;
634 			}
635 			st->sync_flags |= PFSTATE_FROMSYNC;
636 			pf_unlink_state(st);
637 		}
638 		splx(s);
639 		break;
640 	case PFSYNC_ACT_UPD_C: {
641 		int update_requested = 0;
642 
643 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
644 		    count * sizeof(*up), &offp)) == NULL) {
645 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
646 			return;
647 		}
648 
649 		s = splsoftnet();
650 		for (i = 0, up = (struct pfsync_state_upd *)(mp->m_data + offp);
651 		    i < count; i++, up++) {
652 			/* check for invalid values */
653 			if (up->timeout >= PFTM_MAX ||
654 			    up->src.state > PF_TCPS_PROXY_DST ||
655 			    up->dst.state > PF_TCPS_PROXY_DST) {
656 				if (pf_status.debug >= PF_DEBUG_MISC)
657 					printf("pfsync_insert: "
658 					    "PFSYNC_ACT_UPD_C: "
659 					    "invalid value\n");
660 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
661 				continue;
662 			}
663 
664 			memcpy(&id_key.id, up->id, sizeof(id_key.id));
665 			id_key.creatorid = up->creatorid;
666 
667 			st = pf_find_state_byid(&id_key);
668 			if (st == NULL) {
669 				/* We don't have this state. Ask for it. */
670 				error = pfsync_request_update(up, &src);
671 				if (error == ENOMEM) {
672 					splx(s);
673 					goto done;
674 				}
675 				update_requested = 1;
676 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
677 				continue;
678 			}
679 			sk = st->state_key;
680 			sfail = 0;
681 			if (sk->proto == IPPROTO_TCP) {
682 				/*
683 				 * The state should never go backwards except
684 				 * for syn-proxy states.  Neither should the
685 				 * sequence window slide backwards.
686 				 */
687 				if (st->src.state > up->src.state &&
688 				    (st->src.state < PF_TCPS_PROXY_SRC ||
689 				    up->src.state >= PF_TCPS_PROXY_SRC))
690 					sfail = 1;
691 				else if (st->dst.state > up->dst.state)
692 					sfail = 2;
693 				else if (SEQ_GT(st->src.seqlo,
694 				    ntohl(up->src.seqlo)))
695 					sfail = 3;
696 				else if (st->dst.state >= TCPS_SYN_SENT &&
697 				    SEQ_GT(st->dst.seqlo, ntohl(up->dst.seqlo)))
698 					sfail = 4;
699 			} else {
700 				/*
701 				 * Non-TCP protocol state machines always go
702 				 * forwards.
703 				 */
704 				if (st->src.state > up->src.state)
705 					sfail = 5;
706 				else if (st->dst.state > up->dst.state)
707 					sfail = 6;
708 			}
709 			if (sfail) {
710 				if (pf_status.debug >= PF_DEBUG_MISC)
711 					printf("pfsync: ignoring stale update "
712 					    "(%d) id: %016" PRIu64
713 					    " creatorid: %08x\n", sfail,
714 					    be64toh(st->id),
715 					    ntohl(st->creatorid));
716 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
717 
718 				/* we have a better state, send it out */
719 				if ((!stale || update_requested) &&
720 				    sc->sc_mbuf != NULL) {
721 					pfsync_sendout(sc);
722 					update_requested = 0;
723 				}
724 				stale++;
725 				if (!st->sync_flags)
726 					pfsync_pack_state(PFSYNC_ACT_UPD, st,
727 					    PFSYNC_FLAG_STALE);
728 				continue;
729 			}
730 	    		pfsync_alloc_scrub_memory(&up->dst, &st->dst);
731 			pf_state_peer_ntoh(&up->src, &st->src);
732 			pf_state_peer_ntoh(&up->dst, &st->dst);
733 			st->expire = ntohl(up->expire) + time_second;
734 			st->timeout = up->timeout;
735 		}
736 		if ((update_requested || stale) && sc->sc_mbuf)
737 			pfsync_sendout(sc);
738 		splx(s);
739 		break;
740 	}
741 	case PFSYNC_ACT_DEL_C:
742 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
743 		    count * sizeof(*dp), &offp)) == NULL) {
744 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
745 			return;
746 		}
747 
748 		s = splsoftnet();
749 		for (i = 0, dp = (struct pfsync_state_del *)(mp->m_data + offp);
750 		    i < count; i++, dp++) {
751 			memcpy(&id_key.id, dp->id, sizeof(id_key.id));
752 			id_key.creatorid = dp->creatorid;
753 
754 			st = pf_find_state_byid(&id_key);
755 			if (st == NULL) {
756 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
757 				continue;
758 			}
759 			st->sync_flags |= PFSTATE_FROMSYNC;
760 			pf_unlink_state(st);
761 		}
762 		splx(s);
763 		break;
764 	case PFSYNC_ACT_INS_F:
765 	case PFSYNC_ACT_DEL_F:
766 		/* not implemented */
767 		break;
768 	case PFSYNC_ACT_UREQ:
769 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
770 		    count * sizeof(*rup), &offp)) == NULL) {
771 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
772 			return;
773 		}
774 
775 		s = splsoftnet();
776 		if (sc->sc_mbuf != NULL)
777 			pfsync_sendout(sc);
778 		for (i = 0,
779 		    rup = (struct pfsync_state_upd_req *)(mp->m_data + offp);
780 		    i < count; i++, rup++) {
781 			memcpy(&id_key.id, rup->id, sizeof(id_key.id));
782 			id_key.creatorid = rup->creatorid;
783 
784 			if (id_key.id == 0 && id_key.creatorid == 0) {
785 				sc->sc_ureq_received = time_uptime;
786 				if (sc->sc_bulk_send_next == NULL)
787 					sc->sc_bulk_send_next =
788 					    TAILQ_FIRST(&state_list);
789 				sc->sc_bulk_terminator = sc->sc_bulk_send_next;
790 				if (pf_status.debug >= PF_DEBUG_MISC)
791 					printf("pfsync: received "
792 					    "bulk update request\n");
793 				pfsync_send_bus(sc, PFSYNC_BUS_START);
794 				callout_schedule(&sc->sc_bulk_tmo, 1 * hz);
795 			} else {
796 				st = pf_find_state_byid(&id_key);
797 				if (st == NULL) {
798 					PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
799 					continue;
800 				}
801 				if (!st->sync_flags)
802 					pfsync_pack_state(PFSYNC_ACT_UPD,
803 					    st, 0);
804 			}
805 		}
806 		if (sc->sc_mbuf != NULL)
807 			pfsync_sendout(sc);
808 		splx(s);
809 		break;
810 	case PFSYNC_ACT_BUS:
811 		/* If we're not waiting for a bulk update, who cares. */
812 		if (sc->sc_ureq_sent == 0)
813 			break;
814 
815 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
816 		    sizeof(*bus), &offp)) == NULL) {
817 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
818 			return;
819 		}
820 		bus = (struct pfsync_state_bus *)(mp->m_data + offp);
821 		switch (bus->status) {
822 		case PFSYNC_BUS_START:
823 			callout_schedule(&sc->sc_bulkfail_tmo,
824 			    pf_pool_limits[PF_LIMIT_STATES].limit /
825 			    (PFSYNC_BULKPACKETS * sc->sc_maxcount));
826 			if (pf_status.debug >= PF_DEBUG_MISC)
827 				printf("pfsync: received bulk "
828 				    "update start\n");
829 			break;
830 		case PFSYNC_BUS_END:
831 			if (time_uptime - ntohl(bus->endtime) >=
832 			    sc->sc_ureq_sent) {
833 				/* that's it, we're happy */
834 				sc->sc_ureq_sent = 0;
835 				sc->sc_bulk_tries = 0;
836 				callout_stop(&sc->sc_bulkfail_tmo);
837 #if NCARP > 0
838 				if (!pfsync_sync_ok)
839 					carp_suppress_preempt--;
840 #endif
841 				pfsync_sync_ok = 1;
842 				if (pf_status.debug >= PF_DEBUG_MISC)
843 					printf("pfsync: received valid "
844 					    "bulk update end\n");
845 			} else {
846 				if (pf_status.debug >= PF_DEBUG_MISC)
847 					printf("pfsync: received invalid "
848 					    "bulk update end: bad timestamp\n");
849 			}
850 			break;
851 		}
852 		break;
853 #ifdef IPSEC
854 	case PFSYNC_ACT_TDB_UPD:
855 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
856 		    count * sizeof(*pt), &offp)) == NULL) {
857 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
858 			return;
859 		}
860 		s = splsoftnet();
861 		for (i = 0, pt = (struct pfsync_tdb *)(mp->m_data + offp);
862 		    i < count; i++, pt++)
863 			pfsync_update_net_tdb(pt);
864 		splx(s);
865 		break;
866 #endif
867 	}
868 
869 done:
870 	if (m)
871 		m_freem(m);
872 }
873 
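/*
 * pfsync interfaces have no link output of their own; anything handed to
 * if_output is simply discarded.
 */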
874 int
875 pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
876 	struct rtentry *rt)
877 {
878 	m_freem(m);
879 	return (0);
880 }
881 
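/*
 * Interface ioctls.  SIOCSETPFSYNC selects the sync interface and peer
 * address and, when a sync device is configured, joins the pfsync
 * multicast group and requests a bulk state transfer from the peer.
 * This is normally driven from userland via ifconfig(8); a rough,
 * hypothetical sketch of what such a configurator would do (interface
 * names are examples only):
 *
 *	struct pfsyncreq pfsyncr;
 *	struct ifreq ifr;
 *
 *	memset(&pfsyncr, 0, sizeof(pfsyncr));
 *	strlcpy(pfsyncr.pfsyncr_syncdev, "em0",
 *	    sizeof(pfsyncr.pfsyncr_syncdev));
 *	pfsyncr.pfsyncr_maxupdates = 128;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "pfsync0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (void *)&pfsyncr;
 *	if (ioctl(s, SIOCSETPFSYNC, &ifr) == -1)	/* s: AF_INET socket */
 *		err(1, "SIOCSETPFSYNC");
 */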
882 /* ARGSUSED */
883 int
884 pfsyncioctl(struct ifnet *ifp, u_long cmd, void *data)
885 {
886 	struct lwp *l = curlwp;
887 	struct pfsync_softc *sc = ifp->if_softc;
888 	struct ifreq *ifr = (struct ifreq *)data;
889 	struct ip_moptions *imo = &sc->sc_imo;
890 	struct pfsyncreq pfsyncr;
891 	struct ifnet    *sifp;
892 	int s, error;
893 
894 	switch (cmd) {
895 	case SIOCSIFADDR:
896 	case SIOCAIFADDR:
897 	case SIOCSIFDSTADDR:
898 	case SIOCSIFFLAGS:
899 		if (ifp->if_flags & IFF_UP)
900 			ifp->if_flags |= IFF_RUNNING;
901 		else
902 			ifp->if_flags &= ~IFF_RUNNING;
903 		break;
904 	case SIOCSIFMTU:
905 		if (ifr->ifr_mtu < PFSYNC_MINMTU)
906 			return (EINVAL);
907 		if (ifr->ifr_mtu > MCLBYTES)
908 			ifr->ifr_mtu = MCLBYTES;
909 		s = splnet();
910 		if (ifr->ifr_mtu < ifp->if_mtu)
911 			pfsync_sendout(sc);
912 		pfsync_setmtu(sc, ifr->ifr_mtu);
913 		splx(s);
914 		break;
915 	case SIOCGETPFSYNC:
916 		if ((error = kauth_authorize_network(l->l_cred,
917 		    KAUTH_NETWORK_INTERFACE,
918 		    KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, ifp, (void *)cmd,
919 		    NULL)) != 0)
920 			return (error);
921 		memset(&pfsyncr, 0, sizeof(pfsyncr));
922 		if (sc->sc_sync_ifp)
923 			strlcpy(pfsyncr.pfsyncr_syncdev,
924 			    sc->sc_sync_ifp->if_xname, IFNAMSIZ);
925 		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
926 		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
927 		if ((error = copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr))))
928 			return (error);
929 		break;
930 	case SIOCSETPFSYNC:
931 		if ((error = kauth_authorize_network(l->l_cred,
932 		    KAUTH_NETWORK_INTERFACE,
933 		    KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
934 		    NULL)) != 0)
935 			return (error);
936 		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
937 			return (error);
938 
939 		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
940 			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
941 		else
942 			sc->sc_sync_peer.s_addr =
943 			    pfsyncr.pfsyncr_syncpeer.s_addr;
944 
945 		if (pfsyncr.pfsyncr_maxupdates > 255)
946 			return (EINVAL);
947 		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
948 
949 		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
950 			sc->sc_sync_ifp = NULL;
951 			if (sc->sc_mbuf_net != NULL) {
952 				/* Don't keep stale pfsync packets around. */
953 				s = splnet();
954 				m_freem(sc->sc_mbuf_net);
955 				sc->sc_mbuf_net = NULL;
956 				sc->sc_statep_net.s = NULL;
957 				splx(s);
958 			}
959 			if (imo->imo_num_memberships > 0) {
960 				in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
961 				imo->imo_multicast_ifp = NULL;
962 			}
963 			break;
964 		}
965 
966 		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
967 			return (EINVAL);
968 
969 		s = splnet();
970 		if (sifp->if_mtu < sc->sc_if.if_mtu ||
971 		    (sc->sc_sync_ifp != NULL &&
972 		    sifp->if_mtu < sc->sc_sync_ifp->if_mtu) ||
973 		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
974 			pfsync_sendout(sc);
975 		sc->sc_sync_ifp = sifp;
976 
977 		pfsync_setmtu(sc, sc->sc_if.if_mtu);
978 
979 		if (imo->imo_num_memberships > 0) {
980 			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
981 			imo->imo_multicast_ifp = NULL;
982 		}
983 
984 		if (sc->sc_sync_ifp &&
985 		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
986 			struct in_addr addr;
987 
988 			if (!(sc->sc_sync_ifp->if_flags & IFF_MULTICAST)) {
989 				sc->sc_sync_ifp = NULL;
990 				splx(s);
991 				return (EADDRNOTAVAIL);
992 			}
993 
994 			addr.s_addr = INADDR_PFSYNC_GROUP;
995 
996 			if ((imo->imo_membership[0] =
997 			    in_addmulti(&addr, sc->sc_sync_ifp)) == NULL) {
998 				sc->sc_sync_ifp = NULL;
999 				splx(s);
1000 				return (ENOBUFS);
1001 			}
1002 			imo->imo_num_memberships++;
1003 			imo->imo_multicast_ifp = sc->sc_sync_ifp;
1004 			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
1005 			imo->imo_multicast_loop = 0;
1006 		}
1007 
1008 		if (sc->sc_sync_ifp ||
1009 		    sc->sc_sendaddr.s_addr != INADDR_PFSYNC_GROUP) {
1010 			/* Request a full state table update. */
1011 			sc->sc_ureq_sent = time_uptime;
1012 #if NCARP > 0
1013 			if (pfsync_sync_ok)
1014 				carp_suppress_preempt++;
1015 #endif
1016 			pfsync_sync_ok = 0;
1017 			if (pf_status.debug >= PF_DEBUG_MISC)
1018 				printf("pfsync: requesting bulk update\n");
1019 			callout_schedule(&sc->sc_bulkfail_tmo, 5 * hz);
1020 			error = pfsync_request_update(NULL, NULL);
1021 			if (error == ENOMEM) {
1022 				splx(s);
1023 				return (ENOMEM);
1024 			}
1025 			pfsync_sendout(sc);
1026 		}
1027 		splx(s);
1028 
1029 		break;
1030 
1031 	default:
1032 		return ifioctl_common(ifp, cmd, data);
1033 	}
1034 
1035 	return (0);
1036 }
1037 
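/*
 * Recompute how many full state records fit in the requested MTU (capped
 * at 254 per packet) and size the pfsync interface MTU to exactly that
 * many records plus the header.
 */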
1038 void
1039 pfsync_setmtu(struct pfsync_softc *sc, int mtu_req)
1040 {
1041 	int mtu;
1042 
1043 	if (sc->sc_sync_ifp && sc->sc_sync_ifp->if_mtu < mtu_req)
1044 		mtu = sc->sc_sync_ifp->if_mtu;
1045 	else
1046 		mtu = mtu_req;
1047 
1048 	sc->sc_maxcount = (mtu - sizeof(struct pfsync_header)) /
1049 	    sizeof(struct pfsync_state);
1050 	if (sc->sc_maxcount > 254)
1051 	    sc->sc_maxcount = 254;
1052 	sc->sc_if.if_mtu = sizeof(struct pfsync_header) +
1053 	    sc->sc_maxcount * sizeof(struct pfsync_state);
1054 }
1055 
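/*
 * Allocate and initialize an mbuf for a pfsync packet of the given
 * action: a cluster is used when a full packet's worth of records would
 * not fit in an mbuf header, the pfsync header is filled in, *sp is
 * pointed at the first record slot, and the flush timeout is armed so a
 * partially filled packet still goes out within a second.
 */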
1056 struct mbuf *
1057 pfsync_get_mbuf(struct pfsync_softc *sc, u_int8_t action, void **sp)
1058 {
1059 	struct pfsync_header *h;
1060 	struct mbuf *m;
1061 	int len;
1062 
1063 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1064 	if (m == NULL) {
1065 		sc->sc_if.if_oerrors++;
1066 		return (NULL);
1067 	}
1068 
1069 	switch (action) {
1070 	case PFSYNC_ACT_CLR:
1071 		len = sizeof(struct pfsync_header) +
1072 		    sizeof(struct pfsync_state_clr);
1073 		break;
1074 	case PFSYNC_ACT_UPD_C:
1075 		len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd)) +
1076 		    sizeof(struct pfsync_header);
1077 		break;
1078 	case PFSYNC_ACT_DEL_C:
1079 		len = (sc->sc_maxcount * sizeof(struct pfsync_state_del)) +
1080 		    sizeof(struct pfsync_header);
1081 		break;
1082 	case PFSYNC_ACT_UREQ:
1083 		len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd_req)) +
1084 		    sizeof(struct pfsync_header);
1085 		break;
1086 	case PFSYNC_ACT_BUS:
1087 		len = sizeof(struct pfsync_header) +
1088 		    sizeof(struct pfsync_state_bus);
1089 		break;
1090 	case PFSYNC_ACT_TDB_UPD:
1091 		len = (sc->sc_maxcount * sizeof(struct pfsync_tdb)) +
1092 		    sizeof(struct pfsync_header);
1093 		break;
1094 	default:
1095 		len = (sc->sc_maxcount * sizeof(struct pfsync_state)) +
1096 		    sizeof(struct pfsync_header);
1097 		break;
1098 	}
1099 
1100 	if (len > MHLEN) {
1101 		MCLGET(m, M_DONTWAIT);
1102 		if ((m->m_flags & M_EXT) == 0) {
1103 			m_free(m);
1104 			sc->sc_if.if_oerrors++;
1105 			return (NULL);
1106 		}
1107 		m->m_data += (MCLBYTES - len) &~ (sizeof(long) - 1);
1108 	} else
1109 		MH_ALIGN(m, len);
1110 
1111 	m->m_pkthdr.rcvif = NULL;
1112 	m->m_pkthdr.len = m->m_len = sizeof(struct pfsync_header);
1113 	h = mtod(m, struct pfsync_header *);
1114 	h->version = PFSYNC_VERSION;
1115 	h->af = 0;
1116 	h->count = 0;
1117 	h->action = action;
1118 	if (action != PFSYNC_ACT_TDB_UPD)
1119 		memcpy(&h->pf_chksum, &pf_status.pf_chksum,
1120 		    PF_MD5_DIGEST_LENGTH);
1121 
1122 	*sp = (void *)((char *)h + PFSYNC_HDRLEN);
1123 	if (action == PFSYNC_ACT_TDB_UPD)
1124 		callout_schedule(&sc->sc_tdb_tmo, hz);
1125 	else
1126 		callout_schedule(&sc->sc_tmo, hz);
1127 	return (m);
1128 }
1129 
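/*
 * Queue a state insert/update/delete for transmission.  Updates for a
 * state already present in the pending packet are merged (up to
 * sc_maxupdates times); when a sync interface is configured, the
 * compressed UPD_C/DEL_C form is also built for the wire.  The packet is
 * flushed once it is full or the per-state update limit is reached.
 */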
1130 int
1131 pfsync_pack_state(u_int8_t action, struct pf_state *st, int flags)
1132 {
1133 	struct ifnet *ifp = NULL;
1134 	struct pfsync_softc *sc = pfsyncif;
1135 	struct pfsync_header *h, *h_net;
1136 	struct pfsync_state *sp = NULL;
1137 	struct pfsync_state_upd *up = NULL;
1138 	struct pfsync_state_del *dp = NULL;
1139 	struct pf_state_key *sk = st->state_key;
1140 	struct pf_rule *r;
1141 	u_long secs;
1142 	int s, ret = 0;
1143 	u_int8_t i = 255, newaction = 0;
1144 
1145 	if (sc == NULL)
1146 		return (0);
1147 	ifp = &sc->sc_if;
1148 
1149 	/*
1150 	 * If a packet falls in the forest and there's nobody around to
1151 	 * hear, does it make a sound?
1152 	 */
1153 	if (ifp->if_bpf == NULL && sc->sc_sync_ifp == NULL &&
1154 	    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
1155 		/* Don't leave any stale pfsync packets hanging around. */
1156 		if (sc->sc_mbuf != NULL) {
1157 			m_freem(sc->sc_mbuf);
1158 			sc->sc_mbuf = NULL;
1159 			sc->sc_statep.s = NULL;
1160 		}
1161 		return (0);
1162 	}
1163 
1164 	if (action >= PFSYNC_ACT_MAX)
1165 		return (EINVAL);
1166 
1167 	s = splnet();
1168 	if (sc->sc_mbuf == NULL) {
1169 		if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
1170 		    (void *)&sc->sc_statep.s)) == NULL) {
1171 			splx(s);
1172 			return (ENOMEM);
1173 		}
1174 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1175 	} else {
1176 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1177 		if (h->action != action) {
1178 			pfsync_sendout(sc);
1179 			if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
1180 			    (void *)&sc->sc_statep.s)) == NULL) {
1181 				splx(s);
1182 				return (ENOMEM);
1183 			}
1184 			h = mtod(sc->sc_mbuf, struct pfsync_header *);
1185 		} else {
1186 			/*
1187 			 * If it's an update, look in the packet to see if
1188 			 * we already have an update for the state.
1189 			 */
1190 			if (action == PFSYNC_ACT_UPD && sc->sc_maxupdates) {
1191 				struct pfsync_state *usp =
1192 				    (void *)((char *)h + PFSYNC_HDRLEN);
1193 
1194 				for (i = 0; i < h->count; i++) {
1195 					if (!memcmp(usp->id, &st->id,
1196 					    PFSYNC_ID_LEN) &&
1197 					    usp->creatorid == st->creatorid) {
1198 						sp = usp;
1199 						sp->updates++;
1200 						break;
1201 					}
1202 					usp++;
1203 				}
1204 			}
1205 		}
1206 	}
1207 
1208 	secs = time_second;
1209 
1210 	st->pfsync_time = time_uptime;
1211 
1212 	if (sp == NULL) {
1213 		/* not a "duplicate" update */
1214 		i = 255;
1215 		sp = sc->sc_statep.s++;
1216 		sc->sc_mbuf->m_pkthdr.len =
1217 		    sc->sc_mbuf->m_len += sizeof(struct pfsync_state);
1218 		h->count++;
1219 		memset(sp, 0, sizeof(*sp));
1220 
1221 		memcpy(sp->id, &st->id, sizeof(sp->id));
1222 		sp->creatorid = st->creatorid;
1223 
1224 		strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
1225 		pf_state_host_hton(&sk->lan, &sp->lan);
1226 		pf_state_host_hton(&sk->gwy, &sp->gwy);
1227 		pf_state_host_hton(&sk->ext, &sp->ext);
1228 
1229 		memcpy(&sp->rt_addr, &st->rt_addr, sizeof(sp->rt_addr));
1230 
1231 		sp->creation = htonl(secs - st->creation);
1232 		pf_state_counter_hton(st->packets[0], sp->packets[0]);
1233 		pf_state_counter_hton(st->packets[1], sp->packets[1]);
1234 		pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
1235 		pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
1236 		if ((r = st->rule.ptr) == NULL)
1237 			sp->rule = htonl(-1);
1238 		else
1239 			sp->rule = htonl(r->nr);
1240 		if ((r = st->anchor.ptr) == NULL)
1241 			sp->anchor = htonl(-1);
1242 		else
1243 			sp->anchor = htonl(r->nr);
1244 		sp->af = sk->af;
1245 		sp->proto = sk->proto;
1246 		sp->direction = sk->direction;
1247 		sp->log = st->log;
1248 		sp->allow_opts = st->allow_opts;
1249 		sp->timeout = st->timeout;
1250 
1251 		if (flags & PFSYNC_FLAG_STALE)
1252 			sp->sync_flags |= PFSTATE_STALE;
1253 	}
1254 
1255 	pf_state_peer_hton(&st->src, &sp->src);
1256 	pf_state_peer_hton(&st->dst, &sp->dst);
1257 
1258 	if (st->expire <= secs)
1259 		sp->expire = htonl(0);
1260 	else
1261 		sp->expire = htonl(st->expire - secs);
1262 
1263 	/* do we need to build "compressed" actions for network transfer? */
1264 	if (sc->sc_sync_ifp && flags & PFSYNC_FLAG_COMPRESS) {
1265 		switch (action) {
1266 		case PFSYNC_ACT_UPD:
1267 			newaction = PFSYNC_ACT_UPD_C;
1268 			break;
1269 		case PFSYNC_ACT_DEL:
1270 			newaction = PFSYNC_ACT_DEL_C;
1271 			break;
1272 		default:
1273 			/* by default we just send the uncompressed states */
1274 			break;
1275 		}
1276 	}
1277 
1278 	if (newaction) {
1279 		if (sc->sc_mbuf_net == NULL) {
1280 			if ((sc->sc_mbuf_net = pfsync_get_mbuf(sc, newaction,
1281 			    (void *)&sc->sc_statep_net.s)) == NULL) {
1282 				splx(s);
1283 				return (ENOMEM);
1284 			}
1285 		}
1286 		h_net = mtod(sc->sc_mbuf_net, struct pfsync_header *);
1287 
1288 		switch (newaction) {
1289 		case PFSYNC_ACT_UPD_C:
1290 			if (i != 255) {
1291 				up = (void *)((char *)h_net +
1292 				    PFSYNC_HDRLEN + (i * sizeof(*up)));
1293 				up->updates++;
1294 			} else {
1295 				h_net->count++;
1296 				sc->sc_mbuf_net->m_pkthdr.len =
1297 				    sc->sc_mbuf_net->m_len += sizeof(*up);
1298 				up = sc->sc_statep_net.u++;
1299 
1300 				memset(up, 0, sizeof(*up));
1301 				memcpy(up->id, &st->id, sizeof(up->id));
1302 				up->creatorid = st->creatorid;
1303 			}
1304 			up->timeout = st->timeout;
1305 			up->expire = sp->expire;
1306 			up->src = sp->src;
1307 			up->dst = sp->dst;
1308 			break;
1309 		case PFSYNC_ACT_DEL_C:
1310 			sc->sc_mbuf_net->m_pkthdr.len =
1311 			    sc->sc_mbuf_net->m_len += sizeof(*dp);
1312 			dp = sc->sc_statep_net.d++;
1313 			h_net->count++;
1314 
1315 			memset(dp, 0, sizeof(*dp));
1316 			memcpy(dp->id, &st->id, sizeof(dp->id));
1317 			dp->creatorid = st->creatorid;
1318 			break;
1319 		}
1320 	}
1321 
1322 	if (h->count == sc->sc_maxcount ||
1323 	    (sc->sc_maxupdates && (sp->updates >= sc->sc_maxupdates)))
1324 		ret = pfsync_sendout(sc);
1325 
1326 	splx(s);
1327 	return (ret);
1328 }
1329 
1330 /* This must be called in splnet() */
1331 int
1332 pfsync_request_update(struct pfsync_state_upd *up, struct in_addr *src)
1333 {
1334 	struct ifnet *ifp = NULL;
1335 	struct pfsync_header *h;
1336 	struct pfsync_softc *sc = pfsyncif;
1337 	struct pfsync_state_upd_req *rup;
1338 	int ret = 0;
1339 
1340 	if (sc == NULL)
1341 		return (0);
1342 
1343 	ifp = &sc->sc_if;
1344 	if (sc->sc_mbuf == NULL) {
1345 		if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
1346 		    (void *)&sc->sc_statep.s)) == NULL)
1347 			return (ENOMEM);
1348 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1349 	} else {
1350 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1351 		if (h->action != PFSYNC_ACT_UREQ) {
1352 			pfsync_sendout(sc);
1353 			if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
1354 			    (void *)&sc->sc_statep.s)) == NULL)
1355 				return (ENOMEM);
1356 			h = mtod(sc->sc_mbuf, struct pfsync_header *);
1357 		}
1358 	}
1359 
1360 	if (src != NULL)
1361 		sc->sc_sendaddr = *src;
1362 	sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*rup);
1363 	h->count++;
1364 	rup = sc->sc_statep.r++;
1365 	memset(rup, 0, sizeof(*rup));
1366 	if (up != NULL) {
1367 		memcpy(rup->id, up->id, sizeof(rup->id));
1368 		rup->creatorid = up->creatorid;
1369 	}
1370 
1371 	if (h->count == sc->sc_maxcount)
1372 		ret = pfsync_sendout(sc);
1373 
1374 	return (ret);
1375 }
1376 
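/*
 * Tell our peers to flush all states created by the given creator id,
 * optionally restricted to one interface.
 */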
1377 int
1378 pfsync_clear_states(u_int32_t creatorid, char *ifname)
1379 {
1380 	struct ifnet *ifp = NULL;
1381 	struct pfsync_softc *sc = pfsyncif;
1382 	struct pfsync_state_clr *cp;
1383 	int s, ret;
1384 
1385 	if (sc == NULL)
1386 		return (0);
1387 
1388 	ifp = &sc->sc_if;
1389 	s = splnet();
1390 	if (sc->sc_mbuf != NULL)
1391 		pfsync_sendout(sc);
1392 	if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_CLR,
1393 	    (void *)&sc->sc_statep.c)) == NULL) {
1394 		splx(s);
1395 		return (ENOMEM);
1396 	}
1397 	sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*cp);
1398 	cp = sc->sc_statep.c;
1399 	cp->creatorid = creatorid;
1400 	if (ifname != NULL)
1401 		strlcpy(cp->ifname, ifname, IFNAMSIZ);
1402 
1403 	ret = (pfsync_sendout(sc));
1404 	splx(s);
1405 	return (ret);
1406 }
1407 
1408 void
1409 pfsync_timeout(void *v)
1410 {
1411 	struct pfsync_softc *sc = v;
1412 	int s;
1413 
1414 	s = splnet();
1415 	pfsync_sendout(sc);
1416 	splx(s);
1417 }
1418 
1419 void
1420 pfsync_tdb_timeout(void *v)
1421 {
1422 	struct pfsync_softc *sc = v;
1423 	int s;
1424 
1425 	s = splnet();
1426 	pfsync_tdb_sendout(sc);
1427 	splx(s);
1428 }
1429 
1430 /* This must be called in splnet() */
1431 void
1432 pfsync_send_bus(struct pfsync_softc *sc, u_int8_t status)
1433 {
1434 	struct pfsync_state_bus *bus;
1435 
1436 	if (sc->sc_mbuf != NULL)
1437 		pfsync_sendout(sc);
1438 
1439 	if (pfsync_sync_ok &&
1440 	    (sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_BUS,
1441 	    (void *)&sc->sc_statep.b)) != NULL) {
1442 		sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*bus);
1443 		bus = sc->sc_statep.b;
1444 		bus->creatorid = pf_status.hostid;
1445 		bus->status = status;
1446 		bus->endtime = htonl(time_uptime - sc->sc_ureq_received);
1447 		pfsync_sendout(sc);
1448 	}
1449 }
1450 
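/*
 * Bulk update callout: walk the state list from where the last pass
 * stopped, sending at most PFSYNC_BULKPACKETS packets worth of states
 * per invocation, until the terminator state is reached; then announce
 * completion with a PFSYNC_BUS_END message.
 */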
1451 void
1452 pfsync_bulk_update(void *v)
1453 {
1454 	struct pfsync_softc *sc = v;
1455 	int s, i = 0;
1456 	struct pf_state *state;
1457 
1458 	s = splnet();
1459 	if (sc->sc_mbuf != NULL)
1460 		pfsync_sendout(sc);
1461 
1462 	/*
1463 	 * Grab at most PFSYNC_BULKPACKETS worth of states which have not
1464 	 * been sent since the latest request was made.
1465 	 */
1466 	state = sc->sc_bulk_send_next;
1467 	if (state)
1468 		do {
1469 			/* send state update if syncable and not already sent */
1470 			if (!state->sync_flags
1471 			    && state->timeout < PFTM_MAX
1472 			    && state->pfsync_time <= sc->sc_ureq_received) {
1473 				pfsync_pack_state(PFSYNC_ACT_UPD, state, 0);
1474 				i++;
1475 			}
1476 
1477 			/* figure next state to send */
1478 			state = TAILQ_NEXT(state, entry_list);
1479 
1480 			/* wrap to start of list if we hit the end */
1481 			if (!state)
1482 				state = TAILQ_FIRST(&state_list);
1483 		} while (i < sc->sc_maxcount * PFSYNC_BULKPACKETS &&
1484 		    state != sc->sc_bulk_terminator);
1485 
1486 	if (!state || state == sc->sc_bulk_terminator) {
1487 		/* we're done */
1488 		pfsync_send_bus(sc, PFSYNC_BUS_END);
1489 		sc->sc_ureq_received = 0;
1490 		sc->sc_bulk_send_next = NULL;
1491 		sc->sc_bulk_terminator = NULL;
1492 		callout_stop(&sc->sc_bulk_tmo);
1493 		if (pf_status.debug >= PF_DEBUG_MISC)
1494 			printf("pfsync: bulk update complete\n");
1495 	} else {
1496 		/* look again for more in a bit */
1497 		callout_schedule(&sc->sc_bulk_tmo, 1);
1498 		sc->sc_bulk_send_next = state;
1499 	}
1500 	if (sc->sc_mbuf != NULL)
1501 		pfsync_sendout(sc);
1502 	splx(s);
1503 }
1504 
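/*
 * Bulk failure callout: if the bulk transfer has not completed,
 * re-request it up to PFSYNC_MAX_BULKTRIES times; after that, give up
 * and behave as if it had succeeded so carp preemption is no longer
 * suppressed.
 */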
1505 void
1506 pfsync_bulkfail(void *v)
1507 {
1508 	struct pfsync_softc *sc = v;
1509 	int s, error;
1510 
1511 	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
1512 		/* Try again in a bit */
1513 		callout_schedule(&sc->sc_bulkfail_tmo, 5 * hz);
1514 		s = splnet();
1515 		error = pfsync_request_update(NULL, NULL);
1516 		if (error == ENOMEM) {
1517 			if (pf_status.debug >= PF_DEBUG_MISC)
1518 				printf("pfsync: cannot allocate mbufs for "
1519 				    "bulk update\n");
1520 		} else
1521 			pfsync_sendout(sc);
1522 		splx(s);
1523 	} else {
1524 		/* Pretend like the transfer was ok */
1525 		sc->sc_ureq_sent = 0;
1526 		sc->sc_bulk_tries = 0;
1527 #if NCARP > 0
1528 		if (!pfsync_sync_ok)
1529 			carp_suppress_preempt--;
1530 #endif
1531 		pfsync_sync_ok = 1;
1532 		if (pf_status.debug >= PF_DEBUG_MISC)
1533 			printf("pfsync: failed to receive "
1534 			    "bulk update status\n");
1535 		callout_stop(&sc->sc_bulkfail_tmo);
1536 	}
1537 }
1538 
1539 /* This must be called in splnet() */
1540 int
1541 pfsync_sendout(struct pfsync_softc *sc)
1542 {
1543 	struct ifnet *ifp = &sc->sc_if;
1544 	struct mbuf *m;
1545 
1546 	callout_stop(&sc->sc_tmo);
1547 
1548 	if (sc->sc_mbuf == NULL)
1549 		return (0);
1550 	m = sc->sc_mbuf;
1551 	sc->sc_mbuf = NULL;
1552 	sc->sc_statep.s = NULL;
1553 
1554 	if (ifp->if_bpf)
1555 		bpf_ops->bpf_mtap(ifp->if_bpf, m);
1556 
1557 	if (sc->sc_mbuf_net) {
1558 		m_freem(m);
1559 		m = sc->sc_mbuf_net;
1560 		sc->sc_mbuf_net = NULL;
1561 		sc->sc_statep_net.s = NULL;
1562 	}
1563 
1564 	return pfsync_sendout_mbuf(sc, m);
1565 }
1566 
1567 int
1568 pfsync_tdb_sendout(struct pfsync_softc *sc)
1569 {
1570 	struct ifnet *ifp = &sc->sc_if;
1571 	struct mbuf *m;
1572 
1573 	callout_stop(&sc->sc_tdb_tmo);
1574 
1575 	if (sc->sc_mbuf_tdb == NULL)
1576 		return (0);
1577 	m = sc->sc_mbuf_tdb;
1578 	sc->sc_mbuf_tdb = NULL;
1579 	sc->sc_statep_tdb.t = NULL;
1580 
1581 	if (ifp->if_bpf)
1582 		bpf_ops->bpf_mtap(ifp->if_bpf, m);
1583 
1584 	return pfsync_sendout_mbuf(sc, m);
1585 }
1586 
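/*
 * Prepend the IP header and transmit a pfsync packet to the configured
 * peer (or the pfsync multicast group) with TTL PFSYNC_DFLTTL, or just
 * free the packet if no sync interface or unicast peer is configured.
 */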
1587 int
1588 pfsync_sendout_mbuf(struct pfsync_softc *sc, struct mbuf *m)
1589 {
1590 	struct sockaddr sa;
1591 	struct ip *ip;
1592 
1593 	if (sc->sc_sync_ifp ||
1594 	    sc->sc_sync_peer.s_addr != INADDR_PFSYNC_GROUP) {
1595 		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
1596 		if (m == NULL) {
1597 			PFSYNC_STATINC(PFSYNC_STAT_ONOMEM);
1598 			return (0);
1599 		}
1600 		ip = mtod(m, struct ip *);
1601 		ip->ip_v = IPVERSION;
1602 		ip->ip_hl = sizeof(*ip) >> 2;
1603 		ip->ip_tos = IPTOS_LOWDELAY;
1604 		ip->ip_len = htons(m->m_pkthdr.len);
1605 		ip->ip_id = htons(ip_randomid(0));
1606 		ip->ip_off = htons(IP_DF);
1607 		ip->ip_ttl = PFSYNC_DFLTTL;
1608 		ip->ip_p = IPPROTO_PFSYNC;
1609 		ip->ip_sum = 0;
1610 
1611 		memset(&sa, 0, sizeof(sa));
1612 		ip->ip_src.s_addr = INADDR_ANY;
1613 
1614 		if (sc->sc_sendaddr.s_addr == INADDR_PFSYNC_GROUP)
1615 			m->m_flags |= M_MCAST;
1616 		ip->ip_dst = sc->sc_sendaddr;
1617 		sc->sc_sendaddr.s_addr = sc->sc_sync_peer.s_addr;
1618 
1619 		PFSYNC_STATINC(PFSYNC_STAT_OPACKETS);
1620 
1621 		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)) {
1622 			PFSYNC_STATINC(PFSYNC_STAT_OERRORS);
1623 		}
1624 	} else
1625 		m_freem(m);
1626 
1627 	return (0);
1628 }
1629 
1630 #ifdef IPSEC
1631 /* Update an in-kernel tdb. Silently fail if no tdb is found. */
1632 void
1633 pfsync_update_net_tdb(struct pfsync_tdb *pt)
1634 {
1635 	struct tdb		*tdb;
1636 	int			 s;
1637 
1638 	/* check for invalid values */
1639 	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
1640 	    (pt->dst.sa.sa_family != AF_INET &&
1641 	     pt->dst.sa.sa_family != AF_INET6))
1642 		goto bad;
1643 
1644 	s = spltdb();
1645 	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
1646 	if (tdb) {
1647 		pt->rpl = ntohl(pt->rpl);
1648 		pt->cur_bytes = betoh64(pt->cur_bytes);
1649 
1650 		/* Neither replay nor byte counter should ever decrease. */
1651 		if (pt->rpl < tdb->tdb_rpl ||
1652 		    pt->cur_bytes < tdb->tdb_cur_bytes) {
1653 			splx(s);
1654 			goto bad;
1655 		}
1656 
1657 		tdb->tdb_rpl = pt->rpl;
1658 		tdb->tdb_cur_bytes = pt->cur_bytes;
1659 	}
1660 	splx(s);
1661 	return;
1662 
1663  bad:
1664 	if (pf_status.debug >= PF_DEBUG_MISC)
1665 		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
1666 		    "invalid value\n");
1667 	PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
1668 	return;
1669 }
1670 
1671 /* One of our local tdbs has been updated; we need to sync its rpl with our peers. */
1672 int
1673 pfsync_update_tdb(struct tdb *tdb, int output)
1674 {
1675 	struct ifnet *ifp = NULL;
1676 	struct pfsync_softc *sc = pfsyncif;
1677 	struct pfsync_header *h;
1678 	struct pfsync_tdb *pt = NULL;
1679 	int s, i, ret = 0;
1680 
1681 	if (sc == NULL)
1682 		return (0);
1683 
1684 	ifp = &sc->sc_if;
1685 	if (ifp->if_bpf == NULL && sc->sc_sync_ifp == NULL &&
1686 	    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
1687 		/* Don't leave any stale pfsync packets hanging around. */
1688 		if (sc->sc_mbuf_tdb != NULL) {
1689 			m_freem(sc->sc_mbuf_tdb);
1690 			sc->sc_mbuf_tdb = NULL;
1691 			sc->sc_statep_tdb.t = NULL;
1692 		}
1693 		return (0);
1694 	}
1695 
1696 	s = splnet();
1697 	if (sc->sc_mbuf_tdb == NULL) {
1698 		if ((sc->sc_mbuf_tdb = pfsync_get_mbuf(sc, PFSYNC_ACT_TDB_UPD,
1699 		    (void *)&sc->sc_statep_tdb.t)) == NULL) {
1700 			splx(s);
1701 			return (ENOMEM);
1702 		}
1703 		h = mtod(sc->sc_mbuf_tdb, struct pfsync_header *);
1704 	} else {
1705 		h = mtod(sc->sc_mbuf_tdb, struct pfsync_header *);
1706 		if (h->action != PFSYNC_ACT_TDB_UPD) {
1707 			/*
1708 			 * XXX will never happen as long as there's
1709 			 * only one "TDB action".
1710 			 */
1711 			pfsync_tdb_sendout(sc);
1712 			sc->sc_mbuf_tdb = pfsync_get_mbuf(sc,
1713 			    PFSYNC_ACT_TDB_UPD, (void *)&sc->sc_statep_tdb.t);
1714 			if (sc->sc_mbuf_tdb == NULL) {
1715 				splx(s);
1716 				return (ENOMEM);
1717 			}
1718 			h = mtod(sc->sc_mbuf_tdb, struct pfsync_header *);
1719 		} else if (sc->sc_maxupdates) {
1720 			/*
1721 			 * If it's an update, look in the packet to see if
1722 			 * we already have an update for the state.
1723 			 */
1724 			struct pfsync_tdb *u =
1725 			    (void *)((char *)h + PFSYNC_HDRLEN);
1726 
1727 			for (i = 0; !pt && i < h->count; i++) {
1728 				if (tdb->tdb_spi == u->spi &&
1729 				    tdb->tdb_sproto == u->sproto &&
1730 			            !bcmp(&tdb->tdb_dst, &u->dst,
1731 				    SA_LEN(&u->dst.sa))) {
1732 					pt = u;
1733 					pt->updates++;
1734 				}
1735 				u++;
1736 			}
1737 		}
1738 	}
1739 
1740 	if (pt == NULL) {
1741 		/* not a "duplicate" update */
1742 		pt = sc->sc_statep_tdb.t++;
1743 		sc->sc_mbuf_tdb->m_pkthdr.len =
1744 		    sc->sc_mbuf_tdb->m_len += sizeof(struct pfsync_tdb);
1745 		h->count++;
1746 		memset(pt, 0, sizeof(*pt));
1747 
1748 		pt->spi = tdb->tdb_spi;
1749 		memcpy(&pt->dst, &tdb->tdb_dst, sizeof pt->dst);
1750 		pt->sproto = tdb->tdb_sproto;
1751 	}
1752 
1753 	/*
1754 	 * When a failover happens, the master's rpl is probably above
1755 	 * what we see here (we may be up to a second late), so
1756 	 * increase it a bit for outbound tdbs to manage most such
1757 	 * situations.
1758 	 *
1759 	 * For now, just add an offset that is likely to be larger
1760 	 * than the number of packets we can see in one second. The RFC
1761 	 * just says the next packet must have a higher seq value.
1762 	 *
1763 	 * XXX What is a good algorithm for this? We could use
1764 	 * a rate-determined increase, but to know it, we would have
1765 	 * to extend struct tdb.
1766 	 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
1767 	 * will soon be replaced anyway. For now, just don't handle
1768 	 * this edge case.
1769 	 */
1770 #define RPL_INCR 16384
1771 	pt->rpl = htonl(tdb->tdb_rpl + (output ? RPL_INCR : 0));
1772 	pt->cur_bytes = htobe64(tdb->tdb_cur_bytes);
1773 
1774 	if (h->count == sc->sc_maxcount ||
1775 	    (sc->sc_maxupdates && (pt->updates >= sc->sc_maxupdates)))
1776 		ret = pfsync_tdb_sendout(sc);
1777 
1778 	splx(s);
1779 	return (ret);
1780 }
1781 #endif
1782 
1783 static int
1784 sysctl_net_inet_pfsync_stats(SYSCTLFN_ARGS)
1785 {
1786 
1787 	return (NETSTAT_SYSCTL(pfsyncstat_percpu, PFSYNC_NSTATS));
1788 }
1789 
1790 SYSCTL_SETUP(sysctl_net_inet_pfsync_setup, "sysctl net.inet.pfsync subtree setup")
1791 {
1792 
1793 	sysctl_createv(clog, 0, NULL, NULL,
1794 		       CTLFLAG_PERMANENT,
1795 		       CTLTYPE_NODE, "net", NULL,
1796 		       NULL, 0, NULL, 0,
1797 		       CTL_NET, CTL_EOL);
1798 	sysctl_createv(clog, 0, NULL, NULL,
1799 		       CTLFLAG_PERMANENT,
1800 		       CTLTYPE_NODE, "inet", NULL,
1801 		       NULL, 0, NULL, 0,
1802 		       CTL_NET, PF_INET, CTL_EOL);
1803 	sysctl_createv(clog, 0, NULL, NULL,
1804 		       CTLFLAG_PERMANENT,
1805 		       CTLTYPE_NODE, "pfsync",
1806 		       SYSCTL_DESCR("pfsync related settings"),
1807 		       NULL, 0, NULL, 0,
1808 		       CTL_NET, PF_INET, IPPROTO_PFSYNC, CTL_EOL);
1809 	sysctl_createv(clog, 0, NULL, NULL,
1810 		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
1811 		       CTLTYPE_STRUCT, "stats",
1812 		       SYSCTL_DESCR("pfsync statistics"),
1813 		       sysctl_net_inet_pfsync_stats, 0, NULL, 0,
1814 		       CTL_NET, PF_INET, IPPROTO_PFSYNC,
1815 		       CTL_CREATE, CTL_EOL);
1816 }
1817