xref: /dflybsd-src/sys/net/pf/if_pfsync.c (revision 88abd8b5763f2e5d4b4db5c5dc1b5bb4c489698b)
1 /*	$OpenBSD: if_pfsync.c,v 1.73 2006/11/16 13:13:38 henning Exp $	*/
2 
3 /*
4  * Copyright (c) 2002 Michael Shalayeff
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "opt_inet.h"
30 #include "opt_inet6.h"
31 #include "use_carp.h"
32 
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/proc.h>
36 #include <sys/priv.h>
37 #include <sys/systm.h>
38 #include <sys/time.h>
39 #include <sys/mbuf.h>
40 #include <sys/socket.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/sockio.h>
45 #include <sys/thread2.h>
46 #include <vm/vm_zone.h>
47 
48 #include <machine/inttypes.h>
49 
50 #include <net/if.h>
51 #include <net/if_types.h>
52 #include <net/route.h>
53 #include <net/bpf.h>
54 #include <netinet/in.h>
55 #include <netinet/if_ether.h>
56 #include <netinet/ip_carp.h>
57 #include <netinet/tcp.h>
58 #include <netinet/tcp_seq.h>
59 
60 #ifdef	INET
61 #include <netinet/in_systm.h>
62 #include <netinet/in_var.h>
63 #include <netinet/ip.h>
64 #include <netinet/ip_var.h>
65 #endif
66 
67 #ifdef INET6
68 #include <netinet6/nd6.h>
69 #endif /* INET6 */
70 
71 #include <net/pf/pfvar.h>
72 #include <net/pf/if_pfsync.h>
73 
74 #define	PFSYNCNAME	"pfsync"
75 
76 #define PFSYNC_MINMTU	\
77     (sizeof(struct pfsync_header) + sizeof(struct pf_state))
78 
79 #ifdef PFSYNCDEBUG
80 #define DPRINTF(x)    do { if (pfsyncdebug) kprintf x ; } while (0)
81 int pfsyncdebug;
82 #else
83 #define DPRINTF(x)
84 #endif
85 
86 struct pfsync_softc	*pfsyncif = NULL;
87 struct pfsyncstats	 pfsyncstats;
88 
89 void	pfsyncattach(int);
90 static void	pfsync_clone_destroy(struct ifnet *);
91 static int	pfsync_clone_create(struct if_clone *, int, caddr_t);
92 void	pfsync_setmtu(struct pfsync_softc *, int);
93 int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
94 	    struct pf_state_peer *);
95 int	pfsync_insert_net_state(struct pfsync_state *, u_int8_t);
96 int	pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
97 	    struct rtentry *);
98 int	pfsyncioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
99 void	pfsyncstart(struct ifnet *);
100 
101 struct mbuf *pfsync_get_mbuf(struct pfsync_softc *, u_int8_t, void **);
102 int	pfsync_request_update(struct pfsync_state_upd *, struct in_addr *);
103 int	pfsync_sendout(struct pfsync_softc *);
104 int	pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
105 void	pfsync_timeout(void *);
106 void	pfsync_send_bus(struct pfsync_softc *, u_int8_t);
107 void	pfsync_bulk_update(void *);
108 void	pfsync_bulkfail(void *);
109 
110 static MALLOC_DEFINE(M_PFSYNC, PFSYNCNAME, "Packet Filter State Sync. Interface");
111 static LIST_HEAD(pfsync_list, pfsync_softc) pfsync_list;
112 
113 int	pfsync_sync_ok;
114 
115 struct if_clone	pfsync_cloner =
116     IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy, 1, 1);
117 
118 void
119 pfsyncattach(int npfsync)
120 {
121 	if_clone_attach(&pfsync_cloner);
122 }
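
/*
 * Create a pfsync clone interface: allocate and zero the softc, set the
 * default sync parameters (multicast sync peer, maxupdates), and attach
 * the ifnet, callouts and bpf tap.
 */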
123 static int
124 pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param __unused)
125 {
126 	struct pfsync_softc *sc;
127 	struct ifnet *ifp;
128 
129 	lwkt_gettoken(&pf_token);
130 
131 	MALLOC(sc, struct pfsync_softc *, sizeof(*sc), M_PFSYNC,
132 	    M_WAITOK|M_ZERO);
133 
134 	pfsync_sync_ok = 1;
135 	sc->sc_mbuf = NULL;
136 	sc->sc_mbuf_net = NULL;
137 	sc->sc_mbuf_tdb = NULL;
138 	sc->sc_statep.s = NULL;
139 	sc->sc_statep_net.s = NULL;
140 	sc->sc_statep_tdb.t = NULL;
141 	sc->sc_maxupdates = 128;
142 	sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
143 	sc->sc_sendaddr.s_addr = htonl(INADDR_PFSYNC_GROUP);
144 	sc->sc_ureq_received = 0;
145 	sc->sc_ureq_sent = 0;
146 	sc->sc_bulk_send_next = NULL;
147 	sc->sc_bulk_terminator = NULL;
148 
149 	lwkt_reltoken(&pf_token);
150 	ifp = &sc->sc_if;
151 	ksnprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
152 	if_initname(ifp, ifc->ifc_name, unit);
153 	ifp->if_ioctl = pfsyncioctl;
154 	ifp->if_output = pfsyncoutput;
155 	ifp->if_start = pfsyncstart;
156 	ifp->if_type = IFT_PFSYNC;
157 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
158 	ifp->if_hdrlen = PFSYNC_HDRLEN;
159 	ifp->if_baudrate = IF_Mbps(100);
160 	ifp->if_softc = sc;
161 	pfsync_setmtu(sc, MCLBYTES);
162 	callout_init(&sc->sc_tmo);
163 	callout_init(&sc->sc_tdb_tmo);
164 	callout_init(&sc->sc_bulk_tmo);
165 	callout_init(&sc->sc_bulkfail_tmo);
166 	if_attach(&sc->sc_if, NULL);
167 
168 	LIST_INSERT_HEAD(&pfsync_list, sc, sc_next);
169 	bpfattach(&sc->sc_if, DLT_PFSYNC, PFSYNC_HDRLEN);
170 
171 #if NCARP > 0
172 	if_addgroup(ifp, "carp");
173 #endif
174 	lwkt_gettoken(&pf_token);
175 
176 	lwkt_reltoken(&pf_token);
177 	return (0);
178 }
179 
180 static void
181 pfsync_clone_destroy(struct ifnet *ifp)
182 {
183 	lwkt_gettoken(&pf_token);
184 	lwkt_reltoken(&pf_token);
185 #if NBPFILTER > 0
186 	bpfdetach(ifp);
187 #endif
188 	if_detach(ifp);
189 	lwkt_gettoken(&pf_token);
190 	kfree(pfsyncif, M_DEVBUF);
191 	pfsyncif = NULL;
192 }
193 
194 /*
195  * Start output on the pfsync interface.
196  */
197 void
198 pfsyncstart(struct ifnet *ifp)
199 {
200 	crit_enter();
201 	IF_DROP(&ifp->if_snd);
202 	IF_DRAIN(&ifp->if_snd);
203 	crit_exit();
204 }
205 
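/*
 * Allocate a scrub-state entry for the local peer if the received state
 * indicates scrubbing and none has been allocated yet.
 */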
206 int
207 pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
208     struct pf_state_peer *d)
209 {
210 	if (s->scrub.scrub_flag && d->scrub == NULL) {
211 		d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
212 		if (d->scrub == NULL)
213 			return (ENOMEM);
214 		bzero(d->scrub, sizeof(*d->scrub));
215 	}
216 
217 	return (0);
218 }
219 
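/*
 * Install a state received from a pfsync peer into the local state table.
 * The state is bound to the rule of the advertised number only when the
 * ruleset checksums match; otherwise it falls back to the default rule.
 */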
220 int
221 pfsync_insert_net_state(struct pfsync_state *sp, u_int8_t chksum_flag)
222 {
223 	struct pf_state	*st = NULL;
224 	struct pf_state_key *sk = NULL;
225 	struct pf_rule *r = NULL;
226 	struct pfi_kif	*kif;
227 
228 	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
229 		kprintf("pfsync_insert_net_state: invalid creator id:"
230 		    " %08x\n", ntohl(sp->creatorid));
231 		return (EINVAL);
232 	}
233 
234 	kif = pfi_kif_get(sp->ifname);
235 	if (kif == NULL) {
236 		if (pf_status.debug >= PF_DEBUG_MISC)
237 			kprintf("pfsync_insert_net_state: "
238 			    "unknown interface: %s\n", sp->ifname);
239 		/* skip this state */
240 		return (0);
241 	}
242 
243 	/*
244 	 * If the ruleset checksums match, it's safe to associate the state
245 	 * with the rule of that number.
246 	 */
247 	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && chksum_flag &&
248 	    ntohl(sp->rule) <
249 	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
250 		r = pf_main_ruleset.rules[
251 		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
252 	else
253 		r = &pf_default_rule;
254 
255 	if (!r->max_states || r->states < r->max_states)
256 		st = pool_get(&pf_state_pl, PR_NOWAIT);
257 	if (st == NULL) {
258 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
259 		return (ENOMEM);
260 	}
261 	bzero(st, sizeof(*st));
262 
263 	if ((sk = pf_alloc_state_key(st)) == NULL) {
264 		pool_put(&pf_state_pl, st);
265 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
266 		return (ENOMEM);
267 	}
268 
269 	/* allocate memory for scrub info */
270 	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
271 	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) {
272 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
273 		if (st->src.scrub)
274 			pool_put(&pf_state_scrub_pl, st->src.scrub);
275 		pool_put(&pf_state_pl, st);
276 		pool_put(&pf_state_key_pl, sk);
277 		return (ENOMEM);
278 	}
279 
280 	st->rule.ptr = r;
281 	/* XXX get pointers to nat_rule and anchor */
282 
283 	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
284 	r->states++;
285 
286 	/* fill in the rest of the state entry */
287 	pf_state_host_ntoh(&sp->lan, &sk->lan);
288 	pf_state_host_ntoh(&sp->gwy, &sk->gwy);
289 	pf_state_host_ntoh(&sp->ext, &sk->ext);
290 
291 	pf_state_peer_ntoh(&sp->src, &st->src);
292 	pf_state_peer_ntoh(&sp->dst, &st->dst);
293 
294 	bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
295 	st->creation = time_second - ntohl(sp->creation);
296 	st->expire = ntohl(sp->expire) + time_second;
297 
298 	sk->af = sp->af;
299 	sk->proto = sp->proto;
300 	sk->direction = sp->direction;
301 	st->log = sp->log;
302 	st->timeout = sp->timeout;
303 	st->allow_opts = sp->allow_opts;
304 
305 	bcopy(sp->id, &st->id, sizeof(st->id));
306 	st->creatorid = sp->creatorid;
307 	st->sync_flags = PFSTATE_FROMSYNC;
308 
309 	if (pf_insert_state(kif, st)) {
310 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
311 		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
312 		r->states--;
313 		if (st->dst.scrub)
314 			pool_put(&pf_state_scrub_pl, st->dst.scrub);
315 		if (st->src.scrub)
316 			pool_put(&pf_state_scrub_pl, st->src.scrub);
317 		pool_put(&pf_state_pl, st);
318 		return (EINVAL);
319 	}
320 
321 	return (0);
322 }
323 
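/*
 * Handle an incoming pfsync packet: validate the IP and pfsync headers,
 * then dispatch on the action code (clear, insert, update, delete, their
 * compressed variants, update requests and bulk update start/end).
 */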
324 void
325 pfsync_input(struct mbuf *m, ...)
326 {
327 	struct ip *ip = mtod(m, struct ip *);
328 	struct pfsync_header *ph;
329 	struct pfsync_softc *sc = pfsyncif;
330 	struct pf_state *st;
331 	struct pf_state_key *sk;
332 	struct pf_state_cmp id_key;
333 	struct pfsync_state *sp;
334 	struct pfsync_state_upd *up;
335 	struct pfsync_state_del *dp;
336 	struct pfsync_state_clr *cp;
337 	struct pfsync_state_upd_req *rup;
338 	struct pfsync_state_bus *bus;
339 #ifdef IPSEC
340 	struct pfsync_tdb *pt;
341 #endif
342 	struct in_addr src;
343 	struct mbuf *mp;
344 	int iplen, action, error, i, count, offp, sfail, stale = 0;
345 	u_int8_t chksum_flag = 0;
346 
347 	/* This function is not yet called from anywhere. */
348 	/* Still, for safety, we assume that pf_token must be held. */
349 	ASSERT_LWKT_TOKEN_HELD(&pf_token);
350 
351 	pfsyncstats.pfsyncs_ipackets++;
352 
353 	/* verify that we have a sync interface configured */
354 	if (!sc || !sc->sc_sync_ifp || !pf_status.running)
355 		goto done;
356 
357 	/* verify that the packet came in on the right interface */
358 	if (sc->sc_sync_ifp != m->m_pkthdr.rcvif) {
359 		pfsyncstats.pfsyncs_badif++;
360 		goto done;
361 	}
362 
363 	/* verify that the IP TTL is 255.  */
364 	if (ip->ip_ttl != PFSYNC_DFLTTL) {
365 		pfsyncstats.pfsyncs_badttl++;
366 		goto done;
367 	}
368 
369 	iplen = ip->ip_hl << 2;
370 
371 	if (m->m_pkthdr.len < iplen + sizeof(*ph)) {
372 		pfsyncstats.pfsyncs_hdrops++;
373 		goto done;
374 	}
375 
376 	if (iplen + sizeof(*ph) > m->m_len) {
377 		if ((m = m_pullup(m, iplen + sizeof(*ph))) == NULL) {
378 			pfsyncstats.pfsyncs_hdrops++;
379 			goto done;
380 		}
381 		ip = mtod(m, struct ip *);
382 	}
383 	ph = (struct pfsync_header *)((char *)ip + iplen);
384 
385 	/* verify the version */
386 	if (ph->version != PFSYNC_VERSION) {
387 		pfsyncstats.pfsyncs_badver++;
388 		goto done;
389 	}
390 
391 	action = ph->action;
392 	count = ph->count;
393 
394 	/* make sure it's a valid action code */
395 	if (action >= PFSYNC_ACT_MAX) {
396 		pfsyncstats.pfsyncs_badact++;
397 		goto done;
398 	}
399 
400 	/* Cheaper to grab this now than having to mess with mbufs later */
401 	src = ip->ip_src;
402 
403 	if (!bcmp(&ph->pf_chksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
404 		chksum_flag++;
405 
406 	switch (action) {
407 	case PFSYNC_ACT_CLR: {
408 		struct pf_state *nexts;
409 		struct pf_state_key *nextsk;
410 		struct pfi_kif *kif;
411 		u_int32_t creatorid;
412 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
413 		    sizeof(*cp), &offp)) == NULL) {
414 			pfsyncstats.pfsyncs_badlen++;
415 			return;
416 		}
417 		cp = (struct pfsync_state_clr *)(mp->m_data + offp);
418 		creatorid = cp->creatorid;
419 
420 		crit_enter();
421 		if (cp->ifname[0] == '\0') {
422 			for (st = RB_MIN(pf_state_tree_id, &tree_id);
423 			    st; st = nexts) {
424 				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
425 				if (st->creatorid == creatorid) {
426 					st->sync_flags |= PFSTATE_FROMSYNC;
427 					pf_unlink_state(st);
428 				}
429 			}
430 		} else {
431 			if ((kif = pfi_kif_get(cp->ifname)) == NULL) {
432 				crit_exit();
433 				return;
434 			}
435 			for (sk = RB_MIN(pf_state_tree_lan_ext,
436 			    &pf_statetbl_lan_ext); sk; sk = nextsk) {
437 				nextsk = RB_NEXT(pf_state_tree_lan_ext,
438 				    &pf_statetbl_lan_ext, sk);
439 				TAILQ_FOREACH(st, &sk->states, next) {
440 					if (st->creatorid == creatorid) {
441 						st->sync_flags |=
442 						    PFSTATE_FROMSYNC;
443 						pf_unlink_state(st);
444 					}
445 				}
446 			}
447 		}
448 		crit_exit();
449 
450 		break;
451 	}
452 	case PFSYNC_ACT_INS:
453 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
454 		    count * sizeof(*sp), &offp)) == NULL) {
455 			pfsyncstats.pfsyncs_badlen++;
456 			return;
457 		}
458 
459 		crit_enter();
460 		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
461 		    i < count; i++, sp++) {
462 			/* check for invalid values */
463 			if (sp->timeout >= PFTM_MAX ||
464 			    sp->src.state > PF_TCPS_PROXY_DST ||
465 			    sp->dst.state > PF_TCPS_PROXY_DST ||
466 			    sp->direction > PF_OUT ||
467 			    (sp->af != AF_INET && sp->af != AF_INET6)) {
468 				if (pf_status.debug >= PF_DEBUG_MISC)
469 					kprintf("pfsync_insert: PFSYNC_ACT_INS: "
470 					    "invalid value\n");
471 				pfsyncstats.pfsyncs_badstate++;
472 				continue;
473 			}
474 
475 			if ((error = pfsync_insert_net_state(sp,
476 			    chksum_flag))) {
477 				if (error == ENOMEM) {
478 					crit_exit();
479 					goto done;
480 				}
481 				continue;
482 			}
483 		}
484 		crit_exit();
485 		break;
486 	case PFSYNC_ACT_UPD:
487 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
488 		    count * sizeof(*sp), &offp)) == NULL) {
489 			pfsyncstats.pfsyncs_badlen++;
490 			return;
491 		}
492 
493 		crit_enter();
494 		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
495 		    i < count; i++, sp++) {
496 			int flags = PFSYNC_FLAG_STALE;
497 
498 			/* check for invalid values */
499 			if (sp->timeout >= PFTM_MAX ||
500 			    sp->src.state > PF_TCPS_PROXY_DST ||
501 			    sp->dst.state > PF_TCPS_PROXY_DST) {
502 				if (pf_status.debug >= PF_DEBUG_MISC)
503 					kprintf("pfsync_insert: PFSYNC_ACT_UPD: "
504 					    "invalid value\n");
505 				pfsyncstats.pfsyncs_badstate++;
506 				continue;
507 			}
508 
509 			bcopy(sp->id, &id_key.id, sizeof(id_key.id));
510 			id_key.creatorid = sp->creatorid;
511 
512 			st = pf_find_state_byid(&id_key);
513 			if (st == NULL) {
514 				/* insert the update */
515 				if (pfsync_insert_net_state(sp, chksum_flag))
516 					pfsyncstats.pfsyncs_badstate++;
517 				continue;
518 			}
519 			sk = st->state_key;
520 			sfail = 0;
521 			if (sk->proto == IPPROTO_TCP) {
522 				/*
523 				 * The state should never go backwards except
524 				 * for syn-proxy states.  Neither should the
525 				 * sequence window slide backwards.
526 				 */
527 				if (st->src.state > sp->src.state &&
528 				    (st->src.state < PF_TCPS_PROXY_SRC ||
529 				    sp->src.state >= PF_TCPS_PROXY_SRC))
530 					sfail = 1;
531 				else if (SEQ_GT(st->src.seqlo,
532 				    ntohl(sp->src.seqlo)))
533 					sfail = 3;
534 				else if (st->dst.state > sp->dst.state) {
535 					/* There might still be useful
536 					 * information about the src state here,
537 					 * so import that part of the update,
538 					 * then "fail" so we send the updated
539 					 * state back to the peer who is missing
540 					 * what we know. */
541 					pf_state_peer_ntoh(&sp->src, &st->src);
542 					/* XXX do anything with timeouts? */
543 					sfail = 7;
544 					flags = 0;
545 				} else if (st->dst.state >= TCPS_SYN_SENT &&
546 				    SEQ_GT(st->dst.seqlo, ntohl(sp->dst.seqlo)))
547 					sfail = 4;
548 			} else {
549 				/*
550 				 * Non-TCP protocol state machines always go
551 				 * forwards.
552 				 */
553 				if (st->src.state > sp->src.state)
554 					sfail = 5;
555 				else if (st->dst.state > sp->dst.state)
556 					sfail = 6;
557 			}
558 			if (sfail) {
559 				if (pf_status.debug >= PF_DEBUG_MISC)
560 					kprintf("pfsync: %s stale update "
561 					    "(%d) id: %016jx "
562 					    "creatorid: %08x\n",
563 					    (sfail < 7 ?  "ignoring"
564 					     : "partial"), sfail,
565 					    (uintmax_t)be64toh(st->id),
566 					    ntohl(st->creatorid));
567 				pfsyncstats.pfsyncs_badstate++;
568 
569 				if (!(sp->sync_flags & PFSTATE_STALE)) {
570 					/* we have a better state, send it */
571 					if (sc->sc_mbuf != NULL && !stale)
572 						pfsync_sendout(sc);
573 					stale++;
574 					if (!st->sync_flags)
575 						pfsync_pack_state(
576 						    PFSYNC_ACT_UPD, st, flags);
577 				}
578 				continue;
579 			}
580 			pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
581 			pf_state_peer_ntoh(&sp->src, &st->src);
582 			pf_state_peer_ntoh(&sp->dst, &st->dst);
583 			st->expire = ntohl(sp->expire) + time_second;
584 			st->timeout = sp->timeout;
585 		}
586 		if (stale && sc->sc_mbuf != NULL)
587 			pfsync_sendout(sc);
588 		crit_exit();
589 		break;
590 	/*
591 	 * It's not strictly necessary for us to support the "uncompressed"
592 	 * delete action, but it's relatively simple and maintains consistency.
593 	 */
594 	case PFSYNC_ACT_DEL:
595 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
596 		    count * sizeof(*sp), &offp)) == NULL) {
597 			pfsyncstats.pfsyncs_badlen++;
598 			return;
599 		}
600 
601 		crit_enter();
602 		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
603 		    i < count; i++, sp++) {
604 			bcopy(sp->id, &id_key.id, sizeof(id_key.id));
605 			id_key.creatorid = sp->creatorid;
606 
607 			st = pf_find_state_byid(&id_key);
608 			if (st == NULL) {
609 				pfsyncstats.pfsyncs_badstate++;
610 				continue;
611 			}
612 			st->sync_flags |= PFSTATE_FROMSYNC;
613 			pf_unlink_state(st);
614 		}
615 		crit_exit();
616 		break;
617 	case PFSYNC_ACT_UPD_C: {
618 		int update_requested = 0;
619 
620 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
621 		    count * sizeof(*up), &offp)) == NULL) {
622 			pfsyncstats.pfsyncs_badlen++;
623 			return;
624 		}
625 
626 		crit_enter();
627 		for (i = 0, up = (struct pfsync_state_upd *)(mp->m_data + offp);
628 		    i < count; i++, up++) {
629 			/* check for invalid values */
630 			if (up->timeout >= PFTM_MAX ||
631 			    up->src.state > PF_TCPS_PROXY_DST ||
632 			    up->dst.state > PF_TCPS_PROXY_DST) {
633 				if (pf_status.debug >= PF_DEBUG_MISC)
634 					kprintf("pfsync_insert: "
635 					    "PFSYNC_ACT_UPD_C: "
636 					    "invalid value\n");
637 				pfsyncstats.pfsyncs_badstate++;
638 				continue;
639 			}
640 
641 			bcopy(up->id, &id_key.id, sizeof(id_key.id));
642 			id_key.creatorid = up->creatorid;
643 
644 			st = pf_find_state_byid(&id_key);
645 			if (st == NULL) {
646 				/* We don't have this state. Ask for it. */
647 				error = pfsync_request_update(up, &src);
648 				if (error == ENOMEM) {
649 					crit_exit();
650 					goto done;
651 				}
652 				update_requested = 1;
653 				pfsyncstats.pfsyncs_badstate++;
654 				continue;
655 			}
656 			sk = st->state_key;
657 			sfail = 0;
658 			if (sk->proto == IPPROTO_TCP) {
659 				/*
660 				 * The state should never go backwards except
661 				 * for syn-proxy states.  Neither should the
662 				 * sequence window slide backwards.
663 				 */
664 				if (st->src.state > up->src.state &&
665 				    (st->src.state < PF_TCPS_PROXY_SRC ||
666 				    up->src.state >= PF_TCPS_PROXY_SRC))
667 					sfail = 1;
668 				else if (st->dst.state > up->dst.state)
669 					sfail = 2;
670 				else if (SEQ_GT(st->src.seqlo,
671 				    ntohl(up->src.seqlo)))
672 					sfail = 3;
673 				else if (st->dst.state >= TCPS_SYN_SENT &&
674 				    SEQ_GT(st->dst.seqlo, ntohl(up->dst.seqlo)))
675 					sfail = 4;
676 			} else {
677 				/*
678 				 * Non-TCP protocol state machines always go
679 				 * forwards.
680 				 */
681 				if (st->src.state > up->src.state)
682 					sfail = 5;
683 				else if (st->dst.state > up->dst.state)
684 					sfail = 6;
685 			}
686 			if (sfail) {
687 				if (pf_status.debug >= PF_DEBUG_MISC)
688 					kprintf("pfsync: ignoring stale update "
689 					    "(%d) id: %016jx "
690 					    "creatorid: %08x\n", sfail,
691 					    (uintmax_t)be64toh(st->id),
692 					    ntohl(st->creatorid));
693 				pfsyncstats.pfsyncs_badstate++;
694 
695 				/* we have a better state, send it out */
696 				if ((!stale || update_requested) &&
697 				    sc->sc_mbuf != NULL) {
698 					pfsync_sendout(sc);
699 					update_requested = 0;
700 				}
701 				stale++;
702 				if (!st->sync_flags)
703 					pfsync_pack_state(PFSYNC_ACT_UPD, st,
704 					    PFSYNC_FLAG_STALE);
705 				continue;
706 			}
707 			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
708 			pf_state_peer_ntoh(&up->src, &st->src);
709 			pf_state_peer_ntoh(&up->dst, &st->dst);
710 			st->expire = ntohl(up->expire) + time_second;
711 			st->timeout = up->timeout;
712 		}
713 		if ((update_requested || stale) && sc->sc_mbuf)
714 			pfsync_sendout(sc);
715 		crit_exit();
716 		break;
717 	}
718 	case PFSYNC_ACT_DEL_C:
719 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
720 		    count * sizeof(*dp), &offp)) == NULL) {
721 			pfsyncstats.pfsyncs_badlen++;
722 			return;
723 		}
724 
725 		crit_enter();
726 		for (i = 0, dp = (struct pfsync_state_del *)(mp->m_data + offp);
727 		    i < count; i++, dp++) {
728 			bcopy(dp->id, &id_key.id, sizeof(id_key.id));
729 			id_key.creatorid = dp->creatorid;
730 
731 			st = pf_find_state_byid(&id_key);
732 			if (st == NULL) {
733 				pfsyncstats.pfsyncs_badstate++;
734 				continue;
735 			}
736 			st->sync_flags |= PFSTATE_FROMSYNC;
737 			pf_unlink_state(st);
738 		}
739 		crit_exit();
740 		break;
741 	case PFSYNC_ACT_INS_F:
742 	case PFSYNC_ACT_DEL_F:
743 		/* not implemented */
744 		break;
745 	case PFSYNC_ACT_UREQ:
746 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
747 		    count * sizeof(*rup), &offp)) == NULL) {
748 			pfsyncstats.pfsyncs_badlen++;
749 			return;
750 		}
751 
752 		crit_enter();
753 		if (sc->sc_mbuf != NULL)
754 			pfsync_sendout(sc);
755 		for (i = 0,
756 		    rup = (struct pfsync_state_upd_req *)(mp->m_data + offp);
757 		    i < count; i++, rup++) {
758 			bcopy(rup->id, &id_key.id, sizeof(id_key.id));
759 			id_key.creatorid = rup->creatorid;
760 
761 			if (id_key.id == 0 && id_key.creatorid == 0) {
762 				sc->sc_ureq_received = mycpu->gd_time_seconds;
763 				if (sc->sc_bulk_send_next == NULL)
764 					sc->sc_bulk_send_next =
765 					    TAILQ_FIRST(&state_list);
766 				sc->sc_bulk_terminator = sc->sc_bulk_send_next;
767 				if (pf_status.debug >= PF_DEBUG_MISC)
768 					kprintf("pfsync: received "
769 					    "bulk update request\n");
770 				pfsync_send_bus(sc, PFSYNC_BUS_START);
771 				lwkt_reltoken(&pf_token);
772 				callout_reset(&sc->sc_bulk_tmo, 1 * hz,
773 				    pfsync_bulk_update,
774 				    LIST_FIRST(&pfsync_list));
775 				lwkt_gettoken(&pf_token);
776 			} else {
777 				st = pf_find_state_byid(&id_key);
778 				if (st == NULL) {
779 					pfsyncstats.pfsyncs_badstate++;
780 					continue;
781 				}
782 				if (!st->sync_flags)
783 					pfsync_pack_state(PFSYNC_ACT_UPD,
784 					    st, 0);
785 			}
786 		}
787 		if (sc->sc_mbuf != NULL)
788 			pfsync_sendout(sc);
789 		crit_exit();
790 		break;
791 	case PFSYNC_ACT_BUS:
792 		/* If we're not waiting for a bulk update, who cares. */
793 		if (sc->sc_ureq_sent == 0)
794 			break;
795 
796 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
797 		    sizeof(*bus), &offp)) == NULL) {
798 			pfsyncstats.pfsyncs_badlen++;
799 			return;
800 		}
801 		bus = (struct pfsync_state_bus *)(mp->m_data + offp);
802 		switch (bus->status) {
803 		case PFSYNC_BUS_START:
804 			lwkt_reltoken(&pf_token);
805 			callout_reset(&sc->sc_bulkfail_tmo,
806 			    pf_pool_limits[PF_LIMIT_STATES].limit /
807 			    (PFSYNC_BULKPACKETS * sc->sc_maxcount),
808 			    pfsync_bulkfail, LIST_FIRST(&pfsync_list));
809 			lwkt_gettoken(&pf_token);
810 			if (pf_status.debug >= PF_DEBUG_MISC)
811 				kprintf("pfsync: received bulk "
812 				    "update start\n");
813 			break;
814 		case PFSYNC_BUS_END:
815 			if (mycpu->gd_time_seconds - ntohl(bus->endtime) >=
816 			    sc->sc_ureq_sent) {
817 				/* that's it, we're happy */
818 				sc->sc_ureq_sent = 0;
819 				sc->sc_bulk_tries = 0;
820 				lwkt_reltoken(&pf_token);
821 				callout_stop(&sc->sc_bulkfail_tmo);
822 				lwkt_gettoken(&pf_token);
823 #if NCARP > 0
824 				if (!pfsync_sync_ok) {
825 					lwkt_reltoken(&pf_token);
826 					carp_group_demote_adj(&sc->sc_if, -1);
827 					lwkt_gettoken(&pf_token);
828 				}
829 #endif
830 				pfsync_sync_ok = 1;
831 				if (pf_status.debug >= PF_DEBUG_MISC)
832 					kprintf("pfsync: received valid "
833 					    "bulk update end\n");
834 			} else {
835 				if (pf_status.debug >= PF_DEBUG_MISC)
836 					kprintf("pfsync: received invalid "
837 					    "bulk update end: bad timestamp\n");
838 			}
839 			break;
840 		}
841 		break;
842 #ifdef IPSEC
843 	case PFSYNC_ACT_TDB_UPD:
844 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
845 		    count * sizeof(*pt), &offp)) == NULL) {
846 			pfsyncstats.pfsyncs_badlen++;
847 			return;
848 		}
849 		crit_enter();
850 		for (i = 0, pt = (struct pfsync_tdb *)(mp->m_data + offp);
851 		    i < count; i++, pt++)
852 			pfsync_update_net_tdb(pt);
853 		crit_exit();
854 		break;
855 #endif
856 	}
857 
858 done:
859 	if (m)
860 		m_freem(m);
861 }
862 
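/*
 * pfsync interfaces never transmit packets handed to if_output; just
 * free the mbuf.
 */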
863 int
864 pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
865 	struct rtentry *rt)
866 {
867 	m_freem(m);
868 	return (0);
869 }
870 
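/*
 * Handle interface ioctls: track IFF_RUNNING, enforce the pfsync MTU
 * limits, and get/set the sync device, sync peer and maximum update
 * count via SIOCGETPFSYNC/SIOCSETPFSYNC.
 */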
871 /* ARGSUSED */
872 int
873 pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
874 {
875 	struct pfsync_softc *sc = ifp->if_softc;
876 	struct ifreq *ifr = (struct ifreq *)data;
877 	struct ip_moptions *imo = &sc->sc_imo;
878 	struct pfsyncreq pfsyncr;
879 	struct ifnet    *sifp;
880 	int error;
881 
882 	lwkt_gettoken(&pf_token);
883 
884 	switch (cmd) {
885 	case SIOCSIFADDR:
886 	case SIOCAIFADDR:
887 	case SIOCSIFDSTADDR:
888 	case SIOCSIFFLAGS:
889 		if (ifp->if_flags & IFF_UP)
890 			ifp->if_flags |= IFF_RUNNING;
891 		else
892 			ifp->if_flags &= ~IFF_RUNNING;
893 		break;
894 	case SIOCSIFMTU:
895 		if (ifr->ifr_mtu < PFSYNC_MINMTU) {
896 			lwkt_reltoken(&pf_token);
897 			return (EINVAL);
898 		}
899 		if (ifr->ifr_mtu > MCLBYTES)
900 			ifr->ifr_mtu = MCLBYTES;
901 		crit_enter();
902 		if (ifr->ifr_mtu < ifp->if_mtu)
903 			pfsync_sendout(sc);
904 		pfsync_setmtu(sc, ifr->ifr_mtu);
905 		crit_exit();
906 		break;
907 	case SIOCGETPFSYNC:
908 		bzero(&pfsyncr, sizeof(pfsyncr));
909 		if (sc->sc_sync_ifp)
910 			strlcpy(pfsyncr.pfsyncr_syncdev,
911 			    sc->sc_sync_ifp->if_xname, IFNAMSIZ);
912 		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
913 		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
914 		lwkt_reltoken(&pf_token);
915 		if ((error = copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr))))
916 			return (error);
917 		lwkt_gettoken(&pf_token);
918 		break;
919 	case SIOCSETPFSYNC:
920 		if ((error = priv_check_cred(cr, PRIV_ROOT, NULL_CRED_OKAY)) != 0) {
921 			lwkt_reltoken(&pf_token);
922 			return (error);
923 		}
924 		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr)))) {
925 			lwkt_reltoken(&pf_token);
926 			return (error);
927 		}
928 
929 		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
930 			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
931 		else
932 			sc->sc_sync_peer.s_addr =
933 			    pfsyncr.pfsyncr_syncpeer.s_addr;
934 
935 		if (pfsyncr.pfsyncr_maxupdates > 255) {
936 			lwkt_reltoken(&pf_token);
937 			return (EINVAL);
938 		}
939 		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
940 
941 		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
942 			sc->sc_sync_ifp = NULL;
943 			if (sc->sc_mbuf_net != NULL) {
944 				/* Don't keep stale pfsync packets around. */
945 				crit_enter();
946 				m_freem(sc->sc_mbuf_net);
947 				sc->sc_mbuf_net = NULL;
948 				sc->sc_statep_net.s = NULL;
949 				crit_exit();
950 			}
951 			if (imo->imo_num_memberships > 0) {
952 				in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
953 				imo->imo_multicast_ifp = NULL;
954 			}
955 			break;
956 		}
957 
958 		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL) {
959 			lwkt_reltoken(&pf_token);
960 			return (EINVAL);
961 		}
962 
963 		crit_enter();
964 		if (sifp->if_mtu < sc->sc_if.if_mtu ||
965 		    (sc->sc_sync_ifp != NULL &&
966 		    sifp->if_mtu < sc->sc_sync_ifp->if_mtu) ||
967 		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
968 			pfsync_sendout(sc);
969 		sc->sc_sync_ifp = sifp;
970 
971 		pfsync_setmtu(sc, sc->sc_if.if_mtu);
972 
973 		if (imo->imo_num_memberships > 0) {
974 			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
975 			imo->imo_multicast_ifp = NULL;
976 		}
977 
978 		if (sc->sc_sync_ifp &&
979 		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
980 			struct in_addr addr;
981 
982 			if (!(sc->sc_sync_ifp->if_flags & IFF_MULTICAST)) {
983 				sc->sc_sync_ifp = NULL;
984 				lwkt_reltoken(&pf_token);
985 				crit_exit();
986 				return (EADDRNOTAVAIL);
987 			}
988 
989 			addr.s_addr = INADDR_PFSYNC_GROUP;
990 
991 			if ((imo->imo_membership[0] =
992 			    in_addmulti(&addr, sc->sc_sync_ifp)) == NULL) {
993 				sc->sc_sync_ifp = NULL;
994 				lwkt_reltoken(&pf_token);
995 				crit_exit();
996 				return (ENOBUFS);
997 			}
998 			imo->imo_num_memberships++;
999 			imo->imo_multicast_ifp = sc->sc_sync_ifp;
1000 			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
1001 			imo->imo_multicast_loop = 0;
1002 		}
1003 
1004 		if (sc->sc_sync_ifp ||
1005 		    sc->sc_sendaddr.s_addr != INADDR_PFSYNC_GROUP) {
1006 			/* Request a full state table update. */
1007 			sc->sc_ureq_sent = mycpu->gd_time_seconds;
1008 #if NCARP > 0
1009 			if (pfsync_sync_ok)
1010 				carp_group_demote_adj(&sc->sc_if, 1);
1011 #endif
1012 			pfsync_sync_ok = 0;
1013 			if (pf_status.debug >= PF_DEBUG_MISC)
1014 				kprintf("pfsync: requesting bulk update\n");
1015 			lwkt_reltoken(&pf_token);
1016 			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
1017 			    pfsync_bulkfail, LIST_FIRST(&pfsync_list));
1018 			lwkt_gettoken(&pf_token);
1019 			error = pfsync_request_update(NULL, NULL);
1020 			if (error == ENOMEM) {
1021 				lwkt_reltoken(&pf_token);
1022 				crit_exit();
1023 				return (ENOMEM);
1024 			}
1025 			pfsync_sendout(sc);
1026 		}
1027 		crit_exit();
1028 
1029 		break;
1030 
1031 	default:
1032 		lwkt_reltoken(&pf_token);
1033 		return (ENOTTY);
1034 	}
1035 
1036 	lwkt_reltoken(&pf_token);
1037 	return (0);
1038 }
1039 
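/*
 * Size the interface MTU so a packet holds a whole number of state
 * entries (at most 254), clamped to the sync interface MTU if that is
 * smaller than the requested value.
 */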
1040 void
1041 pfsync_setmtu(struct pfsync_softc *sc, int mtu_req)
1042 {
1043 	int mtu;
1044 
1045 	if (sc->sc_sync_ifp && sc->sc_sync_ifp->if_mtu < mtu_req)
1046 		mtu = sc->sc_sync_ifp->if_mtu;
1047 	else
1048 		mtu = mtu_req;
1049 
1050 	sc->sc_maxcount = (mtu - sizeof(struct pfsync_header)) /
1051 	    sizeof(struct pfsync_state);
1052 	if (sc->sc_maxcount > 254)
1053 		sc->sc_maxcount = 254;
1054 	sc->sc_if.if_mtu = sizeof(struct pfsync_header) +
1055 	    sc->sc_maxcount * sizeof(struct pfsync_state);
1056 }
1057 
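/*
 * Allocate an mbuf sized for the given action, fill in the pfsync header,
 * point *sp just past it and arm the transmit hold timer.
 */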
1058 struct mbuf *
1059 pfsync_get_mbuf(struct pfsync_softc *sc, u_int8_t action, void **sp)
1060 {
1061 	struct pfsync_header *h;
1062 	struct mbuf *m;
1063 	int len;
1064 
1065 	ASSERT_LWKT_TOKEN_HELD(&pf_token);
1066 
1067 	MGETHDR(m, M_WAITOK, MT_DATA);
1068 	if (m == NULL) {
1069 		sc->sc_if.if_oerrors++;
1070 		return (NULL);
1071 	}
1072 
1073 	switch (action) {
1074 	case PFSYNC_ACT_CLR:
1075 		len = sizeof(struct pfsync_header) +
1076 		    sizeof(struct pfsync_state_clr);
1077 		break;
1078 	case PFSYNC_ACT_UPD_C:
1079 		len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd)) +
1080 		    sizeof(struct pfsync_header);
1081 		break;
1082 	case PFSYNC_ACT_DEL_C:
1083 		len = (sc->sc_maxcount * sizeof(struct pfsync_state_del)) +
1084 		    sizeof(struct pfsync_header);
1085 		break;
1086 	case PFSYNC_ACT_UREQ:
1087 		len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd_req)) +
1088 		    sizeof(struct pfsync_header);
1089 		break;
1090 	case PFSYNC_ACT_BUS:
1091 		len = sizeof(struct pfsync_header) +
1092 		    sizeof(struct pfsync_state_bus);
1093 		break;
1094 	case PFSYNC_ACT_TDB_UPD:
1095 		len = (sc->sc_maxcount * sizeof(struct pfsync_tdb)) +
1096 		    sizeof(struct pfsync_header);
1097 		break;
1098 	default:
1099 		len = (sc->sc_maxcount * sizeof(struct pfsync_state)) +
1100 		    sizeof(struct pfsync_header);
1101 		break;
1102 	}
1103 
1104 	if (len > MHLEN) {
1105 		MCLGET(m, M_WAITOK);
1106 		if ((m->m_flags & M_EXT) == 0) {
1107 			m_free(m);
1108 			sc->sc_if.if_oerrors++;
1109 			return (NULL);
1110 		}
1111 		m->m_data += (MCLBYTES - len) &~ (sizeof(long) - 1);
1112 	} else
1113 		MH_ALIGN(m, len);
1114 
1115 	m->m_pkthdr.rcvif = NULL;
1116 	m->m_pkthdr.len = m->m_len = sizeof(struct pfsync_header);
1117 	h = mtod(m, struct pfsync_header *);
1118 	h->version = PFSYNC_VERSION;
1119 	h->af = 0;
1120 	h->count = 0;
1121 	h->action = action;
1122 
1123 	*sp = (void *)((char *)h + PFSYNC_HDRLEN);
1124 	lwkt_reltoken(&pf_token);
1125 	callout_reset(&sc->sc_tmo, hz, pfsync_timeout,
1126 	    LIST_FIRST(&pfsync_list));
1127 	lwkt_gettoken(&pf_token);
1128 	return (m);
1129 }
1130 
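/*
 * Append a state to the pending pfsync packet: start a new packet if the
 * action changed, merge repeated updates for the same state, optionally
 * build the compressed on-wire variant, and flush when the packet is full
 * or the update limit is reached.
 */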
1131 int
1132 pfsync_pack_state(u_int8_t action, struct pf_state *st, int flags)
1133 {
1134 	struct ifnet *ifp = NULL;
1135 	struct pfsync_softc *sc = pfsyncif;
1136 	struct pfsync_header *h, *h_net;
1137 	struct pfsync_state *sp = NULL;
1138 	struct pfsync_state_upd *up = NULL;
1139 	struct pfsync_state_del *dp = NULL;
1140 	struct pf_state_key *sk = st->state_key;
1141 	struct pf_rule *r;
1142 	u_long secs;
1143 	int ret = 0;
1144 	u_int8_t i = 255, newaction = 0;
1145 
1146 	if (sc == NULL)
1147 		return (0);
1148 	ifp = &sc->sc_if;
1149 
1150 	/*
1151 	 * If a packet falls in the forest and there's nobody around to
1152 	 * hear, does it make a sound?
1153 	 */
1154 	if (ifp->if_bpf == NULL && sc->sc_sync_ifp == NULL &&
1155 	    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
1156 		/* Don't leave any stale pfsync packets hanging around. */
1157 		if (sc->sc_mbuf != NULL) {
1158 			m_freem(sc->sc_mbuf);
1159 			sc->sc_mbuf = NULL;
1160 			sc->sc_statep.s = NULL;
1161 		}
1162 		return (0);
1163 	}
1164 
1165 	if (action >= PFSYNC_ACT_MAX)
1166 		return (EINVAL);
1167 
1168 	crit_enter();
1169 	if (sc->sc_mbuf == NULL) {
1170 		if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
1171 		    (void *)&sc->sc_statep.s)) == NULL) {
1172 			crit_exit();
1173 			return (ENOMEM);
1174 		}
1175 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1176 	} else {
1177 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1178 		if (h->action != action) {
1179 			pfsync_sendout(sc);
1180 			if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
1181 			    (void *)&sc->sc_statep.s)) == NULL) {
1182 				crit_exit();
1183 				return (ENOMEM);
1184 			}
1185 			h = mtod(sc->sc_mbuf, struct pfsync_header *);
1186 		} else {
1187 			/*
1188 			 * If it's an update, look in the packet to see if
1189 			 * we already have an update for the state.
1190 			 */
1191 			if (action == PFSYNC_ACT_UPD && sc->sc_maxupdates) {
1192 				struct pfsync_state *usp =
1193 				    (void *)((char *)h + PFSYNC_HDRLEN);
1194 
1195 				for (i = 0; i < h->count; i++) {
1196 					if (!memcmp(usp->id, &st->id,
1197 					    PFSYNC_ID_LEN) &&
1198 					    usp->creatorid == st->creatorid) {
1199 						sp = usp;
1200 						sp->updates++;
1201 						break;
1202 					}
1203 					usp++;
1204 				}
1205 			}
1206 		}
1207 	}
1208 
1209 	secs = time_second;
1210 
1211 	st->pfsync_time = mycpu->gd_time_seconds;
1212 
1213 	if (sp == NULL) {
1214 		/* not a "duplicate" update */
1215 		i = 255;
1216 		sp = sc->sc_statep.s++;
1217 		sc->sc_mbuf->m_pkthdr.len =
1218 		    sc->sc_mbuf->m_len += sizeof(struct pfsync_state);
1219 		h->count++;
1220 		bzero(sp, sizeof(*sp));
1221 
1222 		bcopy(&st->id, sp->id, sizeof(sp->id));
1223 		sp->creatorid = st->creatorid;
1224 
1225 		strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
1226 		pf_state_host_hton(&sk->lan, &sp->lan);
1227 		pf_state_host_hton(&sk->gwy, &sp->gwy);
1228 		pf_state_host_hton(&sk->ext, &sp->ext);
1229 
1230 		bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
1231 
1232 		sp->creation = htonl(secs - st->creation);
1233 		pf_state_counter_hton(st->packets[0], sp->packets[0]);
1234 		pf_state_counter_hton(st->packets[1], sp->packets[1]);
1235 		pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
1236 		pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
1237 		if ((r = st->rule.ptr) == NULL)
1238 			sp->rule = htonl(-1);
1239 		else
1240 			sp->rule = htonl(r->nr);
1241 		if ((r = st->anchor.ptr) == NULL)
1242 			sp->anchor = htonl(-1);
1243 		else
1244 			sp->anchor = htonl(r->nr);
1245 		sp->af = sk->af;
1246 		sp->proto = sk->proto;
1247 		sp->direction = sk->direction;
1248 		sp->log = st->log;
1249 		sp->allow_opts = st->allow_opts;
1250 		sp->timeout = st->timeout;
1251 
1252 		if (flags & PFSYNC_FLAG_STALE)
1253 			sp->sync_flags |= PFSTATE_STALE;
1254 	}
1255 
1256 	pf_state_peer_hton(&st->src, &sp->src);
1257 	pf_state_peer_hton(&st->dst, &sp->dst);
1258 
1259 	if (st->expire <= secs)
1260 		sp->expire = htonl(0);
1261 	else
1262 		sp->expire = htonl(st->expire - secs);
1263 
1264 	/* do we need to build "compressed" actions for network transfer? */
1265 	if (sc->sc_sync_ifp && flags & PFSYNC_FLAG_COMPRESS) {
1266 		switch (action) {
1267 		case PFSYNC_ACT_UPD:
1268 			newaction = PFSYNC_ACT_UPD_C;
1269 			break;
1270 		case PFSYNC_ACT_DEL:
1271 			newaction = PFSYNC_ACT_DEL_C;
1272 			break;
1273 		default:
1274 			/* by default we just send the uncompressed states */
1275 			break;
1276 		}
1277 	}
1278 
1279 	if (newaction) {
1280 		if (sc->sc_mbuf_net == NULL) {
1281 			if ((sc->sc_mbuf_net = pfsync_get_mbuf(sc, newaction,
1282 			    (void *)&sc->sc_statep_net.s)) == NULL) {
1283 				crit_exit();
1284 				return (ENOMEM);
1285 			}
1286 		}
1287 		h_net = mtod(sc->sc_mbuf_net, struct pfsync_header *);
1288 
1289 		switch (newaction) {
1290 		case PFSYNC_ACT_UPD_C:
1291 			if (i != 255) {
1292 				up = (void *)((char *)h_net +
1293 				    PFSYNC_HDRLEN + (i * sizeof(*up)));
1294 				up->updates++;
1295 			} else {
1296 				h_net->count++;
1297 				sc->sc_mbuf_net->m_pkthdr.len =
1298 				    sc->sc_mbuf_net->m_len += sizeof(*up);
1299 				up = sc->sc_statep_net.u++;
1300 
1301 				bzero(up, sizeof(*up));
1302 				bcopy(&st->id, up->id, sizeof(up->id));
1303 				up->creatorid = st->creatorid;
1304 			}
1305 			up->timeout = st->timeout;
1306 			up->expire = sp->expire;
1307 			up->src = sp->src;
1308 			up->dst = sp->dst;
1309 			break;
1310 		case PFSYNC_ACT_DEL_C:
1311 			sc->sc_mbuf_net->m_pkthdr.len =
1312 			    sc->sc_mbuf_net->m_len += sizeof(*dp);
1313 			dp = sc->sc_statep_net.d++;
1314 			h_net->count++;
1315 
1316 			bzero(dp, sizeof(*dp));
1317 			bcopy(&st->id, dp->id, sizeof(dp->id));
1318 			dp->creatorid = st->creatorid;
1319 			break;
1320 		}
1321 	}
1322 
1323 	if (h->count == sc->sc_maxcount ||
1324 	    (sc->sc_maxupdates && (sp->updates >= sc->sc_maxupdates)))
1325 		ret = pfsync_sendout(sc);
1326 
1327 	crit_exit();
1328 	return (ret);
1329 }
1330 
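/*
 * Queue an update request for a single state; with a NULL argument the
 * request is zeroed, which peers interpret as asking for a full bulk
 * update.
 */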
1331 int
1332 pfsync_request_update(struct pfsync_state_upd *up, struct in_addr *src)
1333 {
1334 	struct ifnet *ifp = NULL;
1335 	struct pfsync_header *h;
1336 	struct pfsync_softc *sc = pfsyncif;
1337 	struct pfsync_state_upd_req *rup;
1338 	int ret = 0;
1339 
1340 	if (sc == NULL)
1341 		return (0);
1342 
1343 	ifp = &sc->sc_if;
1344 	if (sc->sc_mbuf == NULL) {
1345 		if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
1346 		    (void *)&sc->sc_statep.s)) == NULL)
1347 			return (ENOMEM);
1348 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1349 	} else {
1350 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1351 		if (h->action != PFSYNC_ACT_UREQ) {
1352 			pfsync_sendout(sc);
1353 			if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
1354 			    (void *)&sc->sc_statep.s)) == NULL)
1355 				return (ENOMEM);
1356 			h = mtod(sc->sc_mbuf, struct pfsync_header *);
1357 		}
1358 	}
1359 
1360 	if (src != NULL)
1361 		sc->sc_sendaddr = *src;
1362 	sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*rup);
1363 	h->count++;
1364 	rup = sc->sc_statep.r++;
1365 	bzero(rup, sizeof(*rup));
1366 	if (up != NULL) {
1367 		bcopy(up->id, rup->id, sizeof(rup->id));
1368 		rup->creatorid = up->creatorid;
1369 	}
1370 
1371 	if (h->count == sc->sc_maxcount)
1372 		ret = pfsync_sendout(sc);
1373 
1374 	return (ret);
1375 }
1376 
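/*
 * Announce a "clear states" message for the given creator id, optionally
 * limited to states on the named interface.
 */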
1377 int
1378 pfsync_clear_states(u_int32_t creatorid, char *ifname)
1379 {
1380 	struct ifnet *ifp = NULL;
1381 	struct pfsync_softc *sc = pfsyncif;
1382 	struct pfsync_state_clr *cp;
1383 	int ret;
1384 
1385 	if (sc == NULL)
1386 		return (0);
1387 
1388 	ifp = &sc->sc_if;
1389 	crit_enter();
1390 	if (sc->sc_mbuf != NULL)
1391 		pfsync_sendout(sc);
1392 	if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_CLR,
1393 	    (void *)&sc->sc_statep.c)) == NULL) {
1394 		crit_exit();
1395 		return (ENOMEM);
1396 	}
1397 	sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*cp);
1398 	cp = sc->sc_statep.c;
1399 	cp->creatorid = creatorid;
1400 	if (ifname != NULL)
1401 		strlcpy(cp->ifname, ifname, IFNAMSIZ);
1402 
1403 	ret = pfsync_sendout(sc);
1404 	crit_exit();
1405 	return (ret);
1406 }
1407 
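/*
 * Hold timer expired: flush whatever updates are pending in sc_mbuf.
 */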
1408 void
1409 pfsync_timeout(void *v)
1410 {
1411 	struct pfsync_softc *sc = v;
1412 
1413 	crit_enter();
1414 	pfsync_sendout(sc);
1415 	crit_exit();
1416 }
1417 
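/*
 * Send a bulk update status (start/end) message, but only while our own
 * state table is believed to be in sync.
 */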
1418 void
1419 pfsync_send_bus(struct pfsync_softc *sc, u_int8_t status)
1420 {
1421 	struct pfsync_state_bus *bus;
1422 
1423 	if (sc->sc_mbuf != NULL)
1424 		pfsync_sendout(sc);
1425 
1426 	if (pfsync_sync_ok &&
1427 	    (sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_BUS,
1428 	    (void *)&sc->sc_statep.b)) != NULL) {
1429 		sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*bus);
1430 		bus = sc->sc_statep.b;
1431 		bus->creatorid = pf_status.hostid;
1432 		bus->status = status;
1433 		bus->endtime = htonl(mycpu->gd_time_seconds - sc->sc_ureq_received);
1434 		pfsync_sendout(sc);
1435 	}
1436 }
1437 
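/*
 * Callout handler for an ongoing bulk update: send up to
 * sc_maxcount * PFSYNC_BULKPACKETS states per tick, skipping states
 * already refreshed since the request, and finish with a bulk-end
 * message once the terminator state is reached again.
 */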
1438 void
1439 pfsync_bulk_update(void *v)
1440 {
1441 	struct pfsync_softc *sc = v;
1442 	int i = 0;
1443 	struct pf_state *state;
1444 
1445 	ASSERT_LWKT_TOKEN_HELD(&pf_token);
1446 
1447 	crit_enter();
1448 	if (sc->sc_mbuf != NULL)
1449 		pfsync_sendout(sc);
1450 
1451 	/*
1452 	 * Grab at most PFSYNC_BULKPACKETS worth of states which have not
1453 	 * been sent since the latest request was made.
1454 	 */
1455 	state = sc->sc_bulk_send_next;
1456 	if (state)
1457 		do {
1458 			/* send state update if syncable and not already sent */
1459 			if (!state->sync_flags
1460 			    && state->timeout < PFTM_MAX
1461 			    && state->pfsync_time <= sc->sc_ureq_received) {
1462 				pfsync_pack_state(PFSYNC_ACT_UPD, state, 0);
1463 				i++;
1464 			}
1465 
1466 			/* figure next state to send */
1467 			state = TAILQ_NEXT(state, entry_list);
1468 
1469 			/* wrap to start of list if we hit the end */
1470 			if (!state)
1471 				state = TAILQ_FIRST(&state_list);
1472 		} while (i < sc->sc_maxcount * PFSYNC_BULKPACKETS &&
1473 		    state != sc->sc_bulk_terminator);
1474 
1475 	if (!state || state == sc->sc_bulk_terminator) {
1476 		/* we're done */
1477 		pfsync_send_bus(sc, PFSYNC_BUS_END);
1478 		sc->sc_ureq_received = 0;
1479 		sc->sc_bulk_send_next = NULL;
1480 		sc->sc_bulk_terminator = NULL;
1481 		lwkt_reltoken(&pf_token);
1482 		callout_stop(&sc->sc_bulk_tmo);
1483 		lwkt_gettoken(&pf_token);
1484 		if (pf_status.debug >= PF_DEBUG_MISC)
1485 			kprintf("pfsync: bulk update complete\n");
1486 	} else {
1487 		/* look again for more in a bit */
1488 		lwkt_reltoken(&pf_token);
1489 		callout_reset(&sc->sc_bulk_tmo, 1, pfsync_timeout,
1490 			    LIST_FIRST(&pfsync_list));
1491 		lwkt_gettoken(&pf_token);
1492 		sc->sc_bulk_send_next = state;
1493 	}
1494 	if (sc->sc_mbuf != NULL)
1495 		pfsync_sendout(sc);
1496 	crit_exit();
1497 }
1498 
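/*
 * Callout handler fired when a requested bulk update did not complete in
 * time: retry up to PFSYNC_MAX_BULKTRIES times, then give up and pretend
 * the transfer succeeded.
 */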
1499 void
1500 pfsync_bulkfail(void *v)
1501 {
1502 	struct pfsync_softc *sc = v;
1503 	int error;
1504 
1505 	ASSERT_LWKT_TOKEN_HELD(&pf_token);
1506 
1507 	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
1508 		/* Try again in a bit */
1509 		lwkt_reltoken(&pf_token);
1510 		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulkfail,
1511 		    LIST_FIRST(&pfsync_list));
1512 		lwkt_gettoken(&pf_token);
1513 		crit_enter();
1514 		error = pfsync_request_update(NULL, NULL);
1515 		if (error == ENOMEM) {
1516 			if (pf_status.debug >= PF_DEBUG_MISC)
1517 				kprintf("pfsync: cannot allocate mbufs for "
1518 				    "bulk update\n");
1519 		} else
1520 			pfsync_sendout(sc);
1521 		crit_exit();
1522 	} else {
1523 		/* Pretend like the transfer was ok */
1524 		sc->sc_ureq_sent = 0;
1525 		sc->sc_bulk_tries = 0;
1526 #if NCARP > 0
1527 		if (!pfsync_sync_ok)
1528 			carp_group_demote_adj(&sc->sc_if, -1);
1529 #endif
1530 		pfsync_sync_ok = 1;
1531 		if (pf_status.debug >= PF_DEBUG_MISC)
1532 			kprintf("pfsync: failed to receive "
1533 			    "bulk update status\n");
1534 		lwkt_reltoken(&pf_token);
1535 		callout_stop(&sc->sc_bulkfail_tmo);
1536 		lwkt_gettoken(&pf_token);
1537 	}
1538 }
1539 
1540 /* This must be called in splnet() */
1541 int
1542 pfsync_sendout(struct pfsync_softc *sc)
1543 {
1544 #if NBPFILTER > 0
1545 	struct ifnet *ifp = &sc->sc_if;
1546 #endif
1547 	struct mbuf *m;
1548 
1549 	ASSERT_LWKT_TOKEN_HELD(&pf_token);
1550 
1551 	lwkt_reltoken(&pf_token);
1552 	callout_stop(&sc->sc_tmo);
1553 	lwkt_gettoken(&pf_token);
1554 
1555 	if (sc->sc_mbuf == NULL)
1556 		return (0);
1557 	m = sc->sc_mbuf;
1558 	sc->sc_mbuf = NULL;
1559 	sc->sc_statep.s = NULL;
1560 
1561 #if NBPFILTER > 0
1562 	if (ifp->if_bpf) {
1563 		lwkt_reltoken(&pf_token);
1564 		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1565 		lwkt_gettoken(&pf_token);
1566 	}
1567 #endif
1568 
1569 	if (sc->sc_mbuf_net) {
1570 		m_freem(m);
1571 		m = sc->sc_mbuf_net;
1572 		sc->sc_mbuf_net = NULL;
1573 		sc->sc_statep_net.s = NULL;
1574 	}
1575 
1576 	return pfsync_sendout_mbuf(sc, m);
1577 }
1578 
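/*
 * Prepend an IP header and transmit the packet to the sync peer or
 * multicast group; if neither a sync interface nor a unicast sync peer
 * is configured the packet is simply freed.
 */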
1579 int
1580 pfsync_sendout_mbuf(struct pfsync_softc *sc, struct mbuf *m)
1581 {
1582 	struct sockaddr sa;
1583 	struct ip *ip;
1584 
1585 	if (sc->sc_sync_ifp ||
1586 	    sc->sc_sync_peer.s_addr != INADDR_PFSYNC_GROUP) {
1587 		M_PREPEND(m, sizeof(struct ip), M_WAITOK);
1588 		if (m == NULL) {
1589 			pfsyncstats.pfsyncs_onomem++;
1590 			return (0);
1591 		}
1592 		ip = mtod(m, struct ip *);
1593 		ip->ip_v = IPVERSION;
1594 		ip->ip_hl = sizeof(*ip) >> 2;
1595 		ip->ip_tos = IPTOS_LOWDELAY;
1596 		ip->ip_len = htons(m->m_pkthdr.len);
1597 		ip->ip_id = htons(ip_randomid());
1598 		ip->ip_off = htons(IP_DF);
1599 		ip->ip_ttl = PFSYNC_DFLTTL;
1600 		ip->ip_p = IPPROTO_PFSYNC;
1601 		ip->ip_sum = 0;
1602 
1603 		bzero(&sa, sizeof(sa));
1604 		ip->ip_src.s_addr = INADDR_ANY;
1605 
1606 		if (sc->sc_sendaddr.s_addr == INADDR_PFSYNC_GROUP)
1607 			m->m_flags |= M_MCAST;
1608 		ip->ip_dst = sc->sc_sendaddr;
1609 		sc->sc_sendaddr.s_addr = sc->sc_sync_peer.s_addr;
1610 
1611 		pfsyncstats.pfsyncs_opackets++;
1612 
1613 		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL))
1614 			pfsyncstats.pfsyncs_oerrors++;
1615 	} else
1616 		m_freem(m);
1617 
1618 	return (0);
1619 }
1620 
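/*
 * Kernel module event handler: attach the interface cloner on load and
 * tear down all pfsync interfaces on unload.
 */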
1621 static int
1622 pfsync_modevent(module_t mod, int type, void *data)
1623 {
1624 	int error = 0;
1625 
1626 	lwkt_gettoken(&pf_token);
1627 
1628 	switch (type) {
1629 	case MOD_LOAD:
1630 		LIST_INIT(&pfsync_list);
1631 		lwkt_reltoken(&pf_token);
1632 		if_clone_attach(&pfsync_cloner);
1633 		lwkt_gettoken(&pf_token);
1634 		break;
1635 
1636 	case MOD_UNLOAD:
1637 		lwkt_reltoken(&pf_token);
1638 		if_clone_detach(&pfsync_cloner);
1639 		lwkt_gettoken(&pf_token);
1640 		while (!LIST_EMPTY(&pfsync_list))
1641 			pfsync_clone_destroy(
1642 				&LIST_FIRST(&pfsync_list)->sc_if);
1643 		break;
1644 
1645 	default:
1646 		error = EINVAL;
1647 		break;
1648 	}
1649 
1650 	lwkt_reltoken(&pf_token);
1651 	return error;
1652 }
1653 
1654 static moduledata_t pfsync_mod = {
1655 	"pfsync",
1656 	pfsync_modevent,
1657 	0
1658 };
1659 
1660 #define PFSYNC_MODVER 1
1661 
1662 DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
1663 MODULE_VERSION(pfsync, PFSYNC_MODVER);
1664 