1 /*	$NetBSD: if_pfsync.c,v 1.10 2014/03/06 15:21:58 nonaka Exp $	*/
2 /*	$OpenBSD: if_pfsync.c,v 1.83 2007/06/26 14:44:12 mcbride Exp $	*/
3 
4 /*
5  * Copyright (c) 2002 Michael Shalayeff
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
21  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
25  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
26  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: if_pfsync.c,v 1.10 2014/03/06 15:21:58 nonaka Exp $");
32 
33 #ifdef _KERNEL_OPT
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 #endif
37 
38 #include <sys/param.h>
39 #include <sys/proc.h>
40 #include <sys/systm.h>
41 #include <sys/time.h>
42 #include <sys/mbuf.h>
43 #include <sys/socket.h>
44 #include <sys/ioctl.h>
45 #include <sys/callout.h>
46 #include <sys/kernel.h>
47 
48 #include <net/if.h>
49 #include <net/if_types.h>
50 #include <net/route.h>
51 #include <net/bpf.h>
52 #include <netinet/in.h>
53 #ifndef __NetBSD__
54 #include <netinet/if_ether.h>
55 #else
56 #include <net/if_ether.h>
57 #endif /* __NetBSD__ */
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_seq.h>
60 
61 #ifdef	INET
62 #include <netinet/in_systm.h>
63 #include <netinet/in_var.h>
64 #include <netinet/ip.h>
65 #include <netinet/ip_var.h>
66 #endif
67 
68 #ifdef INET6
69 #include <netinet6/nd6.h>
70 #endif /* INET6 */
71 
72 #include "carp.h"
73 #if NCARP > 0
74 extern int carp_suppress_preempt;
75 #endif
76 
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 
80 #ifdef __NetBSD__
81 #include <sys/conf.h>
82 #include <sys/lwp.h>
83 #include <sys/kauth.h>
84 #include <sys/sysctl.h>
85 
86 #include <net/net_stats.h>
87 
88 percpu_t	*pfsyncstat_percpu;
89 
90 #define	PFSYNC_STATINC(x) _NET_STATINC(pfsyncstat_percpu, x)
91 #endif /* __NetBSD__ */
92 
93 #include "pfsync.h"
94 
95 #define PFSYNC_MINMTU	\
96     (sizeof(struct pfsync_header) + sizeof(struct pf_state))
97 
98 #ifdef PFSYNCDEBUG
99 #define DPRINTF(x)    do { if (pfsyncdebug) printf x ; } while (0)
100 int pfsyncdebug;
101 #else
102 #define DPRINTF(x)
103 #endif
104 
105 extern int ifqmaxlen; /* XXX */
106 
107 struct pfsync_softc	*pfsyncif = NULL;
108 
109 void	pfsyncattach(int);
110 int	pfsync_clone_create(struct if_clone *, int);
111 int	pfsync_clone_destroy(struct ifnet *);
112 void	pfsync_setmtu(struct pfsync_softc *, int);
113 int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
114 	    struct pf_state_peer *);
115 int	pfsync_insert_net_state(struct pfsync_state *, u_int8_t);
116 void	pfsync_update_net_tdb(struct pfsync_tdb *);
117 int	pfsyncoutput(struct ifnet *, struct mbuf *, const struct sockaddr *,
118 	    struct rtentry *);
119 int	pfsyncioctl(struct ifnet *, u_long, void *);
120 void	pfsyncstart(struct ifnet *);
121 
122 struct mbuf *pfsync_get_mbuf(struct pfsync_softc *, u_int8_t, void **);
123 int	pfsync_request_update(struct pfsync_state_upd *, struct in_addr *);
124 int	pfsync_sendout(struct pfsync_softc *);
125 int	pfsync_tdb_sendout(struct pfsync_softc *);
126 int	pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
127 void	pfsync_timeout(void *);
128 void	pfsync_tdb_timeout(void *);
129 void	pfsync_send_bus(struct pfsync_softc *, u_int8_t);
130 void	pfsync_bulk_update(void *);
131 void	pfsync_bulkfail(void *);
132 
133 int	pfsync_sync_ok;
134 
135 struct if_clone	pfsync_cloner =
136     IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
137 
138 void
139 pfsyncattach(int npfsync)
140 {
141 	if_clone_attach(&pfsync_cloner);
142 
143 	pfsyncstat_percpu = percpu_alloc(sizeof(uint64_t) * PFSYNC_NSTATS);
144 }
145 
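/*
 * Create the pfsync clone interface.  Only a single instance (pfsync0)
 * is supported; the softc is allocated here, the flush and bulk-update
 * callouts are initialized, and the interface is attached with a
 * DLT_PFSYNC bpf tap.
 */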
146 int
147 pfsync_clone_create(struct if_clone *ifc, int unit)
148 {
149 	struct ifnet *ifp;
150 
151 	if (unit != 0)
152 		return (EINVAL);
153 
154 	pfsync_sync_ok = 1;
155 	if ((pfsyncif = malloc(sizeof(*pfsyncif), M_DEVBUF, M_NOWAIT)) == NULL)
156 		return (ENOMEM);
157 	memset(pfsyncif, 0, sizeof(*pfsyncif));
158 	pfsyncif->sc_mbuf = NULL;
159 	pfsyncif->sc_mbuf_net = NULL;
160 	pfsyncif->sc_mbuf_tdb = NULL;
161 	pfsyncif->sc_statep.s = NULL;
162 	pfsyncif->sc_statep_net.s = NULL;
163 	pfsyncif->sc_statep_tdb.t = NULL;
164 	pfsyncif->sc_maxupdates = 128;
165 	pfsyncif->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
166 	pfsyncif->sc_sendaddr.s_addr = INADDR_PFSYNC_GROUP;
167 	pfsyncif->sc_ureq_received = 0;
168 	pfsyncif->sc_ureq_sent = 0;
169 	pfsyncif->sc_bulk_send_next = NULL;
170 	pfsyncif->sc_bulk_terminator = NULL;
171 	ifp = &pfsyncif->sc_if;
172 	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
173 	ifp->if_softc = pfsyncif;
174 	ifp->if_ioctl = pfsyncioctl;
175 	ifp->if_output = pfsyncoutput;
176 	ifp->if_start = pfsyncstart;
177 	ifp->if_type = IFT_PFSYNC;
178 	ifp->if_snd.ifq_maxlen = ifqmaxlen;
179 	ifp->if_hdrlen = PFSYNC_HDRLEN;
180 	pfsync_setmtu(pfsyncif, ETHERMTU);
181 
182 	callout_init(&pfsyncif->sc_tmo, 0);
183 	callout_init(&pfsyncif->sc_tdb_tmo, 0);
184 	callout_init(&pfsyncif->sc_bulk_tmo, 0);
185 	callout_init(&pfsyncif->sc_bulkfail_tmo, 0);
186 	callout_setfunc(&pfsyncif->sc_tmo, pfsync_timeout, pfsyncif);
187 	callout_setfunc(&pfsyncif->sc_tdb_tmo, pfsync_tdb_timeout, pfsyncif);
188 	callout_setfunc(&pfsyncif->sc_bulk_tmo, pfsync_bulk_update, pfsyncif);
189 	callout_setfunc(&pfsyncif->sc_bulkfail_tmo, pfsync_bulkfail, pfsyncif);
190 
191 	if_attach(ifp);
192 	if_alloc_sadl(ifp);
193 
194 	bpf_attach(&pfsyncif->sc_if, DLT_PFSYNC, PFSYNC_HDRLEN);
195 
196 	return (0);
197 }
198 
199 int
200 pfsync_clone_destroy(struct ifnet *ifp)
201 {
	/* Stop the callouts before the softc that contains them is freed. */
	callout_stop(&pfsyncif->sc_tmo);
	callout_stop(&pfsyncif->sc_tdb_tmo);
	callout_stop(&pfsyncif->sc_bulk_tmo);
	callout_stop(&pfsyncif->sc_bulkfail_tmo);
202 	bpf_detach(ifp);
203 	if_detach(ifp);
204 	free(pfsyncif, M_DEVBUF);
205 	pfsyncif = NULL;
206 	return (0);
207 }
208 
209 /*
210  * Start output on the pfsync interface: drain and drop anything queued.
211  */
212 void
213 pfsyncstart(struct ifnet *ifp)
214 {
215 	struct mbuf *m;
216 	int s;
217 
218 	for (;;) {
219 		s = splnet();
220 		IF_DROP(&ifp->if_snd);
221 		IF_DEQUEUE(&ifp->if_snd, m);
222 		splx(s);
223 
224 		if (m == NULL)
225 			return;
226 		else
227 			m_freem(m);
228 	}
229 }
230 
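/*
 * Allocate scrub state for a local state peer if the peer received from
 * the wire carries scrub information and none has been allocated yet.
 */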
231 int
232 pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
233     struct pf_state_peer *d)
234 {
235 	if (s->scrub.scrub_flag && d->scrub == NULL) {
236 		d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
237 		if (d->scrub == NULL)
238 			return (ENOMEM);
239 		memset(d->scrub, 0, sizeof(*d->scrub));
240 	}
241 
242 	return (0);
243 }
244 
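/*
 * Create a local state from one received over the wire: validate the
 * creator id, look up the interface, bind the state to the matching
 * filter rule when the ruleset checksums agree (otherwise to the default
 * rule), convert the addresses and peers from network byte order and
 * insert the result into the state table, marked PFSTATE_FROMSYNC.
 */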
245 int
246 pfsync_insert_net_state(struct pfsync_state *sp, u_int8_t chksum_flag)
247 {
248 	struct pf_state	*st = NULL;
249 	struct pf_state_key *sk = NULL;
250 	struct pf_rule *r = NULL;
251 	struct pfi_kif	*kif;
252 
253 	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
254 		printf("pfsync_insert_net_state: invalid creator id:"
255 		    " %08x\n", ntohl(sp->creatorid));
256 		return (EINVAL);
257 	}
258 
259 	kif = pfi_kif_get(sp->ifname);
260 	if (kif == NULL) {
261 		if (pf_status.debug >= PF_DEBUG_MISC)
262 			printf("pfsync_insert_net_state: "
263 			    "unknown interface: %s\n", sp->ifname);
264 		/* skip this state */
265 		return (0);
266 	}
267 
268 	/*
269 	 * If the ruleset checksums match, it's safe to associate the state
270 	 * with the rule of that number.
271 	 */
272 	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && chksum_flag &&
273 	    ntohl(sp->rule) <
274 	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
275 		r = pf_main_ruleset.rules[
276 		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
277 	else
278 		r = &pf_default_rule;
279 
280 	if (!r->max_states || r->states < r->max_states)
281 		st = pool_get(&pf_state_pl, PR_NOWAIT);
282 	if (st == NULL) {
283 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
284 		return (ENOMEM);
285 	}
286 	memset(st, 0, sizeof(*st));
287 
288 	if ((sk = pf_alloc_state_key(st)) == NULL) {
289 		pool_put(&pf_state_pl, st);
290 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
291 		return (ENOMEM);
292 	}
293 
294 	/* allocate memory for scrub info */
295 	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
296 	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) {
297 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
298 		if (st->src.scrub)
299 			pool_put(&pf_state_scrub_pl, st->src.scrub);
300 		pool_put(&pf_state_pl, st);
301 		pool_put(&pf_state_key_pl, sk);
302 		return (ENOMEM);
303 	}
304 
305 	st->rule.ptr = r;
306 	/* XXX get pointers to nat_rule and anchor */
307 
308 	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
309 	r->states++;
310 
311 	/* fill in the rest of the state entry */
312 	pf_state_host_ntoh(&sp->lan, &sk->lan);
313 	pf_state_host_ntoh(&sp->gwy, &sk->gwy);
314 	pf_state_host_ntoh(&sp->ext, &sk->ext);
315 
316 	pf_state_peer_ntoh(&sp->src, &st->src);
317 	pf_state_peer_ntoh(&sp->dst, &st->dst);
318 
319 	memcpy(&st->rt_addr, &sp->rt_addr, sizeof(st->rt_addr));
320 	st->creation = time_second - ntohl(sp->creation);
321 	st->expire = ntohl(sp->expire) + time_second;
322 
323 	sk->af = sp->af;
324 	sk->proto = sp->proto;
325 	sk->direction = sp->direction;
326 	st->log = sp->log;
327 	st->timeout = sp->timeout;
328 	st->allow_opts = sp->allow_opts;
329 
330 	memcpy(&st->id, sp->id, sizeof(st->id));
331 	st->creatorid = sp->creatorid;
332 	st->sync_flags = PFSTATE_FROMSYNC;
333 
334 	if (pf_insert_state(kif, st)) {
335 		pfi_kif_unref(kif, PFI_KIF_REF_NONE);
336 		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
337 		r->states--;
338 		if (st->dst.scrub)
339 			pool_put(&pf_state_scrub_pl, st->dst.scrub);
340 		if (st->src.scrub)
341 			pool_put(&pf_state_scrub_pl, st->src.scrub);
342 		pool_put(&pf_state_pl, st);
343 		return (EINVAL);
344 	}
345 
346 	return (0);
347 }
348 
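/*
 * Handle an incoming pfsync packet: check that a sync interface is
 * configured and that the packet arrived on it with TTL 255, validate
 * the header and protocol version, then dispatch on the action code
 * (state clear/insert/update/delete, their compressed forms, update
 * requests and bulk update status messages).
 */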
349 void
350 pfsync_input(struct mbuf *m, ...)
351 {
352 	struct ip *ip = mtod(m, struct ip *);
353 	struct pfsync_header *ph;
354 	struct pfsync_softc *sc = pfsyncif;
355 	struct pf_state *st;
356 	struct pf_state_key *sk;
357 	struct pf_state_cmp id_key;
358 	struct pfsync_state *sp;
359 	struct pfsync_state_upd *up;
360 	struct pfsync_state_del *dp;
361 	struct pfsync_state_clr *cp;
362 	struct pfsync_state_upd_req *rup;
363 	struct pfsync_state_bus *bus;
364 	struct in_addr src;
365 	struct mbuf *mp;
366 	int iplen, action, error, i, s, count, offp, sfail, stale = 0;
367 	u_int8_t chksum_flag = 0;
368 
369 	PFSYNC_STATINC(PFSYNC_STAT_IPACKETS);
370 
371 	/* verify that we have a sync interface configured */
372 	if (!sc || !sc->sc_sync_ifp || !pf_status.running)
373 		goto done;
374 
375 	/* verify that the packet came in on the right interface */
376 	if (sc->sc_sync_ifp != m->m_pkthdr.rcvif) {
377 		PFSYNC_STATINC(PFSYNC_STAT_BADIF);
378 		goto done;
379 	}
380 
381 	/* verify that the IP TTL is 255.  */
382 	if (ip->ip_ttl != PFSYNC_DFLTTL) {
383 		PFSYNC_STATINC(PFSYNC_STAT_BADTTL);
384 		goto done;
385 	}
386 
387 	iplen = ip->ip_hl << 2;
388 
389 	if (m->m_pkthdr.len < iplen + sizeof(*ph)) {
390 		PFSYNC_STATINC(PFSYNC_STAT_HDROPS);
391 		goto done;
392 	}
393 
394 	if (iplen + sizeof(*ph) > m->m_len) {
395 		if ((m = m_pullup(m, iplen + sizeof(*ph))) == NULL) {
396 			PFSYNC_STATINC(PFSYNC_STAT_HDROPS);
397 			goto done;
398 		}
399 		ip = mtod(m, struct ip *);
400 	}
401 	ph = (struct pfsync_header *)((char *)ip + iplen);
402 
403 	/* verify the version */
404 	if (ph->version != PFSYNC_VERSION) {
405 		PFSYNC_STATINC(PFSYNC_STAT_BADVER);
406 		goto done;
407 	}
408 
409 	action = ph->action;
410 	count = ph->count;
411 
412 	/* make sure it's a valid action code */
413 	if (action >= PFSYNC_ACT_MAX) {
414 		PFSYNC_STATINC(PFSYNC_STAT_BADACT);
415 		goto done;
416 	}
417 
418 	/* Cheaper to grab this now than having to mess with mbufs later */
419 	src = ip->ip_src;
420 
421 	if (!bcmp(&ph->pf_chksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
422 		chksum_flag++;
423 
424 	switch (action) {
425 	case PFSYNC_ACT_CLR: {
426 		struct pf_state *nexts;
427 		struct pf_state_key *nextsk;
428 		struct pfi_kif *kif;
429 		u_int32_t creatorid;
430 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
431 		    sizeof(*cp), &offp)) == NULL) {
432 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
433 			return;
434 		}
435 		cp = (struct pfsync_state_clr *)(mp->m_data + offp);
436 		creatorid = cp->creatorid;
437 
438 		s = splsoftnet();
439 		if (cp->ifname[0] == '\0') {
440 			for (st = RB_MIN(pf_state_tree_id, &tree_id);
441 			    st; st = nexts) {
442 				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
443 				if (st->creatorid == creatorid) {
444 					st->sync_flags |= PFSTATE_FROMSYNC;
445 					pf_unlink_state(st);
446 				}
447 			}
448 		} else {
449 			if ((kif = pfi_kif_get(cp->ifname)) == NULL) {
450 				splx(s);
451 				return;
452 			}
453 			for (sk = RB_MIN(pf_state_tree_lan_ext,
454 			    &pf_statetbl_lan_ext); sk; sk = nextsk) {
455 				nextsk = RB_NEXT(pf_state_tree_lan_ext,
456 				    &pf_statetbl_lan_ext, sk);
457 				TAILQ_FOREACH(st, &sk->states, next) {
458 					if (st->creatorid == creatorid) {
459 						st->sync_flags |=
460 						    PFSTATE_FROMSYNC;
461 						pf_unlink_state(st);
462 					}
463 				}
464 			}
465 		}
466 		splx(s);
467 
468 		break;
469 	}
470 	case PFSYNC_ACT_INS:
471 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
472 		    count * sizeof(*sp), &offp)) == NULL) {
473 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
474 			return;
475 		}
476 
477 		s = splsoftnet();
478 		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
479 		    i < count; i++, sp++) {
480 			/* check for invalid values */
481 			if (sp->timeout >= PFTM_MAX ||
482 			    sp->src.state > PF_TCPS_PROXY_DST ||
483 			    sp->dst.state > PF_TCPS_PROXY_DST ||
484 			    sp->direction > PF_OUT ||
485 			    (sp->af != AF_INET && sp->af != AF_INET6)) {
486 				if (pf_status.debug >= PF_DEBUG_MISC)
487 					printf("pfsync_input: PFSYNC_ACT_INS: "
488 					    "invalid value\n");
489 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
490 				continue;
491 			}
492 
493 			if ((error = pfsync_insert_net_state(sp,
494 			    chksum_flag))) {
495 				if (error == ENOMEM) {
496 					splx(s);
497 					goto done;
498 				}
499 				continue;
500 			}
501 		}
502 		splx(s);
503 		break;
504 	case PFSYNC_ACT_UPD:
505 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
506 		    count * sizeof(*sp), &offp)) == NULL) {
507 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
508 			return;
509 		}
510 
511 		s = splsoftnet();
512 		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
513 		    i < count; i++, sp++) {
514 			int flags = PFSYNC_FLAG_STALE;
515 
516 			/* check for invalid values */
517 			if (sp->timeout >= PFTM_MAX ||
518 			    sp->src.state > PF_TCPS_PROXY_DST ||
519 			    sp->dst.state > PF_TCPS_PROXY_DST) {
520 				if (pf_status.debug >= PF_DEBUG_MISC)
521 					printf("pfsync_input: PFSYNC_ACT_UPD: "
522 					    "invalid value\n");
523 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
524 				continue;
525 			}
526 
527 			memcpy(&id_key.id, sp->id, sizeof(id_key.id));
528 			id_key.creatorid = sp->creatorid;
529 
530 			st = pf_find_state_byid(&id_key);
531 			if (st == NULL) {
532 				/* insert the update */
533 				if (pfsync_insert_net_state(sp, chksum_flag)) {
534 					PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
535 				}
536 				continue;
537 			}
538 			sk = st->state_key;
539 			sfail = 0;
540 			if (sk->proto == IPPROTO_TCP) {
541 				/*
542 				 * The state should never go backwards except
543 				 * for syn-proxy states.  Neither should the
544 				 * sequence window slide backwards.
545 				 */
546 				if (st->src.state > sp->src.state &&
547 				    (st->src.state < PF_TCPS_PROXY_SRC ||
548 				    sp->src.state >= PF_TCPS_PROXY_SRC))
549 					sfail = 1;
550 				else if (SEQ_GT(st->src.seqlo,
551 				    ntohl(sp->src.seqlo)))
552 					sfail = 3;
553 				else if (st->dst.state > sp->dst.state) {
554 					/* There might still be useful
555 					 * information about the src state here,
556 					 * so import that part of the update,
557 					 * then "fail" so we send the updated
558 					 * state back to the peer that is
559 					 * missing what we know. */
560 					pf_state_peer_ntoh(&sp->src, &st->src);
561 					/* XXX do anything with timeouts? */
562 					sfail = 7;
563 					flags = 0;
564 				} else if (st->dst.state >= TCPS_SYN_SENT &&
565 				    SEQ_GT(st->dst.seqlo, ntohl(sp->dst.seqlo)))
566 					sfail = 4;
567 			} else {
568 				/*
569 				 * Non-TCP protocol state machines always go
570 				 * forward
571 				 */
572 				if (st->src.state > sp->src.state)
573 					sfail = 5;
574 				else if (st->dst.state > sp->dst.state)
575 					sfail = 6;
576 			}
577 			if (sfail) {
578 				if (pf_status.debug >= PF_DEBUG_MISC)
579 					printf("pfsync: %s stale update "
580 					    "(%d) id: %016" PRIu64 " "
581 					    "creatorid: %08x\n",
582 					    (sfail < 7 ?  "ignoring"
583 					     : "partial"), sfail,
584 					    be64toh(st->id),
585 					    ntohl(st->creatorid));
586 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
587 
588 				if (!(sp->sync_flags & PFSTATE_STALE)) {
589 					/* we have a better state, send it */
590 					if (sc->sc_mbuf != NULL && !stale)
591 						pfsync_sendout(sc);
592 					stale++;
593 					if (!st->sync_flags)
594 						pfsync_pack_state(
595 						    PFSYNC_ACT_UPD, st, flags);
596 				}
597 				continue;
598 			}
599 			pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
600 			pf_state_peer_ntoh(&sp->src, &st->src);
601 			pf_state_peer_ntoh(&sp->dst, &st->dst);
602 			st->expire = ntohl(sp->expire) + time_second;
603 			st->timeout = sp->timeout;
604 		}
605 		if (stale && sc->sc_mbuf != NULL)
606 			pfsync_sendout(sc);
607 		splx(s);
608 		break;
609 	/*
610 	 * It's not strictly necessary for us to support the "uncompressed"
611 	 * delete action, but it's relatively simple and maintains consistency.
612 	 */
613 	case PFSYNC_ACT_DEL:
614 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
615 		    count * sizeof(*sp), &offp)) == NULL) {
616 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
617 			return;
618 		}
619 
620 		s = splsoftnet();
621 		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
622 		    i < count; i++, sp++) {
623 			memcpy(&id_key.id, sp->id, sizeof(id_key.id));
624 			id_key.creatorid = sp->creatorid;
625 
626 			st = pf_find_state_byid(&id_key);
627 			if (st == NULL) {
628 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
629 				continue;
630 			}
631 			st->sync_flags |= PFSTATE_FROMSYNC;
632 			pf_unlink_state(st);
633 		}
634 		splx(s);
635 		break;
636 	case PFSYNC_ACT_UPD_C: {
637 		int update_requested = 0;
638 
639 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
640 		    count * sizeof(*up), &offp)) == NULL) {
641 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
642 			return;
643 		}
644 
645 		s = splsoftnet();
646 		for (i = 0, up = (struct pfsync_state_upd *)(mp->m_data + offp);
647 		    i < count; i++, up++) {
648 			/* check for invalid values */
649 			if (up->timeout >= PFTM_MAX ||
650 			    up->src.state > PF_TCPS_PROXY_DST ||
651 			    up->dst.state > PF_TCPS_PROXY_DST) {
652 				if (pf_status.debug >= PF_DEBUG_MISC)
653 					printf("pfsync_input: "
654 					    "PFSYNC_ACT_UPD_C: "
655 					    "invalid value\n");
656 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
657 				continue;
658 			}
659 
660 			memcpy(&id_key.id, up->id, sizeof(id_key.id));
661 			id_key.creatorid = up->creatorid;
662 
663 			st = pf_find_state_byid(&id_key);
664 			if (st == NULL) {
665 				/* We don't have this state. Ask for it. */
666 				error = pfsync_request_update(up, &src);
667 				if (error == ENOMEM) {
668 					splx(s);
669 					goto done;
670 				}
671 				update_requested = 1;
672 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
673 				continue;
674 			}
675 			sk = st->state_key;
676 			sfail = 0;
677 			if (sk->proto == IPPROTO_TCP) {
678 				/*
679 				 * The state should never go backwards except
680 				 * for syn-proxy states.  Neither should the
681 				 * sequence window slide backwards.
682 				 */
683 				if (st->src.state > up->src.state &&
684 				    (st->src.state < PF_TCPS_PROXY_SRC ||
685 				    up->src.state >= PF_TCPS_PROXY_SRC))
686 					sfail = 1;
687 				else if (st->dst.state > up->dst.state)
688 					sfail = 2;
689 				else if (SEQ_GT(st->src.seqlo,
690 				    ntohl(up->src.seqlo)))
691 					sfail = 3;
692 				else if (st->dst.state >= TCPS_SYN_SENT &&
693 				    SEQ_GT(st->dst.seqlo, ntohl(up->dst.seqlo)))
694 					sfail = 4;
695 			} else {
696 				/*
697 				 * Non-TCP protocol state machines always go
698 				 * forward
699 				 */
700 				if (st->src.state > up->src.state)
701 					sfail = 5;
702 				else if (st->dst.state > up->dst.state)
703 					sfail = 6;
704 			}
705 			if (sfail) {
706 				if (pf_status.debug >= PF_DEBUG_MISC)
707 					printf("pfsync: ignoring stale update "
708 					    "(%d) id: %016" PRIu64 " "
709 					    "creatorid: %08x\n", sfail,
710 					    be64toh(st->id),
711 					    ntohl(st->creatorid));
712 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
713 
714 				/* we have a better state, send it out */
715 				if ((!stale || update_requested) &&
716 				    sc->sc_mbuf != NULL) {
717 					pfsync_sendout(sc);
718 					update_requested = 0;
719 				}
720 				stale++;
721 				if (!st->sync_flags)
722 					pfsync_pack_state(PFSYNC_ACT_UPD, st,
723 					    PFSYNC_FLAG_STALE);
724 				continue;
725 			}
726 			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
727 			pf_state_peer_ntoh(&up->src, &st->src);
728 			pf_state_peer_ntoh(&up->dst, &st->dst);
729 			st->expire = ntohl(up->expire) + time_second;
730 			st->timeout = up->timeout;
731 		}
732 		if ((update_requested || stale) && sc->sc_mbuf)
733 			pfsync_sendout(sc);
734 		splx(s);
735 		break;
736 	}
737 	case PFSYNC_ACT_DEL_C:
738 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
739 		    count * sizeof(*dp), &offp)) == NULL) {
740 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
741 			return;
742 		}
743 
744 		s = splsoftnet();
745 		for (i = 0, dp = (struct pfsync_state_del *)(mp->m_data + offp);
746 		    i < count; i++, dp++) {
747 			memcpy(&id_key.id, dp->id, sizeof(id_key.id));
748 			id_key.creatorid = dp->creatorid;
749 
750 			st = pf_find_state_byid(&id_key);
751 			if (st == NULL) {
752 				PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
753 				continue;
754 			}
755 			st->sync_flags |= PFSTATE_FROMSYNC;
756 			pf_unlink_state(st);
757 		}
758 		splx(s);
759 		break;
760 	case PFSYNC_ACT_INS_F:
761 	case PFSYNC_ACT_DEL_F:
762 		/* not implemented */
763 		break;
764 	case PFSYNC_ACT_UREQ:
765 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
766 		    count * sizeof(*rup), &offp)) == NULL) {
767 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
768 			return;
769 		}
770 
771 		s = splsoftnet();
772 		if (sc->sc_mbuf != NULL)
773 			pfsync_sendout(sc);
774 		for (i = 0,
775 		    rup = (struct pfsync_state_upd_req *)(mp->m_data + offp);
776 		    i < count; i++, rup++) {
777 			memcpy(&id_key.id, rup->id, sizeof(id_key.id));
778 			id_key.creatorid = rup->creatorid;
779 
780 			if (id_key.id == 0 && id_key.creatorid == 0) {
781 				sc->sc_ureq_received = time_uptime;
782 				if (sc->sc_bulk_send_next == NULL)
783 					sc->sc_bulk_send_next =
784 					    TAILQ_FIRST(&state_list);
785 				sc->sc_bulk_terminator = sc->sc_bulk_send_next;
786 				if (pf_status.debug >= PF_DEBUG_MISC)
787 					printf("pfsync: received "
788 					    "bulk update request\n");
789 				pfsync_send_bus(sc, PFSYNC_BUS_START);
790 				callout_schedule(&sc->sc_bulk_tmo, 1 * hz);
791 			} else {
792 				st = pf_find_state_byid(&id_key);
793 				if (st == NULL) {
794 					PFSYNC_STATINC(PFSYNC_STAT_BADSTATE);
795 					continue;
796 				}
797 				if (!st->sync_flags)
798 					pfsync_pack_state(PFSYNC_ACT_UPD,
799 					    st, 0);
800 			}
801 		}
802 		if (sc->sc_mbuf != NULL)
803 			pfsync_sendout(sc);
804 		splx(s);
805 		break;
806 	case PFSYNC_ACT_BUS:
807 		/* If we're not waiting for a bulk update, who cares. */
808 		if (sc->sc_ureq_sent == 0)
809 			break;
810 
811 		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
812 		    sizeof(*bus), &offp)) == NULL) {
813 			PFSYNC_STATINC(PFSYNC_STAT_BADLEN);
814 			return;
815 		}
816 		bus = (struct pfsync_state_bus *)(mp->m_data + offp);
817 		switch (bus->status) {
818 		case PFSYNC_BUS_START:
819 			callout_schedule(&sc->sc_bulkfail_tmo,
820 			    pf_pool_limits[PF_LIMIT_STATES].limit /
821 			    (PFSYNC_BULKPACKETS * sc->sc_maxcount));
822 			if (pf_status.debug >= PF_DEBUG_MISC)
823 				printf("pfsync: received bulk "
824 				    "update start\n");
825 			break;
826 		case PFSYNC_BUS_END:
827 			if (time_uptime - ntohl(bus->endtime) >=
828 			    sc->sc_ureq_sent) {
829 				/* that's it, we're happy */
830 				sc->sc_ureq_sent = 0;
831 				sc->sc_bulk_tries = 0;
832 				callout_stop(&sc->sc_bulkfail_tmo);
833 #if NCARP > 0
834 				if (!pfsync_sync_ok)
835 					carp_suppress_preempt--;
836 #endif
837 				pfsync_sync_ok = 1;
838 				if (pf_status.debug >= PF_DEBUG_MISC)
839 					printf("pfsync: received valid "
840 					    "bulk update end\n");
841 			} else {
842 				if (pf_status.debug >= PF_DEBUG_MISC)
843 					printf("pfsync: received invalid "
844 					    "bulk update end: bad timestamp\n");
845 			}
846 			break;
847 		}
848 		break;
849 	}
850 
851 done:
852 	if (m)
853 		m_freem(m);
854 }
855 
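/* pfsync packets are never routed; discard anything handed to if_output. */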
856 int
857 pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
858 	struct rtentry *rt)
859 {
860 	m_freem(m);
861 	return (0);
862 }
863 
864 /* ARGSUSED */
865 int
866 pfsyncioctl(struct ifnet *ifp, u_long cmd, void *data)
867 {
868 	struct lwp *l = curlwp;
869 	struct pfsync_softc *sc = ifp->if_softc;
870 	struct ifreq *ifr = (struct ifreq *)data;
871 	struct ip_moptions *imo = &sc->sc_imo;
872 	struct pfsyncreq pfsyncr;
873 	struct ifnet    *sifp;
874 	int s, error;
875 
876 	switch (cmd) {
877 	case SIOCSIFADDR:
878 	case SIOCAIFADDR:
879 	case SIOCSIFDSTADDR:
880 	case SIOCSIFFLAGS:
881 		if (ifp->if_flags & IFF_UP)
882 			ifp->if_flags |= IFF_RUNNING;
883 		else
884 			ifp->if_flags &= ~IFF_RUNNING;
885 		break;
886 	case SIOCSIFMTU:
887 		if (ifr->ifr_mtu < PFSYNC_MINMTU)
888 			return (EINVAL);
889 		if (ifr->ifr_mtu > MCLBYTES)
890 			ifr->ifr_mtu = MCLBYTES;
891 		s = splnet();
892 		if (ifr->ifr_mtu < ifp->if_mtu)
893 			pfsync_sendout(sc);
894 		pfsync_setmtu(sc, ifr->ifr_mtu);
895 		splx(s);
896 		break;
897 	case SIOCGETPFSYNC:
898 		if ((error = kauth_authorize_network(l->l_cred,
899 		    KAUTH_NETWORK_INTERFACE,
900 		    KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, ifp, (void *)cmd,
901 		    NULL)) != 0)
902 			return (error);
903 		memset(&pfsyncr, 0, sizeof(pfsyncr));
904 		if (sc->sc_sync_ifp)
905 			strlcpy(pfsyncr.pfsyncr_syncdev,
906 			    sc->sc_sync_ifp->if_xname, IFNAMSIZ);
907 		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
908 		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
909 		if ((error = copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr))))
910 			return (error);
911 		break;
912 	case SIOCSETPFSYNC:
913 		if ((error = kauth_authorize_network(l->l_cred,
914 		    KAUTH_NETWORK_INTERFACE,
915 		    KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
916 		    NULL)) != 0)
917 			return (error);
918 		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
919 			return (error);
920 
921 		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
922 			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
923 		else
924 			sc->sc_sync_peer.s_addr =
925 			    pfsyncr.pfsyncr_syncpeer.s_addr;
926 
927 		if (pfsyncr.pfsyncr_maxupdates > 255)
928 			return (EINVAL);
929 		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
930 
931 		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
932 			sc->sc_sync_ifp = NULL;
933 			if (sc->sc_mbuf_net != NULL) {
934 				/* Don't keep stale pfsync packets around. */
935 				s = splnet();
936 				m_freem(sc->sc_mbuf_net);
937 				sc->sc_mbuf_net = NULL;
938 				sc->sc_statep_net.s = NULL;
939 				splx(s);
940 			}
941 			if (imo->imo_num_memberships > 0) {
942 				in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
943 				imo->imo_multicast_ifp = NULL;
944 			}
945 			break;
946 		}
947 
948 		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
949 			return (EINVAL);
950 
951 		s = splnet();
952 		if (sifp->if_mtu < sc->sc_if.if_mtu ||
953 		    (sc->sc_sync_ifp != NULL &&
954 		    sifp->if_mtu < sc->sc_sync_ifp->if_mtu) ||
955 		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
956 			pfsync_sendout(sc);
957 		sc->sc_sync_ifp = sifp;
958 
959 		pfsync_setmtu(sc, sc->sc_if.if_mtu);
960 
961 		if (imo->imo_num_memberships > 0) {
962 			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
963 			imo->imo_multicast_ifp = NULL;
964 		}
965 
966 		if (sc->sc_sync_ifp &&
967 		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
968 			struct in_addr addr;
969 
970 			if (!(sc->sc_sync_ifp->if_flags & IFF_MULTICAST)) {
971 				sc->sc_sync_ifp = NULL;
972 				splx(s);
973 				return (EADDRNOTAVAIL);
974 			}
975 
976 			addr.s_addr = INADDR_PFSYNC_GROUP;
977 
978 			if ((imo->imo_membership[0] =
979 			    in_addmulti(&addr, sc->sc_sync_ifp)) == NULL) {
980 				sc->sc_sync_ifp = NULL;
981 				splx(s);
982 				return (ENOBUFS);
983 			}
984 			imo->imo_num_memberships++;
985 			imo->imo_multicast_ifp = sc->sc_sync_ifp;
986 			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
987 			imo->imo_multicast_loop = 0;
988 		}
989 
990 		if (sc->sc_sync_ifp ||
991 		    sc->sc_sendaddr.s_addr != INADDR_PFSYNC_GROUP) {
992 			/* Request a full state table update. */
993 			sc->sc_ureq_sent = time_uptime;
994 #if NCARP > 0
995 			if (pfsync_sync_ok)
996 				carp_suppress_preempt++;
997 #endif
998 			pfsync_sync_ok = 0;
999 			if (pf_status.debug >= PF_DEBUG_MISC)
1000 				printf("pfsync: requesting bulk update\n");
1001 			callout_schedule(&sc->sc_bulkfail_tmo, 5 * hz);
1002 			error = pfsync_request_update(NULL, NULL);
1003 			if (error == ENOMEM) {
1004 				splx(s);
1005 				return (ENOMEM);
1006 			}
1007 			pfsync_sendout(sc);
1008 		}
1009 		splx(s);
1010 
1011 		break;
1012 
1013 	default:
1014 		return ifioctl_common(ifp, cmd, data);
1015 	}
1016 
1017 	return (0);
1018 }
1019 
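/*
 * Size the interface MTU so that a packet holds a whole number of
 * pfsync_state entries: compute sc_maxcount from the requested MTU
 * (clamped to the sync interface's MTU and to 254 states) and set
 * if_mtu to exactly that many entries plus the pfsync header.
 */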
1020 void
1021 pfsync_setmtu(struct pfsync_softc *sc, int mtu_req)
1022 {
1023 	int mtu;
1024 
1025 	if (sc->sc_sync_ifp && sc->sc_sync_ifp->if_mtu < mtu_req)
1026 		mtu = sc->sc_sync_ifp->if_mtu;
1027 	else
1028 		mtu = mtu_req;
1029 
1030 	sc->sc_maxcount = (mtu - sizeof(struct pfsync_header)) /
1031 	    sizeof(struct pfsync_state);
1032 	if (sc->sc_maxcount > 254)
1033 		sc->sc_maxcount = 254;
1034 	sc->sc_if.if_mtu = sizeof(struct pfsync_header) +
1035 	    sc->sc_maxcount * sizeof(struct pfsync_state);
1036 }
1037 
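/*
 * Allocate an mbuf for a new pfsync packet and fill in its header for
 * the given action.  The buffer is sized for the action's record type
 * (up to sc_maxcount records, or a single record for CLR and BUS),
 * *sp is pointed just past the header so the caller can append records,
 * and the corresponding flush callout is armed.
 */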
1038 struct mbuf *
1039 pfsync_get_mbuf(struct pfsync_softc *sc, u_int8_t action, void **sp)
1040 {
1041 	struct pfsync_header *h;
1042 	struct mbuf *m;
1043 	int len;
1044 
1045 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1046 	if (m == NULL) {
1047 		sc->sc_if.if_oerrors++;
1048 		return (NULL);
1049 	}
1050 
1051 	switch (action) {
1052 	case PFSYNC_ACT_CLR:
1053 		len = sizeof(struct pfsync_header) +
1054 		    sizeof(struct pfsync_state_clr);
1055 		break;
1056 	case PFSYNC_ACT_UPD_C:
1057 		len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd)) +
1058 		    sizeof(struct pfsync_header);
1059 		break;
1060 	case PFSYNC_ACT_DEL_C:
1061 		len = (sc->sc_maxcount * sizeof(struct pfsync_state_del)) +
1062 		    sizeof(struct pfsync_header);
1063 		break;
1064 	case PFSYNC_ACT_UREQ:
1065 		len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd_req)) +
1066 		    sizeof(struct pfsync_header);
1067 		break;
1068 	case PFSYNC_ACT_BUS:
1069 		len = sizeof(struct pfsync_header) +
1070 		    sizeof(struct pfsync_state_bus);
1071 		break;
1072 	case PFSYNC_ACT_TDB_UPD:
1073 		len = (sc->sc_maxcount * sizeof(struct pfsync_tdb)) +
1074 		    sizeof(struct pfsync_header);
1075 		break;
1076 	default:
1077 		len = (sc->sc_maxcount * sizeof(struct pfsync_state)) +
1078 		    sizeof(struct pfsync_header);
1079 		break;
1080 	}
1081 
1082 	if (len > MHLEN) {
1083 		MCLGET(m, M_DONTWAIT);
1084 		if ((m->m_flags & M_EXT) == 0) {
1085 			m_free(m);
1086 			sc->sc_if.if_oerrors++;
1087 			return (NULL);
1088 		}
1089 		m->m_data += (MCLBYTES - len) &~ (sizeof(long) - 1);
1090 	} else
1091 		MH_ALIGN(m, len);
1092 
1093 	m->m_pkthdr.rcvif = NULL;
1094 	m->m_pkthdr.len = m->m_len = sizeof(struct pfsync_header);
1095 	h = mtod(m, struct pfsync_header *);
1096 	h->version = PFSYNC_VERSION;
1097 	h->af = 0;
1098 	h->count = 0;
1099 	h->action = action;
1100 	if (action != PFSYNC_ACT_TDB_UPD)
1101 		memcpy(&h->pf_chksum, &pf_status.pf_chksum,
1102 		    PF_MD5_DIGEST_LENGTH);
1103 
1104 	*sp = (void *)((char *)h + PFSYNC_HDRLEN);
1105 	if (action == PFSYNC_ACT_TDB_UPD)
1106 		callout_schedule(&sc->sc_tdb_tmo, hz);
1107 	else
1108 		callout_schedule(&sc->sc_tmo, hz);
1109 	return (m);
1110 }
1111 
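/*
 * Queue a state insert/update/delete into the packet being built.  For
 * updates, an entry already queued for the same state is merged when
 * sc_maxupdates allows; with PFSYNC_FLAG_COMPRESS the compressed UPD_C
 * or DEL_C record is also built for the wire packet.  The packet is
 * flushed once it is full or the per-state update limit is reached.
 */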
1112 int
1113 pfsync_pack_state(u_int8_t action, struct pf_state *st, int flags)
1114 {
1115 	struct ifnet *ifp = NULL;
1116 	struct pfsync_softc *sc = pfsyncif;
1117 	struct pfsync_header *h, *h_net;
1118 	struct pfsync_state *sp = NULL;
1119 	struct pfsync_state_upd *up = NULL;
1120 	struct pfsync_state_del *dp = NULL;
1121 	struct pf_state_key *sk = st->state_key;
1122 	struct pf_rule *r;
1123 	u_long secs;
1124 	int s, ret = 0;
1125 	u_int8_t i = 255, newaction = 0;
1126 
1127 	if (sc == NULL)
1128 		return (0);
1129 	ifp = &sc->sc_if;
1130 
1131 	/*
1132 	 * If a packet falls in the forest and there's nobody around to
1133 	 * hear, does it make a sound?
1134 	 */
1135 	if (ifp->if_bpf == NULL && sc->sc_sync_ifp == NULL &&
1136 	    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
1137 		/* Don't leave any stale pfsync packets hanging around. */
1138 		if (sc->sc_mbuf != NULL) {
1139 			m_freem(sc->sc_mbuf);
1140 			sc->sc_mbuf = NULL;
1141 			sc->sc_statep.s = NULL;
1142 		}
1143 		return (0);
1144 	}
1145 
1146 	if (action >= PFSYNC_ACT_MAX)
1147 		return (EINVAL);
1148 
1149 	s = splnet();
1150 	if (sc->sc_mbuf == NULL) {
1151 		if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
1152 		    (void *)&sc->sc_statep.s)) == NULL) {
1153 			splx(s);
1154 			return (ENOMEM);
1155 		}
1156 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1157 	} else {
1158 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1159 		if (h->action != action) {
1160 			pfsync_sendout(sc);
1161 			if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
1162 			    (void *)&sc->sc_statep.s)) == NULL) {
1163 				splx(s);
1164 				return (ENOMEM);
1165 			}
1166 			h = mtod(sc->sc_mbuf, struct pfsync_header *);
1167 		} else {
1168 			/*
1169 			 * If it's an update, look in the packet to see if
1170 			 * we already have an update for the state.
1171 			 */
1172 			if (action == PFSYNC_ACT_UPD && sc->sc_maxupdates) {
1173 				struct pfsync_state *usp =
1174 				    (void *)((char *)h + PFSYNC_HDRLEN);
1175 
1176 				for (i = 0; i < h->count; i++) {
1177 					if (!memcmp(usp->id, &st->id,
1178 					    PFSYNC_ID_LEN) &&
1179 					    usp->creatorid == st->creatorid) {
1180 						sp = usp;
1181 						sp->updates++;
1182 						break;
1183 					}
1184 					usp++;
1185 				}
1186 			}
1187 		}
1188 	}
1189 
1190 	secs = time_second;
1191 
1192 	st->pfsync_time = time_uptime;
1193 
1194 	if (sp == NULL) {
1195 		/* not a "duplicate" update */
1196 		i = 255;
1197 		sp = sc->sc_statep.s++;
1198 		sc->sc_mbuf->m_pkthdr.len =
1199 		    sc->sc_mbuf->m_len += sizeof(struct pfsync_state);
1200 		h->count++;
1201 		memset(sp, 0, sizeof(*sp));
1202 
1203 		memcpy(sp->id, &st->id, sizeof(sp->id));
1204 		sp->creatorid = st->creatorid;
1205 
1206 		strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
1207 		pf_state_host_hton(&sk->lan, &sp->lan);
1208 		pf_state_host_hton(&sk->gwy, &sp->gwy);
1209 		pf_state_host_hton(&sk->ext, &sp->ext);
1210 
1211 		memcpy(&sp->rt_addr, &st->rt_addr, sizeof(sp->rt_addr));
1212 
1213 		sp->creation = htonl(secs - st->creation);
1214 		pf_state_counter_hton(st->packets[0], sp->packets[0]);
1215 		pf_state_counter_hton(st->packets[1], sp->packets[1]);
1216 		pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
1217 		pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
1218 		if ((r = st->rule.ptr) == NULL)
1219 			sp->rule = htonl(-1);
1220 		else
1221 			sp->rule = htonl(r->nr);
1222 		if ((r = st->anchor.ptr) == NULL)
1223 			sp->anchor = htonl(-1);
1224 		else
1225 			sp->anchor = htonl(r->nr);
1226 		sp->af = sk->af;
1227 		sp->proto = sk->proto;
1228 		sp->direction = sk->direction;
1229 		sp->log = st->log;
1230 		sp->allow_opts = st->allow_opts;
1231 		sp->timeout = st->timeout;
1232 
1233 		if (flags & PFSYNC_FLAG_STALE)
1234 			sp->sync_flags |= PFSTATE_STALE;
1235 	}
1236 
1237 	pf_state_peer_hton(&st->src, &sp->src);
1238 	pf_state_peer_hton(&st->dst, &sp->dst);
1239 
1240 	if (st->expire <= secs)
1241 		sp->expire = htonl(0);
1242 	else
1243 		sp->expire = htonl(st->expire - secs);
1244 
1245 	/* do we need to build "compressed" actions for network transfer? */
1246 	if (sc->sc_sync_ifp && flags & PFSYNC_FLAG_COMPRESS) {
1247 		switch (action) {
1248 		case PFSYNC_ACT_UPD:
1249 			newaction = PFSYNC_ACT_UPD_C;
1250 			break;
1251 		case PFSYNC_ACT_DEL:
1252 			newaction = PFSYNC_ACT_DEL_C;
1253 			break;
1254 		default:
1255 			/* by default we just send the uncompressed states */
1256 			break;
1257 		}
1258 	}
1259 
1260 	if (newaction) {
1261 		if (sc->sc_mbuf_net == NULL) {
1262 			if ((sc->sc_mbuf_net = pfsync_get_mbuf(sc, newaction,
1263 			    (void *)&sc->sc_statep_net.s)) == NULL) {
1264 				splx(s);
1265 				return (ENOMEM);
1266 			}
1267 		}
1268 		h_net = mtod(sc->sc_mbuf_net, struct pfsync_header *);
1269 
1270 		switch (newaction) {
1271 		case PFSYNC_ACT_UPD_C:
1272 			if (i != 255) {
1273 				up = (void *)((char *)h_net +
1274 				    PFSYNC_HDRLEN + (i * sizeof(*up)));
1275 				up->updates++;
1276 			} else {
1277 				h_net->count++;
1278 				sc->sc_mbuf_net->m_pkthdr.len =
1279 				    sc->sc_mbuf_net->m_len += sizeof(*up);
1280 				up = sc->sc_statep_net.u++;
1281 
1282 				memset(up, 0, sizeof(*up));
1283 				memcpy(up->id, &st->id, sizeof(up->id));
1284 				up->creatorid = st->creatorid;
1285 			}
1286 			up->timeout = st->timeout;
1287 			up->expire = sp->expire;
1288 			up->src = sp->src;
1289 			up->dst = sp->dst;
1290 			break;
1291 		case PFSYNC_ACT_DEL_C:
1292 			sc->sc_mbuf_net->m_pkthdr.len =
1293 			    sc->sc_mbuf_net->m_len += sizeof(*dp);
1294 			dp = sc->sc_statep_net.d++;
1295 			h_net->count++;
1296 
1297 			memset(dp, 0, sizeof(*dp));
1298 			memcpy(dp->id, &st->id, sizeof(dp->id));
1299 			dp->creatorid = st->creatorid;
1300 			break;
1301 		}
1302 	}
1303 
1304 	if (h->count == sc->sc_maxcount ||
1305 	    (sc->sc_maxupdates && (sp->updates >= sc->sc_maxupdates)))
1306 		ret = pfsync_sendout(sc);
1307 
1308 	splx(s);
1309 	return (ret);
1310 }
1311 
1312 /* This must be called in splnet() */
1313 int
1314 pfsync_request_update(struct pfsync_state_upd *up, struct in_addr *src)
1315 {
1316 	struct pfsync_header *h;
1317 	struct pfsync_softc *sc = pfsyncif;
1318 	struct pfsync_state_upd_req *rup;
1319 	int ret = 0;
1320 
1321 	if (sc == NULL)
1322 		return (0);
1323 
1324 	if (sc->sc_mbuf == NULL) {
1325 		if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
1326 		    (void *)&sc->sc_statep.s)) == NULL)
1327 			return (ENOMEM);
1328 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1329 	} else {
1330 		h = mtod(sc->sc_mbuf, struct pfsync_header *);
1331 		if (h->action != PFSYNC_ACT_UREQ) {
1332 			pfsync_sendout(sc);
1333 			if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
1334 			    (void *)&sc->sc_statep.s)) == NULL)
1335 				return (ENOMEM);
1336 			h = mtod(sc->sc_mbuf, struct pfsync_header *);
1337 		}
1338 	}
1339 
1340 	if (src != NULL)
1341 		sc->sc_sendaddr = *src;
1342 	sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*rup);
1343 	h->count++;
1344 	rup = sc->sc_statep.r++;
1345 	memset(rup, 0, sizeof(*rup));
1346 	if (up != NULL) {
1347 		memcpy(rup->id, up->id, sizeof(rup->id));
1348 		rup->creatorid = up->creatorid;
1349 	}
1350 
1351 	if (h->count == sc->sc_maxcount)
1352 		ret = pfsync_sendout(sc);
1353 
1354 	return (ret);
1355 }
1356 
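/*
 * Send a PFSYNC_ACT_CLR message asking peers to flush all states with
 * the given creator id, optionally restricted to a single interface.
 */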
1357 int
1358 pfsync_clear_states(u_int32_t creatorid, char *ifname)
1359 {
1360 	struct pfsync_softc *sc = pfsyncif;
1361 	struct pfsync_state_clr *cp;
1362 	int s, ret;
1363 
1364 	if (sc == NULL)
1365 		return (0);
1366 
1367 	s = splnet();
1368 	if (sc->sc_mbuf != NULL)
1369 		pfsync_sendout(sc);
1370 	if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_CLR,
1371 	    (void *)&sc->sc_statep.c)) == NULL) {
1372 		splx(s);
1373 		return (ENOMEM);
1374 	}
1375 	sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*cp);
1376 	cp = sc->sc_statep.c;
1377 	cp->creatorid = creatorid;
1378 	if (ifname != NULL)
1379 		strlcpy(cp->ifname, ifname, IFNAMSIZ);
1380 
1381 	ret = (pfsync_sendout(sc));
1382 	splx(s);
1383 	return (ret);
1384 }
1385 
1386 void
1387 pfsync_timeout(void *v)
1388 {
1389 	struct pfsync_softc *sc = v;
1390 	int s;
1391 
1392 	s = splnet();
1393 	pfsync_sendout(sc);
1394 	splx(s);
1395 }
1396 
1397 void
1398 pfsync_tdb_timeout(void *v)
1399 {
1400 	struct pfsync_softc *sc = v;
1401 	int s;
1402 
1403 	s = splnet();
1404 	pfsync_tdb_sendout(sc);
1405 	splx(s);
1406 }
1407 
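/*
 * Send a bulk update status (start/end) message; endtime carries the
 * time elapsed since the bulk update request was received.
 */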
1408 /* This must be called in splnet() */
1409 void
1410 pfsync_send_bus(struct pfsync_softc *sc, u_int8_t status)
1411 {
1412 	struct pfsync_state_bus *bus;
1413 
1414 	if (sc->sc_mbuf != NULL)
1415 		pfsync_sendout(sc);
1416 
1417 	if (pfsync_sync_ok &&
1418 	    (sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_BUS,
1419 	    (void *)&sc->sc_statep.b)) != NULL) {
1420 		sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*bus);
1421 		bus = sc->sc_statep.b;
1422 		bus->creatorid = pf_status.hostid;
1423 		bus->status = status;
1424 		bus->endtime = htonl(time_uptime - sc->sc_ureq_received);
1425 		pfsync_sendout(sc);
1426 	}
1427 }
1428 
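/*
 * Callout handler that walks the state list in response to a bulk
 * update request, sending up to sc_maxcount * PFSYNC_BULKPACKETS
 * updates per invocation for states not already sent since the request
 * was received.  It reschedules itself until it wraps back to the
 * terminator state, then announces completion with PFSYNC_BUS_END.
 */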
1429 void
1430 pfsync_bulk_update(void *v)
1431 {
1432 	struct pfsync_softc *sc = v;
1433 	int s, i = 0;
1434 	struct pf_state *state;
1435 
1436 	s = splnet();
1437 	if (sc->sc_mbuf != NULL)
1438 		pfsync_sendout(sc);
1439 
1440 	/*
1441 	 * Grab at most PFSYNC_BULKPACKETS worth of states which have not
1442 	 * been sent since the latest request was made.
1443 	 */
1444 	state = sc->sc_bulk_send_next;
1445 	if (state)
1446 		do {
1447 			/* send state update if syncable and not already sent */
1448 			if (!state->sync_flags
1449 			    && state->timeout < PFTM_MAX
1450 			    && state->pfsync_time <= sc->sc_ureq_received) {
1451 				pfsync_pack_state(PFSYNC_ACT_UPD, state, 0);
1452 				i++;
1453 			}
1454 
1455 			/* figure next state to send */
1456 			state = TAILQ_NEXT(state, entry_list);
1457 
1458 			/* wrap to start of list if we hit the end */
1459 			if (!state)
1460 				state = TAILQ_FIRST(&state_list);
1461 		} while (i < sc->sc_maxcount * PFSYNC_BULKPACKETS &&
1462 		    state != sc->sc_bulk_terminator);
1463 
1464 	if (!state || state == sc->sc_bulk_terminator) {
1465 		/* we're done */
1466 		pfsync_send_bus(sc, PFSYNC_BUS_END);
1467 		sc->sc_ureq_received = 0;
1468 		sc->sc_bulk_send_next = NULL;
1469 		sc->sc_bulk_terminator = NULL;
1470 		callout_stop(&sc->sc_bulk_tmo);
1471 		if (pf_status.debug >= PF_DEBUG_MISC)
1472 			printf("pfsync: bulk update complete\n");
1473 	} else {
1474 		/* look again for more in a bit */
1475 		callout_schedule(&sc->sc_bulk_tmo, 1);
1476 		sc->sc_bulk_send_next = state;
1477 	}
1478 	if (sc->sc_mbuf != NULL)
1479 		pfsync_sendout(sc);
1480 	splx(s);
1481 }
1482 
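/*
 * Callout handler that fires when a requested bulk update has not
 * completed in time: retry the request up to PFSYNC_MAX_BULKTRIES
 * times, then give up and treat the local state table as good enough,
 * re-enabling carp preemption.
 */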
1483 void
1484 pfsync_bulkfail(void *v)
1485 {
1486 	struct pfsync_softc *sc = v;
1487 	int s, error;
1488 
1489 	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
1490 		/* Try again in a bit */
1491 		callout_schedule(&sc->sc_bulkfail_tmo, 5 * hz);
1492 		s = splnet();
1493 		error = pfsync_request_update(NULL, NULL);
1494 		if (error == ENOMEM) {
1495 			if (pf_status.debug >= PF_DEBUG_MISC)
1496 				printf("pfsync: cannot allocate mbufs for "
1497 				    "bulk update\n");
1498 		} else
1499 			pfsync_sendout(sc);
1500 		splx(s);
1501 	} else {
1502 		/* Pretend like the transfer was ok */
1503 		sc->sc_ureq_sent = 0;
1504 		sc->sc_bulk_tries = 0;
1505 #if NCARP > 0
1506 		if (!pfsync_sync_ok)
1507 			carp_suppress_preempt--;
1508 #endif
1509 		pfsync_sync_ok = 1;
1510 		if (pf_status.debug >= PF_DEBUG_MISC)
1511 			printf("pfsync: failed to receive "
1512 			    "bulk update status\n");
1513 		callout_stop(&sc->sc_bulkfail_tmo);
1514 	}
1515 }
1516 
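/*
 * Flush the packet under construction: stop the flush timer, hand the
 * uncompressed packet to bpf listeners on the pfsync interface, then
 * transmit the compressed wire packet if one was built, or the
 * uncompressed packet otherwise.
 */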
1517 /* This must be called in splnet() */
1518 int
1519 pfsync_sendout(struct pfsync_softc *sc)
1520 {
1521 	struct ifnet *ifp = &sc->sc_if;
1522 	struct mbuf *m;
1523 
1524 	callout_stop(&sc->sc_tmo);
1525 
1526 	if (sc->sc_mbuf == NULL)
1527 		return (0);
1528 	m = sc->sc_mbuf;
1529 	sc->sc_mbuf = NULL;
1530 	sc->sc_statep.s = NULL;
1531 
1532 	bpf_mtap(ifp, m);
1533 
1534 	if (sc->sc_mbuf_net) {
1535 		m_freem(m);
1536 		m = sc->sc_mbuf_net;
1537 		sc->sc_mbuf_net = NULL;
1538 		sc->sc_statep_net.s = NULL;
1539 	}
1540 
1541 	return pfsync_sendout_mbuf(sc, m);
1542 }
1543 
1544 int
1545 pfsync_tdb_sendout(struct pfsync_softc *sc)
1546 {
1547 	struct ifnet *ifp = &sc->sc_if;
1548 	struct mbuf *m;
1549 
1550 	callout_stop(&sc->sc_tdb_tmo);
1551 
1552 	if (sc->sc_mbuf_tdb == NULL)
1553 		return (0);
1554 	m = sc->sc_mbuf_tdb;
1555 	sc->sc_mbuf_tdb = NULL;
1556 	sc->sc_statep_tdb.t = NULL;
1557 
1558 	bpf_mtap(ifp, m);
1559 
1560 	return pfsync_sendout_mbuf(sc, m);
1561 }
1562 
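/*
 * Prepend an IP header (TTL 255, DF, IPPROTO_PFSYNC) and send the
 * packet to the configured sync peer or the pfsync multicast group via
 * ip_output(); if no sync interface or peer is configured the packet is
 * simply dropped.
 */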
1563 int
1564 pfsync_sendout_mbuf(struct pfsync_softc *sc, struct mbuf *m)
1565 {
1566 	struct sockaddr sa;
1567 	struct ip *ip;
1568 
1569 	if (sc->sc_sync_ifp ||
1570 	    sc->sc_sync_peer.s_addr != INADDR_PFSYNC_GROUP) {
1571 		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
1572 		if (m == NULL) {
1573 			PFSYNC_STATINC(PFSYNC_STAT_ONOMEM);
1574 			return (0);
1575 		}
1576 		ip = mtod(m, struct ip *);
1577 		ip->ip_v = IPVERSION;
1578 		ip->ip_hl = sizeof(*ip) >> 2;
1579 		ip->ip_tos = IPTOS_LOWDELAY;
1580 		ip->ip_len = htons(m->m_pkthdr.len);
1581 		ip->ip_id = htons(ip_randomid(ip_ids, 0));
1582 		ip->ip_off = htons(IP_DF);
1583 		ip->ip_ttl = PFSYNC_DFLTTL;
1584 		ip->ip_p = IPPROTO_PFSYNC;
1585 		ip->ip_sum = 0;
1586 
1587 		memset(&sa, 0, sizeof(sa));
1588 		ip->ip_src.s_addr = INADDR_ANY;
1589 
1590 		if (sc->sc_sendaddr.s_addr == INADDR_PFSYNC_GROUP)
1591 			m->m_flags |= M_MCAST;
1592 		ip->ip_dst = sc->sc_sendaddr;
1593 		sc->sc_sendaddr.s_addr = sc->sc_sync_peer.s_addr;
1594 
1595 		PFSYNC_STATINC(PFSYNC_STAT_OPACKETS);
1596 
1597 		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)) {
1598 			PFSYNC_STATINC(PFSYNC_STAT_OERRORS);
1599 		}
1600 	} else
1601 		m_freem(m);
1602 
1603 	return (0);
1604 }
1605 
1606 static int
1607 sysctl_net_inet_pfsync_stats(SYSCTLFN_ARGS)
1608 {
1609 
1610 	return (NETSTAT_SYSCTL(pfsyncstat_percpu, PFSYNC_NSTATS));
1611 }
1612 
1613 SYSCTL_SETUP(sysctl_net_inet_pfsync_setup, "sysctl net.inet.pfsync subtree setup")
1614 {
1615 
1616 	sysctl_createv(clog, 0, NULL, NULL,
1617 		       CTLFLAG_PERMANENT,
1618 		       CTLTYPE_NODE, "net", NULL,
1619 		       NULL, 0, NULL, 0,
1620 		       CTL_NET, CTL_EOL);
1621 	sysctl_createv(clog, 0, NULL, NULL,
1622 		       CTLFLAG_PERMANENT,
1623 		       CTLTYPE_NODE, "inet", NULL,
1624 		       NULL, 0, NULL, 0,
1625 		       CTL_NET, PF_INET, CTL_EOL);
1626 	sysctl_createv(clog, 0, NULL, NULL,
1627 		       CTLFLAG_PERMANENT,
1628 		       CTLTYPE_NODE, "pfsync",
1629 		       SYSCTL_DESCR("pfsync related settings"),
1630 		       NULL, 0, NULL, 0,
1631 		       CTL_NET, PF_INET, IPPROTO_PFSYNC, CTL_EOL);
1632 	sysctl_createv(clog, 0, NULL, NULL,
1633 		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
1634 		       CTLTYPE_STRUCT, "stats",
1635 		       SYSCTL_DESCR("pfsync statistics"),
1636 		       sysctl_net_inet_pfsync_stats, 0, NULL, 0,
1637 		       CTL_NET, PF_INET, IPPROTO_PFSYNC,
1638 		       CTL_CREATE, CTL_EOL);
1639 }
1640