/*
 * Copyright (c) 2002 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $OpenBSD: if_pfsync.c,v 1.98 2008/06/29 08:42:15 mcbride Exp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_carp.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/msgport2.h>
#include <sys/sockio.h>
#include <sys/thread2.h>

#include <machine/inttypes.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/route.h>
#include <net/bpf.h>
#include <net/netisr2.h>
#include <net/netmsg2.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip_carp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>

#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#endif

#ifdef INET6
#include <netinet6/nd6.h>
#endif /* INET6 */

#include <net/pf/pfvar.h>
#include <net/pf/if_pfsync.h>

#define PFSYNCNAME	"pfsync"

#define PFSYNC_MINMTU	\
	(sizeof(struct pfsync_header) + sizeof(struct pf_state))

#ifdef PFSYNCDEBUG
#define DPRINTF(x)	do { if (pfsyncdebug) kprintf x ; } while (0)
int pfsyncdebug;
#else
#define DPRINTF(x)
#endif

struct pfsync_softc	*pfsyncif = NULL;
struct pfsyncstats	 pfsyncstats;

void	pfsyncattach(int);
static int	pfsync_clone_destroy(struct ifnet *);
static int	pfsync_clone_create(struct if_clone *, int, caddr_t);
void	pfsync_setmtu(struct pfsync_softc *, int);
int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
	    struct pf_state_peer *);
int	pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *);
int	pfsyncioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
void	pfsyncstart(struct ifnet *, struct ifaltq_subque *);

struct mbuf	*pfsync_get_mbuf(struct pfsync_softc *, u_int8_t, void **);
int	pfsync_request_update(struct pfsync_state_upd *, struct in_addr *);
int	pfsync_sendout(struct pfsync_softc *);
int	pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
void	pfsync_timeout(void *);
void	pfsync_send_bus(struct pfsync_softc *, u_int8_t);
void	pfsync_bulk_update(void *);
void	pfsync_bulkfail(void *);

static struct in_multi *pfsync_in_addmulti(struct ifnet *);
static void	pfsync_in_delmulti(struct in_multi *);

static MALLOC_DEFINE(M_PFSYNC, PFSYNCNAME, "Packet Filter State Sync. Interface");
static LIST_HEAD(pfsync_list, pfsync_softc) pfsync_list;

int	pfsync_sync_ok;

struct if_clone pfsync_cloner =
	IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create,
	    pfsync_clone_destroy, 1, 1);

void
pfsyncattach(int npfsync)
{
	if_clone_attach(&pfsync_cloner);
}

static int
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param __unused)
{
	struct pfsync_softc *sc;
	struct ifnet *ifp;

	lwkt_gettoken(&pf_token);

	sc = kmalloc(sizeof(*sc), M_PFSYNC, M_WAITOK | M_ZERO);
	pfsync_sync_ok = 1;
	sc->sc_mbuf = NULL;
	sc->sc_mbuf_net = NULL;
	sc->sc_mbuf_tdb = NULL;
	sc->sc_statep.s = NULL;
	sc->sc_statep_net.s = NULL;
	sc->sc_statep_tdb.t = NULL;
	sc->sc_maxupdates = 128;
	sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
	sc->sc_sendaddr.s_addr = htonl(INADDR_PFSYNC_GROUP);
	sc->sc_ureq_received = 0;
	sc->sc_ureq_sent = 0;
	sc->sc_bulk_send_next = NULL;
	sc->sc_bulk_terminator = NULL;
	sc->sc_bulk_send_cpu = 0;
	sc->sc_bulk_terminator_cpu = 0;
	sc->sc_imo.imo_max_memberships = IP_MAX_MEMBERSHIPS;
	lwkt_reltoken(&pf_token);

	ifp = &sc->sc_if;
	ksnprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
	if_initname(ifp, ifc->ifc_name, unit);
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_start = pfsyncstart;
	ifp->if_type = IFT_PFSYNC;
	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
	ifp->if_hdrlen = PFSYNC_HDRLEN;
	ifp->if_baudrate = IF_Mbps(100);
	ifp->if_softc = sc;
	pfsync_setmtu(sc, MCLBYTES);
	callout_init(&sc->sc_tmo);
	/* callout_init(&sc->sc_tdb_tmo); XXX we don't support tdb (yet) */
	callout_init(&sc->sc_bulk_tmo);
	callout_init(&sc->sc_bulkfail_tmo);
	if_attach(ifp, NULL);

	LIST_INSERT_HEAD(&pfsync_list, sc, sc_next);

#if NCARP > 0
	if_addgroup(ifp, "carp");
#endif

#if NBPFILTER > 0
	bpfattach(&sc->sc_if, DLT_PFSYNC, PFSYNC_HDRLEN);
#endif

	lwkt_gettoken(&pf_token);
	lwkt_reltoken(&pf_token);

	return (0);
}

static int
pfsync_clone_destroy(struct ifnet *ifp)
{
	struct pfsync_softc *sc = ifp->if_softc;

	lwkt_gettoken(&pf_token);
	lwkt_reltoken(&pf_token);

	callout_stop(&sc->sc_tmo);
	/* callout_stop(&sc->sc_tdb_tmo); XXX we don't support tdb (yet) */
	callout_stop(&sc->sc_bulk_tmo);
	callout_stop(&sc->sc_bulkfail_tmo);
#if NCARP > 0
	if (!pfsync_sync_ok)
		carp_group_demote_adj(&sc->sc_if, -1);
#endif
#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	if_detach(ifp);

	lwkt_gettoken(&pf_token);
	LIST_REMOVE(sc, sc_next);
	kfree(sc, M_PFSYNC);
	lwkt_reltoken(&pf_token);

	return 0;
}
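/*
 * Note: pfsync never transmits through its own send queue; output is
 * generated directly via ip_output() in pfsync_sendout_mbuf(), so the
 * start routine below only has to drain whatever lands on the queue.
 */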
/*
 * Start output on the pfsync interface.
 */
void
pfsyncstart(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ifsq_purge(ifsq);
}

int
pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
{
	if (s->scrub.scrub_flag && d->scrub == NULL) {
		d->scrub = kmalloc(sizeof(struct pf_state_scrub), M_PFSYNC,
		    M_NOWAIT | M_ZERO);
		if (d->scrub == NULL)
			return (ENOMEM);
	}

	return (0);
}

void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_second - st->creation);
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_second)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_second);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	bcopy(&st->id, &sp->id, sizeof(sp->id));
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
}

int
pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
{
	struct pf_state *st = NULL;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_rule *r = NULL;
	struct pfi_kif *kif;
	int pool_flags;
	int error;

	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
		kprintf("pfsync_insert_net_state: invalid creator id:"
		    " %08x\n", ntohl(sp->creatorid));
		return (EINVAL);
	}

	if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
		if (pf_status.debug >= PF_DEBUG_MISC)
			kprintf("pfsync_insert_net_state: "
			    "unknown interface: %s\n", sp->ifname);
		if (flags & PFSYNC_SI_IOCTL)
			return (EINVAL);
		return (0);	/* skip this state */
	}

	/*
	 * If the ruleset checksums match or the state is coming from the
	 * ioctl, it's safe to associate the state with the rule of that
	 * number.
	 */
	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
		r = pf_main_ruleset.rules[
		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
	else
		r = &pf_default_rule;

	if (r->max_states && r->states_cur >= r->max_states)
		goto cleanup;

	if (flags & PFSYNC_SI_IOCTL)
		pool_flags = M_WAITOK | M_NULLOK | M_ZERO;
	else
		pool_flags = M_WAITOK | M_ZERO;

	if ((st = kmalloc(sizeof(struct pf_state), M_PFSYNC,
	    pool_flags)) == NULL)
		goto cleanup;

	if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
		goto cleanup;

	if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
	    &sp->key[PF_SK_STACK].addr[0], sp->af) ||
	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
	    &sp->key[PF_SK_STACK].addr[1], sp->af) ||
	    sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
	    sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) {
		if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
			goto cleanup;
	} else
		sks = skw;

	/* allocate memory for scrub info */
	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
		goto cleanup;

	/* copy to state key(s) */
	skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
	skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
	skw->port[0] = sp->key[PF_SK_WIRE].port[0];
	skw->port[1] = sp->key[PF_SK_WIRE].port[1];
	skw->proto = sp->proto;
	skw->af = sp->af;
	if (sks != skw) {
		sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
		sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
		sks->port[0] = sp->key[PF_SK_STACK].port[0];
		sks->port[1] = sp->key[PF_SK_STACK].port[1];
		sks->proto = sp->proto;
		sks->af = sp->af;
	}

	/* copy to state */
	bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
	st->creation = time_second - ntohl(sp->creation);
	st->expire = time_second;
	if (sp->expire) {
		/* XXX No adaptive scaling. */
		st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire);
	}

	st->direction = sp->direction;
	st->log = sp->log;
	st->timeout = sp->timeout;
	st->state_flags = sp->state_flags;
	if (!(flags & PFSYNC_SI_IOCTL))
		st->sync_flags = PFSTATE_FROMSYNC;

	bcopy(sp->id, &st->id, sizeof(st->id));
	st->creatorid = sp->creatorid;
	pf_state_peer_ntoh(&sp->src, &st->src);
	pf_state_peer_ntoh(&sp->dst, &st->dst);

	st->rule.ptr = r;
	st->nat_rule.ptr = NULL;
	st->anchor.ptr = NULL;
	st->rt_kif = NULL;

	st->pfsync_time = 0;

	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
	r->states_cur++;
	r->states_tot++;

	if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
		r->states_cur--;
		goto cleanup_state;
	}

	return (0);

cleanup:
	error = ENOMEM;
	if (skw == sks)
		sks = NULL;
	if (skw != NULL)
		kfree(skw, M_PFSYNC);
	if (sks != NULL)
		kfree(sks, M_PFSYNC);

cleanup_state:	/* pf_state_insert frees the state keys */
	if (st) {
		if (st->dst.scrub)
			kfree(st->dst.scrub, M_PFSYNC);
		if (st->src.scrub)
			kfree(st->src.scrub, M_PFSYNC);
		kfree(st, M_PFSYNC);
	}
	return (error);
}
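/*
 * Parse an incoming pfsync packet.  After sanity checks (receiving
 * interface, TTL of 255, protocol version, action code) each record in
 * the payload is dispatched on the header's action: state inserts,
 * updates (full and compressed), deletes, clears, bulk update requests
 * and bulk update status messages.
 */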
void
pfsync_input(struct mbuf *m, ...)
{
	struct ip *ip = mtod(m, struct ip *);
	struct pfsync_header *ph;
	struct pfsync_softc *sc = pfsyncif;
	struct pf_state *st;
	struct pf_state_key *sk;
	struct pf_state_item *si;
	struct pf_state_cmp id_key;
	struct pfsync_state *sp;
	struct pfsync_state_upd *up;
	struct pfsync_state_del *dp;
	struct pfsync_state_clr *cp;
	struct pfsync_state_upd_req *rup;
	struct pfsync_state_bus *bus;
#ifdef IPSEC
	struct pfsync_tdb *pt;
#endif
	struct in_addr src;
	struct mbuf *mp;
	int iplen, action, error, i, count, offp, sfail, stale = 0;
	u_int8_t flags = 0;

	/* This function is not yet called from anywhere. */
	/* Still, for safety we assume that pf_token must be held. */
	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	pfsyncstats.pfsyncs_ipackets++;

	/* verify that we have a sync interface configured */
	if (!sc || !sc->sc_sync_ifp || !pf_status.running)
		goto done;

	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_ifp != m->m_pkthdr.rcvif) {
		pfsyncstats.pfsyncs_badif++;
		goto done;
	}

	/* verify that the IP TTL is 255. */
	if (ip->ip_ttl != PFSYNC_DFLTTL) {
		pfsyncstats.pfsyncs_badttl++;
		goto done;
	}

	iplen = ip->ip_hl << 2;

	if (m->m_pkthdr.len < iplen + sizeof(*ph)) {
		pfsyncstats.pfsyncs_hdrops++;
		goto done;
	}

	if (iplen + sizeof(*ph) > m->m_len) {
		if ((m = m_pullup(m, iplen + sizeof(*ph))) == NULL) {
			pfsyncstats.pfsyncs_hdrops++;
			goto done;
		}
		ip = mtod(m, struct ip *);
	}
	ph = (struct pfsync_header *)((char *)ip + iplen);

	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		pfsyncstats.pfsyncs_badver++;
		goto done;
	}

	action = ph->action;
	count = ph->count;

	/* make sure it's a valid action code */
	if (action >= PFSYNC_ACT_MAX) {
		pfsyncstats.pfsyncs_badact++;
		goto done;
	}

	/* Cheaper to grab this now than having to mess with mbufs later */
	src = ip->ip_src;

	if (!bcmp(&ph->pf_chksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
		flags |= PFSYNC_SI_CKSUM;

	switch (action) {
	case PFSYNC_ACT_CLR: {
		struct pf_state *nexts;
		struct pf_state_key *nextsk;
		struct pfi_kif *kif;
		globaldata_t save_gd = mycpu;
		int nn;
		u_int32_t creatorid;

		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    sizeof(*cp), &offp)) == NULL) {
			pfsyncstats.pfsyncs_badlen++;
			return;
		}
		cp = (struct pfsync_state_clr *)(mp->m_data + offp);
		creatorid = cp->creatorid;

		crit_enter();
		if (cp->ifname[0] == '\0') {
			lwkt_gettoken(&pf_token);
			for (nn = 0; nn < ncpus; ++nn) {
				lwkt_setcpu_self(globaldata_find(nn));
				for (st = RB_MIN(pf_state_tree_id,
				    &tree_id[nn]); st; st = nexts) {
					nexts = RB_NEXT(pf_state_tree_id,
					    &tree_id[nn], st);
					if (st->creatorid == creatorid) {
						st->sync_flags |=
						    PFSTATE_FROMSYNC;
						pf_unlink_state(st);
					}
				}
			}
			lwkt_setcpu_self(save_gd);
			lwkt_reltoken(&pf_token);
		} else {
			if ((kif = pfi_kif_get(cp->ifname)) == NULL) {
				crit_exit();
				return;
			}
			/* XXX correct? */
			lwkt_gettoken(&pf_token);
			for (nn = 0; nn < ncpus; ++nn) {
				lwkt_setcpu_self(globaldata_find(nn));
				for (sk = RB_MIN(pf_state_tree,
				    &pf_statetbl[nn]); sk; sk = nextsk) {
					nextsk = RB_NEXT(pf_state_tree,
					    &pf_statetbl[nn], sk);
					TAILQ_FOREACH(si, &sk->states, entry) {
						if (si->s->creatorid ==
						    creatorid) {
							si->s->sync_flags |=
							    PFSTATE_FROMSYNC;
							pf_unlink_state(si->s);
						}
					}
				}
			}
			lwkt_setcpu_self(save_gd);
			lwkt_reltoken(&pf_token);
		}
		crit_exit();

		break;
	}
	case PFSYNC_ACT_INS:
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*sp), &offp)) == NULL) {
			pfsyncstats.pfsyncs_badlen++;
			return;
		}

		crit_enter();
		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
		    i < count; i++, sp++) {
			/* check for invalid values */
			if (sp->timeout >= PFTM_MAX ||
			    sp->src.state > PF_TCPS_PROXY_DST ||
			    sp->dst.state > PF_TCPS_PROXY_DST ||
			    sp->direction > PF_OUT ||
			    (sp->af != AF_INET && sp->af != AF_INET6)) {
				if (pf_status.debug >= PF_DEBUG_MISC)
					kprintf("pfsync_insert: PFSYNC_ACT_INS: "
					    "invalid value\n");
				pfsyncstats.pfsyncs_badval++;
				continue;
			}

			if ((error = pfsync_state_import(sp, flags))) {
				if (error == ENOMEM) {
					crit_exit();
					goto done;
				}
			}
		}
		crit_exit();
		break;
	case PFSYNC_ACT_UPD:
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*sp), &offp)) == NULL) {
			pfsyncstats.pfsyncs_badlen++;
			return;
		}

		crit_enter();
		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
		    i < count; i++, sp++) {
			int flags = PFSYNC_FLAG_STALE;

			/* check for invalid values */
			if (sp->timeout >= PFTM_MAX ||
			    sp->src.state > PF_TCPS_PROXY_DST ||
			    sp->dst.state > PF_TCPS_PROXY_DST) {
				if (pf_status.debug >= PF_DEBUG_MISC)
					kprintf("pfsync_insert: PFSYNC_ACT_UPD: "
					    "invalid value\n");
				pfsyncstats.pfsyncs_badval++;
				continue;
			}

			bcopy(sp->id, &id_key.id, sizeof(id_key.id));
			id_key.creatorid = sp->creatorid;

			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				/* insert the update */
				if (pfsync_state_import(sp, flags))
					pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			sk = st->key[PF_SK_WIRE];	/* XXX right one? */
			sfail = 0;
			if (sk->proto == IPPROTO_TCP) {
				/*
				 * The state should never go backwards except
				 * for syn-proxy states.  Neither should the
				 * sequence window slide backwards.
				 */
				if (st->src.state > sp->src.state &&
				    (st->src.state < PF_TCPS_PROXY_SRC ||
				    sp->src.state >= PF_TCPS_PROXY_SRC))
					sfail = 1;
				else if (SEQ_GT(st->src.seqlo,
				    ntohl(sp->src.seqlo)))
					sfail = 3;
				else if (st->dst.state > sp->dst.state) {
					/*
					 * There might still be useful
					 * information about the src state
					 * here, so import that part of the
					 * update, then "fail" so we send the
					 * updated state back to the peer who
					 * is missing what we know.
					 */
					pf_state_peer_ntoh(&sp->src, &st->src);
					/* XXX do anything with timeouts? */
					sfail = 7;
					flags = 0;
				} else if (st->dst.state >= TCPS_SYN_SENT &&
				    SEQ_GT(st->dst.seqlo, ntohl(sp->dst.seqlo)))
					sfail = 4;
			} else {
				/*
				 * Non-TCP protocol state machines always go
				 * forward.
				 */
				if (st->src.state > sp->src.state)
					sfail = 5;
				else if (st->dst.state > sp->dst.state)
					sfail = 6;
			}
			if (sfail) {
				if (pf_status.debug >= PF_DEBUG_MISC)
					kprintf("pfsync: %s stale update "
					    "(%d) id: %016jx "
					    "creatorid: %08x\n",
					    (sfail < 7 ? "ignoring"
					    : "partial"), sfail,
					    (uintmax_t)be64toh(st->id),
					    ntohl(st->creatorid));
				pfsyncstats.pfsyncs_stale++;

				if (!(sp->sync_flags & PFSTATE_STALE)) {
					/* we have a better state, send it */
					if (sc->sc_mbuf != NULL && !stale)
						pfsync_sendout(sc);
					stale++;
					if (!st->sync_flags)
						pfsync_pack_state(
						    PFSYNC_ACT_UPD, st, flags);
				}
				continue;
			}
			pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
			pf_state_peer_ntoh(&sp->src, &st->src);
			pf_state_peer_ntoh(&sp->dst, &st->dst);
			st->expire = ntohl(sp->expire) + time_second;
			st->timeout = sp->timeout;
		}
		if (stale && sc->sc_mbuf != NULL)
			pfsync_sendout(sc);
		crit_exit();
		break;
	/*
	 * It's not strictly necessary for us to support the "uncompressed"
	 * delete action, but it's relatively simple and maintains consistency.
	 */
	case PFSYNC_ACT_DEL:
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*sp), &offp)) == NULL) {
			pfsyncstats.pfsyncs_badlen++;
			return;
		}

		crit_enter();
		for (i = 0, sp = (struct pfsync_state *)(mp->m_data + offp);
		    i < count; i++, sp++) {
			bcopy(sp->id, &id_key.id, sizeof(id_key.id));
			id_key.creatorid = sp->creatorid;

			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			st->sync_flags |= PFSTATE_FROMSYNC;
			pf_unlink_state(st);
		}
		crit_exit();
		break;
	case PFSYNC_ACT_UPD_C: {
		int update_requested = 0;

		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*up), &offp)) == NULL) {
			pfsyncstats.pfsyncs_badlen++;
			return;
		}

		crit_enter();
		for (i = 0, up = (struct pfsync_state_upd *)(mp->m_data + offp);
		    i < count; i++, up++) {
			/* check for invalid values */
			if (up->timeout >= PFTM_MAX ||
			    up->src.state > PF_TCPS_PROXY_DST ||
			    up->dst.state > PF_TCPS_PROXY_DST) {
				if (pf_status.debug >= PF_DEBUG_MISC)
					kprintf("pfsync_insert: "
					    "PFSYNC_ACT_UPD_C: "
					    "invalid value\n");
				pfsyncstats.pfsyncs_badval++;
				continue;
			}

			bcopy(up->id, &id_key.id, sizeof(id_key.id));
			id_key.creatorid = up->creatorid;

			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				/* We don't have this state.  Ask for it. */
				error = pfsync_request_update(up, &src);
				if (error == ENOMEM) {
					crit_exit();
					goto done;
				}
				update_requested = 1;
				pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			sk = st->key[PF_SK_WIRE];	/* XXX right one? */
			sfail = 0;
			if (sk->proto == IPPROTO_TCP) {
				/*
				 * The state should never go backwards except
				 * for syn-proxy states.  Neither should the
				 * sequence window slide backwards.
				 */
				if (st->src.state > up->src.state &&
				    (st->src.state < PF_TCPS_PROXY_SRC ||
				    up->src.state >= PF_TCPS_PROXY_SRC))
					sfail = 1;
				else if (st->dst.state > up->dst.state)
					sfail = 2;
				else if (SEQ_GT(st->src.seqlo,
				    ntohl(up->src.seqlo)))
					sfail = 3;
				else if (st->dst.state >= TCPS_SYN_SENT &&
				    SEQ_GT(st->dst.seqlo, ntohl(up->dst.seqlo)))
					sfail = 4;
			} else {
				/*
				 * Non-TCP protocol state machines always go
				 * forward.
				 */
				if (st->src.state > up->src.state)
					sfail = 5;
				else if (st->dst.state > up->dst.state)
					sfail = 6;
			}
			if (sfail) {
				if (pf_status.debug >= PF_DEBUG_MISC)
					kprintf("pfsync: ignoring stale update "
					    "(%d) id: %016" PRIx64 " "
					    "creatorid: %08x\n", sfail,
					    be64toh(st->id),
					    ntohl(st->creatorid));
				pfsyncstats.pfsyncs_stale++;

				/* we have a better state, send it out */
				if ((!stale || update_requested) &&
				    sc->sc_mbuf != NULL) {
					pfsync_sendout(sc);
					update_requested = 0;
				}
				stale++;
				if (!st->sync_flags)
					pfsync_pack_state(PFSYNC_ACT_UPD, st,
					    PFSYNC_FLAG_STALE);
				continue;
			}
			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
			pf_state_peer_ntoh(&up->src, &st->src);
			pf_state_peer_ntoh(&up->dst, &st->dst);
			st->expire = ntohl(up->expire) + time_second;
			st->timeout = up->timeout;
		}
		if ((update_requested || stale) && sc->sc_mbuf)
			pfsync_sendout(sc);
		crit_exit();
		break;
	}
	case PFSYNC_ACT_DEL_C:
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*dp), &offp)) == NULL) {
			pfsyncstats.pfsyncs_badlen++;
			return;
		}

		crit_enter();
		for (i = 0, dp = (struct pfsync_state_del *)(mp->m_data + offp);
		    i < count; i++, dp++) {
			bcopy(dp->id, &id_key.id, sizeof(id_key.id));
			id_key.creatorid = dp->creatorid;

			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			st->sync_flags |= PFSTATE_FROMSYNC;
			pf_unlink_state(st);
		}
		crit_exit();
		break;
	case PFSYNC_ACT_INS_F:
	case PFSYNC_ACT_DEL_F:
		/* not implemented */
		break;
	case PFSYNC_ACT_UREQ:
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*rup), &offp)) == NULL) {
			pfsyncstats.pfsyncs_badlen++;
			return;
		}

		crit_enter();
		if (sc->sc_mbuf != NULL)
			pfsync_sendout(sc);
		for (i = 0,
		    rup = (struct pfsync_state_upd_req *)(mp->m_data + offp);
		    i < count; i++, rup++) {
			bcopy(rup->id, &id_key.id, sizeof(id_key.id));
			id_key.creatorid = rup->creatorid;

			if (id_key.id == 0 && id_key.creatorid == 0) {
				sc->sc_ureq_received = mycpu->gd_time_seconds;
				if (sc->sc_bulk_send_next == NULL) {
					if (++sc->sc_bulk_send_cpu >= ncpus)
						sc->sc_bulk_send_cpu = 0;
					sc->sc_bulk_send_next =
					    TAILQ_FIRST(&state_list[sc->sc_bulk_send_cpu]);
				}
				sc->sc_bulk_terminator =
				    sc->sc_bulk_send_next;
				sc->sc_bulk_terminator_cpu =
				    sc->sc_bulk_send_cpu;
				if (pf_status.debug >= PF_DEBUG_MISC)
					kprintf("pfsync: received "
					    "bulk update request\n");
				pfsync_send_bus(sc, PFSYNC_BUS_START);
				lwkt_reltoken(&pf_token);
				/* schedule the actual bulk transmission */
				callout_reset(&sc->sc_bulk_tmo, 1 * hz,
				    pfsync_bulk_update,
				    LIST_FIRST(&pfsync_list));
				lwkt_gettoken(&pf_token);
			} else {
				st = pf_find_state_byid(&id_key);
				if (st == NULL) {
					pfsyncstats.pfsyncs_badstate++;
					continue;
				}
				if (!st->sync_flags)
					pfsync_pack_state(PFSYNC_ACT_UPD,
					    st, 0);
			}
		}
		if (sc->sc_mbuf != NULL)
			pfsync_sendout(sc);
		crit_exit();
		break;
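	/*
	 * Bulk update status (BUS) messages bracket a peer's reply to a
	 * bulk update request: BUS_START arms the failure timeout, and a
	 * BUS_END whose timestamp postdates our request marks the local
	 * state table as in sync again.
	 */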
	case PFSYNC_ACT_BUS:
		/* If we're not waiting for a bulk update, who cares. */
		if (sc->sc_ureq_sent == 0)
			break;

		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    sizeof(*bus), &offp)) == NULL) {
			pfsyncstats.pfsyncs_badlen++;
			return;
		}
		bus = (struct pfsync_state_bus *)(mp->m_data + offp);
		switch (bus->status) {
		case PFSYNC_BUS_START:
			lwkt_reltoken(&pf_token);
			callout_reset(&sc->sc_bulkfail_tmo,
			    pf_pool_limits[PF_LIMIT_STATES].limit /
			    (PFSYNC_BULKPACKETS * sc->sc_maxcount),
			    pfsync_bulkfail, LIST_FIRST(&pfsync_list));
			lwkt_gettoken(&pf_token);
			if (pf_status.debug >= PF_DEBUG_MISC)
				kprintf("pfsync: received bulk "
				    "update start\n");
			break;
		case PFSYNC_BUS_END:
			if (mycpu->gd_time_seconds - ntohl(bus->endtime) >=
			    sc->sc_ureq_sent) {
				/* that's it, we're happy */
				sc->sc_ureq_sent = 0;
				sc->sc_bulk_tries = 0;
				lwkt_reltoken(&pf_token);
				callout_stop(&sc->sc_bulkfail_tmo);
				lwkt_gettoken(&pf_token);
#if NCARP > 0
				if (!pfsync_sync_ok) {
					lwkt_reltoken(&pf_token);
					carp_group_demote_adj(&sc->sc_if, -1);
					lwkt_gettoken(&pf_token);
				}
#endif
				pfsync_sync_ok = 1;
				if (pf_status.debug >= PF_DEBUG_MISC)
					kprintf("pfsync: received valid "
					    "bulk update end\n");
			} else {
				if (pf_status.debug >= PF_DEBUG_MISC)
					kprintf("pfsync: received invalid "
					    "bulk update end: bad timestamp\n");
			}
			break;
		}
		break;
#ifdef IPSEC
	case PFSYNC_ACT_TDB_UPD:
		if ((mp = m_pulldown(m, iplen + sizeof(*ph),
		    count * sizeof(*pt), &offp)) == NULL) {
			pfsyncstats.pfsyncs_badlen++;
			return;
		}
		crit_enter();
		for (i = 0, pt = (struct pfsync_tdb *)(mp->m_data + offp);
		    i < count; i++, pt++)
			pfsync_update_net_tdb(pt);
		crit_exit();
		break;
#endif
	}

done:
	if (m)
		m_freem(m);
}

int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	m_freem(m);
	return (0);
}
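/*
 * Interface ioctls.  Besides the usual interface configuration requests,
 * SIOCGETPFSYNC/SIOCSETPFSYNC exchange a struct pfsyncreq with userland
 * (syncdev, syncpeer, maxupdates); setting a sync interface joins the
 * pfsync multicast group on it and triggers a bulk update request.
 */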
/* ARGSUSED */
int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;
	struct ifnet *sifp;
	int error;

	lwkt_gettoken(&pf_token);

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < PFSYNC_MINMTU) {
			lwkt_reltoken(&pf_token);
			return (EINVAL);
		}
		if (ifr->ifr_mtu > MCLBYTES)
			ifr->ifr_mtu = MCLBYTES;
		crit_enter();
		if (ifr->ifr_mtu < ifp->if_mtu)
			pfsync_sendout(sc);
		pfsync_setmtu(sc, ifr->ifr_mtu);
		crit_exit();
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		if (sc->sc_sync_ifp)
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_ifp->if_xname, IFNAMSIZ);
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		lwkt_reltoken(&pf_token);
		if ((error = copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr))))
			return (error);
		lwkt_gettoken(&pf_token);
		break;
	case SIOCSETPFSYNC:
		if ((error = priv_check_cred(cr, PRIV_ROOT,
		    NULL_CRED_OKAY)) != 0) {
			lwkt_reltoken(&pf_token);
			return (error);
		}
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr)))) {
			lwkt_reltoken(&pf_token);
			return (error);
		}

		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
		else
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		if (pfsyncr.pfsyncr_maxupdates > 255) {
			lwkt_reltoken(&pf_token);
			return (EINVAL);
		}
		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;

		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
			sc->sc_sync_ifp = NULL;
			if (sc->sc_mbuf_net != NULL) {
				/* Don't keep stale pfsync packets around. */
				crit_enter();
				m_freem(sc->sc_mbuf_net);
				sc->sc_mbuf_net = NULL;
				sc->sc_statep_net.s = NULL;
				crit_exit();
			}
			if (imo->imo_num_memberships > 0) {
				pfsync_in_delmulti(
				    imo->imo_membership[--imo->imo_num_memberships]);
				imo->imo_multicast_ifp = NULL;
			}
			break;
		}

		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL) {
			lwkt_reltoken(&pf_token);
			return (EINVAL);
		}

		crit_enter();
		if (sifp->if_mtu < sc->sc_if.if_mtu ||
		    (sc->sc_sync_ifp != NULL &&
		    sifp->if_mtu < sc->sc_sync_ifp->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
			pfsync_sendout(sc);
		sc->sc_sync_ifp = sifp;

		pfsync_setmtu(sc, sc->sc_if.if_mtu);

		if (imo->imo_num_memberships > 0) {
			pfsync_in_delmulti(
			    imo->imo_membership[--imo->imo_num_memberships]);
			imo->imo_multicast_ifp = NULL;
		}

		if (sc->sc_sync_ifp &&
		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
			if (!(sc->sc_sync_ifp->if_flags & IFF_MULTICAST)) {
				sc->sc_sync_ifp = NULL;
				lwkt_reltoken(&pf_token);
				crit_exit();
				return (EADDRNOTAVAIL);
			}

			if ((imo->imo_membership[0] =
			    pfsync_in_addmulti(sc->sc_sync_ifp)) == NULL) {
				sc->sc_sync_ifp = NULL;
				lwkt_reltoken(&pf_token);
				crit_exit();
				return (ENOBUFS);
			}
			imo->imo_num_memberships++;
			imo->imo_multicast_ifp = sc->sc_sync_ifp;
			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
			imo->imo_multicast_loop = 0;
		}

		if (sc->sc_sync_ifp ||
		    sc->sc_sendaddr.s_addr != INADDR_PFSYNC_GROUP) {
			/* Request a full state table update. */
			sc->sc_ureq_sent = mycpu->gd_time_seconds;
#if NCARP > 0
			if (pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, 1);
#endif
			pfsync_sync_ok = 0;
			if (pf_status.debug >= PF_DEBUG_MISC)
				kprintf("pfsync: requesting bulk update\n");
			lwkt_reltoken(&pf_token);
			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
			    pfsync_bulkfail, LIST_FIRST(&pfsync_list));
			lwkt_gettoken(&pf_token);
			error = pfsync_request_update(NULL, NULL);
			if (error == ENOMEM) {
				lwkt_reltoken(&pf_token);
				crit_exit();
				return (ENOMEM);
			}
			pfsync_sendout(sc);
		}
		crit_exit();

		break;

	default:
		lwkt_reltoken(&pf_token);
		return (ENOTTY);
	}

	lwkt_reltoken(&pf_token);
	return (0);
}

void
pfsync_setmtu(struct pfsync_softc *sc, int mtu_req)
{
	int mtu;

	if (sc->sc_sync_ifp && sc->sc_sync_ifp->if_mtu < mtu_req)
		mtu = sc->sc_sync_ifp->if_mtu;
	else
		mtu = mtu_req;

	sc->sc_maxcount = (mtu - sizeof(struct pfsync_header)) /
	    sizeof(struct pfsync_state);
	if (sc->sc_maxcount > 254)
		sc->sc_maxcount = 254;
	sc->sc_if.if_mtu = sizeof(struct pfsync_header) +
	    sc->sc_maxcount * sizeof(struct pfsync_state);
}

struct mbuf *
pfsync_get_mbuf(struct pfsync_softc *sc, u_int8_t action, void **sp)
{
	struct pfsync_header *h;
	struct mbuf *m;
	int len;

	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	MGETHDR(m, M_WAITOK, MT_DATA);
	if (m == NULL) {
		IFNET_STAT_INC(&sc->sc_if, oerrors, 1);
		return (NULL);
	}

	switch (action) {
	case PFSYNC_ACT_CLR:
		len = sizeof(struct pfsync_header) +
		    sizeof(struct pfsync_state_clr);
		break;
	case PFSYNC_ACT_UPD_C:
		len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd)) +
		    sizeof(struct pfsync_header);
		break;
	case PFSYNC_ACT_DEL_C:
		len = (sc->sc_maxcount * sizeof(struct pfsync_state_del)) +
		    sizeof(struct pfsync_header);
		break;
	case PFSYNC_ACT_UREQ:
		len = (sc->sc_maxcount * sizeof(struct pfsync_state_upd_req)) +
		    sizeof(struct pfsync_header);
		break;
	case PFSYNC_ACT_BUS:
		len = sizeof(struct pfsync_header) +
		    sizeof(struct pfsync_state_bus);
		break;
	case PFSYNC_ACT_TDB_UPD:
		len = (sc->sc_maxcount * sizeof(struct pfsync_tdb)) +
		    sizeof(struct pfsync_header);
		break;
	default:
		len = (sc->sc_maxcount * sizeof(struct pfsync_state)) +
		    sizeof(struct pfsync_header);
		break;
	}

	if (len > MHLEN) {
		MCLGET(m, M_WAITOK);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			IFNET_STAT_INC(&sc->sc_if, oerrors, 1);
			return (NULL);
		}
		m->m_data += (MCLBYTES - len) & ~(sizeof(long) - 1);
	} else
		MH_ALIGN(m, len);

	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.len = m->m_len = sizeof(struct pfsync_header);
	h = mtod(m, struct pfsync_header *);
	h->version = PFSYNC_VERSION;
	h->af = 0;
	h->count = 0;
	h->action = action;

	*sp = (void *)((char *)h + PFSYNC_HDRLEN);
	lwkt_reltoken(&pf_token);
	callout_reset(&sc->sc_tmo, hz, pfsync_timeout,
	    LIST_FIRST(&pfsync_list));
	lwkt_gettoken(&pf_token);
	return (m);
}
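/*
 * Queue a state change for transmission.  Records accumulate in sc_mbuf
 * (and, for the compressed UPD_C/DEL_C forms, in sc_mbuf_net) until the
 * packet is full or an update has been packed sc_maxupdates times;
 * pfsync_sendout() then flushes the batch.
 */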
int
pfsync_pack_state(u_int8_t action, struct pf_state *st, int flags)
{
	struct ifnet *ifp = NULL;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_header *h, *h_net;
	struct pfsync_state *sp = NULL;
	struct pfsync_state_upd *up = NULL;
	struct pfsync_state_del *dp = NULL;
	int ret = 0;
	u_int8_t i = 255, newaction = 0;

	if (sc == NULL)
		return (0);
	ifp = &sc->sc_if;

	/*
	 * If a packet falls in the forest and there's nobody around to
	 * hear, does it make a sound?
	 */
	if (ifp->if_bpf == NULL && sc->sc_sync_ifp == NULL &&
	    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
		/* Don't leave any stale pfsync packets hanging around. */
		if (sc->sc_mbuf != NULL) {
			m_freem(sc->sc_mbuf);
			sc->sc_mbuf = NULL;
			sc->sc_statep.s = NULL;
		}
		return (0);
	}

	if (action >= PFSYNC_ACT_MAX)
		return (EINVAL);

	crit_enter();
	if (sc->sc_mbuf == NULL) {
		if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
		    (void *)&sc->sc_statep.s)) == NULL) {
			crit_exit();
			return (ENOMEM);
		}
		h = mtod(sc->sc_mbuf, struct pfsync_header *);
	} else {
		h = mtod(sc->sc_mbuf, struct pfsync_header *);
		if (h->action != action) {
			pfsync_sendout(sc);
			if ((sc->sc_mbuf = pfsync_get_mbuf(sc, action,
			    (void *)&sc->sc_statep.s)) == NULL) {
				crit_exit();
				return (ENOMEM);
			}
			h = mtod(sc->sc_mbuf, struct pfsync_header *);
		} else {
			/*
			 * If it's an update, look in the packet to see if
			 * we already have an update for the state.
			 */
			if (action == PFSYNC_ACT_UPD && sc->sc_maxupdates) {
				struct pfsync_state *usp =
				    (void *)((char *)h + PFSYNC_HDRLEN);

				for (i = 0; i < h->count; i++) {
					if (!memcmp(usp->id, &st->id,
					    PFSYNC_ID_LEN) &&
					    usp->creatorid == st->creatorid) {
						sp = usp;
						sp->updates++;
						break;
					}
					usp++;
				}
			}
		}
	}

	st->pfsync_time = mycpu->gd_time_seconds;

	if (sp == NULL) {
		/* not a "duplicate" update */
		i = 255;
		sp = sc->sc_statep.s++;
		sc->sc_mbuf->m_pkthdr.len =
		    sc->sc_mbuf->m_len += sizeof(struct pfsync_state);
		h->count++;
		bzero(sp, sizeof(*sp));

		pfsync_state_export(sp, st);

		if (flags & PFSYNC_FLAG_STALE)
			sp->sync_flags |= PFSTATE_STALE;
	} else {
		pf_state_peer_hton(&st->src, &sp->src);
		pf_state_peer_hton(&st->dst, &sp->dst);

		if (st->expire <= time_second)
			sp->expire = htonl(0);
		else
			sp->expire = htonl(st->expire - time_second);
	}

	/* do we need to build "compressed" actions for network transfer? */
	if (sc->sc_sync_ifp && flags & PFSYNC_FLAG_COMPRESS) {
		switch (action) {
		case PFSYNC_ACT_UPD:
			newaction = PFSYNC_ACT_UPD_C;
			break;
		case PFSYNC_ACT_DEL:
			newaction = PFSYNC_ACT_DEL_C;
			break;
		default:
			/* by default we just send the uncompressed states */
			break;
		}
	}

	if (newaction) {
		if (sc->sc_mbuf_net == NULL) {
			if ((sc->sc_mbuf_net = pfsync_get_mbuf(sc, newaction,
			    (void *)&sc->sc_statep_net.s)) == NULL) {
				crit_exit();
				return (ENOMEM);
			}
		}
		h_net = mtod(sc->sc_mbuf_net, struct pfsync_header *);

		switch (newaction) {
		case PFSYNC_ACT_UPD_C:
			if (i != 255) {
				up = (void *)((char *)h_net +
				    PFSYNC_HDRLEN + (i * sizeof(*up)));
				up->updates++;
			} else {
				h_net->count++;
				sc->sc_mbuf_net->m_pkthdr.len =
				    sc->sc_mbuf_net->m_len += sizeof(*up);
				up = sc->sc_statep_net.u++;

				bzero(up, sizeof(*up));
				bcopy(&st->id, up->id, sizeof(up->id));
				up->creatorid = st->creatorid;
			}
			up->timeout = st->timeout;
			up->expire = sp->expire;
			up->src = sp->src;
			up->dst = sp->dst;
			break;
		case PFSYNC_ACT_DEL_C:
			sc->sc_mbuf_net->m_pkthdr.len =
			    sc->sc_mbuf_net->m_len += sizeof(*dp);
			dp = sc->sc_statep_net.d++;
			h_net->count++;

			bzero(dp, sizeof(*dp));
			bcopy(&st->id, dp->id, sizeof(dp->id));
			dp->creatorid = st->creatorid;
			break;
		}
	}

	if (h->count == sc->sc_maxcount ||
	    (sc->sc_maxupdates && (sp->updates >= sc->sc_maxupdates)))
		ret = pfsync_sendout(sc);

	crit_exit();
	return (ret);
}

int
pfsync_request_update(struct pfsync_state_upd *up, struct in_addr *src)
{
	struct pfsync_header *h;
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_state_upd_req *rup;
	int ret = 0;

	if (sc == NULL)
		return (0);

	if (sc->sc_mbuf == NULL) {
		if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
		    (void *)&sc->sc_statep.s)) == NULL)
			return (ENOMEM);
		h = mtod(sc->sc_mbuf, struct pfsync_header *);
	} else {
		h = mtod(sc->sc_mbuf, struct pfsync_header *);
		if (h->action != PFSYNC_ACT_UREQ) {
			pfsync_sendout(sc);
			if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_UREQ,
			    (void *)&sc->sc_statep.s)) == NULL)
				return (ENOMEM);
			h = mtod(sc->sc_mbuf, struct pfsync_header *);
		}
	}

	if (src != NULL)
		sc->sc_sendaddr = *src;
	sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*rup);
	h->count++;
	rup = sc->sc_statep.r++;
	bzero(rup, sizeof(*rup));
	if (up != NULL) {
		bcopy(up->id, rup->id, sizeof(rup->id));
		rup->creatorid = up->creatorid;
	}

	if (h->count == sc->sc_maxcount)
		ret = pfsync_sendout(sc);

	return (ret);
}
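/*
 * Broadcast a CLR message telling peers to flush all states they hold
 * for the given creator id (optionally restricted to one interface).
 */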
int
pfsync_clear_states(u_int32_t creatorid, char *ifname)
{
	struct pfsync_softc *sc = pfsyncif;
	struct pfsync_state_clr *cp;
	int ret;

	if (sc == NULL)
		return (0);

	crit_enter();
	if (sc->sc_mbuf != NULL)
		pfsync_sendout(sc);
	if ((sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_CLR,
	    (void *)&sc->sc_statep.c)) == NULL) {
		crit_exit();
		return (ENOMEM);
	}
	sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*cp);
	cp = sc->sc_statep.c;
	cp->creatorid = creatorid;
	if (ifname != NULL)
		strlcpy(cp->ifname, ifname, IFNAMSIZ);

	ret = pfsync_sendout(sc);
	crit_exit();
	return (ret);
}

void
pfsync_timeout(void *v)
{
	struct pfsync_softc *sc = v;

	crit_enter();
	pfsync_sendout(sc);
	crit_exit();
}

void
pfsync_send_bus(struct pfsync_softc *sc, u_int8_t status)
{
	struct pfsync_state_bus *bus;

	if (sc->sc_mbuf != NULL)
		pfsync_sendout(sc);

	if (pfsync_sync_ok &&
	    (sc->sc_mbuf = pfsync_get_mbuf(sc, PFSYNC_ACT_BUS,
	    (void *)&sc->sc_statep.b)) != NULL) {
		sc->sc_mbuf->m_pkthdr.len = sc->sc_mbuf->m_len += sizeof(*bus);
		bus = sc->sc_statep.b;
		bus->creatorid = pf_status.hostid;
		bus->status = status;
		bus->endtime = htonl(mycpu->gd_time_seconds -
		    sc->sc_ureq_received);
		pfsync_sendout(sc);
	}
}

void
pfsync_bulk_update(void *v)
{
	struct pfsync_softc *sc = v;
	int i = 0;
	int cpu;
	struct pf_state *state;

	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	crit_enter();
	if (sc->sc_mbuf != NULL)
		pfsync_sendout(sc);

	/*
	 * Grab at most PFSYNC_BULKPACKETS worth of states which have not
	 * been sent since the latest request was made.
	 */
	state = sc->sc_bulk_send_next;
	cpu = sc->sc_bulk_send_cpu;
	if (state)
		do {
			/* send state update if syncable and not already sent */
			if (!state->sync_flags &&
			    state->timeout < PFTM_MAX &&
			    state->pfsync_time <= sc->sc_ureq_received) {
				pfsync_pack_state(PFSYNC_ACT_UPD, state, 0);
				i++;
			}

			/* figure next state to send */
			state = TAILQ_NEXT(state, entry_list);

			/* wrap to start of list if we hit the end */
			if (state == NULL) {
				if (++cpu >= ncpus)
					cpu = 0;
				state = TAILQ_FIRST(&state_list[cpu]);
			}
		} while (i < sc->sc_maxcount * PFSYNC_BULKPACKETS &&
		    cpu != sc->sc_bulk_terminator_cpu &&
		    state != sc->sc_bulk_terminator);

	if (state == NULL || (cpu == sc->sc_bulk_terminator_cpu &&
	    state == sc->sc_bulk_terminator)) {
		/* we're done */
		pfsync_send_bus(sc, PFSYNC_BUS_END);
		sc->sc_ureq_received = 0;
		sc->sc_bulk_send_next = NULL;
		sc->sc_bulk_terminator = NULL;
		sc->sc_bulk_send_cpu = 0;
		sc->sc_bulk_terminator_cpu = 0;
		lwkt_reltoken(&pf_token);
		callout_stop(&sc->sc_bulk_tmo);
		lwkt_gettoken(&pf_token);
		if (pf_status.debug >= PF_DEBUG_MISC)
			kprintf("pfsync: bulk update complete\n");
	} else {
		/* look again for more in a bit */
		lwkt_reltoken(&pf_token);
		callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update,
		    LIST_FIRST(&pfsync_list));
		lwkt_gettoken(&pf_token);
		sc->sc_bulk_send_next = state;
		sc->sc_bulk_send_cpu = cpu;
	}
	if (sc->sc_mbuf != NULL)
		pfsync_sendout(sc);
	crit_exit();
}
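/*
 * Failure timeout for a pending bulk update request: re-request up to
 * PFSYNC_MAX_BULKTRIES times, then give up and declare ourselves in
 * sync so that the carp demotion is lifted.
 */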
void
pfsync_bulkfail(void *v)
{
	struct pfsync_softc *sc = v;
	int error;

	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again in a bit */
		lwkt_reltoken(&pf_token);
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulkfail,
		    LIST_FIRST(&pfsync_list));
		lwkt_gettoken(&pf_token);
		crit_enter();
		error = pfsync_request_update(NULL, NULL);
		if (error == ENOMEM) {
			if (pf_status.debug >= PF_DEBUG_MISC)
				kprintf("pfsync: cannot allocate mbufs for "
				    "bulk update\n");
		} else
			pfsync_sendout(sc);
		crit_exit();
	} else {
		/* Pretend like the transfer was ok */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
#if NCARP > 0
		if (!pfsync_sync_ok)
			carp_group_demote_adj(&sc->sc_if, -1);
#endif
		pfsync_sync_ok = 1;
		if (pf_status.debug >= PF_DEBUG_MISC)
			kprintf("pfsync: failed to receive "
			    "bulk update status\n");
		lwkt_reltoken(&pf_token);
		callout_stop(&sc->sc_bulkfail_tmo);
		lwkt_gettoken(&pf_token);
	}
}

/* This must be called in splnet() */
int
pfsync_sendout(struct pfsync_softc *sc)
{
#if NBPFILTER > 0
	struct ifnet *ifp = &sc->sc_if;
#endif
	struct mbuf *m;

	ASSERT_LWKT_TOKEN_HELD(&pf_token);

	lwkt_reltoken(&pf_token);
	callout_stop(&sc->sc_tmo);
	lwkt_gettoken(&pf_token);

	if (sc->sc_mbuf == NULL)
		return (0);
	m = sc->sc_mbuf;
	sc->sc_mbuf = NULL;
	sc->sc_statep.s = NULL;

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		bpf_gettoken();
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
		bpf_reltoken();
	}
#endif

	if (sc->sc_mbuf_net) {
		m_freem(m);
		m = sc->sc_mbuf_net;
		sc->sc_mbuf_net = NULL;
		sc->sc_statep_net.s = NULL;
	}

	return pfsync_sendout_mbuf(sc, m);
}

int
pfsync_sendout_mbuf(struct pfsync_softc *sc, struct mbuf *m)
{
	struct sockaddr sa;
	struct ip *ip;

	if (sc->sc_sync_ifp ||
	    sc->sc_sync_peer.s_addr != INADDR_PFSYNC_GROUP) {
		M_PREPEND(m, sizeof(struct ip), M_WAITOK);
		if (m == NULL) {
			pfsyncstats.pfsyncs_onomem++;
			return (0);
		}
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(*ip) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		ip->ip_len = htons(m->m_pkthdr.len);
		ip->ip_id = htons(ip_randomid());
		ip->ip_off = htons(IP_DF);
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_sum = 0;

		bzero(&sa, sizeof(sa));
		ip->ip_src.s_addr = INADDR_ANY;

		if (sc->sc_sendaddr.s_addr == INADDR_PFSYNC_GROUP)
			m->m_flags |= M_MCAST;
		ip->ip_dst = sc->sc_sendaddr;
		sc->sc_sendaddr.s_addr = sc->sc_sync_peer.s_addr;

		pfsyncstats.pfsyncs_opackets++;

		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL))
			pfsyncstats.pfsyncs_oerrors++;
	} else
		m_freem(m);

	return (0);
}

static int
pfsync_modevent(module_t mod, int type, void *data)
{
	int error = 0;
	struct pfsync_softc *pfs_if, *tmp;

	lwkt_gettoken(&pf_token);

	switch (type) {
	case MOD_LOAD:
		LIST_INIT(&pfsync_list);
		lwkt_reltoken(&pf_token);
		if_clone_attach(&pfsync_cloner);
		lwkt_gettoken(&pf_token);
		/* Override the function pointer for pf_ioctl.c */
		break;

	case MOD_UNLOAD:
		lwkt_reltoken(&pf_token);
		if_clone_detach(&pfsync_cloner);
		lwkt_gettoken(&pf_token);
		LIST_FOREACH_MUTABLE(pfs_if, &pfsync_list, sc_next, tmp) {
			pfsync_clone_destroy(&pfs_if->sc_if);
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	lwkt_reltoken(&pf_token);
	return error;
}

static moduledata_t pfsync_mod = {
	"pfsync",
	pfsync_modevent,
	0
};

#define PFSYNC_MODVER	44

DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
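/*
 * Multicast group membership is manipulated via a netmsg dispatched to
 * netisr cpu 0, so that in_addmulti()/in_delmulti() always run in the
 * proper protocol thread.
 */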
static void
pfsync_in_addmulti_dispatch(netmsg_t nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->lmsg;
	struct ifnet *ifp = lmsg->u.ms_resultp;
	struct in_addr addr;

	addr.s_addr = INADDR_PFSYNC_GROUP;
	lmsg->u.ms_resultp = in_addmulti(&addr, ifp);

	lwkt_replymsg(lmsg, 0);
}

static struct in_multi *
pfsync_in_addmulti(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	struct lwkt_msg *lmsg = &nmsg.lmsg;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0,
	    pfsync_in_addmulti_dispatch);
	lmsg->u.ms_resultp = ifp;

	lwkt_domsg(netisr_cpuport(0), lmsg, 0);
	return lmsg->u.ms_resultp;
}

static void
pfsync_in_delmulti_dispatch(netmsg_t nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->lmsg;

	in_delmulti(lmsg->u.ms_resultp);
	lwkt_replymsg(lmsg, 0);
}

static void
pfsync_in_delmulti(struct in_multi *inm)
{
	struct netmsg_base nmsg;
	struct lwkt_msg *lmsg = &nmsg.lmsg;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0,
	    pfsync_in_delmulti_dispatch);
	lmsg->u.ms_resultp = inm;

	lwkt_domsg(netisr_cpuport(0), lmsg, 0);
}
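/*
 * Typical setup from userland (a sketch; the exact ifconfig(8) option
 * names are assumed to match the usual pfsync options):
 *
 *	ifconfig pfsync0 syncdev em1 up
 *
 * With no explicit "syncpeer", updates are sent to the pfsync multicast
 * group (INADDR_PFSYNC_GROUP, 224.0.0.240) on the sync device.
 */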