/*	$OpenBSD: if_bridge.c,v 1.75 2001/08/12 00:09:29 mickey Exp $	*/

/*
 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Jason L. Wright
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "bpfilter.h"
#include "gif.h"
#include "pf.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <machine/cpu.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_llc.h>
#include <net/route.h>
#include <net/netisr.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/ip_ipsp.h>

#include <net/if_enc.h>
#endif

#if NPF > 0
#include <net/pfvar.h>
#define	BRIDGE_IN	PF_IN
#define	BRIDGE_OUT	PF_OUT
#else
#define	BRIDGE_IN	0
#define	BRIDGE_OUT	1
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <net/if_bridge.h>

#ifndef	BRIDGE_RTABLE_SIZE
#define	BRIDGE_RTABLE_SIZE	1024
#endif
#define	BRIDGE_RTABLE_MASK	(BRIDGE_RTABLE_SIZE - 1)

/*
 * Maximum number of addresses to cache
 */
#ifndef	BRIDGE_RTABLE_MAX
#define	BRIDGE_RTABLE_MAX	100
#endif

/* spanning tree defaults */
#define	BSTP_DEFAULT_MAX_AGE		(20 * 256)
#define	BSTP_DEFAULT_HELLO_TIME		(2 * 256)
#define	BSTP_DEFAULT_FORWARD_DELAY	(15 * 256)
#define	BSTP_DEFAULT_HOLD_TIME		(1 * 256)
#define	BSTP_DEFAULT_BRIDGE_PRIORITY	0x8000
#define	BSTP_DEFAULT_PORT_PRIORITY	0x80
#define	BSTP_DEFAULT_PATH_COST		55

/*
 * Timeout (in seconds) for entries learned dynamically
 */
#ifndef	BRIDGE_RTABLE_TIMEOUT
#define	BRIDGE_RTABLE_TIMEOUT	240
#endif

extern int ifqmaxlen;

struct bridge_softc *bridgectl;
int nbridge;

void	bridgeattach __P((int));
int	bridge_ioctl __P((struct ifnet *, u_long, caddr_t));
void	bridge_start __P((struct ifnet *));
void	bridgeintr_frame __P((struct bridge_softc *, struct mbuf *));
void	bridge_broadcast __P((struct bridge_softc *, struct ifnet *,
	    struct ether_header *, struct mbuf *));
void	bridge_stop __P((struct bridge_softc *));
void	bridge_init __P((struct bridge_softc *));
int	bridge_bifconf __P((struct bridge_softc *, struct ifbifconf *));

void	bridge_timer __P((void *));
int	bridge_rtfind __P((struct bridge_softc *, struct ifbaconf *));
void	bridge_rtage __P((struct bridge_softc *));
void	bridge_rttrim __P((struct bridge_softc *));
void	bridge_rtdelete __P((struct bridge_softc *, struct ifnet *));
int	bridge_rtdaddr __P((struct bridge_softc *, struct ether_addr *));
int	bridge_rtflush __P((struct bridge_softc *, int));
struct ifnet *bridge_rtupdate __P((struct bridge_softc *,
	    struct ether_addr *, struct ifnet *ifp, int, u_int8_t));
struct ifnet *bridge_rtlookup __P((struct bridge_softc *,
	    struct ether_addr *));
u_int32_t bridge_hash __P((struct bridge_softc *, struct ether_addr *));
int	bridge_blocknonip __P((struct ether_header *, struct mbuf *));
int	bridge_addrule __P((struct bridge_iflist *,
	    struct ifbrlreq *, int out));
int	bridge_flushrule __P((struct bridge_iflist *));
int	bridge_brlconf __P((struct bridge_softc *, struct ifbrlconf *));
u_int8_t bridge_filterrule __P((struct brl_head *, struct ether_header *));
#if NPF > 0
struct mbuf *bridge_filter __P((struct bridge_softc *, int, struct ifnet *,
	    struct ether_header *, struct mbuf *m));
#endif

#define	ETHERADDR_IS_IP_MCAST(a) \
	/* struct etheraddr *a;	*/				\
	((a)->ether_addr_octet[0] == 0x01 &&			\
	 (a)->ether_addr_octet[1] == 0x00 &&			\
	 (a)->ether_addr_octet[2] == 0x5e)

void
bridgeattach(n)
	int n;
{
	struct bridge_softc *sc;
	struct ifnet *ifp;
	int i;

	bridgectl = malloc(n * sizeof(*sc), M_DEVBUF, M_NOWAIT);
	if (!bridgectl)
		return;
	nbridge = n;
	bzero(bridgectl, n * sizeof(*sc));
	for (sc = bridgectl, i = 0; i < nbridge; i++, sc++) {

		sc->sc_brtmax = BRIDGE_RTABLE_MAX;
		sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
		sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
		sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
		sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
		sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
		sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
		timeout_set(&sc->sc_brtimeout, bridge_timer, sc);
		LIST_INIT(&sc->sc_iflist);
		ifp = &sc->sc_if;
		sprintf(ifp->if_xname, "bridge%d", i);
		ifp->if_softc = sc;
		ifp->if_mtu = ETHERMTU;
		ifp->if_ioctl = bridge_ioctl;
		ifp->if_output = bridge_output;
		ifp->if_start = bridge_start;
		ifp->if_type = IFT_BRIDGE;
		ifp->if_snd.ifq_maxlen = ifqmaxlen;
		ifp->if_hdrlen = sizeof(struct ether_header);
		if_attach(ifp);
#if NBPFILTER > 0
		bpfattach(&sc->sc_if.if_bpf, ifp,
		    DLT_EN10MB, sizeof(struct ether_header));
#endif
	}
}

int
bridge_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct proc *prc = curproc;		/* XXX */
	struct ifnet *ifs;
	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
	struct ifbreq *req = (struct ifbreq *)data;
	struct ifbaconf *baconf = (struct ifbaconf *)data;
	struct ifbareq *bareq = (struct ifbareq *)data;
	struct ifbrparam *bparam = (struct ifbrparam *)data;
	struct ifbifconf *bifconf = (struct ifbifconf *)data;
	struct ifbrlreq *brlreq = (struct ifbrlreq *)data;
	struct ifbrlconf *brlconf = (struct ifbrlconf *)data;
	struct ifreq ifreq;
	int error = 0, s;
	struct bridge_iflist *p;

	s = splnet();
	switch (cmd) {
	case SIOCBRDGADD:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;

		ifs = ifunit(req->ifbr_ifsname);
		if (ifs == NULL) {			/* no such interface */
			error = ENOENT;
			break;
		}
		if (ifs->if_bridge == (caddr_t)sc) {
			error = EEXIST;
			break;
		}
		if (ifs->if_bridge != NULL) {
			error = EBUSY;
			break;
		}

		if (ifs->if_type == IFT_ETHER) {
			if ((ifs->if_flags & IFF_UP) == 0) {
				/*
				 * Bring interface up long enough to set
				 * promiscuous flag, then shut it down again.
				 */
				strlcpy(ifreq.ifr_name, req->ifbr_ifsname,
				    IFNAMSIZ);
				ifs->if_flags |= IFF_UP;
				ifreq.ifr_flags = ifs->if_flags;
				error = (*ifs->if_ioctl)(ifs, SIOCSIFFLAGS,
				    (caddr_t)&ifreq);
				if (error != 0)
					break;

				error = ifpromisc(ifs, 1);
				if (error != 0)
					break;

				strlcpy(ifreq.ifr_name, req->ifbr_ifsname,
				    IFNAMSIZ);
				ifs->if_flags &= ~IFF_UP;
				ifreq.ifr_flags = ifs->if_flags;
				error = (*ifs->if_ioctl)(ifs, SIOCSIFFLAGS,
				    (caddr_t)&ifreq);
				if (error != 0) {
					ifpromisc(ifs, 0);
					break;
				}
			} else {
				error = ifpromisc(ifs, 1);
				if (error != 0)
					break;
			}
		}
#if NGIF > 0
		else if (ifs->if_type == IFT_GIF) {
			/* Nothing needed */
		}
#endif /* NGIF */
		else {
			error = EINVAL;
			break;
		}

		p = (struct bridge_iflist *)malloc(
		    sizeof(struct bridge_iflist), M_DEVBUF, M_NOWAIT);
		if (p == NULL) {
			if (ifs->if_type == IFT_ETHER)
				ifpromisc(ifs, 0);
			error = ENOMEM;
			break;
		}
		bzero(p, sizeof(struct bridge_iflist));

		p->ifp = ifs;
		p->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
		p->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
		p->bif_path_cost = BSTP_DEFAULT_PATH_COST;
		SIMPLEQ_INIT(&p->bif_brlin);
		SIMPLEQ_INIT(&p->bif_brlout);
		LIST_INSERT_HEAD(&sc->sc_iflist, p, next);
		ifs->if_bridge = (caddr_t)sc;
		break;
	case SIOCBRDGDEL:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;

		LIST_FOREACH(p, &sc->sc_iflist, next) {
			if (strncmp(p->ifp->if_xname, req->ifbr_ifsname,
			    sizeof(p->ifp->if_xname)) == 0) {
				p->ifp->if_bridge = NULL;

				error = ifpromisc(p->ifp, 0);

				LIST_REMOVE(p, next);
				bridge_rtdelete(sc, p->ifp);
				bridge_flushrule(p);
				free(p, M_DEVBUF);
				break;
			}
		}
		if (p == LIST_END(&sc->sc_iflist)) {
			error = ENOENT;
			break;
		}
		break;
	case SIOCBRDGIFS:
		error = bridge_bifconf(sc, bifconf);
		break;
	case SIOCBRDGGIFFLGS:
		ifs = ifunit(req->ifbr_ifsname);
		if (ifs == NULL) {
			error = ENOENT;
			break;
		}
		if ((caddr_t)sc != ifs->if_bridge) {
			error = ESRCH;
			break;
		}
		LIST_FOREACH(p, &sc->sc_iflist, next) {
			if (p->ifp == ifs)
				break;
		}
		if (p == LIST_END(&sc->sc_iflist)) {
			error = ESRCH;
			break;
		}
		req->ifbr_ifsflags = p->bif_flags;
		req->ifbr_state = p->bif_state;
		req->ifbr_priority = p->bif_priority;
		req->ifbr_portno = p->ifp->if_index & 0xff;
		break;
	case SIOCBRDGSIFFLGS:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;
		ifs = ifunit(req->ifbr_ifsname);
		if (ifs == NULL) {
			error = ENOENT;
			break;
		}
		if ((caddr_t)sc != ifs->if_bridge) {
			error = ESRCH;
			break;
		}
		LIST_FOREACH(p, &sc->sc_iflist, next) {
			if (p->ifp == ifs)
				break;
		}
		if (p == LIST_END(&sc->sc_iflist)) {
			error = ESRCH;
			break;
		}
		if ((req->ifbr_ifsflags & IFBIF_STP) &&
		    (ifs->if_type != IFT_ETHER)) {
			error = EINVAL;
			break;
		}
		p->bif_flags = req->ifbr_ifsflags;
		break;
	case SIOCBRDGSIFPRIO:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;
		ifs = ifunit(req->ifbr_ifsname);
		if (ifs == NULL) {
			error = ENOENT;
			break;
		}
		if ((caddr_t)sc != ifs->if_bridge) {
			error = ESRCH;
			break;
		}
		LIST_FOREACH(p, &sc->sc_iflist, next) {
			if (p->ifp == ifs)
				break;
		}
		if (p == LIST_END(&sc->sc_iflist)) {
			error = ESRCH;
			break;
		}
		p->bif_priority = req->ifbr_priority;
		break;
	case SIOCBRDGRTS:
		error = bridge_rtfind(sc, baconf);
		break;
	case SIOCBRDGFLUSH:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;

		error = bridge_rtflush(sc, req->ifbr_ifsflags);
		break;
	case SIOCBRDGSADDR:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;

		ifs = ifunit(bareq->ifba_ifsname);
		if (ifs == NULL) {			/* no such interface */
			error = ENOENT;
			break;
		}

		if (ifs->if_bridge == NULL ||
		    ifs->if_bridge != (caddr_t)sc) {
			error = ESRCH;
			break;
		}

		ifs = bridge_rtupdate(sc, &bareq->ifba_dst, ifs, 1,
		    bareq->ifba_flags);
		if (ifs == NULL)
			error = ENOMEM;
		break;
	case SIOCBRDGDADDR:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;
		error = bridge_rtdaddr(sc, &bareq->ifba_dst);
		break;
	case SIOCBRDGGCACHE:
		bparam->ifbrp_csize = sc->sc_brtmax;
		break;
	case SIOCBRDGSCACHE:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;
		sc->sc_brtmax = bparam->ifbrp_csize;
		bridge_rttrim(sc);
		break;
	case SIOCBRDGSTO:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;
		sc->sc_brttimeout = bparam->ifbrp_ctime;
		timeout_del(&sc->sc_brtimeout);
		if (bparam->ifbrp_ctime != 0)
			timeout_add(&sc->sc_brtimeout, sc->sc_brttimeout * hz);
		break;
	case SIOCBRDGGTO:
		bparam->ifbrp_ctime = sc->sc_brttimeout;
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == IFF_UP)
			bridge_init(sc);

		if ((ifp->if_flags & IFF_UP) == 0)
			bridge_stop(sc);

		break;
	case SIOCBRDGARL:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;
		ifs = ifunit(brlreq->ifbr_ifsname);
		if (ifs == NULL) {
			error = ENOENT;
			break;
		}
		if (ifs->if_bridge == NULL ||
		    ifs->if_bridge != (caddr_t)sc) {
			error = ESRCH;
			break;
		}
		LIST_FOREACH(p, &sc->sc_iflist, next) {
			if (p->ifp == ifs)
				break;
		}
		if (p == LIST_END(&sc->sc_iflist)) {
			error = ESRCH;
			break;
		}
		if ((brlreq->ifbr_action != BRL_ACTION_BLOCK &&
		    brlreq->ifbr_action != BRL_ACTION_PASS) ||
		    (brlreq->ifbr_flags & (BRL_FLAG_IN|BRL_FLAG_OUT)) == 0) {
			error = EINVAL;
			break;
		}
		if (brlreq->ifbr_flags & BRL_FLAG_IN) {
			error = bridge_addrule(p, brlreq, 0);
			if (error)
				break;
		}
		if (brlreq->ifbr_flags & BRL_FLAG_OUT) {
			error = bridge_addrule(p, brlreq, 1);
			if (error)
				break;
		}
		break;
	case SIOCBRDGFRL:
		if ((error = suser(prc->p_ucred, &prc->p_acflag)) != 0)
			break;
		ifs = ifunit(brlreq->ifbr_ifsname);
		if (ifs == NULL) {
			error = ENOENT;
			break;
		}
		if (ifs->if_bridge == NULL ||
		    ifs->if_bridge != (caddr_t)sc) {
			error = ESRCH;
			break;
		}
		LIST_FOREACH(p, &sc->sc_iflist, next) {
			if (p->ifp == ifs)
				break;
		}
		if (p == LIST_END(&sc->sc_iflist)) {
			error = ESRCH;
			break;
		}
		error = bridge_flushrule(p);
		break;
	case SIOCBRDGGRL:
		error = bridge_brlconf(sc, brlconf);
		break;
	case SIOCBRDGGPRI:
	case SIOCBRDGGMA:
	case SIOCBRDGGHT:
	case SIOCBRDGGFD:
		break;
	case SIOCBRDGSPRI:
	case SIOCBRDGSFD:
	case SIOCBRDGSMA:
	case SIOCBRDGSHT:
		error = suser(prc->p_ucred, &prc->p_acflag);
		break;
	default:
		error = EINVAL;
	}

	if (!error)
		error = bstp_ioctl(ifp, cmd, data);

	splx(s);
	return (error);
}

/* Detach an interface from a bridge. */
void
bridge_ifdetach(ifp)
	struct ifnet *ifp;
{
	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_bridge;
	struct bridge_iflist *bif;

	LIST_FOREACH(bif, &sc->sc_iflist, next) {
		if (bif->ifp == ifp) {
			LIST_REMOVE(bif, next);
			bridge_rtdelete(sc, ifp);
			bridge_flushrule(bif);
			free(bif, M_DEVBUF);
			ifp->if_bridge = NULL;
			break;
		}
	}
}

int
bridge_bifconf(sc, bifc)
	struct bridge_softc *sc;
	struct ifbifconf *bifc;
{
	struct bridge_iflist *p;
	u_int32_t total = 0, i = 0;
	int error = 0;
	struct ifbreq breq;

	LIST_FOREACH(p, &sc->sc_iflist, next) {
		total++;
	}
	if (bifc->ifbic_len == 0) {
		i = total;
		goto done;
	}

	LIST_FOREACH(p, &sc->sc_iflist, next) {
		if (bifc->ifbic_len < sizeof(breq))
			break;
		strlcpy(breq.ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(breq.ifbr_ifsname, p->ifp->if_xname, IFNAMSIZ);
		breq.ifbr_ifsflags = p->bif_flags;
		breq.ifbr_state = p->bif_state;
		breq.ifbr_priority = p->bif_priority;
		breq.ifbr_portno = p->ifp->if_index & 0xff;
		error = copyout((caddr_t)&breq,
		    (caddr_t)(bifc->ifbic_req + i), sizeof(breq));
		if (error)
			goto done;
		i++;
		bifc->ifbic_len -= sizeof(breq);
	}

done:
	bifc->ifbic_len = i * sizeof(breq);
	return (error);
}

int
bridge_brlconf(sc, bc)
	struct bridge_softc *sc;
	struct ifbrlconf *bc;
{
	struct ifnet *ifp;
	struct bridge_iflist *ifl;
	struct brl_node *n;
	struct ifbrlreq req;
	int error = 0;
	u_int32_t i = 0, total = 0;

	ifp = ifunit(bc->ifbrl_ifsname);
	if (ifp == NULL)
		return (ENOENT);
	if (ifp->if_bridge == NULL || ifp->if_bridge != (caddr_t)sc)
		return (ESRCH);
	LIST_FOREACH(ifl, &sc->sc_iflist, next) {
		if (ifl->ifp == ifp)
			break;
	}
	if (ifl == LIST_END(&sc->sc_iflist))
		return (ESRCH);

	SIMPLEQ_FOREACH(n, &ifl->bif_brlin, brl_next) {
		total++;
	}
	SIMPLEQ_FOREACH(n, &ifl->bif_brlout, brl_next) {
		total++;
	}

	if (bc->ifbrl_len == 0) {
		i = total;
		goto done;
	}

	SIMPLEQ_FOREACH(n, &ifl->bif_brlin, brl_next) {
		if (bc->ifbrl_len < sizeof(req))
			goto done;
		strlcpy(req.ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(req.ifbr_ifsname, ifl->ifp->if_xname, IFNAMSIZ);
		req.ifbr_action = n->brl_action;
		req.ifbr_flags = n->brl_flags;
		req.ifbr_src = n->brl_src;
		req.ifbr_dst = n->brl_dst;
		error = copyout((caddr_t)&req,
		    (caddr_t)(bc->ifbrl_buf + (i * sizeof(req))), sizeof(req));
		if (error)
			goto done;
		i++;
		bc->ifbrl_len -= sizeof(req);
	}

	SIMPLEQ_FOREACH(n, &ifl->bif_brlout, brl_next) {
		if (bc->ifbrl_len < sizeof(req))
			goto done;
		strlcpy(req.ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(req.ifbr_ifsname, ifl->ifp->if_xname, IFNAMSIZ);
		req.ifbr_action = n->brl_action;
		req.ifbr_flags = n->brl_flags;
		req.ifbr_src = n->brl_src;
		req.ifbr_dst = n->brl_dst;
		error = copyout((caddr_t)&req,
		    (caddr_t)(bc->ifbrl_buf + (i * sizeof(req))), sizeof(req));
		if (error)
			goto done;
		i++;
		bc->ifbrl_len -= sizeof(req);
	}

done:
	bc->ifbrl_len = i * sizeof(req);
	return (error);
}

void
bridge_init(sc)
	struct bridge_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	int i;

	if ((ifp->if_flags & IFF_RUNNING) == IFF_RUNNING)
		return;

	if (sc->sc_rts == NULL) {
		sc->sc_rts = (struct bridge_rthead *)malloc(
		    BRIDGE_RTABLE_SIZE * (sizeof(struct bridge_rthead)),
		    M_DEVBUF, M_NOWAIT);
		if (sc->sc_rts == NULL)
			return;
		for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
			LIST_INIT(&sc->sc_rts[i]);
		}
		sc->sc_hashkey = arc4random();
	}
	ifp->if_flags |= IFF_RUNNING;

	if (sc->sc_brttimeout != 0)
		timeout_add(&sc->sc_brtimeout, sc->sc_brttimeout * hz);
}

/*
 * Stop the bridge and deallocate the routing table.
 */
void
bridge_stop(sc)
	struct bridge_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;

	/*
	 * If we're not running, there's nothing to do.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	timeout_del(&sc->sc_brtimeout);

	bridge_rtflush(sc, IFBF_FLUSHDYN);

	ifp->if_flags &= ~IFF_RUNNING;
}

/*
 * Send output from the bridge. The mbuf has the ethernet header
 * already attached. We must enqueue or free the mbuf before exiting.
 */
int
bridge_output(ifp, m, sa, rt)
	struct ifnet *ifp;
	struct mbuf *m;
	struct sockaddr *sa;
	struct rtentry *rt;
{
	struct ether_header *eh;
	struct ifnet *dst_if;
	struct ether_addr *src, *dst;
	struct bridge_softc *sc;
	int s, error, len;
	short mflags;
	ALTQ_DECL(struct altq_pktattr pktattr;)
#ifdef IPSEC
	struct m_tag *mtag;
#endif /* IPSEC */

	if (m->m_len < sizeof(*eh)) {
		m = m_pullup(m, sizeof(*eh));
		if (m == NULL)
			return (0);
	}
	eh = mtod(m, struct ether_header *);
	dst = (struct ether_addr *)&eh->ether_dhost[0];
	src = (struct ether_addr *)&eh->ether_shost[0];
	sc = (struct bridge_softc *)ifp->if_bridge;

	s = splimp();

	/*
	 * If bridge is down, but original output interface is up,
	 * go ahead and send out that interface. Otherwise the packet
	 * is dropped below.
	 */
	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
		dst_if = ifp;
		goto sendunicast;
	}

	/*
	 * If the packet is a broadcast or we don't know a better way to
	 * get there, send to all interfaces.
	 */
	dst_if = bridge_rtlookup(sc, dst);
	if (dst_if == NULL || ETHER_IS_MULTICAST(eh->ether_dhost)) {
		struct bridge_iflist *p;
		struct mbuf *mc;
		int used = 0;

#ifdef IPSEC
		/*
		 * Don't send out the packet if IPsec is needed, and
		 * notify IPsec to do its own crypto for now.
		 */
		if ((mtag = m_tag_find(m, PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED,
		    NULL)) != NULL) {
			ipsp_skipcrypto_unmark((struct tdb_ident *)(mtag + 1));
			m_freem(m);
			splx(s);
			return (0);
		}
#endif /* IPSEC */

		/* Catch packets that need TCP/UDP/IP hardware checksumming */
		if (m->m_pkthdr.csum & M_IPV4_CSUM_OUT ||
		    m->m_pkthdr.csum & M_TCPV4_CSUM_OUT ||
		    m->m_pkthdr.csum & M_UDPV4_CSUM_OUT) {
			m_freem(m);
			splx(s);
			return (0);
		}

		LIST_FOREACH(p, &sc->sc_iflist, next) {
			dst_if = p->ifp;
			if ((dst_if->if_flags & IFF_RUNNING) == 0)
				continue;
#ifdef ALTQ
			if (ALTQ_IS_ENABLED(&dst_if->if_snd) == 0)
#endif
			if (IF_QFULL(&dst_if->if_snd)) {
				IF_DROP(&dst_if->if_snd);
				sc->sc_if.if_oerrors++;
				continue;
			}
			if (LIST_NEXT(p, next) == LIST_END(&sc->sc_iflist)) {
				used = 1;
				mc = m;
			} else {
				mc = m_copym(m, 0, M_COPYALL, M_NOWAIT);
				if (mc == NULL) {
					sc->sc_if.if_oerrors++;
					continue;
				}
			}
			len = mc->m_pkthdr.len;
			mflags = mc->m_flags;
#ifdef ALTQ
			if (ALTQ_IS_ENABLED(&dst_if->if_snd))
				altq_etherclassify(&dst_if->if_snd, mc,
				    &pktattr);
#endif
			IFQ_ENQUEUE(&dst_if->if_snd, mc, &pktattr, error);
			if (error) {
				sc->sc_if.if_oerrors++;
				continue;
			}
			sc->sc_if.if_opackets++;
			sc->sc_if.if_obytes += len;
			dst_if->if_obytes += len;
			if (mflags & M_MCAST)
				dst_if->if_omcasts++;
			if ((dst_if->if_flags & IFF_OACTIVE) == 0)
				(*dst_if->if_start)(dst_if);
		}
		if (!used)
			m_freem(m);
		splx(s);
		return (0);
	}

sendunicast:
	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		splx(s);
		return (0);
	}
	len = m->m_pkthdr.len;
	mflags = m->m_flags;
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&dst_if->if_snd))
		altq_etherclassify(&dst_if->if_snd, m, &pktattr);
#endif
	IFQ_ENQUEUE(&dst_if->if_snd, m, &pktattr, error);
	if (error) {
		sc->sc_if.if_oerrors++;
		splx(s);
		return (0);
	}
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += len;
	dst_if->if_obytes += len;
	if (mflags & M_MCAST)
		dst_if->if_omcasts++;
	if ((dst_if->if_flags & IFF_OACTIVE) == 0)
		(*dst_if->if_start)(dst_if);
	splx(s);
	return (0);
}

/*
 * Start output on the bridge. This function should never be called.
 */
void
bridge_start(ifp)
	struct ifnet *ifp;
{
}

/*
 * Loop through each bridge interface and process their input queues.
 */
void
bridgeintr(void)
{
	struct bridge_softc *sc;
	struct mbuf *m;
	int i, s;

	for (i = 0; i < nbridge; i++) {
		sc = &bridgectl[i];
		for (;;) {
			s = splimp();
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			splx(s);
			if (m == NULL)
				break;
			bridgeintr_frame(sc, m);
		}
	}
}

/*
 * Process a single frame. Frame must be freed or queued before returning.
 */
void
bridgeintr_frame(sc, m)
	struct bridge_softc *sc;
	struct mbuf *m;
{
	int s, error, len;
	struct ifnet *src_if, *dst_if;
	struct bridge_iflist *ifl;
	struct ether_addr *dst, *src;
	struct ether_header eh;
	short mflags;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		return;
	}

	src_if = m->m_pkthdr.rcvif;

#if NBPFILTER > 0
	if (sc->sc_if.if_bpf)
		bpf_mtap(sc->sc_if.if_bpf, m);
#endif

	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;

	LIST_FOREACH(ifl, &sc->sc_iflist, next) {
		if (ifl->ifp == src_if)
			break;
	}
	if (ifl == LIST_END(&sc->sc_iflist)) {
		m_freem(m);
		return;
	}

	if ((ifl->bif_flags & IFBIF_STP) &&
	    (ifl->bif_state == BSTP_IFSTATE_BLOCKING ||
	    ifl->bif_state == BSTP_IFSTATE_LISTENING ||
	    ifl->bif_state == BSTP_IFSTATE_DISABLED)) {
		m_freem(m);
		return;
	}

	if (m->m_pkthdr.len < sizeof(eh)) {
		m_freem(m);
		return;
	}
	m_copydata(m, 0, sizeof(struct ether_header), (caddr_t)&eh);
	dst = (struct ether_addr *)&eh.ether_dhost[0];
	src = (struct ether_addr *)&eh.ether_shost[0];

	/*
	 * If interface is learning, and if source address
	 * is not broadcast or multicast, record its address.
	 */
	if ((ifl->bif_flags & IFBIF_LEARNING) &&
	    (eh.ether_shost[0] & 1) == 0 &&
	    !(eh.ether_shost[0] == 0 && eh.ether_shost[1] == 0 &&
	    eh.ether_shost[2] == 0 && eh.ether_shost[3] == 0 &&
	    eh.ether_shost[4] == 0 && eh.ether_shost[5] == 0))
		bridge_rtupdate(sc, src, src_if, 0, IFBAF_DYNAMIC);

	if ((ifl->bif_flags & IFBIF_STP) &&
	    (ifl->bif_state == BSTP_IFSTATE_LEARNING)) {
		m_freem(m);
		return;
	}

	/*
	 * At this point, the port either doesn't participate in stp or
	 * it's in the forwarding state.
	 */

	/*
	 * If packet is unicast, destined for someone on "this"
	 * side of the bridge, drop it.
	 */
	if ((m->m_flags & (M_BCAST | M_MCAST)) == 0) {
		dst_if = bridge_rtlookup(sc, dst);
		if (dst_if == src_if) {
			m_freem(m);
			return;
		}
	} else
		dst_if = NULL;

	/*
	 * Multicast packets get handled a little differently:
	 * If interface is:
	 *	-link0,-link1	(default) Forward all multicast
	 *			as broadcast.
	 *	-link0,link1	Drop non-IP multicast, forward
	 *			as broadcast IP multicast.
	 *	link0,-link1	Drop IP multicast, forward as
	 *			broadcast non-IP multicast.
	 *	link0,link1	Drop all multicast.
	 */
	if (m->m_flags & M_MCAST) {
		if ((sc->sc_if.if_flags &
		    (IFF_LINK0 | IFF_LINK1)) ==
		    (IFF_LINK0 | IFF_LINK1)) {
			m_freem(m);
			return;
		}
		if (sc->sc_if.if_flags & IFF_LINK0 &&
		    ETHERADDR_IS_IP_MCAST(dst)) {
			m_freem(m);
			return;
		}
		if (sc->sc_if.if_flags & IFF_LINK1 &&
		    !ETHERADDR_IS_IP_MCAST(dst)) {
			m_freem(m);
			return;
		}
	}

	if (ifl->bif_flags & IFBIF_BLOCKNONIP && bridge_blocknonip(&eh, m)) {
		m_freem(m);
		return;
	}

	if (bridge_filterrule(&ifl->bif_brlin, &eh) == BRL_ACTION_BLOCK) {
		m_freem(m);
		return;
	}
#if NPF > 0
	m = bridge_filter(sc, BRIDGE_IN, src_if, &eh, m);
	if (m == NULL)
		return;
#endif
	/*
	 * If the packet is a multicast or broadcast OR if we don't
	 * know any better, forward it to all interfaces.
	 */
	if ((m->m_flags & (M_BCAST | M_MCAST)) || dst_if == NULL) {
		sc->sc_if.if_imcasts++;
		s = splimp();
		bridge_broadcast(sc, src_if, &eh, m);
		splx(s);
		return;
	}

	/*
	 * At this point, we're dealing with a unicast frame going to a
	 * different interface.
	 */
	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		return;
	}
	LIST_FOREACH(ifl, &sc->sc_iflist, next) {
		if (ifl->ifp == dst_if)
			break;
	}
	if (ifl == LIST_END(&sc->sc_iflist)) {
		m_freem(m);
		return;
	}
	if ((ifl->bif_flags & IFBIF_STP) &&
	    (ifl->bif_state == BSTP_IFSTATE_DISABLED ||
	    ifl->bif_state == BSTP_IFSTATE_BLOCKING)) {
		m_freem(m);
		return;
	}
	if (bridge_filterrule(&ifl->bif_brlout, &eh) == BRL_ACTION_BLOCK) {
		m_freem(m);
		return;
	}
#if NPF > 0
	m = bridge_filter(sc, BRIDGE_OUT, dst_if, &eh, m);
	if (m == NULL)
		return;
#endif

#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&dst_if->if_snd))
		altq_etherclassify(&dst_if->if_snd, m, &pktattr);
#endif
	len = m->m_pkthdr.len;
	mflags = m->m_flags;
	s = splimp();
	IFQ_ENQUEUE(&dst_if->if_snd, m, &pktattr, error);
	if (error) {
		sc->sc_if.if_oerrors++;
		splx(s);
		return;
	}
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += len;
	dst_if->if_obytes += len;
	if (mflags & M_MCAST)
		dst_if->if_omcasts++;
	if ((dst_if->if_flags & IFF_OACTIVE) == 0)
		(*dst_if->if_start)(dst_if);
	splx(s);
}

/*
 * Receive input from an interface. Queue the packet for bridging if it's
 * not for us, and schedule an interrupt.
 */
struct mbuf *
bridge_input(ifp, eh, m)
	struct ifnet *ifp;
	struct ether_header *eh;
	struct mbuf *m;
{
	struct bridge_softc *sc;
	int s;
	struct bridge_iflist *ifl;
	struct arpcom *ac;
	struct mbuf *mc;

	/*
	 * Make sure this interface is a bridge member.
	 */
	if (ifp == NULL || ifp->if_bridge == NULL || m == NULL)
		return (m);

	if ((m->m_flags & M_PKTHDR) == 0)
		panic("bridge_input(): no HDR");

	m->m_flags &= ~M_PROTO1;	/* Loop prevention */

	sc = (struct bridge_softc *)ifp->if_bridge;
	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
		return (m);

	LIST_FOREACH(ifl, &sc->sc_iflist, next) {
		if (ifl->ifp == ifp)
			break;
	}
	if (ifl == LIST_END(&sc->sc_iflist))
		return (m);

	if (m->m_flags & (M_BCAST | M_MCAST)) {
		/* Tap off 802.1D packets, they do not get forwarded */
		if (bcmp(eh->ether_dhost, bstp_etheraddr,
		    ETHER_ADDR_LEN) == 0) {
			m = bstp_input(sc, ifp, eh, m);
			if (m == NULL)
				return (NULL);
		}

		/*
		 * No need to queue frames for ifs in the blocking, disabled,
		 * or listening state
		 */
		if ((ifl->bif_flags & IFBIF_STP) &&
		    ((ifl->bif_state == BSTP_IFSTATE_BLOCKING) ||
		    (ifl->bif_state == BSTP_IFSTATE_LISTENING) ||
		    (ifl->bif_state == BSTP_IFSTATE_DISABLED)))
			return (m);

		/*
		 * Make a copy of 'm' with 'eh' tacked on to the
		 * beginning.  Return 'm' for local processing
		 * and enqueue the copy.  Schedule netisr.
		 */
		mc = m_copym2(m, 0, M_COPYALL, M_NOWAIT);
		if (mc == NULL)
			return (m);
		M_PREPEND(mc, sizeof(struct ether_header), M_DONTWAIT);
		if (mc == NULL)
			return (m);
		bcopy(eh, mtod(mc, caddr_t), sizeof(struct ether_header));
		s = splimp();
		if (IF_QFULL(&sc->sc_if.if_snd)) {
			m_freem(mc);
			splx(s);
			return (m);
		}
		IF_ENQUEUE(&sc->sc_if.if_snd, mc);
		splx(s);
		schednetisr(NETISR_BRIDGE);
		if (ifp->if_type == IFT_GIF) {
			LIST_FOREACH(ifl, &sc->sc_iflist, next) {
				if (ifl->ifp->if_type == IFT_ETHER)
					break;
			}
			if (ifl != LIST_END(&sc->sc_iflist)) {
				m->m_flags |= M_PROTO1;
				m->m_pkthdr.rcvif = ifl->ifp;
				ether_input(ifl->ifp, eh, m);
				m = NULL;
			}
		}
		return (m);
	}

	/*
	 * No need to queue frames for ifs in the blocking, disabled, or
	 * listening state
	 */
	if ((ifl->bif_flags & IFBIF_STP) &&
	    ((ifl->bif_state == BSTP_IFSTATE_BLOCKING) ||
	    (ifl->bif_state == BSTP_IFSTATE_LISTENING) ||
	    (ifl->bif_state == BSTP_IFSTATE_DISABLED)))
		return (m);

	/*
	 * Unicast, make sure it's not for us.
	 */
	LIST_FOREACH(ifl, &sc->sc_iflist, next) {
		if (ifl->ifp->if_type != IFT_ETHER)
			continue;
		ac = (struct arpcom *)ifl->ifp;
		if (bcmp(ac->ac_enaddr, eh->ether_dhost, ETHER_ADDR_LEN) == 0) {
			if (ifl->bif_flags & IFBIF_LEARNING)
				bridge_rtupdate(sc,
				    (struct ether_addr *)&eh->ether_shost,
				    ifp, 0, IFBAF_DYNAMIC);
			m->m_pkthdr.rcvif = ifl->ifp;
			if (ifp->if_type == IFT_GIF) {
				m->m_flags |= M_PROTO1;
				ether_input(ifl->ifp, eh, m);
				m = NULL;
			}
			return (m);
		}
		if (bcmp(ac->ac_enaddr, eh->ether_shost, ETHER_ADDR_LEN) == 0) {
			m_freem(m);
			return (NULL);
		}
	}
	M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
	if (m == NULL)
		return (NULL);
	bcopy(eh, mtod(m, caddr_t), sizeof(struct ether_header));
	s = splimp();
	if (IF_QFULL(&sc->sc_if.if_snd)) {
		m_freem(m);
		splx(s);
		return (NULL);
	}
	IF_ENQUEUE(&sc->sc_if.if_snd, m);
	splx(s);
	schednetisr(NETISR_BRIDGE);
	return (NULL);
}

/*
 * Send a frame to all interfaces that are members of the bridge
 * (except the one it came in on).  This code assumes that it is
 * running at splnet or higher.
 */
void
bridge_broadcast(sc, ifp, eh, m)
	struct bridge_softc *sc;
	struct ifnet *ifp;
	struct ether_header *eh;
	struct mbuf *m;
{
	struct bridge_iflist *p;
	struct mbuf *mc;
	struct ifnet *dst_if;
	int error, len, used = 0;
	short mflags;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	LIST_FOREACH(p, &sc->sc_iflist, next) {
		/*
		 * Don't retransmit out of the same interface where
		 * the packet was received from.
		 */
		dst_if = p->ifp;
		if (dst_if->if_index == ifp->if_index)
			continue;

		if ((p->bif_flags & IFBIF_STP) &&
		    (p->bif_state == BSTP_IFSTATE_BLOCKING ||
		    p->bif_state == BSTP_IFSTATE_DISABLED))
			continue;

		if ((p->bif_flags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST | M_MCAST)) == 0)
			continue;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			continue;

#ifdef ALTQ
		if (ALTQ_IS_ENABLED(&dst_if->if_snd) == 0)
#endif
		if (IF_QFULL(&dst_if->if_snd)) {
			IF_DROP(&dst_if->if_snd);
			sc->sc_if.if_oerrors++;
			continue;
		}

		/* Drop non-IP frames if the appropriate flag is set. */
		if (p->bif_flags & IFBIF_BLOCKNONIP &&
		    bridge_blocknonip(eh, m))
			continue;

		if (bridge_filterrule(&p->bif_brlout, eh) == BRL_ACTION_BLOCK)
			continue;

		/* If last one, reuse the passed-in mbuf */
		if (LIST_NEXT(p, next) == LIST_END(&sc->sc_iflist)) {
			mc = m;
			used = 1;
		} else {
			mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
			if (mc == NULL) {
				sc->sc_if.if_oerrors++;
				continue;
			}
		}

#if NPF > 0
		mc = bridge_filter(sc, BRIDGE_OUT, dst_if, eh, mc);
		if (mc == NULL)
			continue;
#endif

		/* Record length and flags before the mbuf is handed off */
		len = mc->m_pkthdr.len;
		mflags = mc->m_flags;
#ifdef ALTQ
		if (ALTQ_IS_ENABLED(&dst_if->if_snd))
			altq_etherclassify(&dst_if->if_snd, mc, &pktattr);
#endif
		IFQ_ENQUEUE(&dst_if->if_snd, mc, &pktattr, error);
		if (error) {
			sc->sc_if.if_oerrors++;
			continue;
		}
		sc->sc_if.if_opackets++;
		sc->sc_if.if_obytes += len;
		dst_if->if_obytes += len;
		if (mflags & M_MCAST)
			dst_if->if_omcasts++;
		if ((dst_if->if_flags & IFF_OACTIVE) == 0)
			(*dst_if->if_start)(dst_if);
	}

	if (!used)
		m_freem(m);
}

struct ifnet *
bridge_rtupdate(sc, ea, ifp, setflags, flags)
	struct bridge_softc *sc;
	struct ether_addr *ea;
	struct ifnet *ifp;
	int setflags;
	u_int8_t flags;
{
	struct bridge_rtnode *p, *q;
	u_int32_t h;
	int dir;

	if (sc->sc_rts == NULL) {
		if (setflags && flags == IFBAF_STATIC) {
			sc->sc_rts = (struct bridge_rthead *)malloc(
			    BRIDGE_RTABLE_SIZE *
			    (sizeof(struct bridge_rthead)), M_DEVBUF,
			    M_NOWAIT);
			if (sc->sc_rts == NULL)
				goto done;

			for (h = 0; h < BRIDGE_RTABLE_SIZE; h++) {
				LIST_INIT(&sc->sc_rts[h]);
			}
			sc->sc_hashkey = arc4random();
		} else
			goto done;
	}

	h = bridge_hash(sc, ea);
	p = LIST_FIRST(&sc->sc_rts[h]);
	if (p == LIST_END(&sc->sc_rts[h])) {
		if (sc->sc_brtcnt >= sc->sc_brtmax)
			goto done;
		p = (struct bridge_rtnode *)malloc(
		    sizeof(struct bridge_rtnode), M_DEVBUF, M_NOWAIT);
		if (p == NULL)
			goto done;

		bcopy(ea, &p->brt_addr, sizeof(p->brt_addr));
		p->brt_if = ifp;
		p->brt_age = 1;

		if (setflags)
			p->brt_flags = flags;
		else
			p->brt_flags = IFBAF_DYNAMIC;

		LIST_INSERT_HEAD(&sc->sc_rts[h], p, brt_next);
		sc->sc_brtcnt++;
		goto want;
	}

	do {
		q = p;
		p = LIST_NEXT(p, brt_next);

		dir = memcmp(ea, &q->brt_addr, sizeof(q->brt_addr));
		if (dir == 0) {
			if (setflags) {
				q->brt_if = ifp;
				q->brt_flags = flags;
			}

			if (q->brt_if == ifp)
				q->brt_age = 1;
			ifp = q->brt_if;
			goto want;
		}

		if (dir > 0) {
			if (sc->sc_brtcnt >= sc->sc_brtmax)
				goto done;
			p = (struct bridge_rtnode *)malloc(
			    sizeof(struct bridge_rtnode), M_DEVBUF, M_NOWAIT);
			if (p == NULL)
				goto done;

			bcopy(ea, &p->brt_addr, sizeof(p->brt_addr));
			p->brt_if = ifp;
			p->brt_age = 1;

			if (setflags)
				p->brt_flags = flags;
			else
				p->brt_flags = IFBAF_DYNAMIC;

			LIST_INSERT_BEFORE(q, p, brt_next);
			sc->sc_brtcnt++;
			goto want;
		}

		if (p == LIST_END(&sc->sc_rts[h])) {
			if (sc->sc_brtcnt >= sc->sc_brtmax)
				goto done;
			p = (struct bridge_rtnode *)malloc(
			    sizeof(struct bridge_rtnode), M_DEVBUF, M_NOWAIT);
			if (p == NULL)
				goto done;

			bcopy(ea, &p->brt_addr, sizeof(p->brt_addr));
			p->brt_if = ifp;
			p->brt_age = 1;

			if (setflags)
				p->brt_flags = flags;
			else
				p->brt_flags = IFBAF_DYNAMIC;
			LIST_INSERT_AFTER(q, p, brt_next);
			sc->sc_brtcnt++;
			goto want;
		}
	} while (p != LIST_END(&sc->sc_rts[h]));

done:
	ifp = NULL;
want:
	return (ifp);
}

struct ifnet *
bridge_rtlookup(sc, ea)
	struct bridge_softc *sc;
	struct ether_addr *ea;
{
	struct bridge_rtnode *p;
	u_int32_t h;
	int dir;

	if (sc->sc_rts == NULL)
		goto fail;

	h = bridge_hash(sc, ea);
	LIST_FOREACH(p, &sc->sc_rts[h], brt_next) {
		dir = memcmp(ea, &p->brt_addr, sizeof(p->brt_addr));
		if (dir == 0)
			return (p->brt_if);
		if (dir > 0)
			goto fail;
	}
fail:
	return (NULL);
}

/*
 * The following hash function is adapted from 'Hash Functions' by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
 * "You may use this code any way you wish, private, educational, or
 * commercial.  It's free."
 */
#define	mix(a,b,c) \
	do {						\
		a -= b; a -= c; a ^= (c >> 13);		\
		b -= c; b -= a; b ^= (a << 8);		\
		c -= a; c -= b; c ^= (b >> 13);		\
		a -= b; a -= c; a ^= (c >> 12);		\
		b -= c; b -= a; b ^= (a << 16);		\
		c -= a; c -= b; c ^= (b >> 5);		\
		a -= b; a -= c; a ^= (c >> 3);		\
		b -= c; b -= a; b ^= (a << 10);		\
		c -= a; c -= b; c ^= (b >> 15);		\
	} while (0)

u_int32_t
bridge_hash(sc, addr)
	struct bridge_softc *sc;
	struct ether_addr *addr;
{
	u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_hashkey;

	b += addr->ether_addr_octet[5] << 8;
	b += addr->ether_addr_octet[4];
	a += addr->ether_addr_octet[3] << 24;
	a += addr->ether_addr_octet[2] << 16;
	a += addr->ether_addr_octet[1] << 8;
	a += addr->ether_addr_octet[0];

	mix(a, b, c);
	return (c & BRIDGE_RTABLE_MASK);
}

/*
 * Trim the routing table so that we've got a number of routes
 * less than or equal to the maximum.
 */
void
bridge_rttrim(sc)
	struct bridge_softc *sc;
{
	struct bridge_rtnode *n, *p;
	int i;

	if (sc->sc_rts == NULL)
		goto done;

	/*
	 * Make sure we have to trim the address table
	 */
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		goto done;

	/*
	 * Force an aging cycle, this might trim enough addresses.
	 */
	bridge_rtage(sc);

	if (sc->sc_brtcnt <= sc->sc_brtmax)
		goto done;

	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
		n = LIST_FIRST(&sc->sc_rts[i]);
		while (n != LIST_END(&sc->sc_rts[i])) {
			p = LIST_NEXT(n, brt_next);
			if ((n->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
				LIST_REMOVE(n, brt_next);
				sc->sc_brtcnt--;
				free(n, M_DEVBUF);
				n = p;
				if (sc->sc_brtcnt <= sc->sc_brtmax)
					goto done;
			} else
				n = p;	/* static entry, skip it */
		}
	}

done:
	if (sc->sc_rts != NULL && sc->sc_brtcnt == 0 &&
	    (sc->sc_if.if_flags & IFF_UP) == 0) {
		free(sc->sc_rts, M_DEVBUF);
		sc->sc_rts = NULL;
	}
}

void
bridge_timer(vsc)
	void *vsc;
{
	struct bridge_softc *sc = vsc;
	int s;

	s = splsoftnet();
	bridge_rtage(sc);
	splx(s);
}

/*
 * Perform an aging cycle
 */
void
bridge_rtage(sc)
	struct bridge_softc *sc;
{
	struct bridge_rtnode *n, *p;
	int i;

	if (sc->sc_rts == NULL)
		return;

	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
		n = LIST_FIRST(&sc->sc_rts[i]);
		while (n != LIST_END(&sc->sc_rts[i])) {
			if ((n->brt_flags & IFBAF_TYPEMASK) == IFBAF_STATIC) {
				n->brt_age = !n->brt_age;
				if (n->brt_age)
					n->brt_age = 0;
				n = LIST_NEXT(n, brt_next);
			} else if (n->brt_age) {
				n->brt_age = 0;
				n = LIST_NEXT(n, brt_next);
			} else {
				p = LIST_NEXT(n, brt_next);
				LIST_REMOVE(n, brt_next);
				sc->sc_brtcnt--;
				free(n, M_DEVBUF);
				n = p;
			}
		}
	}

	if (sc->sc_brttimeout != 0)
		timeout_add(&sc->sc_brtimeout, sc->sc_brttimeout * hz);
}

/*
 * Remove all dynamic addresses from the cache
 */
int
bridge_rtflush(sc, full)
	struct bridge_softc *sc;
	int full;
{
	int i;
	struct bridge_rtnode *p, *n;

	if (sc->sc_rts == NULL)
		return (0);

	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
		n = LIST_FIRST(&sc->sc_rts[i]);
		while (n != LIST_END(&sc->sc_rts[i])) {
			if (full ||
			    (n->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
				p = LIST_NEXT(n, brt_next);
				LIST_REMOVE(n, brt_next);
				sc->sc_brtcnt--;
				free(n, M_DEVBUF);
				n = p;
			} else
				n = LIST_NEXT(n, brt_next);
		}
	}

	if (sc->sc_brtcnt == 0 && (sc->sc_if.if_flags & IFF_UP) == 0) {
		free(sc->sc_rts, M_DEVBUF);
		sc->sc_rts = NULL;
	}

	return (0);
}

/*
 * Remove an address from the cache
 */
int
bridge_rtdaddr(sc, ea)
	struct bridge_softc *sc;
	struct ether_addr *ea;
{
	int h;
	struct bridge_rtnode *p;

	if (sc->sc_rts == NULL)
		return (ENOENT);

	h = bridge_hash(sc, ea);
	LIST_FOREACH(p, &sc->sc_rts[h], brt_next) {
		if (bcmp(ea, &p->brt_addr, sizeof(p->brt_addr)) == 0) {
			LIST_REMOVE(p, brt_next);
			sc->sc_brtcnt--;
			free(p, M_DEVBUF);
			if (sc->sc_brtcnt == 0 &&
			    (sc->sc_if.if_flags & IFF_UP) == 0) {
				free(sc->sc_rts, M_DEVBUF);
				sc->sc_rts = NULL;
			}
			return (0);
		}
	}

	return (ENOENT);
}

/*
 * Delete routes to a specific interface member.
 */
void
bridge_rtdelete(sc, ifp)
	struct bridge_softc *sc;
	struct ifnet *ifp;
{
	int i;
	struct bridge_rtnode *n, *p;

	if (sc->sc_rts == NULL)
		return;

	/*
	 * Loop through all of the hash buckets and traverse each
	 * chain looking for routes to this interface.
	 */
	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
		n = LIST_FIRST(&sc->sc_rts[i]);
		while (n != LIST_END(&sc->sc_rts[i])) {
			if (n->brt_if == ifp) {		/* found one */
				p = LIST_NEXT(n, brt_next);
				LIST_REMOVE(n, brt_next);
				sc->sc_brtcnt--;
				free(n, M_DEVBUF);
				n = p;
			} else
				n = LIST_NEXT(n, brt_next);
		}
	}
	if (sc->sc_brtcnt == 0 && (sc->sc_if.if_flags & IFF_UP) == 0) {
		free(sc->sc_rts, M_DEVBUF);
		sc->sc_rts = NULL;
	}
}

/*
 * Gather all of the routes for this interface.
 */
int
bridge_rtfind(sc, baconf)
	struct bridge_softc *sc;
	struct ifbaconf *baconf;
{
	int i, error = 0;
	u_int32_t cnt = 0;
	struct bridge_rtnode *n;
	struct ifbareq bareq;

	if (sc->sc_rts == NULL || baconf->ifbac_len == 0)
		goto done;

	for (i = 0, cnt = 0; i < BRIDGE_RTABLE_SIZE; i++) {
		LIST_FOREACH(n, &sc->sc_rts[i], brt_next) {
			if (baconf->ifbac_len < sizeof(struct ifbareq))
				goto done;
			bcopy(sc->sc_if.if_xname, bareq.ifba_name,
			    sizeof(bareq.ifba_name));
			bcopy(n->brt_if->if_xname, bareq.ifba_ifsname,
			    sizeof(bareq.ifba_ifsname));
			bcopy(&n->brt_addr, &bareq.ifba_dst,
			    sizeof(bareq.ifba_dst));
			bareq.ifba_age = n->brt_age;
			bareq.ifba_flags = n->brt_flags;
			error = copyout((caddr_t)&bareq,
			    (caddr_t)(baconf->ifbac_req + cnt), sizeof(bareq));
			if (error)
				goto done;
			cnt++;
			baconf->ifbac_len -= sizeof(struct ifbareq);
		}
	}
done:
	baconf->ifbac_len = cnt * sizeof(struct ifbareq);
	return (error);
}

/*
 * Block non-ip frames:
 * Returns 0 if frame is ip, and 1 if it should be dropped.
 */
int
bridge_blocknonip(eh, m)
	struct ether_header *eh;
	struct mbuf *m;
{
	struct llc llc;
	u_int16_t etype;

	if (m->m_pkthdr.len < sizeof(struct ether_header))
		return (1);

	etype = ntohs(eh->ether_type);
	switch (etype) {
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		return (0);
	}

	if (etype > ETHERMTU)
		return (1);

	if (m->m_pkthdr.len <
	    (sizeof(struct ether_header) + LLC_SNAPFRAMELEN))
		return (1);

	m_copydata(m, sizeof(struct ether_header), LLC_SNAPFRAMELEN,
	    (caddr_t)&llc);

	etype = ntohs(llc.llc_snap.ether_type);
	if (llc.llc_dsap == LLC_SNAP_LSAP &&
	    llc.llc_ssap == LLC_SNAP_LSAP &&
	    llc.llc_control == LLC_UI &&
	    llc.llc_snap.org_code[0] == 0 &&
	    llc.llc_snap.org_code[1] == 0 &&
	    llc.llc_snap.org_code[2] == 0 &&
	    (etype == ETHERTYPE_ARP || etype == ETHERTYPE_REVARP ||
	    etype == ETHERTYPE_IP || etype == ETHERTYPE_IPV6)) {
		return (0);
	}

	return (1);
}

u_int8_t
bridge_filterrule(h, eh)
	struct brl_head *h;
	struct ether_header *eh;
{
	struct brl_node *n;
	u_int8_t flags;

	SIMPLEQ_FOREACH(n, h, brl_next) {
		flags = n->brl_flags & (BRL_FLAG_SRCVALID|BRL_FLAG_DSTVALID);
		if (flags == 0)
			return (n->brl_action);
		if (flags == (BRL_FLAG_SRCVALID|BRL_FLAG_DSTVALID)) {
			if (bcmp(eh->ether_shost, &n->brl_src, ETHER_ADDR_LEN))
				continue;
			if (bcmp(eh->ether_dhost, &n->brl_dst, ETHER_ADDR_LEN))
				continue;
			return (n->brl_action);
		}
		if (flags == BRL_FLAG_SRCVALID) {
			if (bcmp(eh->ether_shost, &n->brl_src, ETHER_ADDR_LEN))
				continue;
			return (n->brl_action);
		}
		if (flags == BRL_FLAG_DSTVALID) {
			if (bcmp(eh->ether_dhost, &n->brl_dst, ETHER_ADDR_LEN))
				continue;
			return (n->brl_action);
		}
	}
	return (BRL_ACTION_PASS);
}

int
bridge_addrule(bif, req, out)
	struct bridge_iflist *bif;
	struct ifbrlreq *req;
	int out;
{
	struct brl_node *n;

	n = (struct brl_node *)malloc(sizeof(struct brl_node),
	    M_DEVBUF, M_NOWAIT);
	if (n == NULL)
		return (ENOMEM);
	bcopy(&req->ifbr_src, &n->brl_src, sizeof(struct ether_addr));
	bcopy(&req->ifbr_dst, &n->brl_dst, sizeof(struct ether_addr));
	n->brl_action = req->ifbr_action;
	n->brl_flags = req->ifbr_flags;
	if (out) {
		n->brl_flags &= ~BRL_FLAG_IN;
		n->brl_flags |= BRL_FLAG_OUT;
		SIMPLEQ_INSERT_TAIL(&bif->bif_brlout, n, brl_next);
	} else {
		n->brl_flags &= ~BRL_FLAG_OUT;
		n->brl_flags |= BRL_FLAG_IN;
		SIMPLEQ_INSERT_TAIL(&bif->bif_brlin, n, brl_next);
	}
	return (0);
}

int
bridge_flushrule(bif)
	struct bridge_iflist *bif;
{
	struct brl_node *p;

	while (!SIMPLEQ_EMPTY(&bif->bif_brlin)) {
		p = SIMPLEQ_FIRST(&bif->bif_brlin);
		SIMPLEQ_REMOVE_HEAD(&bif->bif_brlin, p, brl_next);
		free(p, M_DEVBUF);
	}
	while (!SIMPLEQ_EMPTY(&bif->bif_brlout)) {
		p = SIMPLEQ_FIRST(&bif->bif_brlout);
		SIMPLEQ_REMOVE_HEAD(&bif->bif_brlout, p, brl_next);
		free(p, M_DEVBUF);
	}
	return (0);
}

#if NPF > 0
/*
 * Filter IP packets by peeking into the ethernet frame.  This violates
 * the ISO model, but allows us to act as an IP filter at the data link
 * layer.  As a result, most of this code will look familiar to those
 * who've read net/if_ethersubr.c and netinet/ip_input.c
 */
struct mbuf *
bridge_filter(sc, dir, ifp, eh, m)
	struct bridge_softc *sc;
	int dir;
	struct ifnet *ifp;
	struct ether_header *eh;
	struct mbuf *m;
{
#if NPF == 0
	return (m);
#else
	struct llc llc;
	int hassnap = 0;
	struct ip *ip;
	int hlen;

	if (eh->ether_type != htons(ETHERTYPE_IP)) {
		if (eh->ether_type > ETHERMTU ||
		    m->m_pkthdr.len < (LLC_SNAPFRAMELEN +
		    sizeof(struct ether_header)))
			return (m);

		m_copydata(m, sizeof(struct ether_header),
		    LLC_SNAPFRAMELEN, (caddr_t)&llc);

		if (llc.llc_dsap != LLC_SNAP_LSAP ||
		    llc.llc_ssap != LLC_SNAP_LSAP ||
		    llc.llc_control != LLC_UI ||
		    llc.llc_snap.org_code[0] ||
		    llc.llc_snap.org_code[1] ||
		    llc.llc_snap.org_code[2] ||
		    llc.llc_snap.ether_type != htons(ETHERTYPE_IP))
			return (m);
		hassnap = 1;
	}

	m_adj(m, sizeof(struct ether_header));
	if (hassnap)
		m_adj(m, LLC_SNAPFRAMELEN);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto dropit;

	/* Copy minimal header, and drop invalids */
	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		return (NULL);
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION)
		goto dropit;

	hlen = ip->ip_hl << 2;	/* get whole header length */
	if (hlen < sizeof(struct ip))
		goto dropit;
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL)
			return (NULL);
		ip = mtod(m, struct ip *);
	}

	if ((ip->ip_sum = in_cksum(m, hlen)) != 0)
		goto dropit;

	NTOHS(ip->ip_len);
	if (ip->ip_len < hlen)
		goto dropit;
	NTOHS(ip->ip_id);
	NTOHS(ip->ip_off);

	if (m->m_pkthdr.len < ip->ip_len)
		goto dropit;
	if (m->m_pkthdr.len > ip->ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
	}

	/* Finally, we get to filter the packet! */
	m->m_pkthdr.rcvif = ifp;
	if (pf_test(dir, ifp, &m) != PF_PASS)
		goto dropit;

	/* Rebuild the IP header */
	if (m->m_len < hlen && ((m = m_pullup(m, hlen)) == NULL))
		return (NULL);
	if (m->m_len < sizeof(struct ip))
		goto dropit;
	ip = mtod(m, struct ip *);
	HTONS(ip->ip_len);
	HTONS(ip->ip_id);
	HTONS(ip->ip_off);
	ip->ip_sum = 0;
	ip->ip_sum = in_cksum(m, hlen);

	/* Reattach SNAP header */
	if (hassnap) {
		M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
		if (m == NULL)
			goto dropit;
		bcopy(&llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
	}

	/* Reattach ethernet header */
	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		goto dropit;
	bcopy(eh, mtod(m, caddr_t), sizeof(*eh));

	return (m);

dropit:
	if (m != NULL)
		m_freem(m);
	return (NULL);
#endif /* NPF == 0 */
}
#endif