/*	$OpenBSD: if_bridge.c,v 1.354 2021/03/05 06:44:09 dlg Exp $	*/

/*
 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include "bpfilter.h"
#include "gif.h"
#include "pf.h"
#include "carp.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_llc.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip_icmp.h>

#ifdef IPSEC
#include <netinet/ip_ipsp.h>
#include <net/if_enc.h>
#endif

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif

#if NPF > 0
#include <net/pfvar.h>
#define	BRIDGE_IN	PF_IN
#define	BRIDGE_OUT	PF_OUT
#else
#define	BRIDGE_IN	0
#define	BRIDGE_OUT	1
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NCARP > 0
#include <netinet/ip_carp.h>
#endif

#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif

#include <net/if_bridge.h>

/*
 * Maximum number of addresses to cache
 */
#ifndef	BRIDGE_RTABLE_MAX
#define	BRIDGE_RTABLE_MAX	100
#endif

/*
 * Timeout (in seconds) for entries learned dynamically
 */
#ifndef	BRIDGE_RTABLE_TIMEOUT
#define	BRIDGE_RTABLE_TIMEOUT	240
#endif

void	bridgeattach(int);
int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
void	bridge_ifdetach(void *);
void	bridge_spandetach(void *);
int	bridge_ifremove(struct bridge_iflist *);
void	bridge_spanremove(struct bridge_iflist *);
struct mbuf *
	bridge_input(struct ifnet *, struct mbuf *, uint64_t, void *);
void	bridge_process(struct ifnet *, struct mbuf *);
void	bridgeintr_frame(struct ifnet *, struct ifnet *, struct mbuf *);
void	bridge_bifgetstp(struct bridge_softc *, struct bridge_iflist *,
	    struct ifbreq *);
void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
	    struct ether_header *, struct mbuf *);
int	bridge_localbroadcast(struct ifnet *, struct ether_header *,
	    struct mbuf *);
void	bridge_span(struct ifnet *, struct mbuf *);
void	bridge_stop(struct bridge_softc *);
void	bridge_init(struct bridge_softc *);
int	bridge_bifconf(struct bridge_softc *, struct ifbifconf *);
int	bridge_blocknonip(struct ether_header *, struct mbuf *);
void	bridge_ifinput(struct ifnet *, struct mbuf *);
int	bridge_dummy_output(struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *);
void	bridge_send_icmp_err(struct ifnet *, struct ether_header *,
	    struct mbuf *, int, struct llc *, int, int, int);
int	bridge_ifenqueue(struct ifnet *, struct ifnet *, struct mbuf *);
struct mbuf *bridge_ip(struct ifnet *, int, struct ifnet *,
	    struct ether_header *, struct mbuf *);
#ifdef IPSEC
int	bridge_ipsec(struct ifnet *, struct ether_header *, int, struct llc *,
	    int, int, int, struct mbuf *);
#endif
int	bridge_clone_create(struct if_clone *, int);
int	bridge_clone_destroy(struct ifnet *);

#define	ETHERADDR_IS_IP_MCAST(a) \
	/* struct etheraddr *a;	*/				\
	((a)->ether_addr_octet[0] == 0x01 &&			\
	 (a)->ether_addr_octet[1] == 0x00 &&			\
	 (a)->ether_addr_octet[2] == 0x5e)

struct niqueue bridgeintrq = NIQUEUE_INITIALIZER(1024, NETISR_BRIDGE);

struct if_clone bridge_cloner =
    IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);

const struct ether_brport bridge_brport = {
	bridge_input,
	NULL,
};

void
bridgeattach(int n)
{
	if_clone_attach(&bridge_cloner);
}

int
bridge_clone_create(struct if_clone *ifc, int unit)
{
	struct bridge_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	sc->sc_stp = bstp_create();
	if (!sc->sc_stp) {
		free(sc, M_DEVBUF, sizeof *sc);
		return (ENOMEM);
	}

	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
	timeout_set(&sc->sc_brtimeout, bridge_rtage, sc);
	SMR_SLIST_INIT(&sc->sc_iflist);
	SMR_SLIST_INIT(&sc->sc_spanlist);
	mtx_init(&sc->sc_mtx, IPL_MPFLOOR);
	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++)
		LIST_INIT(&sc->sc_rts[i]);
	arc4random_buf(&sc->sc_hashkey, sizeof(sc->sc_hashkey));
	ifp = &sc->sc_if;
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "%s%d", ifc->ifc_name,
	    unit);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_ioctl = bridge_ioctl;
	ifp->if_output = bridge_dummy_output;
	ifp->if_xflags = IFXF_CLONED;
	ifp->if_start = NULL;
	ifp->if_type = IFT_BRIDGE;
	ifp->if_hdrlen = ETHER_HDR_LEN;

	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&sc->sc_if.if_bpf, ifp,
	    DLT_EN10MB, ETHER_HDR_LEN);
#endif

	return (0);
}

int
bridge_dummy_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	m_freem(m);
	return (EAFNOSUPPORT);
}

int
bridge_clone_destroy(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct bridge_iflist *bif;

	/*
	 * The bridge(4) detach hook doesn't need the NET_LOCK(); worse, the
	 * use of smr_barrier() while holding the lock might lead to a
	 * deadlock situation.
	 */
	NET_ASSERT_UNLOCKED();

	bridge_stop(sc);
	bridge_rtflush(sc, IFBF_FLUSHALL);
	while ((bif = SMR_SLIST_FIRST_LOCKED(&sc->sc_iflist)) != NULL)
		bridge_ifremove(bif);
	while ((bif = SMR_SLIST_FIRST_LOCKED(&sc->sc_spanlist)) != NULL)
		bridge_spanremove(bif);

	bstp_destroy(sc->sc_stp);

	if_detach(ifp);

	free(sc, M_DEVBUF, sizeof *sc);
	return (0);
}

int
bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
	struct ifbreq *req = (struct ifbreq *)data;
	struct ifbropreq *brop = (struct ifbropreq *)data;
	struct ifnet *ifs;
	struct bridge_iflist *bif;
	struct bstp_port *bp;
	struct bstp_state *bs = sc->sc_stp;
	int error = 0;

	/*
	 * bridge(4) data structures aren't protected by the NET_LOCK().
	 * Ideally it shouldn't be taken before calling `ifp->if_ioctl'
	 * but we aren't there yet.
	 */
	NET_UNLOCK();

	switch (cmd) {
	case SIOCBRDGADD:
	/* bridge(4) does not distinguish between routing/forwarding ports */
	case SIOCBRDGADDL:
		if ((error = suser(curproc)) != 0)
			break;

		ifs = if_unit(req->ifbr_ifsname);
		if (ifs == NULL) {	/* no such interface */
			error = ENOENT;
			break;
		}
		if (ifs->if_type != IFT_ETHER) {
			if_put(ifs);
			error = EINVAL;
			break;
		}
		if (ifs->if_bridgeidx != 0) {
			if (ifs->if_bridgeidx == ifp->if_index)
				error = EEXIST;
			else
				error = EBUSY;
			if_put(ifs);
			break;
		}

		error = ether_brport_isset(ifs);
		if (error != 0) {
			if_put(ifs);
			break;
		}

		/* If it's in the span list, it can't be a member. */
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
			if (bif->ifp == ifs)
				break;
		}
		if (bif != NULL) {
			if_put(ifs);
			error = EBUSY;
			break;
		}

		bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
		if (bif == NULL) {
			if_put(ifs);
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		error = ifpromisc(ifs, 1);
		NET_UNLOCK();
		if (error != 0) {
			if_put(ifs);
			free(bif, M_DEVBUF, sizeof(*bif));
			break;
		}

		/*
		 * XXX If the NET_LOCK() or ifpromisc() calls above
		 * had to sleep, then something else could have come
		 * along and taken over ifs while the kernel lock was
		 * released.
		 */

		bif->bridge_sc = sc;
		bif->ifp = ifs;
		bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
		SIMPLEQ_INIT(&bif->bif_brlin);
		SIMPLEQ_INIT(&bif->bif_brlout);
		ifs->if_bridgeidx = ifp->if_index;
		task_set(&bif->bif_dtask, bridge_ifdetach, bif);
		if_detachhook_add(ifs, &bif->bif_dtask);
		ether_brport_set(bif->ifp, &bridge_brport);
		SMR_SLIST_INSERT_HEAD_LOCKED(&sc->sc_iflist, bif, bif_next);
		break;
	case SIOCBRDGDEL:
		if ((error = suser(curproc)) != 0)
			break;
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		bridge_ifremove(bif);
		break;
	case SIOCBRDGIFS:
		error = bridge_bifconf(sc, (struct ifbifconf *)data);
		break;
	case SIOCBRDGADDS:
		if ((error = suser(curproc)) != 0)
			break;
		ifs = if_unit(req->ifbr_ifsname);
		if (ifs == NULL) {	/* no such interface */
			error = ENOENT;
			break;
		}
		if (ifs->if_type != IFT_ETHER) {
			if_put(ifs);
			error = EINVAL;
			break;
		}
		if (ifs->if_bridgeidx != 0) {
			if (ifs->if_bridgeidx == ifp->if_index)
				error = EEXIST;
			else
				error = EBUSY;
			if_put(ifs);
			break;
		}
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
			if (bif->ifp == ifs)
				break;
		}
		if (bif != NULL) {
			if_put(ifs);
			error = EEXIST;
			break;
		}
		bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
		if (bif == NULL) {
			if_put(ifs);
			error = ENOMEM;
			break;
		}
		bif->bridge_sc = sc;
		bif->ifp = ifs;
		bif->bif_flags = IFBIF_SPAN;
		SIMPLEQ_INIT(&bif->bif_brlin);
		SIMPLEQ_INIT(&bif->bif_brlout);
		task_set(&bif->bif_dtask, bridge_spandetach, bif);
		if_detachhook_add(ifs, &bif->bif_dtask);
		SMR_SLIST_INSERT_HEAD_LOCKED(&sc->sc_spanlist, bif, bif_next);
		break;
	case SIOCBRDGDELS:
		if ((error = suser(curproc)) != 0)
			break;
		ifs = if_unit(req->ifbr_ifsname);
		if (ifs == NULL) {
			error = ENOENT;
			break;
		}
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
			if (bif->ifp == ifs)
				break;
		}
		if_put(ifs);
		if (bif == NULL) {
			error = ESRCH;
			break;
		}
		bridge_spanremove(bif);
		break;
	case SIOCBRDGGIFFLGS:
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		req->ifbr_ifsflags = bif->bif_flags;
		req->ifbr_portno = bif->ifp->if_index & 0xfff;
		req->ifbr_protected = bif->bif_protected;
		if (bif->bif_flags & IFBIF_STP)
			bridge_bifgetstp(sc, bif, req);
		break;
	case SIOCBRDGSIFFLGS:
		if (req->ifbr_ifsflags & IFBIF_RO_MASK) {
			error = EINVAL;
			break;
		}
		if ((error = suser(curproc)) != 0)
			break;
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		if (req->ifbr_ifsflags & IFBIF_STP) {
			if ((bif->bif_flags & IFBIF_STP) == 0) {
				/* Enable STP */
				if ((bif->bif_stp = bstp_add(sc->sc_stp,
				    bif->ifp)) == NULL) {
					error = ENOMEM;
					break;
				}
			} else {
				/* Update STP flags */
				bstp_ifsflags(bif->bif_stp, req->ifbr_ifsflags);
			}
		} else if (bif->bif_flags & IFBIF_STP) {
			bstp_delete(bif->bif_stp);
			bif->bif_stp = NULL;
		}
		bif->bif_flags = req->ifbr_ifsflags;
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == IFF_UP)
			bridge_init(sc);

		if ((ifp->if_flags & IFF_UP) == 0)
			bridge_stop(sc);

		break;
	case SIOCBRDGGPARAM:
		if ((bp = bs->bs_root_port) == NULL)
			brop->ifbop_root_port = 0;
		else
			brop->ifbop_root_port = bp->bp_ifindex;
		brop->ifbop_maxage = bs->bs_bridge_max_age >> 8;
		brop->ifbop_hellotime = bs->bs_bridge_htime >> 8;
		brop->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
		brop->ifbop_holdcount = bs->bs_txholdcount;
		brop->ifbop_priority = bs->bs_bridge_priority;
		brop->ifbop_protocol = bs->bs_protover;
		brop->ifbop_root_bridge = bs->bs_root_pv.pv_root_id;
		brop->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
		brop->ifbop_root_port = bs->bs_root_pv.pv_port_id;
		brop->ifbop_desg_bridge = bs->bs_root_pv.pv_dbridge_id;
		brop->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
		brop->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
		break;
	case SIOCBRDGSIFPROT:
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		bif->bif_protected = req->ifbr_protected;
		break;
	case SIOCBRDGRTS:
	case SIOCBRDGGCACHE:
	case SIOCBRDGGPRI:
	case SIOCBRDGGMA:
	case SIOCBRDGGHT:
	case SIOCBRDGGFD:
	case SIOCBRDGGTO:
	case SIOCBRDGGRL:
		break;
	case SIOCBRDGFLUSH:
	case SIOCBRDGSADDR:
	case SIOCBRDGDADDR:
	case SIOCBRDGSCACHE:
	case SIOCBRDGSTO:
	case SIOCBRDGARL:
	case SIOCBRDGFRL:
	case SIOCBRDGSPRI:
	case SIOCBRDGSFD:
	case SIOCBRDGSMA:
	case SIOCBRDGSHT:
	case SIOCBRDGSTXHC:
	case SIOCBRDGSPROTO:
	case SIOCBRDGSIFPRIO:
	case SIOCBRDGSIFCOST:
		error = suser(curproc);
		break;
	default:
		error = ENOTTY;
		break;
	}

	if (!error)
		error = bridgectl_ioctl(ifp, cmd, data);

	if (!error)
		error = bstp_ioctl(ifp, cmd, data);

	NET_LOCK();
	return (error);
}

/* Detach an interface from a bridge. */
int
bridge_ifremove(struct bridge_iflist *bif)
{
	struct bridge_softc *sc = bif->bridge_sc;
	int error;

	SMR_SLIST_REMOVE_LOCKED(&sc->sc_iflist, bif, bridge_iflist, bif_next);
	if_detachhook_del(bif->ifp, &bif->bif_dtask);
	ether_brport_clr(bif->ifp);

	smr_barrier();

	if (bif->bif_flags & IFBIF_STP) {
		bstp_delete(bif->bif_stp);
		bif->bif_stp = NULL;
	}

	bif->ifp->if_bridgeidx = 0;
	NET_LOCK();
	error = ifpromisc(bif->ifp, 0);
	NET_UNLOCK();

	bridge_rtdelete(sc, bif->ifp, 0);
	bridge_flushrule(bif);

	if_put(bif->ifp);
	bif->ifp = NULL;
	free(bif, M_DEVBUF, sizeof(*bif));

	return (error);
}

void
bridge_spanremove(struct bridge_iflist *bif)
{
	struct bridge_softc *sc = bif->bridge_sc;

	SMR_SLIST_REMOVE_LOCKED(&sc->sc_spanlist, bif, bridge_iflist, bif_next);
	if_detachhook_del(bif->ifp, &bif->bif_dtask);

	smr_barrier();

	if_put(bif->ifp);
	bif->ifp = NULL;
	free(bif, M_DEVBUF, sizeof(*bif));
}

void
bridge_ifdetach(void *xbif)
{
	struct bridge_iflist *bif = xbif;

	/*
	 * The bridge(4) detach hook doesn't need the NET_LOCK(); worse, the
	 * use of smr_barrier() while holding the lock might lead to a
	 * deadlock situation.
	 */
	NET_UNLOCK();
	bridge_ifremove(bif);
	NET_LOCK();
}

void
bridge_spandetach(void *xbif)
{
	struct bridge_iflist *bif = xbif;

	/*
	 * The bridge(4) detach hook doesn't need the NET_LOCK(); worse, the
	 * use of smr_barrier() while holding the lock might lead to a
	 * deadlock situation.
	 */
	NET_UNLOCK();
	bridge_spanremove(bif);
	NET_LOCK();
}

void
bridge_bifgetstp(struct bridge_softc *sc, struct bridge_iflist *bif,
    struct ifbreq *breq)
{
	struct bstp_state *bs = sc->sc_stp;
	struct bstp_port *bp = bif->bif_stp;

	breq->ifbr_state = bstp_getstate(bs, bp);
	breq->ifbr_priority = bp->bp_priority;
	breq->ifbr_path_cost = bp->bp_path_cost;
	breq->ifbr_proto = bp->bp_protover;
	breq->ifbr_role = bp->bp_role;
	breq->ifbr_stpflags = bp->bp_flags;
	breq->ifbr_fwd_trans = bp->bp_forward_transitions;
	breq->ifbr_root_bridge = bs->bs_root_pv.pv_root_id;
	breq->ifbr_root_cost = bs->bs_root_pv.pv_cost;
	breq->ifbr_root_port = bs->bs_root_pv.pv_port_id;
	breq->ifbr_desg_bridge = bs->bs_root_pv.pv_dbridge_id;
	breq->ifbr_desg_port = bs->bs_root_pv.pv_dport_id;

	/* Copy STP state options as flags */
	if (bp->bp_operedge)
		breq->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
		breq->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
	if (bp->bp_ptp_link)
		breq->ifbr_ifsflags |= IFBIF_BSTP_PTP;
	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
		breq->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
}

int
bridge_bifconf(struct bridge_softc *sc, struct ifbifconf *bifc)
{
	struct bridge_iflist *bif;
	u_int32_t total = 0, i = 0;
	int error = 0;
	struct ifbreq *breq, *breqs = NULL;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next)
		total++;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next)
		total++;

	if (bifc->ifbic_len == 0) {
		i = total;
		goto done;
	}

	breqs = mallocarray(total, sizeof(*breqs), M_TEMP, M_NOWAIT|M_ZERO);
	if (breqs == NULL)
		goto done;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		if (bifc->ifbic_len < (i + 1) * sizeof(*breqs))
			break;
		breq = &breqs[i];
		strlcpy(breq->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(breq->ifbr_ifsname, bif->ifp->if_xname, IFNAMSIZ);
		breq->ifbr_ifsflags = bif->bif_flags;
		breq->ifbr_portno = bif->ifp->if_index & 0xfff;
		breq->ifbr_protected = bif->bif_protected;
		if (bif->bif_flags & IFBIF_STP)
			bridge_bifgetstp(sc, bif, breq);
		i++;
	}
	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
		if (bifc->ifbic_len < (i + 1) * sizeof(*breqs))
			break;
		breq = &breqs[i];
		strlcpy(breq->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(breq->ifbr_ifsname, bif->ifp->if_xname, IFNAMSIZ);
		breq->ifbr_ifsflags = bif->bif_flags | IFBIF_SPAN;
		breq->ifbr_portno = bif->ifp->if_index & 0xfff;
		i++;
	}

	error = copyout(breqs, bifc->ifbic_req, i * sizeof(*breqs));
done:
	free(breqs, M_TEMP, total * sizeof(*breq));
	bifc->ifbic_len = i * sizeof(*breq);
	return (error);
}

int
bridge_findbif(struct bridge_softc *sc, const char *name,
    struct bridge_iflist **rbif)
{
	struct ifnet *ifp;
	struct bridge_iflist *bif;
	int error = 0;

	KERNEL_ASSERT_LOCKED();

	if ((ifp = if_unit(name)) == NULL)
		return (ENOENT);

	if (ifp->if_bridgeidx != sc->sc_if.if_index) {
		error = ESRCH;
		goto put;
	}

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		if (bif->ifp == ifp)
			break;
	}

	if (bif == NULL) {
		error = ENOENT;
		goto put;
	}

	*rbif = bif;
put:
	if_put(ifp);

	return (error);
}

struct bridge_iflist *
bridge_getbif(struct ifnet *ifp)
{
	struct bridge_iflist *bif;
	struct bridge_softc *sc;
	struct ifnet *bifp;

	KERNEL_ASSERT_LOCKED();

	bifp = if_get(ifp->if_bridgeidx);
	if (bifp == NULL)
		return (NULL);

	sc = bifp->if_softc;
	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		if (bif->ifp == ifp)
			break;
	}

	if_put(bifp);

	return (bif);
}

void
bridge_init(struct bridge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	bstp_enable(sc->sc_stp, ifp->if_index);

	if (sc->sc_brttimeout != 0)
		timeout_add_sec(&sc->sc_brtimeout, sc->sc_brttimeout);

	SET(ifp->if_flags, IFF_RUNNING);
}

/*
 * Stop the bridge and deallocate the routing table.
 */
void
bridge_stop(struct bridge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	CLR(ifp->if_flags, IFF_RUNNING);

	bstp_disable(sc->sc_stp);

	timeout_del_barrier(&sc->sc_brtimeout);

	bridge_rtflush(sc, IFBF_FLUSHDYN);
}

/*
 * Send output from the bridge.  The mbuf has the ethernet header
 * already attached.  We must enqueue or free the mbuf before exiting.
 */
int
bridge_enqueue(struct ifnet *ifp, struct mbuf *m)
{
	struct ifnet *brifp;
	struct ether_header *eh;
	struct ifnet *dst_if = NULL;
	unsigned int dst_ifidx = 0;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif
	int error = 0;

	if (m->m_len < sizeof(*eh)) {
		m = m_pullup(m, sizeof(*eh));
		if (m == NULL)
			return (ENOBUFS);
	}

	/* ifp must be a member interface of the bridge. */
	brifp = if_get(ifp->if_bridgeidx);
	if (brifp == NULL) {
		m_freem(m);
		return (EINVAL);
	}

	/*
	 * If bridge is down, but original output interface is up,
	 * go ahead and send out that interface.  Otherwise the packet
	 * is dropped below.
	 */
	if (!ISSET(brifp->if_flags, IFF_RUNNING)) {
		/* Loop prevention. */
		m->m_flags |= M_PROTO1;
		error = if_enqueue(ifp, m);
		if_put(brifp);
		return (error);
	}

#if NBPFILTER > 0
	if_bpf = brifp->if_bpf;
	if (if_bpf)
		bpf_mtap(if_bpf, m, BPF_DIRECTION_OUT);
#endif
	ifp->if_opackets++;
	ifp->if_obytes += m->m_pkthdr.len;

	bridge_span(brifp, m);

	eh = mtod(m, struct ether_header *);
	if (!ETHER_IS_MULTICAST(eh->ether_dhost)) {
		struct ether_addr *dst;

		dst = (struct ether_addr *)&eh->ether_dhost[0];
		dst_ifidx = bridge_rtlookup(brifp, dst, m);
	}

	/*
	 * If the packet is a broadcast or we don't know a better way to
	 * get there, send to all interfaces.
	 */
	if (dst_ifidx == 0) {
		struct bridge_softc *sc = brifp->if_softc;
		struct bridge_iflist *bif;
		struct mbuf *mc;

		smr_read_enter();
		SMR_SLIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
			dst_if = bif->ifp;
			if ((dst_if->if_flags & IFF_RUNNING) == 0)
				continue;

			/*
			 * If this is not the original output interface,
			 * and the interface is participating in spanning
			 * tree, make sure the port is in a state that
			 * allows forwarding.
			 */
			if (dst_if != ifp &&
			    (bif->bif_flags & IFBIF_STP) &&
			    (bif->bif_state == BSTP_IFSTATE_DISCARDING))
				continue;
			if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
			    (m->m_flags & (M_BCAST | M_MCAST)) == 0)
				continue;

			if (bridge_filterrule(&bif->bif_brlout, eh, m) ==
			    BRL_ACTION_BLOCK)
				continue;

			mc = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
			if (mc == NULL) {
				brifp->if_oerrors++;
				continue;
			}

			error = bridge_ifenqueue(brifp, dst_if, mc);
			if (error)
				continue;
		}
		smr_read_leave();
		m_freem(m);
		goto out;
	}

	dst_if = if_get(dst_ifidx);
	if ((dst_if == NULL) || !ISSET(dst_if->if_flags, IFF_RUNNING)) {
		m_freem(m);
		if_put(dst_if);
		error = ENETDOWN;
		goto out;
	}

	bridge_ifenqueue(brifp, dst_if, m);
	if_put(dst_if);
out:
	if_put(brifp);
	return (error);
}

/*
 * Loop through each bridge interface and process their input queues.
 */
void
bridgeintr(void)
{
	struct mbuf_list ml;
	struct mbuf *m;
	struct ifnet *ifp;

	niq_delist(&bridgeintrq, &ml);
	if (ml_empty(&ml))
		return;

	KERNEL_LOCK();
	while ((m = ml_dequeue(&ml)) != NULL) {

		ifp = if_get(m->m_pkthdr.ph_ifidx);
		if (ifp == NULL) {
			m_freem(m);
			continue;
		}

		bridge_process(ifp, m);

		if_put(ifp);
	}
	KERNEL_UNLOCK();
}

/*
 * Process a single frame.  Frame must be freed or queued before returning.
 */
void
bridgeintr_frame(struct ifnet *brifp, struct ifnet *src_if, struct mbuf *m)
{
	struct bridge_softc *sc = brifp->if_softc;
	struct ifnet *dst_if = NULL;
	struct bridge_iflist *bif;
	struct ether_addr *dst, *src;
	struct ether_header eh;
	unsigned int dst_ifidx;
	u_int32_t protected;
	int len;

	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;

	bif = bridge_getbif(src_if);
	KASSERT(bif != NULL);

	m_copydata(m, 0, ETHER_HDR_LEN, &eh);
	dst = (struct ether_addr *)&eh.ether_dhost[0];
	src = (struct ether_addr *)&eh.ether_shost[0];

	/*
	 * If interface is learning, and if source address
	 * is not broadcast or multicast, record its address.
	 */
	if ((bif->bif_flags & IFBIF_LEARNING) &&
	    !ETHER_IS_MULTICAST(eh.ether_shost) &&
	    !ETHER_IS_ANYADDR(eh.ether_shost))
		bridge_rtupdate(sc, src, src_if, 0, IFBAF_DYNAMIC, m);

	if ((bif->bif_flags & IFBIF_STP) &&
	    (bif->bif_state == BSTP_IFSTATE_LEARNING)) {
		m_freem(m);
		return;
	}

	/*
	 * At this point, the port either doesn't participate in stp or
	 * it's in the forwarding state
	 */

	/*
	 * If packet is unicast, destined for someone on "this"
	 * side of the bridge, drop it.
	 */
	if (!ETHER_IS_MULTICAST(eh.ether_dhost)) {
		dst_ifidx = bridge_rtlookup(brifp, dst, NULL);
		if (dst_ifidx == src_if->if_index) {
			m_freem(m);
			return;
		}
	} else {
		if (ETHER_IS_BROADCAST(eh.ether_dhost))
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
	}

	/*
	 * Multicast packets get handled a little differently:
	 * If interface is:
	 *	-link0,-link1	(default) Forward all multicast
	 *			as broadcast.
	 *	-link0,link1	Drop non-IP multicast, forward
	 *			as broadcast IP multicast.
	 *	link0,-link1	Drop IP multicast, forward as
	 *			broadcast non-IP multicast.
	 *	link0,link1	Drop all multicast.
	 */
	if (m->m_flags & M_MCAST) {
		if ((sc->sc_if.if_flags &
		    (IFF_LINK0 | IFF_LINK1)) ==
		    (IFF_LINK0 | IFF_LINK1)) {
			m_freem(m);
			return;
		}
		if (sc->sc_if.if_flags & IFF_LINK0 &&
		    ETHERADDR_IS_IP_MCAST(dst)) {
			m_freem(m);
			return;
		}
		if (sc->sc_if.if_flags & IFF_LINK1 &&
		    !ETHERADDR_IS_IP_MCAST(dst)) {
			m_freem(m);
			return;
		}
	}

	if (bif->bif_flags & IFBIF_BLOCKNONIP && bridge_blocknonip(&eh, m)) {
		m_freem(m);
		return;
	}

	if (bridge_filterrule(&bif->bif_brlin, &eh, m) == BRL_ACTION_BLOCK) {
		m_freem(m);
		return;
	}
	m = bridge_ip(&sc->sc_if, BRIDGE_IN, src_if, &eh, m);
	if (m == NULL)
		return;
	/*
	 * If the packet is a multicast or broadcast OR if we don't
	 * know any better, forward it to all interfaces.
	 */
	if ((m->m_flags & (M_BCAST | M_MCAST)) || dst_ifidx == 0) {
		sc->sc_if.if_imcasts++;
		bridge_broadcast(sc, src_if, &eh, m);
		return;
	}
	protected = bif->bif_protected;

	dst_if = if_get(dst_ifidx);
	if (dst_if == NULL)
		goto bad;

	/*
	 * At this point, we're dealing with a unicast frame going to a
	 * different interface
	 */
	if (!ISSET(dst_if->if_flags, IFF_RUNNING))
		goto bad;
	bif = bridge_getbif(dst_if);
	if ((bif == NULL) || ((bif->bif_flags & IFBIF_STP) &&
	    (bif->bif_state == BSTP_IFSTATE_DISCARDING)))
		goto bad;
	/*
	 * Do not transmit if both ports are part of the same protected
	 * domain.
	 */
	if (protected != 0 && (protected & bif->bif_protected))
		goto bad;
	if (bridge_filterrule(&bif->bif_brlout, &eh, m) == BRL_ACTION_BLOCK)
		goto bad;
	m = bridge_ip(&sc->sc_if, BRIDGE_OUT, dst_if, &eh, m);
	if (m == NULL)
		goto bad;

	len = m->m_pkthdr.len;
#if NVLAN > 0
	if ((m->m_flags & M_VLANTAG) &&
	    (dst_if->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
		len += ETHER_VLAN_ENCAP_LEN;
#endif
	if ((len - ETHER_HDR_LEN) > dst_if->if_mtu)
		bridge_fragment(&sc->sc_if, dst_if, &eh, m);
	else {
		bridge_ifenqueue(&sc->sc_if, dst_if, m);
	}
	m = NULL;
bad:
	if_put(dst_if);
	m_freem(m);
}

/*
 * Return 1 if `ena' belongs to `ifp', 0 otherwise.
 */
int
bridge_ourether(struct ifnet *ifp, uint8_t *ena)
{
	struct arpcom *ac = (struct arpcom *)ifp;

	if (memcmp(ac->ac_enaddr, ena, ETHER_ADDR_LEN) == 0)
		return (1);

#if NCARP > 0
	if (carp_ourether(ifp, ena))
		return (1);
#endif

	return (0);
}

/*
 * Receive input from an interface.  Queue the packet for bridging if it's
 * not for us, and schedule an interrupt.
 */
struct mbuf *
bridge_input(struct ifnet *ifp, struct mbuf *m, uint64_t dst, void *null)
{
	KASSERT(m->m_flags & M_PKTHDR);

	if (m->m_flags & M_PROTO1) {
		m->m_flags &= ~M_PROTO1;
		return (m);
	}

	niq_enqueue(&bridgeintrq, m);

	return (NULL);
}

void
bridge_process(struct ifnet *ifp, struct mbuf *m)
{
	struct ifnet *brifp;
	struct bridge_softc *sc;
	struct bridge_iflist *bif = NULL, *bif0 = NULL;
	struct ether_header *eh;
	struct mbuf *mc;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif

	KERNEL_ASSERT_LOCKED();

	brifp = if_get(ifp->if_bridgeidx);
	if ((brifp == NULL) || !ISSET(brifp->if_flags, IFF_RUNNING))
		goto reenqueue;

	if (m->m_pkthdr.len < sizeof(*eh))
		goto bad;

#if NVLAN > 0
	/*
	 * If the underlying interface removed the VLAN header itself,
	 * add it back.
	 */
	if (ISSET(m->m_flags, M_VLANTAG)) {
		m = vlan_inject(m, ETHERTYPE_VLAN, m->m_pkthdr.ether_vtag);
		if (m == NULL)
			goto bad;
	}
#endif

#if NBPFILTER > 0
	if_bpf = brifp->if_bpf;
	if (if_bpf)
		bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_IN);
#endif

	eh = mtod(m, struct ether_header *);

	sc = brifp->if_softc;
	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		struct arpcom *ac = (struct arpcom *)bif->ifp;
		if (memcmp(ac->ac_enaddr, eh->ether_shost, ETHER_ADDR_LEN) == 0)
			goto bad;
		if (bif->ifp == ifp)
			bif0 = bif;
	}
	if (bif0 == NULL)
		goto reenqueue;

	bridge_span(brifp, m);

	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
		/*
		 * Reserved destination MAC addresses (01:80:C2:00:00:0x)
		 * should not be forwarded to bridge members according to
		 * section 7.12.6 of the 802.1D-2004 specification.  The
		 * STP destination address (as stored in bstp_etheraddr)
		 * is the first of these.
		 */
		if (memcmp(eh->ether_dhost, bstp_etheraddr,
		    ETHER_ADDR_LEN - 1) == 0) {
			if (eh->ether_dhost[ETHER_ADDR_LEN - 1] == 0) {
				/* STP traffic */
				m = bstp_input(sc->sc_stp, bif0->bif_stp, eh,
				    m);
				if (m == NULL)
					goto bad;
			} else if (eh->ether_dhost[ETHER_ADDR_LEN - 1] <= 0xf)
				goto bad;
		}

		/*
		 * No need to process frames for ifs in the discarding state
		 */
		if ((bif0->bif_flags & IFBIF_STP) &&
		    (bif0->bif_state == BSTP_IFSTATE_DISCARDING))
			goto reenqueue;

		mc = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
		if (mc == NULL)
			goto reenqueue;

		bridge_ifinput(ifp, mc);

		bridgeintr_frame(brifp, ifp, m);
		if_put(brifp);
		return;
	}

	/*
	 * Unicast, make sure it's not for us.
	 */
	if (bridge_ourether(bif0->ifp, eh->ether_dhost)) {
		bif = bif0;
	} else {
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
			if (bif->ifp == ifp)
				continue;
			if (bridge_ourether(bif->ifp, eh->ether_dhost))
				break;
		}
	}
	if (bif != NULL) {
		if (bif0->bif_flags & IFBIF_LEARNING)
			bridge_rtupdate(sc,
			    (struct ether_addr *)&eh->ether_shost,
			    ifp, 0, IFBAF_DYNAMIC, m);
		if (bridge_filterrule(&bif0->bif_brlin, eh, m) ==
		    BRL_ACTION_BLOCK) {
			goto bad;
		}

		/* Count for the bridge */
		brifp->if_ipackets++;
		brifp->if_ibytes += m->m_pkthdr.len;

		ifp = bif->ifp;
		goto reenqueue;
	}

	bridgeintr_frame(brifp, ifp, m);
	if_put(brifp);
	return;

reenqueue:
	bridge_ifinput(ifp, m);
	m = NULL;
bad:
	m_freem(m);
	if_put(brifp);
}

/*
 * Send a frame to all interfaces that are members of the bridge
 * (except the one it came in on).
 */
void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *m)
{
	struct bridge_iflist *bif;
	struct mbuf *mc;
	struct ifnet *dst_if;
	int len, used = 0;
	u_int32_t protected;

	bif = bridge_getbif(ifp);
	KASSERT(bif != NULL);
	protected = bif->bif_protected;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		dst_if = bif->ifp;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			continue;

		if ((bif->bif_flags & IFBIF_STP) &&
		    (bif->bif_state == BSTP_IFSTATE_DISCARDING))
			continue;

		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST | M_MCAST)) == 0)
			continue;

		/* Drop non-IP frames if the appropriate flag is set. */
		if (bif->bif_flags & IFBIF_BLOCKNONIP &&
		    bridge_blocknonip(eh, m))
			continue;

		/*
		 * Do not transmit if both ports are part of the same
		 * protected domain.
		 */
		if (protected != 0 && (protected & bif->bif_protected))
			continue;

		if (bridge_filterrule(&bif->bif_brlout, eh, m) ==
		    BRL_ACTION_BLOCK)
			continue;

		/*
		 * Don't retransmit out of the same interface where
		 * the packet was received from.
		 */
		if (dst_if->if_index == ifp->if_index)
			continue;

		if (bridge_localbroadcast(dst_if, eh, m))
			sc->sc_if.if_oerrors++;

		/* If last one, reuse the passed-in mbuf */
		if (SMR_SLIST_NEXT_LOCKED(bif, bif_next) == NULL) {
			mc = m;
			used = 1;
		} else {
			mc = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
			if (mc == NULL) {
				sc->sc_if.if_oerrors++;
				continue;
			}
		}

		mc = bridge_ip(&sc->sc_if, BRIDGE_OUT, dst_if, eh, mc);
		if (mc == NULL)
			continue;

		len = mc->m_pkthdr.len;
#if NVLAN > 0
		if ((mc->m_flags & M_VLANTAG) &&
		    (dst_if->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
			len += ETHER_VLAN_ENCAP_LEN;
#endif
		if ((len - ETHER_HDR_LEN) > dst_if->if_mtu)
			bridge_fragment(&sc->sc_if, dst_if, eh, mc);
		else {
			bridge_ifenqueue(&sc->sc_if, dst_if, mc);
		}
	}

	if (!used)
		m_freem(m);
}

int
bridge_localbroadcast(struct ifnet *ifp, struct ether_header *eh,
    struct mbuf *m)
{
	struct mbuf *m1;
	u_int16_t etype;

	/*
	 * quick optimisation, don't send packets up the stack if no
	 * corresponding address has been specified.
	 */
	etype = ntohs(eh->ether_type);
	if (!(m->m_flags & M_VLANTAG) && etype == ETHERTYPE_IP) {
		struct ifaddr *ifa;
		TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
			if (ifa->ifa_addr->sa_family == AF_INET)
				break;
		}
		if (ifa == NULL)
			return (0);
	}

	m1 = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
	if (m1 == NULL)
		return (1);

#if NPF > 0
	pf_pkt_addr_changed(m1);
#endif	/* NPF */

	bridge_ifinput(ifp, m1);

	return (0);
}

void
bridge_span(struct ifnet *brifp, struct mbuf *m)
{
	struct bridge_softc *sc = brifp->if_softc;
	struct bridge_iflist *bif;
	struct ifnet *ifp;
	struct mbuf *mc;
	int error;

	smr_read_enter();
	SMR_SLIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
		ifp = bif->ifp;

		if ((ifp->if_flags & IFF_RUNNING) == 0)
			continue;

		mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
		if (mc == NULL) {
			brifp->if_oerrors++;
			continue;
		}

		error = bridge_ifenqueue(brifp, ifp, mc);
		if (error)
			continue;
	}
	smr_read_leave();
}

/*
 * Block non-ip frames:
 * Returns 0 if frame is ip, and 1 if it should be dropped.
 */
int
bridge_blocknonip(struct ether_header *eh, struct mbuf *m)
{
	struct llc llc;
	u_int16_t etype;

	if (m->m_pkthdr.len < ETHER_HDR_LEN)
		return (1);

#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		return (1);
#endif

	etype = ntohs(eh->ether_type);
	switch (etype) {
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		return (0);
	}

	if (etype > ETHERMTU)
		return (1);

	if (m->m_pkthdr.len <
	    (ETHER_HDR_LEN + LLC_SNAPFRAMELEN))
		return (1);

	m_copydata(m, ETHER_HDR_LEN, LLC_SNAPFRAMELEN, &llc);

	etype = ntohs(llc.llc_snap.ether_type);
	if (llc.llc_dsap == LLC_SNAP_LSAP &&
	    llc.llc_ssap == LLC_SNAP_LSAP &&
	    llc.llc_control == LLC_UI &&
	    llc.llc_snap.org_code[0] == 0 &&
	    llc.llc_snap.org_code[1] == 0 &&
	    llc.llc_snap.org_code[2] == 0 &&
	    (etype == ETHERTYPE_ARP || etype == ETHERTYPE_REVARP ||
	    etype == ETHERTYPE_IP || etype == ETHERTYPE_IPV6)) {
		return (0);
	}

	return (1);
}

#ifdef IPSEC
int
bridge_ipsec(struct ifnet *ifp, struct ether_header *eh, int hassnap,
    struct llc *llc, int dir, int af, int hlen, struct mbuf *m)
{
	union sockaddr_union dst;
	struct tdb *tdb;
	u_int32_t spi;
	u_int16_t cpi;
	int error, off;
	u_int8_t proto = 0;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif /* INET6 */
#if NPF > 0
	struct ifnet *encif;
#endif

	if (dir == BRIDGE_IN) {
		switch (af) {
		case AF_INET:
			if (m->m_pkthdr.len - hlen < 2 * sizeof(u_int32_t))
				goto skiplookup;

			ip = mtod(m, struct ip *);
			proto = ip->ip_p;
			off = offsetof(struct ip, ip_p);

			if (proto != IPPROTO_ESP && proto != IPPROTO_AH &&
			    proto != IPPROTO_IPCOMP)
				goto skiplookup;

			bzero(&dst, sizeof(union sockaddr_union));
			dst.sa.sa_family = AF_INET;
			dst.sin.sin_len = sizeof(struct sockaddr_in);
			m_copydata(m, offsetof(struct ip, ip_dst),
			    sizeof(struct in_addr), &dst.sin.sin_addr);

			break;
#ifdef INET6
		case AF_INET6:
			if (m->m_pkthdr.len - hlen < 2 * sizeof(u_int32_t))
				goto skiplookup;

			ip6 = mtod(m, struct ip6_hdr *);

			/* XXX We should chase down the header chain */
			proto = ip6->ip6_nxt;
			off = offsetof(struct ip6_hdr, ip6_nxt);

			if (proto != IPPROTO_ESP && proto != IPPROTO_AH &&
			    proto != IPPROTO_IPCOMP)
				goto skiplookup;

			bzero(&dst, sizeof(union sockaddr_union));
			dst.sa.sa_family = AF_INET6;
			dst.sin6.sin6_len = sizeof(struct sockaddr_in6);
			m_copydata(m, offsetof(struct ip6_hdr, ip6_dst),
			    sizeof(struct in6_addr), &dst.sin6.sin6_addr);

			break;
#endif /* INET6 */
		default:
			return (0);
		}

		switch (proto) {
		case IPPROTO_ESP:
			m_copydata(m, hlen, sizeof(u_int32_t), &spi);
			break;
		case IPPROTO_AH:
			m_copydata(m, hlen + sizeof(u_int32_t),
			    sizeof(u_int32_t), &spi);
			break;
		case IPPROTO_IPCOMP:
			m_copydata(m, hlen + sizeof(u_int16_t),
			    sizeof(u_int16_t), &cpi);
			spi = htonl(ntohs(cpi));
			break;
		}

		NET_ASSERT_LOCKED();

		tdb = gettdb(ifp->if_rdomain, spi, &dst, proto);
		if (tdb != NULL && (tdb->tdb_flags & TDBF_INVALID) == 0 &&
		    tdb->tdb_xform != NULL) {
			if (tdb->tdb_first_use == 0) {
				tdb->tdb_first_use = gettime();
				if (tdb->tdb_flags & TDBF_FIRSTUSE)
					timeout_add_sec(&tdb->tdb_first_tmo,
					    tdb->tdb_exp_first_use);
				if (tdb->tdb_flags & TDBF_SOFT_FIRSTUSE)
					timeout_add_sec(&tdb->tdb_sfirst_tmo,
					    tdb->tdb_soft_first_use);
			}

			(*(tdb->tdb_xform->xf_input))(m, tdb, hlen, off);
			return (1);
		} else {
 skiplookup:
			/* XXX do an input policy lookup */
			return (0);
		}
	} else { /* Outgoing from the bridge. */
		tdb = ipsp_spd_lookup(m, af, hlen, &error,
		    IPSP_DIRECTION_OUT, NULL, NULL, 0);
		if (tdb != NULL) {
			/*
			 * We don't need to do loop detection, the
			 * bridge will do that for us.
			 */
#if NPF > 0
			if ((encif = enc_getif(tdb->tdb_rdomain,
			    tdb->tdb_tap)) == NULL ||
			    pf_test(af, dir, encif, &m) != PF_PASS) {
				m_freem(m);
				return (1);
			}
			if (m == NULL)
				return (1);
			else if (af == AF_INET)
				in_proto_cksum_out(m, encif);
#ifdef INET6
			else if (af == AF_INET6)
				in6_proto_cksum_out(m, encif);
#endif /* INET6 */
#endif /* NPF */

			ip = mtod(m, struct ip *);
			if ((af == AF_INET) &&
			    ip_mtudisc && (ip->ip_off & htons(IP_DF)) &&
			    tdb->tdb_mtu && ntohs(ip->ip_len) > tdb->tdb_mtu &&
			    tdb->tdb_mtutimeout > gettime())
				bridge_send_icmp_err(ifp, eh, m,
				    hassnap, llc, tdb->tdb_mtu,
				    ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG);
			else
				error = ipsp_process_packet(m, tdb, af, 0);
			return (1);
		} else
			return (0);
	}

	return (0);
}
#endif /* IPSEC */

/*
 * Filter IP packets by peeking into the ethernet frame.  This violates
 * the ISO model, but allows us to act as an IP filter at the data link
 * layer.  As a result, most of this code will look familiar to those
 * who've read net/if_ethersubr.c and netinet/ip_input.c
 */
struct mbuf *
bridge_ip(struct ifnet *brifp, int dir, struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *m)
{
	struct llc llc;
	int hassnap = 0;
	struct ip *ip;
	int hlen;
	u_int16_t etype;

#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		return (m);
#endif

	etype = ntohs(eh->ether_type);

	if (etype != ETHERTYPE_IP && etype != ETHERTYPE_IPV6) {
		if (etype > ETHERMTU ||
		    m->m_pkthdr.len < (LLC_SNAPFRAMELEN +
		    ETHER_HDR_LEN))
			return (m);

		m_copydata(m, ETHER_HDR_LEN, LLC_SNAPFRAMELEN, &llc);

		if (llc.llc_dsap != LLC_SNAP_LSAP ||
		    llc.llc_ssap != LLC_SNAP_LSAP ||
		    llc.llc_control != LLC_UI ||
		    llc.llc_snap.org_code[0] ||
		    llc.llc_snap.org_code[1] ||
		    llc.llc_snap.org_code[2])
			return (m);

		etype = ntohs(llc.llc_snap.ether_type);
		if (etype != ETHERTYPE_IP && etype != ETHERTYPE_IPV6)
			return (m);
		hassnap = 1;
	}

	m_adj(m, ETHER_HDR_LEN);
	if (hassnap)
		m_adj(m, LLC_SNAPFRAMELEN);

	switch (etype) {

	case ETHERTYPE_IP:
		if (m->m_pkthdr.len < sizeof(struct ip))
			goto dropit;

		/* Copy minimal header, and drop invalids */
		if (m->m_len < sizeof(struct ip) &&
		    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
			ipstat_inc(ips_toosmall);
			return (NULL);
		}
		ip = mtod(m, struct ip *);

		if (ip->ip_v != IPVERSION) {
			ipstat_inc(ips_badvers);
			goto dropit;
		}

		hlen = ip->ip_hl << 2;	/* get whole header length */
		if (hlen < sizeof(struct ip)) {
			ipstat_inc(ips_badhlen);
			goto dropit;
		}

		if (hlen > m->m_len) {
			if ((m = m_pullup(m, hlen)) == NULL) {
				ipstat_inc(ips_badhlen);
				return (NULL);
			}
			ip = mtod(m, struct ip *);
		}

		if ((m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_OK) == 0) {
			if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_BAD) {
				ipstat_inc(ips_badsum);
				goto dropit;
			}

			ipstat_inc(ips_inswcsum);
			if (in_cksum(m, hlen) != 0) {
				ipstat_inc(ips_badsum);
				goto dropit;
			}
		}

		if (ntohs(ip->ip_len) < hlen)
			goto dropit;

		if (m->m_pkthdr.len < ntohs(ip->ip_len))
			goto dropit;
		if (m->m_pkthdr.len > ntohs(ip->ip_len)) {
			if (m->m_len == m->m_pkthdr.len) {
				m->m_len = ntohs(ip->ip_len);
				m->m_pkthdr.len = ntohs(ip->ip_len);
			} else
				m_adj(m, ntohs(ip->ip_len) - m->m_pkthdr.len);
		}

#ifdef IPSEC
		if ((brifp->if_flags & IFF_LINK2) == IFF_LINK2 &&
		    bridge_ipsec(ifp, eh, hassnap, &llc, dir, AF_INET, hlen, m))
			return (NULL);
#endif /* IPSEC */
#if NPF > 0
		/* Finally, we get to filter the packet! */
		if (pf_test(AF_INET, dir, ifp, &m) != PF_PASS)
			goto dropit;
		if (m == NULL)
			goto dropit;
#endif /* NPF > 0 */

		/* Rebuild the IP header */
		if (m->m_len < hlen && ((m = m_pullup(m, hlen)) == NULL))
			return (NULL);
		if (m->m_len < sizeof(struct ip))
			goto dropit;
		in_proto_cksum_out(m, ifp);
		ip = mtod(m, struct ip *);
		ip->ip_sum = 0;
		if (0 && (ifp->if_capabilities & IFCAP_CSUM_IPv4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_OUT;
		else {
			ipstat_inc(ips_outswcsum);
			ip->ip_sum = in_cksum(m, hlen);
		}

#if NPF > 0
		if (dir == BRIDGE_IN &&
		    m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
			m_resethdr(m);
			m->m_pkthdr.ph_ifidx = ifp->if_index;
			m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
			ipv4_input(ifp, m);
			return (NULL);
		}
#endif /* NPF > 0 */

		break;

#ifdef INET6
	case ETHERTYPE_IPV6: {
		struct ip6_hdr *ip6;

		if (m->m_len < sizeof(struct ip6_hdr)) {
			if ((m = m_pullup(m, sizeof(struct ip6_hdr)))
			    == NULL) {
				ip6stat_inc(ip6s_toosmall);
				return (NULL);
			}
		}

		ip6 = mtod(m, struct ip6_hdr *);

		if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
			ip6stat_inc(ip6s_badvers);
			goto dropit;
		}

#ifdef IPSEC
		hlen = sizeof(struct ip6_hdr);

		if ((brifp->if_flags & IFF_LINK2) == IFF_LINK2 &&
		    bridge_ipsec(ifp, eh, hassnap, &llc, dir, AF_INET6, hlen,
		    m))
			return (NULL);
#endif /* IPSEC */

#if NPF > 0
		if (pf_test(AF_INET6, dir, ifp, &m) != PF_PASS)
			goto dropit;
		if (m == NULL)
			return (NULL);
#endif /* NPF > 0 */
		in6_proto_cksum_out(m, ifp);

#if NPF > 0
		if (dir == BRIDGE_IN &&
		    m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
			m_resethdr(m);
			m->m_pkthdr.ph_ifidx = ifp->if_index;
			m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
			ipv6_input(ifp, m);
			return (NULL);
		}
#endif /* NPF > 0 */

		break;
	}
#endif /* INET6 */

	default:
		goto dropit;
		break;
	}

	/* Reattach SNAP header */
	if (hassnap) {
		M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
		if (m == NULL)
			goto dropit;
		bcopy(&llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
	}

	/* Reattach ethernet header */
	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		goto dropit;
	bcopy(eh, mtod(m, caddr_t), sizeof(*eh));

	return (m);

dropit:
	m_freem(m);
	return (NULL);
}

void
bridge_fragment(struct ifnet *brifp, struct ifnet *ifp, struct ether_header *eh,
    struct mbuf *m)
{
	struct llc llc;
	struct mbuf_list fml;
	int error = 0;
	int hassnap = 0;
	u_int16_t etype;
	struct ip *ip;

	etype = ntohs(eh->ether_type);
#if NVLAN > 0
	if ((m->m_flags & M_VLANTAG) || etype == ETHERTYPE_VLAN ||
	    etype == ETHERTYPE_QINQ) {
		int len = m->m_pkthdr.len;

		if (m->m_flags & M_VLANTAG)
			len += ETHER_VLAN_ENCAP_LEN;
		if ((ifp->if_capabilities & IFCAP_VLAN_MTU) &&
		    (len - sizeof(struct ether_vlan_header) <= ifp->if_mtu)) {
			bridge_ifenqueue(brifp, ifp, m);
			return;
		}
		goto dropit;
	}
#endif
	if (etype != ETHERTYPE_IP) {
		if (etype > ETHERMTU ||
		    m->m_pkthdr.len < (LLC_SNAPFRAMELEN +
		    ETHER_HDR_LEN))
			goto dropit;

		m_copydata(m, ETHER_HDR_LEN, LLC_SNAPFRAMELEN, &llc);

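		/*
		 * Only IPv4 encapsulated in LLC/SNAP can be fragmented
		 * here; any other SNAP frame is dropped below.
		 */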
		if (llc.llc_dsap != LLC_SNAP_LSAP ||
		    llc.llc_ssap != LLC_SNAP_LSAP ||
		    llc.llc_control != LLC_UI ||
		    llc.llc_snap.org_code[0] ||
		    llc.llc_snap.org_code[1] ||
		    llc.llc_snap.org_code[2] ||
		    llc.llc_snap.ether_type != htons(ETHERTYPE_IP))
			goto dropit;

		hassnap = 1;
	}

	m_adj(m, ETHER_HDR_LEN);
	if (hassnap)
		m_adj(m, LLC_SNAPFRAMELEN);

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		goto dropit;
	ip = mtod(m, struct ip *);

	/* Respect IP_DF, return an ICMP_UNREACH_NEEDFRAG. */
	if (ip->ip_off & htons(IP_DF)) {
		bridge_send_icmp_err(ifp, eh, m, hassnap, &llc,
		    ifp->if_mtu, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG);
		return;
	}

	error = ip_fragment(m, &fml, ifp, ifp->if_mtu);
	if (error)
		return;

	while ((m = ml_dequeue(&fml)) != NULL) {
		if (hassnap) {
			M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			bcopy(&llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
		}
		M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
		if (m == NULL) {
			error = ENOBUFS;
			break;
		}
		bcopy(eh, mtod(m, caddr_t), sizeof(*eh));
		error = bridge_ifenqueue(brifp, ifp, m);
		if (error)
			break;
	}
	if (error)
		ml_purge(&fml);
	else
		ipstat_inc(ips_fragmented);

	return;
dropit:
	m_freem(m);
}

int
bridge_ifenqueue(struct ifnet *brifp, struct ifnet *ifp, struct mbuf *m)
{
	int error, len;

	/* Loop prevention. */
	m->m_flags |= M_PROTO1;

	len = m->m_pkthdr.len;

	error = if_enqueue(ifp, m);
	if (error) {
		brifp->if_oerrors++;
		return (error);
	}

	brifp->if_opackets++;
	brifp->if_obytes += len;

	return (0);
}

void
bridge_ifinput(struct ifnet *ifp, struct mbuf *m)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();

	m->m_flags |= M_PROTO1;

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);
}

void
bridge_send_icmp_err(struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *n, int hassnap, struct llc *llc,
    int mtu, int type, int code)
{
	struct ip *ip;
	struct icmp *icp;
	struct in_addr t;
	struct mbuf *m, *n2;
	int hlen;
	u_int8_t ether_tmp[ETHER_ADDR_LEN];

	n2 = m_copym(n, 0, M_COPYALL, M_DONTWAIT);
	if (!n2) {
		m_freem(n);
		return;
	}
	m = icmp_do_error(n, type, code, 0, mtu);
	if (m == NULL) {
		m_freem(n2);
		return;
	}

	n = n2;

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;
	t = ip->ip_dst;
	ip->ip_dst = ip->ip_src;
	ip->ip_src = t;

	m->m_data += hlen;
	m->m_len -= hlen;
	icp = mtod(m, struct icmp *);
	icp->icmp_cksum = 0;
	icp->icmp_cksum = in_cksum(m, ntohs(ip->ip_len) - hlen);
	m->m_data -= hlen;
	m->m_len += hlen;

	ip->ip_v = IPVERSION;
	ip->ip_off &= htons(IP_DF);
	ip->ip_id = htons(ip_randomid());
	ip->ip_ttl = MAXTTL;
	ip->ip_sum = 0;
	ip->ip_sum = in_cksum(m, hlen);

	/* Swap ethernet addresses */
	bcopy(&eh->ether_dhost, &ether_tmp, sizeof(ether_tmp));
	bcopy(&eh->ether_shost, &eh->ether_dhost, sizeof(ether_tmp));
	bcopy(&ether_tmp, &eh->ether_shost, sizeof(ether_tmp));

	/* Reattach SNAP header */
	if (hassnap) {
		M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
		if (m == NULL)
			goto dropit;
		bcopy(llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
	}

	/* Reattach ethernet header */
	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		goto dropit;
	bcopy(eh, mtod(m, caddr_t), sizeof(*eh));

	bridge_enqueue(ifp, m);
	m_freem(n);
	return;

dropit:
	m_freem(n);
}