1 /* $OpenBSD: if_trunk.c,v 1.77 2011/01/28 14:20:37 reyk Exp $ */ 2 3 /* 4 * Copyright (c) 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "bpfilter.h" 20 #include "trunk.h" 21 22 #include <sys/param.h> 23 #include <sys/kernel.h> 24 #include <sys/malloc.h> 25 #include <sys/mbuf.h> 26 #include <sys/queue.h> 27 #include <sys/socket.h> 28 #include <sys/sockio.h> 29 #include <sys/systm.h> 30 #include <sys/proc.h> 31 #include <sys/hash.h> 32 33 #include <dev/rndvar.h> 34 35 #include <net/if.h> 36 #include <net/if_arp.h> 37 #include <net/if_dl.h> 38 #include <net/if_llc.h> 39 #include <net/if_media.h> 40 #include <net/if_types.h> 41 #if NBPFILTER > 0 42 #include <net/bpf.h> 43 #endif 44 45 #ifdef INET 46 #include <netinet/in.h> 47 #include <netinet/in_systm.h> 48 #include <netinet/if_ether.h> 49 #include <netinet/ip.h> 50 #endif 51 52 #ifdef INET6 53 #include <netinet/ip6.h> 54 #endif 55 56 #include <net/if_vlan_var.h> 57 #include <net/if_trunk.h> 58 #include <net/trunklacp.h> 59 60 61 SLIST_HEAD(__trhead, trunk_softc) trunk_list; /* list of trunks */ 62 63 extern struct ifaddr **ifnet_addrs; 64 65 void trunkattach(int); 66 int trunk_clone_create(struct if_clone *, int); 67 int trunk_clone_destroy(struct ifnet *); 68 void trunk_lladdr(struct arpcom *, u_int8_t *); 69 int trunk_capabilities(struct trunk_softc *); 70 void trunk_port_lladdr(struct trunk_port *, u_int8_t *); 71 int trunk_port_create(struct trunk_softc *, struct ifnet *); 72 int trunk_port_destroy(struct trunk_port *); 73 void trunk_port_watchdog(struct ifnet *); 74 void trunk_port_state(void *); 75 int trunk_port_ioctl(struct ifnet *, u_long, caddr_t); 76 struct trunk_port *trunk_port_get(struct trunk_softc *, struct ifnet *); 77 int trunk_port_checkstacking(struct trunk_softc *); 78 void trunk_port2req(struct trunk_port *, struct trunk_reqport *); 79 int trunk_ioctl(struct ifnet *, u_long, caddr_t); 80 int trunk_ether_addmulti(struct trunk_softc *, struct ifreq *); 81 int trunk_ether_delmulti(struct trunk_softc *, struct ifreq *); 82 void trunk_ether_purgemulti(struct trunk_softc *); 83 int trunk_ether_cmdmulti(struct trunk_port *, u_long); 84 int trunk_ioctl_allports(struct trunk_softc *, u_long, caddr_t); 85 void trunk_start(struct ifnet *); 86 void trunk_init(struct ifnet *); 87 void trunk_stop(struct ifnet *); 88 void trunk_watchdog(struct ifnet *); 89 int trunk_media_change(struct ifnet *); 90 void trunk_media_status(struct ifnet *, struct ifmediareq *); 91 struct trunk_port *trunk_link_active(struct trunk_softc *, 92 struct trunk_port *); 93 const void *trunk_gethdr(struct mbuf *, u_int, u_int, void *); 94 95 struct if_clone trunk_cloner = 96 IF_CLONE_INITIALIZER("trunk", trunk_clone_create, trunk_clone_destroy); 97 98 /* Simple round robin */ 99 int 
trunk_rr_attach(struct trunk_softc *); 100 int trunk_rr_detach(struct trunk_softc *); 101 void trunk_rr_port_destroy(struct trunk_port *); 102 int trunk_rr_start(struct trunk_softc *, struct mbuf *); 103 int trunk_rr_input(struct trunk_softc *, struct trunk_port *, 104 struct ether_header *, struct mbuf *); 105 106 /* Active failover */ 107 int trunk_fail_attach(struct trunk_softc *); 108 int trunk_fail_detach(struct trunk_softc *); 109 int trunk_fail_start(struct trunk_softc *, struct mbuf *); 110 int trunk_fail_input(struct trunk_softc *, struct trunk_port *, 111 struct ether_header *, struct mbuf *); 112 113 /* Loadbalancing */ 114 int trunk_lb_attach(struct trunk_softc *); 115 int trunk_lb_detach(struct trunk_softc *); 116 int trunk_lb_port_create(struct trunk_port *); 117 void trunk_lb_port_destroy(struct trunk_port *); 118 int trunk_lb_start(struct trunk_softc *, struct mbuf *); 119 int trunk_lb_input(struct trunk_softc *, struct trunk_port *, 120 struct ether_header *, struct mbuf *); 121 int trunk_lb_porttable(struct trunk_softc *, struct trunk_port *); 122 123 /* Broadcast mode */ 124 int trunk_bcast_attach(struct trunk_softc *); 125 int trunk_bcast_detach(struct trunk_softc *); 126 int trunk_bcast_start(struct trunk_softc *, struct mbuf *); 127 int trunk_bcast_input(struct trunk_softc *, struct trunk_port *, 128 struct ether_header *, struct mbuf *); 129 130 /* 802.3ad LACP */ 131 int trunk_lacp_attach(struct trunk_softc *); 132 int trunk_lacp_detach(struct trunk_softc *); 133 int trunk_lacp_start(struct trunk_softc *, struct mbuf *); 134 int trunk_lacp_input(struct trunk_softc *, struct trunk_port *, 135 struct ether_header *, struct mbuf *); 136 137 /* Trunk protocol table */ 138 static const struct { 139 enum trunk_proto ti_proto; 140 int (*ti_attach)(struct trunk_softc *); 141 } trunk_protos[] = { 142 { TRUNK_PROTO_ROUNDROBIN, trunk_rr_attach }, 143 { TRUNK_PROTO_FAILOVER, trunk_fail_attach }, 144 { TRUNK_PROTO_LOADBALANCE, trunk_lb_attach }, 145 { TRUNK_PROTO_BROADCAST, trunk_bcast_attach }, 146 { TRUNK_PROTO_LACP, trunk_lacp_attach }, 147 { TRUNK_PROTO_NONE, NULL } 148 }; 149 150 void 151 trunkattach(int count) 152 { 153 SLIST_INIT(&trunk_list); 154 if_clone_attach(&trunk_cloner); 155 } 156 157 int 158 trunk_clone_create(struct if_clone *ifc, int unit) 159 { 160 struct trunk_softc *tr; 161 struct ifnet *ifp; 162 int i, error = 0; 163 164 if ((tr = malloc(sizeof(struct trunk_softc), 165 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) 166 return (ENOMEM); 167 168 tr->tr_unit = unit; 169 tr->tr_proto = TRUNK_PROTO_NONE; 170 for (i = 0; trunk_protos[i].ti_proto != TRUNK_PROTO_NONE; i++) { 171 if (trunk_protos[i].ti_proto == TRUNK_PROTO_DEFAULT) { 172 tr->tr_proto = trunk_protos[i].ti_proto; 173 if ((error = trunk_protos[i].ti_attach(tr)) != 0) { 174 free(tr, M_DEVBUF); 175 return (error); 176 } 177 break; 178 } 179 } 180 SLIST_INIT(&tr->tr_ports); 181 182 /* Initialise pseudo media types */ 183 ifmedia_init(&tr->tr_media, 0, trunk_media_change, 184 trunk_media_status); 185 ifmedia_add(&tr->tr_media, IFM_ETHER | IFM_AUTO, 0, NULL); 186 ifmedia_set(&tr->tr_media, IFM_ETHER | IFM_AUTO); 187 188 ifp = &tr->tr_ac.ac_if; 189 ifp->if_softc = tr; 190 ifp->if_start = trunk_start; 191 ifp->if_watchdog = trunk_watchdog; 192 ifp->if_ioctl = trunk_ioctl; 193 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; 194 ifp->if_capabilities = trunk_capabilities(tr); 195 196 IFQ_SET_MAXLEN(&ifp->if_snd, 1); 197 IFQ_SET_READY(&ifp->if_snd); 198 199 snprintf(ifp->if_xname, sizeof(ifp->if_xname), 
"%s%d", 200 ifc->ifc_name, unit); 201 202 /* 203 * Attach as an ordinary ethernet device, children will be attached 204 * as special device IFT_IEEE8023ADLAG. 205 */ 206 if_attach(ifp); 207 ether_ifattach(ifp); 208 209 /* Insert into the global list of trunks */ 210 SLIST_INSERT_HEAD(&trunk_list, tr, tr_entries); 211 212 return (0); 213 } 214 215 int 216 trunk_clone_destroy(struct ifnet *ifp) 217 { 218 struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; 219 struct trunk_port *tp; 220 int error, s; 221 222 /* Remove any multicast groups that we may have joined. */ 223 trunk_ether_purgemulti(tr); 224 225 s = splnet(); 226 227 /* Shutdown and remove trunk ports, return on error */ 228 while ((tp = SLIST_FIRST(&tr->tr_ports)) != NULL) { 229 if ((error = trunk_port_destroy(tp)) != 0) { 230 splx(s); 231 return (error); 232 } 233 } 234 235 ifmedia_delete_instance(&tr->tr_media, IFM_INST_ANY); 236 ether_ifdetach(ifp); 237 if_detach(ifp); 238 239 SLIST_REMOVE(&trunk_list, tr, trunk_softc, tr_entries); 240 free(tr, M_DEVBUF); 241 242 splx(s); 243 244 return (0); 245 } 246 247 void 248 trunk_lladdr(struct arpcom *ac, u_int8_t *lladdr) 249 { 250 struct ifnet *ifp = &ac->ac_if; 251 struct ifaddr *ifa; 252 struct sockaddr_dl *sdl; 253 254 ifa = ifnet_addrs[ifp->if_index]; 255 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 256 sdl->sdl_type = IFT_ETHER; 257 sdl->sdl_alen = ETHER_ADDR_LEN; 258 bcopy(lladdr, LLADDR(sdl), ETHER_ADDR_LEN); 259 bcopy(lladdr, ac->ac_enaddr, ETHER_ADDR_LEN); 260 } 261 262 int 263 trunk_capabilities(struct trunk_softc *tr) 264 { 265 struct trunk_port *tp; 266 int cap = ~0, priv; 267 268 /* Preserve private capabilities */ 269 priv = tr->tr_capabilities & IFCAP_TRUNK_MASK; 270 271 /* Get capabilities from the trunk ports */ 272 SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) 273 cap &= tp->tp_capabilities; 274 275 if (tr->tr_ifflags & IFF_DEBUG) { 276 printf("%s: capabilities 0x%08x\n", 277 tr->tr_ifname, cap == ~0 ? priv : (cap | priv)); 278 } 279 280 return (cap == ~0 ? 
priv : (cap | priv)); 281 } 282 283 void 284 trunk_port_lladdr(struct trunk_port *tp, u_int8_t *lladdr) 285 { 286 struct ifnet *ifp = tp->tp_if; 287 288 /* Set the link layer address */ 289 trunk_lladdr((struct arpcom *)ifp, lladdr); 290 291 /* Reset the port to update the lladdr */ 292 ifnewlladdr(ifp); 293 } 294 295 int 296 trunk_port_create(struct trunk_softc *tr, struct ifnet *ifp) 297 { 298 struct trunk_softc *tr_ptr; 299 struct trunk_port *tp; 300 int error = 0; 301 302 /* Limit the maximal number of trunk ports */ 303 if (tr->tr_count >= TRUNK_MAX_PORTS) 304 return (ENOSPC); 305 306 /* New trunk port has to be in an idle state */ 307 if (ifp->if_flags & IFF_OACTIVE) 308 return (EBUSY); 309 310 /* Check if port has already been associated to a trunk */ 311 if (trunk_port_get(NULL, ifp) != NULL) 312 return (EBUSY); 313 314 /* XXX Disallow non-ethernet interfaces (this should be any of 802) */ 315 if (ifp->if_type != IFT_ETHER) 316 return (EPROTONOSUPPORT); 317 318 if ((error = ifpromisc(ifp, 1)) != 0) 319 return (error); 320 321 if ((tp = malloc(sizeof(struct trunk_port), 322 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) 323 return (ENOMEM); 324 325 /* Check if port is a stacked trunk */ 326 SLIST_FOREACH(tr_ptr, &trunk_list, tr_entries) { 327 if (ifp == &tr_ptr->tr_ac.ac_if) { 328 tp->tp_flags |= TRUNK_PORT_STACK; 329 if (trunk_port_checkstacking(tr_ptr) >= 330 TRUNK_MAX_STACKING) { 331 free(tp, M_DEVBUF); 332 return (E2BIG); 333 } 334 } 335 } 336 337 /* Change the interface type */ 338 tp->tp_iftype = ifp->if_type; 339 ifp->if_type = IFT_IEEE8023ADLAG; 340 ifp->if_tp = (caddr_t)tp; 341 tp->tp_watchdog = ifp->if_watchdog; 342 ifp->if_watchdog = trunk_port_watchdog; 343 tp->tp_ioctl = ifp->if_ioctl; 344 ifp->if_ioctl = trunk_port_ioctl; 345 346 tp->tp_if = ifp; 347 tp->tp_trunk = tr; 348 349 /* Save port link layer address */ 350 bcopy(((struct arpcom *)ifp)->ac_enaddr, tp->tp_lladdr, ETHER_ADDR_LEN); 351 352 if (SLIST_EMPTY(&tr->tr_ports)) { 353 tr->tr_primary = tp; 354 tp->tp_flags |= TRUNK_PORT_MASTER; 355 trunk_lladdr(&tr->tr_ac, tp->tp_lladdr); 356 } 357 358 /* Update link layer address for this port */ 359 trunk_port_lladdr(tp, 360 ((struct arpcom *)(tr->tr_primary->tp_if))->ac_enaddr); 361 362 /* Insert into the list of ports */ 363 SLIST_INSERT_HEAD(&tr->tr_ports, tp, tp_entries); 364 tr->tr_count++; 365 366 /* Update trunk capabilities */ 367 tr->tr_capabilities = trunk_capabilities(tr); 368 369 /* Add multicast addresses to this port */ 370 trunk_ether_cmdmulti(tp, SIOCADDMULTI); 371 372 /* Register callback for physical link state changes */ 373 if (ifp->if_linkstatehooks != NULL) 374 tp->lh_cookie = hook_establish(ifp->if_linkstatehooks, 1, 375 trunk_port_state, tp); 376 377 if (tr->tr_port_create != NULL) 378 error = (*tr->tr_port_create)(tp); 379 380 return (error); 381 } 382 383 int 384 trunk_port_checkstacking(struct trunk_softc *tr) 385 { 386 struct trunk_softc *tr_ptr; 387 struct trunk_port *tp; 388 int m = 0; 389 390 SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { 391 if (tp->tp_flags & TRUNK_PORT_STACK) { 392 tr_ptr = (struct trunk_softc *)tp->tp_if->if_softc; 393 m = MAX(m, trunk_port_checkstacking(tr_ptr)); 394 } 395 } 396 397 return (m + 1); 398 } 399 400 int 401 trunk_port_destroy(struct trunk_port *tp) 402 { 403 struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk; 404 struct trunk_port *tp_ptr; 405 struct ifnet *ifp = tp->tp_if; 406 407 if (tr->tr_port_destroy != NULL) 408 (*tr->tr_port_destroy)(tp); 409 410 /* Remove multicast addresses from this port */ 411 
trunk_ether_cmdmulti(tp, SIOCDELMULTI); 412 413 /* Port has to be down */ 414 if (ifp->if_flags & IFF_UP) 415 if_down(ifp); 416 417 ifpromisc(ifp, 0); 418 419 /* Restore interface */ 420 ifp->if_type = tp->tp_iftype; 421 ifp->if_watchdog = tp->tp_watchdog; 422 ifp->if_ioctl = tp->tp_ioctl; 423 ifp->if_tp = NULL; 424 425 if (ifp->if_linkstatehooks != NULL) 426 hook_disestablish(ifp->if_linkstatehooks, tp->lh_cookie); 427 428 /* Finally, remove the port from the trunk */ 429 SLIST_REMOVE(&tr->tr_ports, tp, trunk_port, tp_entries); 430 tr->tr_count--; 431 432 /* Update the primary interface */ 433 if (tp == tr->tr_primary) { 434 u_int8_t lladdr[ETHER_ADDR_LEN]; 435 436 if ((tp_ptr = SLIST_FIRST(&tr->tr_ports)) == NULL) { 437 bzero(&lladdr, ETHER_ADDR_LEN); 438 } else { 439 bcopy(((struct arpcom *)tp_ptr->tp_if)->ac_enaddr, 440 lladdr, ETHER_ADDR_LEN); 441 tp_ptr->tp_flags = TRUNK_PORT_MASTER; 442 } 443 trunk_lladdr(&tr->tr_ac, lladdr); 444 tr->tr_primary = tp_ptr; 445 446 /* Update link layer address for each port */ 447 SLIST_FOREACH(tp_ptr, &tr->tr_ports, tp_entries) 448 trunk_port_lladdr(tp_ptr, lladdr); 449 } 450 451 /* Reset the port lladdr */ 452 trunk_port_lladdr(tp, tp->tp_lladdr); 453 454 free(tp, M_DEVBUF); 455 456 /* Update trunk capabilities */ 457 tr->tr_capabilities = trunk_capabilities(tr); 458 459 return (0); 460 } 461 462 void 463 trunk_port_watchdog(struct ifnet *ifp) 464 { 465 struct trunk_port *tp; 466 467 /* Should be checked by the caller */ 468 if (ifp->if_type != IFT_IEEE8023ADLAG) 469 return; 470 if ((tp = (struct trunk_port *)ifp->if_tp) == NULL || 471 tp->tp_trunk == NULL) 472 return; 473 474 if (tp->tp_watchdog != NULL) 475 (*tp->tp_watchdog)(ifp); 476 } 477 478 479 int 480 trunk_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 481 { 482 struct trunk_reqport *rp = (struct trunk_reqport *)data; 483 struct trunk_softc *tr; 484 struct trunk_port *tp = NULL; 485 int s, error = 0; 486 487 s = splnet(); 488 489 /* Should be checked by the caller */ 490 if (ifp->if_type != IFT_IEEE8023ADLAG || 491 (tp = (struct trunk_port *)ifp->if_tp) == NULL || 492 (tr = (struct trunk_softc *)tp->tp_trunk) == NULL) { 493 error = EINVAL; 494 goto fallback; 495 } 496 497 switch (cmd) { 498 case SIOCGTRUNKPORT: 499 if (rp->rp_portname[0] == '\0' || 500 ifunit(rp->rp_portname) != ifp) { 501 error = EINVAL; 502 break; 503 } 504 505 /* Search in all trunks if the global flag is set */ 506 if ((tp = trunk_port_get(rp->rp_flags & TRUNK_PORT_GLOBAL ? 
507 NULL : tr, ifp)) == NULL) { 508 error = ENOENT; 509 break; 510 } 511 512 trunk_port2req(tp, rp); 513 break; 514 default: 515 error = ENOTTY; 516 goto fallback; 517 } 518 519 splx(s); 520 return (error); 521 522 fallback: 523 splx(s); 524 525 if (tp != NULL) 526 error = (*tp->tp_ioctl)(ifp, cmd, data); 527 528 return (error); 529 } 530 531 void 532 trunk_port_ifdetach(struct ifnet *ifp) 533 { 534 struct trunk_port *tp; 535 536 if ((tp = (struct trunk_port *)ifp->if_tp) == NULL) 537 return; 538 539 trunk_port_destroy(tp); 540 } 541 542 struct trunk_port * 543 trunk_port_get(struct trunk_softc *tr, struct ifnet *ifp) 544 { 545 struct trunk_port *tp; 546 struct trunk_softc *tr_ptr; 547 548 if (tr != NULL) { 549 /* Search port in specified trunk */ 550 SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { 551 if (tp->tp_if == ifp) 552 return (tp); 553 } 554 } else { 555 /* Search all trunks for the selected port */ 556 SLIST_FOREACH(tr_ptr, &trunk_list, tr_entries) { 557 SLIST_FOREACH(tp, &tr_ptr->tr_ports, tp_entries) { 558 if (tp->tp_if == ifp) 559 return (tp); 560 } 561 } 562 } 563 564 return (NULL); 565 } 566 567 void 568 trunk_port2req(struct trunk_port *tp, struct trunk_reqport *rp) 569 { 570 struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk; 571 572 strlcpy(rp->rp_ifname, tr->tr_ifname, sizeof(rp->rp_ifname)); 573 strlcpy(rp->rp_portname, tp->tp_if->if_xname, sizeof(rp->rp_portname)); 574 rp->rp_prio = tp->tp_prio; 575 if (tr->tr_portreq != NULL) 576 (*tr->tr_portreq)(tp, (caddr_t)&rp->rp_psc); 577 578 /* Add protocol specific flags */ 579 switch (tr->tr_proto) { 580 case TRUNK_PROTO_FAILOVER: 581 rp->rp_flags = tp->tp_flags; 582 if (tp == trunk_link_active(tr, tr->tr_primary)) 583 rp->rp_flags |= TRUNK_PORT_ACTIVE; 584 break; 585 586 case TRUNK_PROTO_ROUNDROBIN: 587 case TRUNK_PROTO_LOADBALANCE: 588 case TRUNK_PROTO_BROADCAST: 589 rp->rp_flags = tp->tp_flags; 590 if (TRUNK_PORTACTIVE(tp)) 591 rp->rp_flags |= TRUNK_PORT_ACTIVE; 592 break; 593 594 case TRUNK_PROTO_LACP: 595 /* LACP has a different definition of active */ 596 rp->rp_flags = lacp_port_status(tp); 597 break; 598 default: 599 break; 600 } 601 } 602 603 int 604 trunk_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 605 { 606 struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; 607 struct trunk_reqall *ra = (struct trunk_reqall *)data; 608 struct trunk_reqport *rp = (struct trunk_reqport *)data, rpbuf; 609 struct ifreq *ifr = (struct ifreq *)data; 610 struct ifaddr *ifa = (struct ifaddr *)data; 611 struct trunk_port *tp; 612 struct ifnet *tpif; 613 int s, i, error = 0; 614 615 s = splnet(); 616 617 bzero(&rpbuf, sizeof(rpbuf)); 618 619 switch (cmd) { 620 case SIOCGTRUNK: 621 ra->ra_proto = tr->tr_proto; 622 if (tr->tr_req != NULL) 623 (*tr->tr_req)(tr, (caddr_t)&ra->ra_psc); 624 ra->ra_ports = i = 0; 625 tp = SLIST_FIRST(&tr->tr_ports); 626 while (tp && ra->ra_size >= 627 i + sizeof(struct trunk_reqport)) { 628 trunk_port2req(tp, &rpbuf); 629 error = copyout(&rpbuf, (caddr_t)ra->ra_port + i, 630 sizeof(struct trunk_reqport)); 631 if (error) 632 break; 633 i += sizeof(struct trunk_reqport); 634 ra->ra_ports++; 635 tp = SLIST_NEXT(tp, tp_entries); 636 } 637 break; 638 case SIOCSTRUNK: 639 if ((error = suser(curproc, 0)) != 0) { 640 error = EPERM; 641 break; 642 } 643 if (ra->ra_proto >= TRUNK_PROTO_MAX) { 644 error = EPROTONOSUPPORT; 645 break; 646 } 647 if (tr->tr_proto != TRUNK_PROTO_NONE) 648 error = tr->tr_detach(tr); 649 if (error != 0) 650 break; 651 for (i = 0; i < (sizeof(trunk_protos) / 652 
sizeof(trunk_protos[0])); i++) { 653 if (trunk_protos[i].ti_proto == ra->ra_proto) { 654 if (tr->tr_ifflags & IFF_DEBUG) 655 printf("%s: using proto %u\n", 656 tr->tr_ifname, 657 trunk_protos[i].ti_proto); 658 tr->tr_proto = trunk_protos[i].ti_proto; 659 if (tr->tr_proto != TRUNK_PROTO_NONE) 660 error = trunk_protos[i].ti_attach(tr); 661 goto out; 662 } 663 } 664 error = EPROTONOSUPPORT; 665 break; 666 case SIOCGTRUNKPORT: 667 if (rp->rp_portname[0] == '\0' || 668 (tpif = ifunit(rp->rp_portname)) == NULL) { 669 error = EINVAL; 670 break; 671 } 672 673 /* Search in all trunks if the global flag is set */ 674 if ((tp = trunk_port_get(rp->rp_flags & TRUNK_PORT_GLOBAL ? 675 NULL : tr, tpif)) == NULL) { 676 error = ENOENT; 677 break; 678 } 679 680 trunk_port2req(tp, rp); 681 break; 682 case SIOCSTRUNKPORT: 683 if ((error = suser(curproc, 0)) != 0) { 684 error = EPERM; 685 break; 686 } 687 if (rp->rp_portname[0] == '\0' || 688 (tpif = ifunit(rp->rp_portname)) == NULL) { 689 error = EINVAL; 690 break; 691 } 692 error = trunk_port_create(tr, tpif); 693 break; 694 case SIOCSTRUNKDELPORT: 695 if ((error = suser(curproc, 0)) != 0) { 696 error = EPERM; 697 break; 698 } 699 if (rp->rp_portname[0] == '\0' || 700 (tpif = ifunit(rp->rp_portname)) == NULL) { 701 error = EINVAL; 702 break; 703 } 704 705 /* Search in all trunks if the global flag is set */ 706 if ((tp = trunk_port_get(rp->rp_flags & TRUNK_PORT_GLOBAL ? 707 NULL : tr, tpif)) == NULL) { 708 error = ENOENT; 709 break; 710 } 711 712 error = trunk_port_destroy(tp); 713 break; 714 case SIOCSIFADDR: 715 ifp->if_flags |= IFF_UP; 716 #ifdef INET 717 if (ifa->ifa_addr->sa_family == AF_INET) 718 arp_ifinit(&tr->tr_ac, ifa); 719 #endif /* INET */ 720 error = ENETRESET; 721 break; 722 case SIOCSIFFLAGS: 723 error = ENETRESET; 724 break; 725 case SIOCADDMULTI: 726 error = trunk_ether_addmulti(tr, ifr); 727 break; 728 case SIOCDELMULTI: 729 error = trunk_ether_delmulti(tr, ifr); 730 break; 731 case SIOCSIFMEDIA: 732 case SIOCGIFMEDIA: 733 error = ifmedia_ioctl(ifp, ifr, &tr->tr_media, cmd); 734 break; 735 case SIOCSIFLLADDR: 736 /* Update the port lladdrs as well */ 737 SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) 738 trunk_port_lladdr(tp, ifr->ifr_addr.sa_data); 739 error = ENETRESET; 740 break; 741 default: 742 error = ether_ioctl(ifp, &tr->tr_ac, cmd, data); 743 } 744 745 if (error == ENETRESET) { 746 if (ifp->if_flags & IFF_UP) { 747 if ((ifp->if_flags & IFF_RUNNING) == 0) 748 trunk_init(ifp); 749 } else { 750 if (ifp->if_flags & IFF_RUNNING) 751 trunk_stop(ifp); 752 } 753 error = 0; 754 } 755 756 out: 757 splx(s); 758 return (error); 759 } 760 761 int 762 trunk_ether_addmulti(struct trunk_softc *tr, struct ifreq *ifr) 763 { 764 struct trunk_mc *mc; 765 u_int8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN]; 766 int error; 767 768 /* Ignore ENETRESET error code */ 769 if ((error = ether_addmulti(ifr, &tr->tr_ac)) != ENETRESET) 770 return (error); 771 772 if ((mc = malloc(sizeof(*mc), M_DEVBUF, M_NOWAIT)) == NULL) { 773 error = ENOMEM; 774 goto failed; 775 } 776 777 ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi); 778 ETHER_LOOKUP_MULTI(addrlo, addrhi, &tr->tr_ac, mc->mc_enm); 779 bcopy(&ifr->ifr_addr, &mc->mc_addr, ifr->ifr_addr.sa_len); 780 SLIST_INSERT_HEAD(&tr->tr_mc_head, mc, mc_entries); 781 782 if ((error = trunk_ioctl_allports(tr, SIOCADDMULTI, 783 (caddr_t)ifr)) != 0) { 784 trunk_ether_delmulti(tr, ifr); 785 return (error); 786 } 787 788 return (error); 789 790 failed: 791 ether_delmulti(ifr, &tr->tr_ac); 792 793 return (error); 794 } 795 796 
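/*
 * Illustrative userland sketch (not part of the driver): roughly how a
 * tool such as ifconfig(8) drives the SIOCSTRUNKPORT ioctl handled by
 * trunk_ioctl() above.  The request structure and ioctl name come from
 * <net/if_trunk.h>; the helper name, the descriptor "s" (e.g. from
 * socket(AF_INET, SOCK_DGRAM, 0)) and the omitted error handling are
 * assumptions made only for this example.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_trunk.h>
#include <string.h>

/* Add physical interface "portif" to the trunk interface "trunkif". */
static int
trunk_add_port_example(int s, const char *trunkif, const char *portif)
{
        struct trunk_reqport rp;

        memset(&rp, 0, sizeof(rp));
        strlcpy(rp.rp_ifname, trunkif, sizeof(rp.rp_ifname));
        strlcpy(rp.rp_portname, portif, sizeof(rp.rp_portname));

        /* The kernel side forwards this request to trunk_port_create(). */
        return (ioctl(s, SIOCSTRUNKPORT, &rp));
}
#endif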
int 797 trunk_ether_delmulti(struct trunk_softc *tr, struct ifreq *ifr) 798 { 799 struct ether_multi *enm; 800 struct trunk_mc *mc; 801 u_int8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN]; 802 int error; 803 804 if ((error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi)) != 0) 805 return (error); 806 ETHER_LOOKUP_MULTI(addrlo, addrhi, &tr->tr_ac, enm); 807 if (enm == NULL) 808 return (EINVAL); 809 810 SLIST_FOREACH(mc, &tr->tr_mc_head, mc_entries) 811 if (mc->mc_enm == enm) 812 break; 813 814 /* We won't delete entries we didn't add */ 815 if (mc == NULL) 816 return (EINVAL); 817 818 if ((error = ether_delmulti(ifr, &tr->tr_ac)) != ENETRESET) 819 return (error); 820 821 if ((error = trunk_ioctl_allports(tr, SIOCDELMULTI, 822 (caddr_t)ifr)) != 0) { 823 /* XXX At least one port failed to remove the address */ 824 if (tr->tr_ifflags & IFF_DEBUG) { 825 printf("%s: failed to remove multicast address " 826 "on all ports\n", tr->tr_ifname); 827 } 828 } 829 830 SLIST_REMOVE(&tr->tr_mc_head, mc, trunk_mc, mc_entries); 831 free(mc, M_DEVBUF); 832 833 return (0); 834 } 835 836 void 837 trunk_ether_purgemulti(struct trunk_softc *tr) 838 { 839 struct trunk_mc *mc; 840 struct trunk_ifreq ifs; 841 struct ifreq *ifr = &ifs.ifreq.ifreq; 842 843 while ((mc = SLIST_FIRST(&tr->tr_mc_head)) != NULL) { 844 bcopy(&mc->mc_addr, &ifr->ifr_addr, mc->mc_addr.ss_len); 845 846 /* Try to remove multicast address on all ports */ 847 trunk_ioctl_allports(tr, SIOCDELMULTI, (caddr_t)ifr); 848 849 SLIST_REMOVE(&tr->tr_mc_head, mc, trunk_mc, mc_entries); 850 free(mc, M_DEVBUF); 851 } 852 } 853 854 int 855 trunk_ether_cmdmulti(struct trunk_port *tp, u_long cmd) 856 { 857 struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk; 858 struct trunk_mc *mc; 859 struct trunk_ifreq ifs; 860 struct ifreq *ifr = &ifs.ifreq.ifreq; 861 int ret, error = 0; 862 863 bcopy(tp->tp_ifname, ifr->ifr_name, IFNAMSIZ); 864 SLIST_FOREACH(mc, &tr->tr_mc_head, mc_entries) { 865 bcopy(&mc->mc_addr, &ifr->ifr_addr, mc->mc_addr.ss_len); 866 867 if ((ret = tp->tp_ioctl(tp->tp_if, cmd, (caddr_t)ifr)) != 0) { 868 if (tr->tr_ifflags & IFF_DEBUG) { 869 printf("%s: ioctl %lu failed on %s: %d\n", 870 tr->tr_ifname, cmd, tp->tp_ifname, ret); 871 } 872 /* Store last known error and continue */ 873 error = ret; 874 } 875 } 876 877 return (error); 878 } 879 880 int 881 trunk_ioctl_allports(struct trunk_softc *tr, u_long cmd, caddr_t data) 882 { 883 struct ifreq *ifr = (struct ifreq *)data; 884 struct trunk_port *tp; 885 int ret, error = 0; 886 887 SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { 888 bcopy(tp->tp_ifname, ifr->ifr_name, IFNAMSIZ); 889 if ((ret = tp->tp_ioctl(tp->tp_if, cmd, data)) != 0) { 890 if (tr->tr_ifflags & IFF_DEBUG) { 891 printf("%s: ioctl %lu failed on %s: %d\n", 892 tr->tr_ifname, cmd, tp->tp_ifname, ret); 893 } 894 /* Store last known error and continue */ 895 error = ret; 896 } 897 } 898 899 return (error); 900 } 901 902 void 903 trunk_start(struct ifnet *ifp) 904 { 905 struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; 906 struct mbuf *m; 907 int error; 908 909 for (;;) { 910 IFQ_DEQUEUE(&ifp->if_snd, m); 911 if (m == NULL) 912 break; 913 914 #if NBPFILTER > 0 915 if (ifp->if_bpf) 916 bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT); 917 #endif 918 919 if (tr->tr_proto != TRUNK_PROTO_NONE && tr->tr_count) { 920 error = (*tr->tr_start)(tr, m); 921 if (error == 0) 922 ifp->if_opackets++; 923 else 924 ifp->if_oerrors++; 925 } else { 926 m_freem(m); 927 if (tr->tr_proto != TRUNK_PROTO_NONE) 928 ifp->if_oerrors++; 929 } 
930 } 931 } 932 933 int 934 trunk_enqueue(struct ifnet *ifp, struct mbuf *m) 935 { 936 int len, error = 0; 937 u_short mflags; 938 939 splassert(IPL_NET); 940 941 /* Send mbuf */ 942 mflags = m->m_flags; 943 len = m->m_pkthdr.len; 944 IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error); 945 if (error) 946 return (error); 947 if_start(ifp); 948 949 ifp->if_obytes += len; 950 if (mflags & M_MCAST) 951 ifp->if_omcasts++; 952 953 return (error); 954 } 955 956 u_int32_t 957 trunk_hashmbuf(struct mbuf *m, u_int32_t key) 958 { 959 u_int16_t etype, ether_vtag; 960 u_int32_t p = 0; 961 u_int16_t *vlan, vlanbuf[2]; 962 int off; 963 struct ether_header *eh; 964 #ifdef INET 965 struct ip *ip, ipbuf; 966 #endif 967 #ifdef INET6 968 u_int32_t flow; 969 struct ip6_hdr *ip6, ip6buf; 970 #endif 971 972 off = sizeof(*eh); 973 if (m->m_len < off) 974 return (p); 975 eh = mtod(m, struct ether_header *); 976 etype = ntohs(eh->ether_type); 977 p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key); 978 p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p); 979 980 /* Special handling for encapsulating VLAN frames */ 981 if (m->m_flags & M_VLANTAG) { 982 ether_vtag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag); 983 p = hash32_buf(&ether_vtag, sizeof(ether_vtag), p); 984 } else if (etype == ETHERTYPE_VLAN) { 985 if ((vlan = (u_int16_t *) 986 trunk_gethdr(m, off, EVL_ENCAPLEN, &vlanbuf)) == NULL) 987 return (p); 988 ether_vtag = EVL_VLANOFTAG(*vlan); 989 p = hash32_buf(&ether_vtag, sizeof(ether_vtag), p); 990 etype = ntohs(vlan[1]); 991 off += EVL_ENCAPLEN; 992 } 993 994 switch (etype) { 995 #ifdef INET 996 case ETHERTYPE_IP: 997 if ((ip = (struct ip *) 998 trunk_gethdr(m, off, sizeof(*ip), &ipbuf)) == NULL) 999 return (p); 1000 p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p); 1001 p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p); 1002 break; 1003 #endif 1004 #ifdef INET6 1005 case ETHERTYPE_IPV6: 1006 if ((ip6 = (struct ip6_hdr *) 1007 trunk_gethdr(m, off, sizeof(*ip6), &ip6buf)) == NULL) 1008 return (p); 1009 p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p); 1010 p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p); 1011 flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK; 1012 p = hash32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */ 1013 break; 1014 #endif 1015 } 1016 1017 return (p); 1018 } 1019 1020 void 1021 trunk_init(struct ifnet *ifp) 1022 { 1023 struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; 1024 int s; 1025 1026 s = splnet(); 1027 1028 ifp->if_flags |= IFF_RUNNING; 1029 ifp->if_flags &= ~IFF_OACTIVE; 1030 1031 if (tr->tr_init != NULL) 1032 (*tr->tr_init)(tr); 1033 1034 splx(s); 1035 } 1036 1037 void 1038 trunk_stop(struct ifnet *ifp) 1039 { 1040 struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; 1041 int s; 1042 1043 s = splnet(); 1044 1045 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1046 1047 if (tr->tr_stop != NULL) 1048 (*tr->tr_stop)(tr); 1049 1050 splx(s); 1051 } 1052 1053 void 1054 trunk_watchdog(struct ifnet *ifp) 1055 { 1056 struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; 1057 1058 if (tr->tr_proto != TRUNK_PROTO_NONE && 1059 (*tr->tr_watchdog)(tr) != 0) { 1060 ifp->if_oerrors++; 1061 } 1062 1063 } 1064 1065 int 1066 trunk_input(struct ifnet *ifp, struct ether_header *eh, struct mbuf *m) 1067 { 1068 struct trunk_softc *tr; 1069 struct trunk_port *tp; 1070 struct ifnet *trifp = NULL; 1071 int error = 0; 1072 1073 /* Should be checked by the caller */ 1074 if (ifp->if_type != IFT_IEEE8023ADLAG) { 1075 error = EPROTONOSUPPORT; 1076 goto bad; 1077 } 1078 if
((tp = (struct trunk_port *)ifp->if_tp) == NULL || 1079 (tr = (struct trunk_softc *)tp->tp_trunk) == NULL) { 1080 error = ENOENT; 1081 goto bad; 1082 } 1083 trifp = &tr->tr_ac.ac_if; 1084 if (tr->tr_proto == TRUNK_PROTO_NONE) { 1085 error = ENOENT; 1086 goto bad; 1087 } 1088 1089 #if NBPFILTER > 0 1090 if (trifp->if_bpf && tr->tr_proto != TRUNK_PROTO_FAILOVER) 1091 bpf_mtap_hdr(trifp->if_bpf, (char *)eh, ETHER_HDR_LEN, m, 1092 BPF_DIRECTION_IN); 1093 #endif 1094 1095 error = (*tr->tr_input)(tr, tp, eh, m); 1096 if (error != 0) 1097 return (error); 1098 1099 trifp->if_ipackets++; 1100 return (0); 1101 1102 bad: 1103 if (error > 0 && trifp != NULL) 1104 trifp->if_ierrors++; 1105 m_freem(m); 1106 return (error); 1107 } 1108 1109 int 1110 trunk_media_change(struct ifnet *ifp) 1111 { 1112 struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; 1113 1114 if (tr->tr_ifflags & IFF_DEBUG) 1115 printf("%s\n", __func__); 1116 1117 /* Ignore */ 1118 return (0); 1119 } 1120 1121 void 1122 trunk_media_status(struct ifnet *ifp, struct ifmediareq *imr) 1123 { 1124 struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc; 1125 struct trunk_port *tp; 1126 1127 imr->ifm_status = IFM_AVALID; 1128 imr->ifm_active = IFM_ETHER | IFM_AUTO; 1129 1130 SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { 1131 if (TRUNK_PORTACTIVE(tp)) 1132 imr->ifm_status |= IFM_ACTIVE; 1133 } 1134 } 1135 1136 void 1137 trunk_port_state(void *arg) 1138 { 1139 struct trunk_port *tp = (struct trunk_port *)arg; 1140 struct trunk_softc *tr = NULL; 1141 1142 if (tp != NULL) 1143 tr = (struct trunk_softc *)tp->tp_trunk; 1144 if (tr == NULL) 1145 return; 1146 if (tr->tr_linkstate != NULL) 1147 (*tr->tr_linkstate)(tp); 1148 trunk_link_active(tr, tp); 1149 } 1150 1151 struct trunk_port * 1152 trunk_link_active(struct trunk_softc *tr, struct trunk_port *tp) 1153 { 1154 struct trunk_port *tp_next, *rval = NULL; 1155 int new_link = LINK_STATE_DOWN; 1156 1157 /* 1158 * Search a port which reports an active link state. 1159 */ 1160 1161 if (tp == NULL) 1162 goto search; 1163 if (TRUNK_PORTACTIVE(tp)) { 1164 rval = tp; 1165 goto found; 1166 } 1167 if ((tp_next = SLIST_NEXT(tp, tp_entries)) != NULL && 1168 TRUNK_PORTACTIVE(tp_next)) { 1169 rval = tp_next; 1170 goto found; 1171 } 1172 1173 search: 1174 SLIST_FOREACH(tp_next, &tr->tr_ports, tp_entries) { 1175 if (TRUNK_PORTACTIVE(tp_next)) { 1176 rval = tp_next; 1177 goto found; 1178 } 1179 } 1180 1181 found: 1182 if (rval != NULL) { 1183 /* 1184 * The IEEE 802.1D standard assumes that a trunk with 1185 * multiple ports is always full duplex. This is valid 1186 * for load sharing trunks and if at least two links 1187 * are active. Unfortunately, checking the latter would 1188 * be too expensive at this point. 
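 * So while the trunk still advertises IFCAP_TRUNK_FULLDUPLEX and more
 * than one port is configured, the aggregate link state is simply
 * reported as LINK_STATE_FULL_DUPLEX below.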
1189 */ 1190 if ((tr->tr_capabilities & IFCAP_TRUNK_FULLDUPLEX) && 1191 (tr->tr_count > 1)) 1192 new_link = LINK_STATE_FULL_DUPLEX; 1193 else 1194 new_link = rval->tp_link_state; 1195 } 1196 1197 if (tr->tr_ac.ac_if.if_link_state != new_link) { 1198 tr->tr_ac.ac_if.if_link_state = new_link; 1199 if_link_state_change(&tr->tr_ac.ac_if); 1200 } 1201 1202 return (rval); 1203 } 1204 1205 const void * 1206 trunk_gethdr(struct mbuf *m, u_int off, u_int len, void *buf) 1207 { 1208 if (m->m_pkthdr.len < (off + len)) 1209 return (NULL); 1210 else if (m->m_len < (off + len)) { 1211 m_copydata(m, off, len, buf); 1212 return (buf); 1213 } 1214 return (mtod(m, caddr_t) + off); 1215 } 1216 1217 /* 1218 * Simple round robin trunking 1219 */ 1220 1221 int 1222 trunk_rr_attach(struct trunk_softc *tr) 1223 { 1224 struct trunk_port *tp; 1225 1226 tr->tr_detach = trunk_rr_detach; 1227 tr->tr_start = trunk_rr_start; 1228 tr->tr_input = trunk_rr_input; 1229 tr->tr_init = NULL; 1230 tr->tr_stop = NULL; 1231 tr->tr_port_create = NULL; 1232 tr->tr_port_destroy = trunk_rr_port_destroy; 1233 tr->tr_capabilities = IFCAP_TRUNK_FULLDUPLEX; 1234 tr->tr_req = NULL; 1235 tr->tr_portreq = NULL; 1236 1237 tp = SLIST_FIRST(&tr->tr_ports); 1238 tr->tr_psc = (caddr_t)tp; 1239 1240 return (0); 1241 } 1242 1243 int 1244 trunk_rr_detach(struct trunk_softc *tr) 1245 { 1246 tr->tr_psc = NULL; 1247 return (0); 1248 } 1249 1250 void 1251 trunk_rr_port_destroy(struct trunk_port *tp) 1252 { 1253 struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk; 1254 1255 if (tp == (struct trunk_port *)tr->tr_psc) 1256 tr->tr_psc = NULL; 1257 } 1258 1259 int 1260 trunk_rr_start(struct trunk_softc *tr, struct mbuf *m) 1261 { 1262 struct trunk_port *tp = (struct trunk_port *)tr->tr_psc, *tp_next; 1263 int error = 0; 1264 1265 if (tp == NULL && (tp = trunk_link_active(tr, NULL)) == NULL) { 1266 m_freem(m); 1267 return (ENOENT); 1268 } 1269 1270 /* Send mbuf */ 1271 if ((error = trunk_enqueue(tp->tp_if, m)) != 0) 1272 return (error); 1273 1274 /* Get next active port */ 1275 tp_next = trunk_link_active(tr, SLIST_NEXT(tp, tp_entries)); 1276 tr->tr_psc = (caddr_t)tp_next; 1277 1278 return (0); 1279 } 1280 1281 int 1282 trunk_rr_input(struct trunk_softc *tr, struct trunk_port *tp, 1283 struct ether_header *eh, struct mbuf *m) 1284 { 1285 struct ifnet *ifp = &tr->tr_ac.ac_if; 1286 1287 /* Just pass in the packet to our trunk device */ 1288 m->m_pkthdr.rcvif = ifp; 1289 1290 return (0); 1291 } 1292 1293 /* 1294 * Active failover 1295 */ 1296 1297 int 1298 trunk_fail_attach(struct trunk_softc *tr) 1299 { 1300 tr->tr_detach = trunk_fail_detach; 1301 tr->tr_start = trunk_fail_start; 1302 tr->tr_input = trunk_fail_input; 1303 tr->tr_init = NULL; 1304 tr->tr_stop = NULL; 1305 tr->tr_port_create = NULL; 1306 tr->tr_port_destroy = NULL; 1307 tr->tr_linkstate = NULL; 1308 tr->tr_req = NULL; 1309 tr->tr_portreq = NULL; 1310 1311 return (0); 1312 } 1313 1314 int 1315 trunk_fail_detach(struct trunk_softc *tr) 1316 { 1317 return (0); 1318 } 1319 1320 int 1321 trunk_fail_start(struct trunk_softc *tr, struct mbuf *m) 1322 { 1323 struct trunk_port *tp; 1324 1325 /* Use the master port if active or the next available port */ 1326 if ((tp = trunk_link_active(tr, tr->tr_primary)) == NULL) { 1327 m_freem(m); 1328 return (ENOENT); 1329 } 1330 1331 /* Send mbuf */ 1332 return (trunk_enqueue(tp->tp_if, m)); 1333 } 1334 1335 int 1336 trunk_fail_input(struct trunk_softc *tr, struct trunk_port *tp, 1337 struct ether_header *eh, struct mbuf *m) 1338 { 1339 struct ifnet *ifp = 
&tr->tr_ac.ac_if; 1340 struct trunk_port *tmp_tp; 1341 int accept = 0; 1342 1343 if (tp == tr->tr_primary) { 1344 accept = 1; 1345 } else if (tr->tr_primary->tp_link_state == LINK_STATE_DOWN) { 1346 tmp_tp = trunk_link_active(tr, NULL); 1347 /* 1348 * If tmp_tp is null, we've received a packet when all 1349 * our links are down. Weird, but process it anyways. 1350 */ 1351 if ((tmp_tp == NULL || tmp_tp == tp)) 1352 accept = 1; 1353 } 1354 if (!accept) { 1355 m_freem(m); 1356 return (-1); 1357 } 1358 #if NBPFILTER > 0 1359 if (ifp->if_bpf) 1360 bpf_mtap_hdr(ifp->if_bpf, (char *)eh, ETHER_HDR_LEN, m, 1361 BPF_DIRECTION_IN); 1362 #endif 1363 1364 m->m_pkthdr.rcvif = ifp; 1365 return (0); 1366 } 1367 1368 /* 1369 * Loadbalancing 1370 */ 1371 1372 int 1373 trunk_lb_attach(struct trunk_softc *tr) 1374 { 1375 struct trunk_lb *lb; 1376 1377 if ((lb = malloc(sizeof(*lb), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) 1378 return (ENOMEM); 1379 1380 tr->tr_detach = trunk_lb_detach; 1381 tr->tr_start = trunk_lb_start; 1382 tr->tr_input = trunk_lb_input; 1383 tr->tr_port_create = trunk_lb_port_create; 1384 tr->tr_port_destroy = trunk_lb_port_destroy; 1385 tr->tr_linkstate = NULL; 1386 tr->tr_capabilities = IFCAP_TRUNK_FULLDUPLEX; 1387 tr->tr_req = NULL; 1388 tr->tr_portreq = NULL; 1389 1390 lb->lb_key = arc4random(); 1391 tr->tr_psc = (caddr_t)lb; 1392 1393 return (0); 1394 } 1395 1396 int 1397 trunk_lb_detach(struct trunk_softc *tr) 1398 { 1399 struct trunk_lb *lb = (struct trunk_lb *)tr->tr_psc; 1400 if (lb != NULL) 1401 free(lb, M_DEVBUF); 1402 return (0); 1403 } 1404 1405 int 1406 trunk_lb_porttable(struct trunk_softc *tr, struct trunk_port *tp) 1407 { 1408 struct trunk_lb *lb = (struct trunk_lb *)tr->tr_psc; 1409 struct trunk_port *tp_next; 1410 int i = 0; 1411 1412 bzero(&lb->lb_ports, sizeof(lb->lb_ports)); 1413 SLIST_FOREACH(tp_next, &tr->tr_ports, tp_entries) { 1414 if (tp_next == tp) 1415 continue; 1416 if (i >= TRUNK_MAX_PORTS) 1417 return (EINVAL); 1418 if (tr->tr_ifflags & IFF_DEBUG) 1419 printf("%s: port %s at index %d\n", 1420 tr->tr_ifname, tp_next->tp_ifname, i); 1421 lb->lb_ports[i++] = tp_next; 1422 } 1423 1424 return (0); 1425 } 1426 1427 int 1428 trunk_lb_port_create(struct trunk_port *tp) 1429 { 1430 struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk; 1431 return (trunk_lb_porttable(tr, NULL)); 1432 } 1433 1434 void 1435 trunk_lb_port_destroy(struct trunk_port *tp) 1436 { 1437 struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk; 1438 trunk_lb_porttable(tr, tp); 1439 } 1440 1441 int 1442 trunk_lb_start(struct trunk_softc *tr, struct mbuf *m) 1443 { 1444 struct trunk_lb *lb = (struct trunk_lb *)tr->tr_psc; 1445 struct trunk_port *tp = NULL; 1446 u_int32_t p = 0; 1447 1448 p = trunk_hashmbuf(m, lb->lb_key); 1449 p %= tr->tr_count; 1450 tp = lb->lb_ports[p]; 1451 1452 /* 1453 * Check the port's link state. This will return the next active 1454 * port if the link is down or the port is NULL. 
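 * (The hash from trunk_hashmbuf() above is reduced with p %= tr->tr_count,
 * so with e.g. three ports a given flow always lands in the same lb_ports[]
 * slot; only when that port is down does this lookup move on to the next
 * active one.)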
1455 */ 1456 if ((tp = trunk_link_active(tr, tp)) == NULL) { 1457 m_freem(m); 1458 return (ENOENT); 1459 } 1460 1461 /* Send mbuf */ 1462 return (trunk_enqueue(tp->tp_if, m)); 1463 } 1464 1465 int 1466 trunk_lb_input(struct trunk_softc *tr, struct trunk_port *tp, 1467 struct ether_header *eh, struct mbuf *m) 1468 { 1469 struct ifnet *ifp = &tr->tr_ac.ac_if; 1470 1471 /* Just pass in the packet to our trunk device */ 1472 m->m_pkthdr.rcvif = ifp; 1473 1474 return (0); 1475 } 1476 1477 /* 1478 * Broadcast mode 1479 */ 1480 1481 int 1482 trunk_bcast_attach(struct trunk_softc *tr) 1483 { 1484 tr->tr_detach = trunk_bcast_detach; 1485 tr->tr_start = trunk_bcast_start; 1486 tr->tr_input = trunk_bcast_input; 1487 tr->tr_init = NULL; 1488 tr->tr_stop = NULL; 1489 tr->tr_port_create = NULL; 1490 tr->tr_port_destroy = NULL; 1491 tr->tr_linkstate = NULL; 1492 tr->tr_req = NULL; 1493 tr->tr_portreq = NULL; 1494 1495 return (0); 1496 } 1497 1498 int 1499 trunk_bcast_detach(struct trunk_softc *tr) 1500 { 1501 return (0); 1502 } 1503 1504 int 1505 trunk_bcast_start(struct trunk_softc *tr, struct mbuf *m) 1506 { 1507 int active_ports = 0; 1508 int errors = 0; 1509 int ret; 1510 struct trunk_port *tp; 1511 struct mbuf *n; 1512 1513 SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) { 1514 if (TRUNK_PORTACTIVE(tp)) { 1515 if (active_ports) { 1516 n = m_copym(m, 0, M_COPYALL, M_DONTWAIT); 1517 if (n == NULL) { 1518 m_freem(m); 1519 return (ENOBUFS); 1520 } 1521 } else 1522 n = m; 1523 active_ports++; 1524 if ((ret = trunk_enqueue(tp->tp_if, n))) 1525 errors++; 1526 } 1527 } 1528 if (active_ports == 0) { 1529 m_freem(m); 1530 return (ENOENT); 1531 } 1532 if (errors == active_ports) 1533 return (ret); 1534 return (0); 1535 } 1536 1537 int 1538 trunk_bcast_input(struct trunk_softc *tr, struct trunk_port *tp, 1539 struct ether_header *eh, struct mbuf *m) 1540 { 1541 struct ifnet *ifp = &tr->tr_ac.ac_if; 1542 1543 m->m_pkthdr.rcvif = ifp; 1544 return (0); 1545 } 1546 1547 /* 1548 * 802.3ad LACP 1549 */ 1550 1551 int 1552 trunk_lacp_attach(struct trunk_softc *tr) 1553 { 1554 struct trunk_port *tp; 1555 int error; 1556 1557 tr->tr_detach = trunk_lacp_detach; 1558 tr->tr_port_create = lacp_port_create; 1559 tr->tr_port_destroy = lacp_port_destroy; 1560 tr->tr_linkstate = lacp_linkstate; 1561 tr->tr_start = trunk_lacp_start; 1562 tr->tr_input = trunk_lacp_input; 1563 tr->tr_init = lacp_init; 1564 tr->tr_stop = lacp_stop; 1565 tr->tr_req = lacp_req; 1566 tr->tr_portreq = lacp_portreq; 1567 1568 error = lacp_attach(tr); 1569 if (error) 1570 return (error); 1571 1572 SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) 1573 lacp_port_create(tp); 1574 1575 return (error); 1576 } 1577 1578 int 1579 trunk_lacp_detach(struct trunk_softc *tr) 1580 { 1581 struct trunk_port *tp; 1582 int error; 1583 1584 SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) 1585 lacp_port_destroy(tp); 1586 1587 /* unlocking is safe here */ 1588 error = lacp_detach(tr); 1589 1590 return (error); 1591 } 1592 1593 int 1594 trunk_lacp_start(struct trunk_softc *tr, struct mbuf *m) 1595 { 1596 struct trunk_port *tp; 1597 1598 tp = lacp_select_tx_port(tr, m); 1599 if (tp == NULL) { 1600 m_freem(m); 1601 return (EBUSY); 1602 } 1603 1604 /* Send mbuf */ 1605 return (trunk_enqueue(tp->tp_if, m)); 1606 } 1607 1608 int 1609 trunk_lacp_input(struct trunk_softc *tr, struct trunk_port *tp, 1610 struct ether_header *eh, struct mbuf *m) 1611 { 1612 struct ifnet *ifp = &tr->tr_ac.ac_if; 1613 1614 m = lacp_input(tp, eh, m); 1615 if (m == NULL) 1616 return (-1); 1617 1618 
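        /*
         * lacp_input() has kept any LACP control traffic for itself and
         * returned NULL above; whatever is left is regular data, so hand
         * it to the trunk interface.
         */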
m->m_pkthdr.rcvif = ifp; 1619 return (0); 1620 } 1621
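/*
 * Illustrative sketch (not part of the driver): the hooks a new trunk
 * protocol would install from its attach routine, following the pattern of
 * trunk_fail_attach() and trunk_bcast_attach() above.  The "example" names
 * are hypothetical; a real protocol would also need an entry in the
 * trunk_protos[] table near the top of this file.
 */
#if 0
int
trunk_example_attach(struct trunk_softc *tr)
{
        tr->tr_detach = trunk_example_detach;   /* teardown, from SIOCSTRUNK */
        tr->tr_start = trunk_example_start;     /* pick a port, trunk_enqueue() */
        tr->tr_input = trunk_example_input;     /* set m->m_pkthdr.rcvif */
        tr->tr_init = NULL;                     /* optional, via trunk_init() */
        tr->tr_stop = NULL;                     /* optional, via trunk_stop() */
        tr->tr_port_create = NULL;              /* optional per-port setup */
        tr->tr_port_destroy = NULL;             /* optional per-port teardown */
        tr->tr_linkstate = NULL;                /* optional link state hook */
        tr->tr_req = NULL;                      /* optional SIOCGTRUNK payload */
        tr->tr_portreq = NULL;                  /* optional per-port ioctl payload */

        return (0);
}
#endif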