/*	$OpenBSD: if_trunk.c,v 1.65 2009/01/27 16:40:54 naddy Exp $	*/

/*
 * Copyright (c) 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "trunk.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/hash.h>

#include <dev/rndvar.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_trunk.h>
#include <net/trunklacp.h>


SLIST_HEAD(__trhead, trunk_softc) trunk_list;	/* list of trunks */

extern struct ifaddr **ifnet_addrs;

void	trunkattach(int);
int	trunk_clone_create(struct if_clone *, int);
int	trunk_clone_destroy(struct ifnet *);
void	trunk_lladdr(struct arpcom *, u_int8_t *);
int	trunk_capabilities(struct trunk_softc *);
void	trunk_port_lladdr(struct trunk_port *, u_int8_t *);
int	trunk_port_create(struct trunk_softc *, struct ifnet *);
int	trunk_port_destroy(struct trunk_port *);
void	trunk_port_watchdog(struct ifnet *);
void	trunk_port_state(void *);
int	trunk_port_ioctl(struct ifnet *, u_long, caddr_t);
struct trunk_port *trunk_port_get(struct trunk_softc *, struct ifnet *);
int	trunk_port_checkstacking(struct trunk_softc *);
void	trunk_port2req(struct trunk_port *, struct trunk_reqport *);
int	trunk_ioctl(struct ifnet *, u_long, caddr_t);
int	trunk_ether_addmulti(struct trunk_softc *, struct ifreq *);
int	trunk_ether_delmulti(struct trunk_softc *, struct ifreq *);
void	trunk_ether_purgemulti(struct trunk_softc *);
int	trunk_ether_cmdmulti(struct trunk_port *, u_long);
int	trunk_ioctl_allports(struct trunk_softc *, u_long, caddr_t);
void	trunk_start(struct ifnet *);
void	trunk_init(struct ifnet *);
void	trunk_stop(struct ifnet *);
void	trunk_watchdog(struct ifnet *);
int	trunk_media_change(struct ifnet *);
void	trunk_media_status(struct ifnet *, struct ifmediareq *);
struct trunk_port *trunk_link_active(struct trunk_softc *,
	    struct trunk_port *);
const void *trunk_gethdr(struct mbuf *, u_int, u_int, void *);

struct if_clone trunk_cloner =
    IF_CLONE_INITIALIZER("trunk", trunk_clone_create, trunk_clone_destroy);

/* Simple round robin */
int	trunk_rr_attach(struct trunk_softc *);
int	trunk_rr_detach(struct trunk_softc *);
void	trunk_rr_port_destroy(struct trunk_port *);
int	trunk_rr_start(struct trunk_softc *, struct mbuf *);
int	trunk_rr_input(struct trunk_softc *, struct trunk_port *,
	    struct ether_header *, struct mbuf *);

/* Active failover */
int	trunk_fail_attach(struct trunk_softc *);
int	trunk_fail_detach(struct trunk_softc *);
int	trunk_fail_start(struct trunk_softc *, struct mbuf *);
int	trunk_fail_input(struct trunk_softc *, struct trunk_port *,
	    struct ether_header *, struct mbuf *);

/* Loadbalancing */
int	trunk_lb_attach(struct trunk_softc *);
int	trunk_lb_detach(struct trunk_softc *);
int	trunk_lb_port_create(struct trunk_port *);
void	trunk_lb_port_destroy(struct trunk_port *);
int	trunk_lb_start(struct trunk_softc *, struct mbuf *);
int	trunk_lb_input(struct trunk_softc *, struct trunk_port *,
	    struct ether_header *, struct mbuf *);
int	trunk_lb_porttable(struct trunk_softc *, struct trunk_port *);

/* Broadcast mode */
int	trunk_bcast_attach(struct trunk_softc *);
int	trunk_bcast_detach(struct trunk_softc *);
int	trunk_bcast_start(struct trunk_softc *, struct mbuf *);
int	trunk_bcast_input(struct trunk_softc *, struct trunk_port *,
	    struct ether_header *, struct mbuf *);

/* 802.3ad LACP */
int	trunk_lacp_attach(struct trunk_softc *);
int	trunk_lacp_detach(struct trunk_softc *);
int	trunk_lacp_start(struct trunk_softc *, struct mbuf *);
int	trunk_lacp_input(struct trunk_softc *, struct trunk_port *,
	    struct ether_header *, struct mbuf *);

/* Trunk protocol table */
static const struct {
	enum trunk_proto	ti_proto;
	int			(*ti_attach)(struct trunk_softc *);
} trunk_protos[] = {
	{ TRUNK_PROTO_ROUNDROBIN,	trunk_rr_attach },
	{ TRUNK_PROTO_FAILOVER,		trunk_fail_attach },
	{ TRUNK_PROTO_LOADBALANCE,	trunk_lb_attach },
	{ TRUNK_PROTO_BROADCAST,	trunk_bcast_attach },
	{ TRUNK_PROTO_LACP,		trunk_lacp_attach },
	{ TRUNK_PROTO_NONE,		NULL }
};

void
trunkattach(int count)
{
	SLIST_INIT(&trunk_list);
	if_clone_attach(&trunk_cloner);
}

int
trunk_clone_create(struct if_clone *ifc, int unit)
{
	struct trunk_softc *tr;
	struct ifnet *ifp;
	int i, error = 0;

	if ((tr = malloc(sizeof(struct trunk_softc),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	tr->tr_unit = unit;
	tr->tr_proto = TRUNK_PROTO_NONE;
	for (i = 0; trunk_protos[i].ti_proto != TRUNK_PROTO_NONE; i++) {
		if (trunk_protos[i].ti_proto == TRUNK_PROTO_DEFAULT) {
			tr->tr_proto = trunk_protos[i].ti_proto;
			if ((error = trunk_protos[i].ti_attach(tr)) != 0) {
				free(tr, M_DEVBUF);
				return (error);
			}
			break;
		}
	}
	SLIST_INIT(&tr->tr_ports);

	/* Initialise pseudo media types */
	ifmedia_init(&tr->tr_media, 0, trunk_media_change,
	    trunk_media_status);
	ifmedia_add(&tr->tr_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&tr->tr_media, IFM_ETHER | IFM_AUTO);

	ifp = &tr->tr_ac.ac_if;
	ifp->if_carp = NULL;
	ifp->if_type = IFT_ETHER;
	ifp->if_softc = tr;
	ifp->if_start = trunk_start;
	ifp->if_watchdog = trunk_watchdog;
	ifp->if_ioctl = trunk_ioctl;
	ifp->if_output = ether_output;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_capabilities = trunk_capabilities(tr);

	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	IFQ_SET_READY(&ifp->if_snd);

	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "%s%d",
	    ifc->ifc_name, unit);

	/*
	 * Attach as an ordinary ethernet device, children will be attached
	 * as special devices of type IFT_IEEE8023ADLAG.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/* Insert into the global list of trunks */
	SLIST_INSERT_HEAD(&trunk_list, tr, tr_entries);

	return (0);
}

int
trunk_clone_destroy(struct ifnet *ifp)
{
	struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc;
	struct trunk_port *tp;
	int error, s;

	/* Remove any multicast groups that we may have joined. */
	trunk_ether_purgemulti(tr);

	s = splnet();

	/* Shutdown and remove trunk ports, return on error */
	while ((tp = SLIST_FIRST(&tr->tr_ports)) != NULL) {
		if ((error = trunk_port_destroy(tp)) != 0) {
			splx(s);
			return (error);
		}
	}

	ifmedia_delete_instance(&tr->tr_media, IFM_INST_ANY);
	ether_ifdetach(ifp);
	if_detach(ifp);

	SLIST_REMOVE(&trunk_list, tr, trunk_softc, tr_entries);
	free(tr, M_DEVBUF);

	splx(s);

	return (0);
}

void
trunk_lladdr(struct arpcom *ac, u_int8_t *lladdr)
{
	struct ifnet *ifp = &ac->ac_if;
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;

	ifa = ifnet_addrs[ifp->if_index];
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	sdl->sdl_type = IFT_ETHER;
	sdl->sdl_alen = ETHER_ADDR_LEN;
	bcopy(lladdr, LLADDR(sdl), ETHER_ADDR_LEN);
	bcopy(lladdr, ac->ac_enaddr, ETHER_ADDR_LEN);
}

int
trunk_capabilities(struct trunk_softc *tr)
{
	struct trunk_port *tp;
	int cap = ~0, priv;

	/* Preserve private capabilities */
	priv = tr->tr_capabilities & IFCAP_TRUNK_MASK;

	/* Get capabilities from the trunk ports */
	SLIST_FOREACH(tp, &tr->tr_ports, tp_entries)
		cap &= tp->tp_capabilities;

	if (tr->tr_ifflags & IFF_DEBUG) {
		printf("%s: capabilities 0x%08x\n",
		    tr->tr_ifname, cap == ~0 ? priv : (cap | priv));
	}

	return (cap == ~0 ? priv : (cap | priv));
}

void
trunk_port_lladdr(struct trunk_port *tp, u_int8_t *lladdr)
{
	struct ifnet *ifp = tp->tp_if;
	struct ifaddr *ifa;
	struct ifreq ifr;

	/* Set the link layer address */
	trunk_lladdr((struct arpcom *)ifp, lladdr);

	/* Reset the port to update the lladdr */
	if (ifp->if_flags & IFF_UP) {
		int s = splnet();
		ifp->if_flags &= ~IFF_UP;
		(*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
		ifp->if_flags |= IFF_UP;
		(*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
		splx(s);
		TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
			if (ifa->ifa_addr != NULL &&
			    ifa->ifa_addr->sa_family == AF_INET)
				arp_ifinit((struct arpcom *)ifp, ifa);
		}
	}
}

int
trunk_port_create(struct trunk_softc *tr, struct ifnet *ifp)
{
	struct trunk_softc *tr_ptr;
	struct trunk_port *tp;
	int error = 0;

	/* Limit the maximal number of trunk ports */
	if (tr->tr_count >= TRUNK_MAX_PORTS)
		return (ENOSPC);

	/* New trunk port has to be in an idle state */
	if (ifp->if_flags & IFF_OACTIVE)
		return (EBUSY);

	/* Check if port has already been associated to a trunk */
	if (trunk_port_get(NULL, ifp) != NULL)
		return (EBUSY);

	/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
	if (ifp->if_type != IFT_ETHER)
		return (EPROTONOSUPPORT);

	if ((error = ifpromisc(ifp, 1)) != 0)
		return (error);

	if ((tp = malloc(sizeof(struct trunk_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	/* Check if port is a stacked trunk */
	SLIST_FOREACH(tr_ptr, &trunk_list, tr_entries) {
		if (ifp == &tr_ptr->tr_ac.ac_if) {
			tp->tp_flags |= TRUNK_PORT_STACK;
			if (trunk_port_checkstacking(tr_ptr) >=
			    TRUNK_MAX_STACKING) {
				free(tp, M_DEVBUF);
				return (E2BIG);
			}
		}
	}

	/* Change the interface type */
	tp->tp_iftype = ifp->if_type;
	ifp->if_type = IFT_IEEE8023ADLAG;
	ifp->if_tp = (caddr_t)tp;
	tp->tp_watchdog = ifp->if_watchdog;
	ifp->if_watchdog = trunk_port_watchdog;
	tp->tp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = trunk_port_ioctl;

	tp->tp_if = ifp;
	tp->tp_trunk = tr;

	/* Save port link layer address */
	bcopy(((struct arpcom *)ifp)->ac_enaddr, tp->tp_lladdr, ETHER_ADDR_LEN);

	if (SLIST_EMPTY(&tr->tr_ports)) {
		tr->tr_primary = tp;
		tp->tp_flags |= TRUNK_PORT_MASTER;
		trunk_lladdr(&tr->tr_ac, tp->tp_lladdr);
	}

	/* Update link layer address for this port */
	trunk_port_lladdr(tp, tr->tr_primary->tp_lladdr);

	/* Insert into the list of ports */
	SLIST_INSERT_HEAD(&tr->tr_ports, tp, tp_entries);
	tr->tr_count++;

	/* Update trunk capabilities */
	tr->tr_capabilities = trunk_capabilities(tr);

	/* Add multicast addresses to this port */
	trunk_ether_cmdmulti(tp, SIOCADDMULTI);

	/* Register callback for physical link state changes */
	if (ifp->if_linkstatehooks != NULL)
		tp->lh_cookie = hook_establish(ifp->if_linkstatehooks, 1,
		    trunk_port_state, tp);

	if (tr->tr_port_create != NULL)
		error = (*tr->tr_port_create)(tp);

	return (error);
}

int
trunk_port_checkstacking(struct trunk_softc *tr)
{
	struct trunk_softc *tr_ptr;
	struct trunk_port *tp;
	int m = 0;

	SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) {
		if (tp->tp_flags & TRUNK_PORT_STACK) {
			tr_ptr = (struct trunk_softc *)tp->tp_if->if_softc;
			m = MAX(m, trunk_port_checkstacking(tr_ptr));
		}
	}

	return (m + 1);
}

int
trunk_port_destroy(struct trunk_port *tp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;
	struct trunk_port *tp_ptr;
	struct ifnet *ifp = tp->tp_if;

	if (tr->tr_port_destroy != NULL)
		(*tr->tr_port_destroy)(tp);

	/* Remove multicast addresses from this port */
	trunk_ether_cmdmulti(tp, SIOCDELMULTI);

	/* Port has to be down */
	if (ifp->if_flags & IFF_UP)
		if_down(ifp);

	ifpromisc(ifp, 0);

	/* Restore interface */
	ifp->if_type = tp->tp_iftype;
	ifp->if_watchdog = tp->tp_watchdog;
	ifp->if_ioctl = tp->tp_ioctl;
	ifp->if_tp = NULL;

	if (ifp->if_linkstatehooks != NULL)
		hook_disestablish(ifp->if_linkstatehooks, tp->lh_cookie);

	/* Finally, remove the port from the trunk */
	SLIST_REMOVE(&tr->tr_ports, tp, trunk_port, tp_entries);
	tr->tr_count--;

	/* Update the primary interface */
	if (tp == tr->tr_primary) {
		u_int8_t lladdr[ETHER_ADDR_LEN];

		if ((tp_ptr = SLIST_FIRST(&tr->tr_ports)) == NULL) {
			bzero(&lladdr, ETHER_ADDR_LEN);
		} else {
			bcopy(((struct arpcom *)tp_ptr->tp_if)->ac_enaddr,
			    lladdr, ETHER_ADDR_LEN);
			tp_ptr->tp_flags = TRUNK_PORT_MASTER;
		}
		trunk_lladdr(&tr->tr_ac, lladdr);
		tr->tr_primary = tp_ptr;

		/* Update link layer address for each port */
		SLIST_FOREACH(tp_ptr, &tr->tr_ports, tp_entries)
			trunk_port_lladdr(tp_ptr, lladdr);
	}

	/* Reset the port lladdr */
	trunk_port_lladdr(tp, tp->tp_lladdr);

	free(tp, M_DEVBUF);

	/* Update trunk capabilities */
	tr->tr_capabilities = trunk_capabilities(tr);

	return (0);
}

void
trunk_port_watchdog(struct ifnet *ifp)
{
	struct trunk_port *tp;

	/* Should be checked by the caller */
	if (ifp->if_type != IFT_IEEE8023ADLAG)
		return;
	if ((tp = (struct trunk_port *)ifp->if_tp) == NULL ||
	    tp->tp_trunk == NULL)
		return;

	if (tp->tp_watchdog != NULL)
		(*tp->tp_watchdog)(ifp);
}


int
trunk_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct trunk_reqport *rp = (struct trunk_reqport *)data;
	struct trunk_softc *tr;
	struct trunk_port *tp = NULL;
	int s, error = 0;

	s = splnet();

	/* Should be checked by the caller */
	if (ifp->if_type != IFT_IEEE8023ADLAG ||
	    (tp = (struct trunk_port *)ifp->if_tp) == NULL ||
	    (tr = (struct trunk_softc *)tp->tp_trunk) == NULL) {
		error = EINVAL;
		goto fallback;
	}

	switch (cmd) {
	case SIOCGTRUNKPORT:
		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {
			error = EINVAL;
			break;
		}

		/* Search in all trunks if the global flag is set */
		if ((tp = trunk_port_get(rp->rp_flags & TRUNK_PORT_GLOBAL ?
		    NULL : tr, ifp)) == NULL) {
			error = ENOENT;
			break;
		}

		trunk_port2req(tp, rp);
		break;
	default:
		error = ENOTTY;
		goto fallback;
	}

	splx(s);
	return (error);

 fallback:
	splx(s);

	if (tp != NULL)
		error = (*tp->tp_ioctl)(ifp, cmd, data);

	return (error);
}

void
trunk_port_ifdetach(struct ifnet *ifp)
{
	struct trunk_port *tp;

	if ((tp = (struct trunk_port *)ifp->if_tp) == NULL)
		return;

	trunk_port_destroy(tp);
}

struct trunk_port *
trunk_port_get(struct trunk_softc *tr, struct ifnet *ifp)
{
	struct trunk_port *tp;
	struct trunk_softc *tr_ptr;

	if (tr != NULL) {
		/* Search port in specified trunk */
		SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) {
			if (tp->tp_if == ifp)
				return (tp);
		}
	} else {
		/* Search all trunks for the selected port */
		SLIST_FOREACH(tr_ptr, &trunk_list, tr_entries) {
			SLIST_FOREACH(tp, &tr_ptr->tr_ports, tp_entries) {
				if (tp->tp_if == ifp)
					return (tp);
			}
		}
	}

	return (NULL);
}

void
trunk_port2req(struct trunk_port *tp, struct trunk_reqport *rp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;

	strlcpy(rp->rp_ifname, tr->tr_ifname, sizeof(rp->rp_ifname));
	strlcpy(rp->rp_portname, tp->tp_if->if_xname, sizeof(rp->rp_portname));
	rp->rp_prio = tp->tp_prio;
	if (tr->tr_portreq != NULL)
		(*tr->tr_portreq)(tp, (caddr_t)&rp->rp_psc);

	/* Add protocol specific flags */
	switch (tr->tr_proto) {
	case TRUNK_PROTO_FAILOVER:
		rp->rp_flags = tp->tp_flags;
		if (tp == trunk_link_active(tr, tr->tr_primary))
			rp->rp_flags |= TRUNK_PORT_ACTIVE;
		break;

	case TRUNK_PROTO_ROUNDROBIN:
	case TRUNK_PROTO_LOADBALANCE:
	case TRUNK_PROTO_BROADCAST:
		rp->rp_flags = tp->tp_flags;
		if (TRUNK_PORTACTIVE(tp))
			rp->rp_flags |= TRUNK_PORT_ACTIVE;
		break;

	case TRUNK_PROTO_LACP:
		rp->rp_flags = 0;
		/* LACP has a different definition of active */
		if (lacp_isactive(tp))
			rp->rp_flags |= TRUNK_PORT_ACTIVE;
		if (lacp_iscollecting(tp))
			rp->rp_flags |= TRUNK_PORT_COLLECTING;
		if (lacp_isdistributing(tp))
			rp->rp_flags |= TRUNK_PORT_DISTRIBUTING;
		break;
	default:
		break;
	}
}

int
trunk_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc;
	struct trunk_reqall *ra = (struct trunk_reqall *)data;
	struct trunk_reqport *rp = (struct trunk_reqport *)data, rpbuf;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct trunk_port *tp;
	struct ifnet *tpif;
	int s, i, error = 0;

	s = splnet();

	bzero(&rpbuf, sizeof(rpbuf));

	switch (cmd) {
	case SIOCGTRUNK:
		ra->ra_proto = tr->tr_proto;
		if (tr->tr_req != NULL)
			(*tr->tr_req)(tr, (caddr_t)&ra->ra_psc);
		ra->ra_ports = i = 0;
		tp = SLIST_FIRST(&tr->tr_ports);
		while (tp && ra->ra_size >=
		    i + sizeof(struct trunk_reqport)) {
			trunk_port2req(tp, &rpbuf);
			error = copyout(&rpbuf, (caddr_t)ra->ra_port + i,
			    sizeof(struct trunk_reqport));
			if (error)
				break;
			i += sizeof(struct trunk_reqport);
			ra->ra_ports++;
			tp = SLIST_NEXT(tp, tp_entries);
		}
		break;
	case SIOCSTRUNK:
		if ((error = suser(curproc, 0)) != 0) {
			error = EPERM;
			break;
		}
		if (ra->ra_proto >= TRUNK_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}
		if (tr->tr_proto != TRUNK_PROTO_NONE)
			error = tr->tr_detach(tr);
		if (error != 0)
			break;
		for (i = 0; i < (sizeof(trunk_protos) /
		    sizeof(trunk_protos[0])); i++) {
			if (trunk_protos[i].ti_proto == ra->ra_proto) {
				if (tr->tr_ifflags & IFF_DEBUG)
					printf("%s: using proto %u\n",
					    tr->tr_ifname,
					    trunk_protos[i].ti_proto);
				tr->tr_proto = trunk_protos[i].ti_proto;
				if (tr->tr_proto != TRUNK_PROTO_NONE)
					error = trunk_protos[i].ti_attach(tr);
				goto out;
			}
		}
		error = EPROTONOSUPPORT;
		break;
	case SIOCGTRUNKPORT:
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		/* Search in all trunks if the global flag is set */
		if ((tp = trunk_port_get(rp->rp_flags & TRUNK_PORT_GLOBAL ?
		    NULL : tr, tpif)) == NULL) {
			error = ENOENT;
			break;
		}

		trunk_port2req(tp, rp);
		break;
	case SIOCSTRUNKPORT:
		if ((error = suser(curproc, 0)) != 0) {
			error = EPERM;
			break;
		}
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}
		error = trunk_port_create(tr, tpif);
		break;
	case SIOCSTRUNKDELPORT:
		if ((error = suser(curproc, 0)) != 0) {
			error = EPERM;
			break;
		}
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		/* Search in all trunks if the global flag is set */
		if ((tp = trunk_port_get(rp->rp_flags & TRUNK_PORT_GLOBAL ?
		    NULL : tr, tpif)) == NULL) {
			error = ENOENT;
			break;
		}

		error = trunk_port_destroy(tp);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&tr->tr_ac, ifa);
#endif /* INET */
		error = ENETRESET;
		break;
	case SIOCSIFFLAGS:
		error = ENETRESET;
		break;
	case SIOCADDMULTI:
		error = trunk_ether_addmulti(tr, ifr);
		break;
	case SIOCDELMULTI:
		error = trunk_ether_delmulti(tr, ifr);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &tr->tr_media, cmd);
		break;
	case SIOCSIFLLADDR:
		/* Update the port lladdrs as well */
		SLIST_FOREACH(tp, &tr->tr_ports, tp_entries)
			trunk_port_lladdr(tp, ifr->ifr_addr.sa_data);
		error = ENETRESET;
		break;
	default:
		error = ether_ioctl(ifp, &tr->tr_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0)
				trunk_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				trunk_stop(ifp);
		}
		error = 0;
	}

 out:
	splx(s);
	return (error);
}

int
trunk_ether_addmulti(struct trunk_softc *tr, struct ifreq *ifr)
{
	struct trunk_mc *mc;
	u_int8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	int error;

	/* Ignore ENETRESET error code */
	if ((error = ether_addmulti(ifr, &tr->tr_ac)) != ENETRESET)
		return (error);

	if ((mc = malloc(sizeof(*mc), M_DEVBUF, M_NOWAIT)) == NULL) {
		error = ENOMEM;
		goto failed;
	}

	ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
	ETHER_LOOKUP_MULTI(addrlo, addrhi, &tr->tr_ac, mc->mc_enm);
	bcopy(&ifr->ifr_addr, &mc->mc_addr, ifr->ifr_addr.sa_len);
	SLIST_INSERT_HEAD(&tr->tr_mc_head, mc, mc_entries);

	if ((error = trunk_ioctl_allports(tr, SIOCADDMULTI,
	    (caddr_t)ifr)) != 0) {
		trunk_ether_delmulti(tr, ifr);
		return (error);
	}

	return (error);

 failed:
	ether_delmulti(ifr, &tr->tr_ac);

	return (error);
}

int
trunk_ether_delmulti(struct trunk_softc *tr, struct ifreq *ifr)
{
	struct ether_multi *enm;
	struct trunk_mc *mc;
	u_int8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	int error;

	if ((error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi)) != 0)
		return (error);
	ETHER_LOOKUP_MULTI(addrlo, addrhi, &tr->tr_ac, enm);
	if (enm == NULL)
		return (EINVAL);

	SLIST_FOREACH(mc, &tr->tr_mc_head, mc_entries)
		if (mc->mc_enm == enm)
			break;

	/* We won't delete entries we didn't add */
	if (mc == NULL)
		return (EINVAL);

	if ((error = ether_delmulti(ifr, &tr->tr_ac)) != ENETRESET)
		return (error);

	if ((error = trunk_ioctl_allports(tr, SIOCDELMULTI,
	    (caddr_t)ifr)) != 0) {
		/* XXX At least one port failed to remove the address */
		if (tr->tr_ifflags & IFF_DEBUG) {
			printf("%s: failed to remove multicast address "
			    "on all ports\n", tr->tr_ifname);
		}
	}

	SLIST_REMOVE(&tr->tr_mc_head, mc, trunk_mc, mc_entries);
	free(mc, M_DEVBUF);

	return (0);
}

void
trunk_ether_purgemulti(struct trunk_softc *tr)
{
	struct trunk_mc *mc;
	struct trunk_ifreq ifs;
	struct ifreq *ifr = &ifs.ifreq.ifreq;

	while ((mc = SLIST_FIRST(&tr->tr_mc_head)) != NULL) {
		bcopy(&mc->mc_addr, &ifr->ifr_addr, mc->mc_addr.ss_len);

		/* Try to remove multicast address on all ports */
		trunk_ioctl_allports(tr, SIOCDELMULTI, (caddr_t)ifr);

		SLIST_REMOVE(&tr->tr_mc_head, mc, trunk_mc, mc_entries);
		free(mc, M_DEVBUF);
	}
}

int
trunk_ether_cmdmulti(struct trunk_port *tp, u_long cmd)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;
	struct trunk_mc *mc;
	struct trunk_ifreq ifs;
	struct ifreq *ifr = &ifs.ifreq.ifreq;
	int ret, error = 0;

	bcopy(tp->tp_ifname, ifr->ifr_name, IFNAMSIZ);
	SLIST_FOREACH(mc, &tr->tr_mc_head, mc_entries) {
		bcopy(&mc->mc_addr, &ifr->ifr_addr, mc->mc_addr.ss_len);

		if ((ret = tp->tp_ioctl(tp->tp_if, cmd, (caddr_t)ifr)) != 0) {
			if (tr->tr_ifflags & IFF_DEBUG) {
				printf("%s: ioctl %lu failed on %s: %d\n",
				    tr->tr_ifname, cmd, tp->tp_ifname, ret);
			}
			/* Store last known error and continue */
			error = ret;
		}
	}

	return (error);
}

int
trunk_ioctl_allports(struct trunk_softc *tr, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct trunk_port *tp;
	int ret, error = 0;

	SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) {
		bcopy(tp->tp_ifname, ifr->ifr_name, IFNAMSIZ);
		if ((ret = tp->tp_ioctl(tp->tp_if, cmd, data)) != 0) {
			if (tr->tr_ifflags & IFF_DEBUG) {
				printf("%s: ioctl %lu failed on %s: %d\n",
				    tr->tr_ifname, cmd, tp->tp_ifname, ret);
			}
			/* Store last known error and continue */
			error = ret;
		}
	}

	return (error);
}

void
trunk_start(struct ifnet *ifp)
{
	struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc;
	struct mbuf *m;
	int error = 0;

	for (;; error = 0) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		if (tr->tr_proto != TRUNK_PROTO_NONE && tr->tr_count) {
			error = (*tr->tr_start)(tr, m);
			if (error == 0)
				ifp->if_opackets++;
			else
				ifp->if_oerrors++;
		} else {
			m_freem(m);
			if (tr->tr_proto != TRUNK_PROTO_NONE)
				ifp->if_oerrors++;
		}
	}
}

int
trunk_enqueue(struct ifnet *ifp, struct mbuf *m)
{
	int len, error = 0;
	u_short mflags;

	/* Send mbuf */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
	if (error)
		return (error);
	if_start(ifp);

	ifp->if_obytes += len;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	return (error);
}

u_int32_t
trunk_hashmbuf(struct mbuf *m, u_int32_t key)
{
	u_int16_t etype, ether_vtag;
	u_int32_t p = 0;
	u_int16_t *vlan, vlanbuf[2];
	int off;
	struct ether_header *eh;
#ifdef INET
	struct ip *ip, ipbuf;
#endif
#ifdef INET6
	u_int32_t flow;
	struct ip6_hdr *ip6, ip6buf;
#endif

	off = sizeof(*eh);
	if (m->m_len < off)
		return (p);
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);
	p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key);
	p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);

	/* Special handling for encapsulating VLAN frames */
	if (m->m_flags & M_VLANTAG) {
		ether_vtag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		p = hash32_buf(&ether_vtag, sizeof(ether_vtag), p);
	} else if (etype == ETHERTYPE_VLAN) {
		if ((vlan = (u_int16_t *)
		    trunk_gethdr(m, off, EVL_ENCAPLEN, &vlanbuf)) == NULL)
			return (p);
		ether_vtag = EVL_VLANOFTAG(*vlan);
		p = hash32_buf(&ether_vtag, sizeof(ether_vtag), p);
		etype = ntohs(vlan[1]);
		off += EVL_ENCAPLEN;
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		if ((ip = (struct ip *)
		    trunk_gethdr(m, off, sizeof(*ip), &ipbuf)) == NULL)
			return (p);
		p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
		p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		if ((ip6 = (struct ip6_hdr *)
		    trunk_gethdr(m, off, sizeof(*ip6), &ip6buf)) == NULL)
			return (p);
		p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
		p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
		flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
		p = hash32_buf(&flow, sizeof(flow), p);	/* IPv6 flow label */
		break;
#endif
	}

	return (p);
}

void
trunk_init(struct ifnet *ifp)
{
	struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc;
	int s;

	s = splnet();

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (tr->tr_init != NULL)
		(*tr->tr_init)(tr);

	splx(s);
}

void
trunk_stop(struct ifnet *ifp)
{
	struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc;
	int s;

	s = splnet();

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	if (tr->tr_stop != NULL)
		(*tr->tr_stop)(tr);

	splx(s);
}

void
trunk_watchdog(struct ifnet *ifp)
{
	struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc;

	if (tr->tr_proto != TRUNK_PROTO_NONE &&
	    (*tr->tr_watchdog)(tr) != 0) {
		ifp->if_oerrors++;
	}

}

int
trunk_input(struct ifnet *ifp, struct ether_header *eh, struct mbuf *m)
{
	struct trunk_softc *tr;
	struct trunk_port *tp;
	struct ifnet *trifp = NULL;
	int error = 0;

	/* Should be checked by the caller */
	if (ifp->if_type != IFT_IEEE8023ADLAG) {
		error = EPROTONOSUPPORT;
		goto bad;
	}
	if ((tp = (struct trunk_port *)ifp->if_tp) == NULL ||
	    (tr = (struct trunk_softc *)tp->tp_trunk) == NULL) {
		error = ENOENT;
		goto bad;
	}
	trifp = &tr->tr_ac.ac_if;
	if (tr->tr_proto == TRUNK_PROTO_NONE) {
		error = ENOENT;
		goto bad;
	}

#if NBPFILTER > 0
	if (trifp->if_bpf && tr->tr_proto != TRUNK_PROTO_FAILOVER)
		bpf_mtap_hdr(trifp->if_bpf, (char *)eh, ETHER_HDR_LEN, m,
		    BPF_DIRECTION_IN);
#endif

	error = (*tr->tr_input)(tr, tp, eh, m);
	if (error != 0)
		return (error);

	trifp->if_ipackets++;
	return (0);

 bad:
	if (error > 0 && trifp != NULL)
		trifp->if_ierrors++;
	m_freem(m);
	return (error);
}

int
trunk_media_change(struct ifnet *ifp)
{
	struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc;

	if (tr->tr_ifflags & IFF_DEBUG)
		printf("%s\n", __func__);

	/* Ignore */
	return (0);
}

void
trunk_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct trunk_softc *tr = (struct trunk_softc *)ifp->if_softc;
	struct trunk_port *tp;

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_ETHER | IFM_AUTO;

	SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) {
		if (TRUNK_PORTACTIVE(tp))
			imr->ifm_status |= IFM_ACTIVE;
	}
}

void
trunk_port_state(void *arg)
{
	struct trunk_port *tp = (struct trunk_port *)arg;
	struct trunk_softc *tr = NULL;

	if (tp != NULL)
		tr = (struct trunk_softc *)tp->tp_trunk;
	if (tr == NULL)
		return;
	if (tr->tr_linkstate != NULL)
		(*tr->tr_linkstate)(tp);
	trunk_link_active(tr, tp);
}

struct trunk_port *
trunk_link_active(struct trunk_softc *tr, struct trunk_port *tp)
{
	struct trunk_port *tp_next, *rval = NULL;
	int new_link = LINK_STATE_DOWN;

	/*
	 * Search for a port that reports an active link state.
	 */

	if (tp == NULL)
		goto search;
	if (TRUNK_PORTACTIVE(tp)) {
		rval = tp;
		goto found;
	}
	if ((tp_next = SLIST_NEXT(tp, tp_entries)) != NULL &&
	    TRUNK_PORTACTIVE(tp_next)) {
		rval = tp_next;
		goto found;
	}

 search:
	SLIST_FOREACH(tp_next, &tr->tr_ports, tp_entries) {
		if (TRUNK_PORTACTIVE(tp_next)) {
			rval = tp_next;
			goto found;
		}
	}

 found:
	if (rval != NULL) {
		/*
		 * The IEEE 802.1D standard assumes that a trunk with
		 * multiple ports is always full duplex. This is valid
		 * for load sharing trunks and if at least two links
		 * are active. Unfortunately, checking the latter would
		 * be too expensive at this point.
		 */
		if ((tr->tr_capabilities & IFCAP_TRUNK_FULLDUPLEX) &&
		    (tr->tr_count > 1))
			new_link = LINK_STATE_FULL_DUPLEX;
		else
			new_link = rval->tp_link_state;
	}

	if (tr->tr_ac.ac_if.if_link_state != new_link) {
		tr->tr_ac.ac_if.if_link_state = new_link;
		if_link_state_change(&tr->tr_ac.ac_if);
	}

	return (rval);
}

const void *
trunk_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
{
	if (m->m_pkthdr.len < (off + len))
		return (NULL);
	else if (m->m_len < (off + len)) {
		m_copydata(m, off, len, buf);
		return (buf);
	}
	return (mtod(m, const void *) + off);
}

/*
 * Simple round robin trunking
 */

int
trunk_rr_attach(struct trunk_softc *tr)
{
	struct trunk_port *tp;

	tr->tr_detach = trunk_rr_detach;
	tr->tr_start = trunk_rr_start;
	tr->tr_input = trunk_rr_input;
	tr->tr_init = NULL;
	tr->tr_stop = NULL;
	tr->tr_port_create = NULL;
	tr->tr_port_destroy = trunk_rr_port_destroy;
	tr->tr_capabilities = IFCAP_TRUNK_FULLDUPLEX;
	tr->tr_req = NULL;
	tr->tr_portreq = NULL;

	tp = SLIST_FIRST(&tr->tr_ports);
	tr->tr_psc = (caddr_t)tp;

	return (0);
}

int
trunk_rr_detach(struct trunk_softc *tr)
{
	tr->tr_psc = NULL;
	return (0);
}

void
trunk_rr_port_destroy(struct trunk_port *tp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;

	if (tp == (struct trunk_port *)tr->tr_psc)
		tr->tr_psc = NULL;
}

int
trunk_rr_start(struct trunk_softc *tr, struct mbuf *m)
{
	struct trunk_port *tp = (struct trunk_port *)tr->tr_psc, *tp_next;
	int error = 0;

	if (tp == NULL && (tp = trunk_link_active(tr, NULL)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	if ((error = trunk_enqueue(tp->tp_if, m)) != 0)
		return (error);

	/* Get next active port */
	tp_next = trunk_link_active(tr, SLIST_NEXT(tp, tp_entries));
	tr->tr_psc = (caddr_t)tp_next;

	return (0);
}

int
trunk_rr_input(struct trunk_softc *tr, struct trunk_port *tp,
    struct ether_header *eh, struct mbuf *m)
{
	struct ifnet *ifp = &tr->tr_ac.ac_if;

	/* Just pass in the packet to our trunk device */
	m->m_pkthdr.rcvif = ifp;

	return (0);
}

/*
 * Active failover
 */

int
trunk_fail_attach(struct trunk_softc *tr)
{
	tr->tr_detach = trunk_fail_detach;
	tr->tr_start = trunk_fail_start;
	tr->tr_input = trunk_fail_input;
	tr->tr_init = NULL;
	tr->tr_stop = NULL;
	tr->tr_port_create = NULL;
	tr->tr_port_destroy = NULL;
	tr->tr_linkstate = NULL;
	tr->tr_req = NULL;
	tr->tr_portreq = NULL;

	return (0);
}

int
trunk_fail_detach(struct trunk_softc *tr)
{
	return (0);
}

int
trunk_fail_start(struct trunk_softc *tr, struct mbuf *m)
{
	struct trunk_port *tp;

	/* Use the master port if active or the next available port */
	if ((tp = trunk_link_active(tr, tr->tr_primary)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (trunk_enqueue(tp->tp_if, m));
}

int
trunk_fail_input(struct trunk_softc *tr, struct trunk_port *tp,
    struct ether_header *eh, struct mbuf *m)
{
	struct ifnet *ifp = &tr->tr_ac.ac_if;
	struct trunk_port *tmp_tp;
	int accept = 0;

	if (tp == tr->tr_primary) {
		accept = 1;
	} else if (tr->tr_primary->tp_link_state == LINK_STATE_DOWN) {
		tmp_tp = trunk_link_active(tr, NULL);
		/*
		 * If tmp_tp is null, we've received a packet when all
		 * our links are down. Weird, but process it anyway.
		 */
		if ((tmp_tp == NULL || tmp_tp == tp))
			accept = 1;
	}
	if (!accept) {
		m_freem(m);
		return (-1);
	}
#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap_hdr(ifp->if_bpf, (char *)eh, ETHER_HDR_LEN, m,
		    BPF_DIRECTION_IN);
#endif

	m->m_pkthdr.rcvif = ifp;
	return (0);
}

/*
 * Loadbalancing
 */

int
trunk_lb_attach(struct trunk_softc *tr)
{
	struct trunk_lb *lb;

	if ((lb = malloc(sizeof(*lb), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	tr->tr_detach = trunk_lb_detach;
	tr->tr_start = trunk_lb_start;
	tr->tr_input = trunk_lb_input;
	tr->tr_port_create = trunk_lb_port_create;
	tr->tr_port_destroy = trunk_lb_port_destroy;
	tr->tr_linkstate = NULL;
	tr->tr_capabilities = IFCAP_TRUNK_FULLDUPLEX;
	tr->tr_req = NULL;
	tr->tr_portreq = NULL;

	lb->lb_key = arc4random();
	tr->tr_psc = (caddr_t)lb;

	return (0);
}

int
trunk_lb_detach(struct trunk_softc *tr)
{
	struct trunk_lb *lb = (struct trunk_lb *)tr->tr_psc;
	if (lb != NULL)
		free(lb, M_DEVBUF);
	return (0);
}

int
trunk_lb_porttable(struct trunk_softc *tr, struct trunk_port *tp)
{
	struct trunk_lb *lb = (struct trunk_lb *)tr->tr_psc;
	struct trunk_port *tp_next;
	int i = 0;

	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	SLIST_FOREACH(tp_next, &tr->tr_ports, tp_entries) {
		if (tp_next == tp)
			continue;
		if (i >= TRUNK_MAX_PORTS)
			return (EINVAL);
		if (tr->tr_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    tr->tr_ifname, tp_next->tp_ifname, i);
		lb->lb_ports[i++] = tp_next;
	}

	return (0);
}

int
trunk_lb_port_create(struct trunk_port *tp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;
	return (trunk_lb_porttable(tr, NULL));
}

void
trunk_lb_port_destroy(struct trunk_port *tp)
{
	struct trunk_softc *tr = (struct trunk_softc *)tp->tp_trunk;
	trunk_lb_porttable(tr, tp);
}

int
trunk_lb_start(struct trunk_softc *tr, struct mbuf *m)
{
	struct trunk_lb *lb = (struct trunk_lb *)tr->tr_psc;
	struct trunk_port *tp = NULL;
	u_int32_t p = 0;

	p = trunk_hashmbuf(m, lb->lb_key);
	p %= tr->tr_count;
	tp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((tp = trunk_link_active(tr, tp)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (trunk_enqueue(tp->tp_if, m));
}

int
trunk_lb_input(struct trunk_softc *tr, struct trunk_port *tp,
    struct ether_header *eh, struct mbuf *m)
{
	struct ifnet *ifp = &tr->tr_ac.ac_if;

	/* Just pass in the packet to our trunk device */
	m->m_pkthdr.rcvif = ifp;

	return (0);
}

/*
 * Broadcast mode
 */

int
trunk_bcast_attach(struct trunk_softc *tr)
{
	tr->tr_detach = trunk_bcast_detach;
	tr->tr_start = trunk_bcast_start;
	tr->tr_input = trunk_bcast_input;
	tr->tr_init = NULL;
	tr->tr_stop = NULL;
	tr->tr_port_create = NULL;
	tr->tr_port_destroy = NULL;
	tr->tr_linkstate = NULL;
	tr->tr_req = NULL;
	tr->tr_portreq = NULL;

	return (0);
}

int
trunk_bcast_detach(struct trunk_softc *tr)
{
	return (0);
}

int
trunk_bcast_start(struct trunk_softc *tr, struct mbuf *m)
{
	int active_ports = 0;
	int errors = 0;
	int ret;
	struct trunk_port *tp;
	struct mbuf *n;

	SLIST_FOREACH(tp, &tr->tr_ports, tp_entries) {
		if (TRUNK_PORTACTIVE(tp)) {
			if (active_ports) {
				n = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
				if (n == NULL) {
					m_free(m);
					return (ENOBUFS);
				}
			} else
				n = m;
			active_ports++;
			if ((ret = trunk_enqueue(tp->tp_if, n)))
				errors++;
		}
	}
	if (active_ports == 0) {
		m_free(m);
		return (ENOENT);
	}
	if (errors == active_ports)
		return (ret);
	return (0);
}

int
trunk_bcast_input(struct trunk_softc *tr, struct trunk_port *tp,
    struct ether_header *eh, struct mbuf *m)
{
	struct ifnet *ifp = &tr->tr_ac.ac_if;

	m->m_pkthdr.rcvif = ifp;
	return (0);
}

/*
 * 802.3ad LACP
 */

int
trunk_lacp_attach(struct trunk_softc *tr)
{
	struct trunk_port *tp;
	int error;

	tr->tr_detach = trunk_lacp_detach;
	tr->tr_port_create = lacp_port_create;
	tr->tr_port_destroy = lacp_port_destroy;
	tr->tr_linkstate = lacp_linkstate;
	tr->tr_start = trunk_lacp_start;
	tr->tr_input = trunk_lacp_input;
	tr->tr_init = lacp_init;
	tr->tr_stop = lacp_stop;
	tr->tr_req = lacp_req;
	tr->tr_portreq = lacp_portreq;

	error = lacp_attach(tr);
	if (error)
		return (error);

	SLIST_FOREACH(tp, &tr->tr_ports, tp_entries)
		lacp_port_create(tp);

	return (error);
}

int
trunk_lacp_detach(struct trunk_softc *tr)
{
	struct trunk_port *tp;
	int error;

	SLIST_FOREACH(tp, &tr->tr_ports, tp_entries)
		lacp_port_destroy(tp);

	/* unlocking is safe here */
	error = lacp_detach(tr);

	return (error);
}

int
trunk_lacp_start(struct trunk_softc *tr, struct mbuf *m)
{
	struct trunk_port *tp;

	tp = lacp_select_tx_port(tr, m);
	if (tp == NULL) {
		m_freem(m);
		return (EBUSY);
	}

	/* Send mbuf */
	return (trunk_enqueue(tp->tp_if, m));
}

int
trunk_lacp_input(struct trunk_softc *tr, struct trunk_port *tp,
    struct ether_header *eh, struct mbuf *m)
{
	struct ifnet *ifp = &tr->tr_ac.ac_if;
	u_short etype;

	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if (etype == ETHERTYPE_SLOW) {
		m = lacp_input(tp, eh, m);
		if (m == NULL)
			return (-1);
	}

	/*
	 * If the port is not collecting or not in the active aggregator then
	 * free and return.
	 */
	if (lacp_iscollecting(tp) == 0 || lacp_isactive(tp) == 0) {
		m_freem(m);
		return (-1);
	}

	m->m_pkthdr.rcvif = ifp;
	return (0);
}