/*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 * Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/bpf.h>
#include <net/route.h>
#include <net/vnet.h>
#include <net/infiniband.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/ip.h>
#endif
#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>

#ifdef DEV_NETMAP
MODULE_DEPEND(if_lagg, netmap, 1, 1, 1);
#endif

#define	LAGG_SX_INIT(_sc)	sx_init(&(_sc)->sc_sx, "if_lagg sx")
#define	LAGG_SX_DESTROY(_sc)	sx_destroy(&(_sc)->sc_sx)
#define	LAGG_XLOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
#define	LAGG_XUNLOCK(_sc)	sx_xunlock(&(_sc)->sc_sx)
#define	LAGG_XLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_XLOCKED)
#define	LAGG_SLOCK(_sc)		sx_slock(&(_sc)->sc_sx)
#define	LAGG_SUNLOCK(_sc)	sx_sunlock(&(_sc)->sc_sx)
#define	LAGG_SXLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_LOCKED)

/* Special flags we should propagate to the lagg ports. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

struct lagg_snd_tag {
	struct m_snd_tag com;
	struct m_snd_tag *tag;
};

VNET_DEFINE_STATIC(SLIST_HEAD(__trhead, lagg_softc), lagg_list); /* list of laggs */
#define	V_lagg_list	VNET(lagg_list)
VNET_DEFINE_STATIC(struct mtx, lagg_list_mtx);
#define	V_lagg_list_mtx	VNET(lagg_list_mtx)
#define	LAGG_LIST_LOCK_INIT(x)		mtx_init(&V_lagg_list_mtx, \
					"if_lagg list", NULL, MTX_DEF)
#define	LAGG_LIST_LOCK_DESTROY(x)	mtx_destroy(&V_lagg_list_mtx)
#define	LAGG_LIST_LOCK(x)		mtx_lock(&V_lagg_list_mtx)
#define	LAGG_LIST_UNLOCK(x)		mtx_unlock(&V_lagg_list_mtx)
static eventhandler_tag	lagg_detach_cookie = NULL;

static int	lagg_clone_create(struct if_clone *, char *, size_t,
		    struct ifc_data *, struct ifnet **);
static int	lagg_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
VNET_DEFINE_STATIC(struct if_clone *, lagg_cloner);
#define	V_lagg_cloner	VNET(lagg_cloner)
static const char laggname[] = "lagg";
static MALLOC_DEFINE(M_LAGG, laggname, "802.3AD Link Aggregation Interface");

static void	lagg_capabilities(struct lagg_softc *);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input_ethernet(struct ifnet *, struct mbuf *);
static struct mbuf *lagg_input_infiniband(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
#ifdef LAGG_PORT_STACKING
static int	lagg_port_checkstacking(struct lagg_softc *);
#endif
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_if_updown(struct lagg_softc *, bool);
static void	lagg_init(void *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
#if defined(KERN_TLS) || defined(RATELIMIT)
static int	lagg_snd_tag_alloc(struct ifnet *,
		    union if_snd_tag_alloc_params *,
		    struct m_snd_tag **);
static int	lagg_snd_tag_modify(struct m_snd_tag *,
		    union if_snd_tag_modify_params *);
static int	lagg_snd_tag_query(struct m_snd_tag *,
		    union if_snd_tag_query_params *);
static void	lagg_snd_tag_free(struct m_snd_tag *);
static struct m_snd_tag *lagg_next_snd_tag(struct m_snd_tag *);
static void	lagg_ratelimit_query(struct ifnet *,
		    struct if_ratelimit_query_results *);
#endif
static int	lagg_setmulti(struct lagg_port *);
static int	lagg_clrmulti(struct lagg_port *);
static void	lagg_setcaps(struct lagg_port *, int cap, int cap2);
static int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static int	lagg_setflags(struct lagg_port *, int status);
static uint64_t	lagg_get_counter(struct ifnet *ifp, ift_counter cnt);
static int	lagg_transmit_ethernet(struct ifnet *, struct mbuf *);
static int	lagg_transmit_infiniband(struct ifnet *, struct mbuf *);
static void	lagg_qflush(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);

/* Simple round robin */
static void	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);

/* Active failover */
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Loadbalancing */
static void	lagg_lb_attach(struct lagg_softc *);
static void	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

/* Broadcast */
static int	lagg_bcast_start(struct lagg_softc *, struct mbuf *);

/* 802.3ad LACP */
static void	lagg_lacp_attach(struct lagg_softc *);
static void	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);

/* Default input */
static struct mbuf *lagg_default_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* lagg protocol table */
static const struct lagg_proto {
	lagg_proto	pr_num;
	void		(*pr_attach)(struct lagg_softc *);
	void		(*pr_detach)(struct lagg_softc *);
	int		(*pr_start)(struct lagg_softc *, struct mbuf *);
	struct mbuf *	(*pr_input)(struct lagg_softc *, struct lagg_port *,
			    struct mbuf *);
	int		(*pr_addport)(struct lagg_port *);
	void		(*pr_delport)(struct lagg_port *);
	void		(*pr_linkstate)(struct lagg_port *);
	void		(*pr_init)(struct lagg_softc *);
	void		(*pr_stop)(struct lagg_softc *);
	void		(*pr_lladdr)(struct lagg_softc *);
	void		(*pr_request)(struct lagg_softc *, void *);
	void		(*pr_portreq)(struct lagg_port *, void *);
} lagg_protos[] = {
	{
		.pr_num = LAGG_PROTO_NONE
	},
	{
		.pr_num = LAGG_PROTO_ROUNDROBIN,
		.pr_attach = lagg_rr_attach,
		.pr_start = lagg_rr_start,
		.pr_input = lagg_default_input,
	},
	{
		.pr_num = LAGG_PROTO_FAILOVER,
		.pr_start = lagg_fail_start,
		.pr_input = lagg_fail_input,
	},
	{
		.pr_num = LAGG_PROTO_LOADBALANCE,
		.pr_attach = lagg_lb_attach,
		.pr_detach = lagg_lb_detach,
		.pr_start = lagg_lb_start,
		.pr_input = lagg_default_input,
		.pr_addport = lagg_lb_port_create,
		.pr_delport = lagg_lb_port_destroy,
	},
	{
		.pr_num = LAGG_PROTO_LACP,
		.pr_attach = lagg_lacp_attach,
		.pr_detach = lagg_lacp_detach,
		.pr_start = lagg_lacp_start,
		.pr_input = lagg_lacp_input,
		.pr_addport = lacp_port_create,
		.pr_delport = lacp_port_destroy,
		.pr_linkstate = lacp_linkstate,
		.pr_init = lacp_init,
		.pr_stop = lacp_stop,
		.pr_lladdr = lagg_lacp_lladdr,
		.pr_request = lacp_req,
		.pr_portreq = lacp_portreq,
	},
	{
		.pr_num = LAGG_PROTO_BROADCAST,
		.pr_start = lagg_bcast_start,
		.pr_input = lagg_default_input,
	},
};

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Link Aggregation");

/* Allow input on any failover links */
VNET_DEFINE_STATIC(int, lagg_failover_rx_all);
#define	V_lagg_failover_rx_all	VNET(lagg_failover_rx_all)
SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(lagg_failover_rx_all), 0,
    "Accept input from any interface in a failover lagg");

/* Default value for using flowid */
VNET_DEFINE_STATIC(int, def_use_flowid) = 0;
#define	V_def_use_flowid	VNET(def_use_flowid)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid,
    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(def_use_flowid), 0,
    "Default setting for using flow id for load sharing");

/* Default value for using numa */
VNET_DEFINE_STATIC(int, def_use_numa) = 1;
#define	V_def_use_numa	VNET(def_use_numa)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_numa,
    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(def_use_numa), 0,
    "Use numa to steer flows");

/* Default value for flowid shift */
VNET_DEFINE_STATIC(int, def_flowid_shift) = 16;
#define	V_def_flowid_shift	VNET(def_flowid_shift)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift,
    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(def_flowid_shift), 0,
    "Default setting for flowid shift for load sharing");

static void
vnet_lagg_init(const void *unused __unused)
{

	LAGG_LIST_LOCK_INIT();
	SLIST_INIT(&V_lagg_list);
	struct if_clone_addreq req = {
		.create_f = lagg_clone_create,
		.destroy_f = lagg_clone_destroy,
		.flags = IFC_F_AUTOUNIT,
	};
	V_lagg_cloner = ifc_attach_cloner(laggname, &req);
}
VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_lagg_init, NULL);

static void
vnet_lagg_uninit(const void *unused __unused)
{

	ifc_detach_cloner(V_lagg_cloner);
	LAGG_LIST_LOCK_DESTROY();
}
VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_lagg_uninit, NULL);

static int
lagg_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		lagg_input_ethernet_p = lagg_input_ethernet;
		lagg_input_infiniband_p = lagg_input_infiniband;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		lagg_input_ethernet_p = NULL;
		lagg_input_infiniband_p = NULL;
		lagg_linkstate_p = NULL;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t lagg_mod = {
	"if_lagg",
	lagg_modevent,
	0
};

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_lagg, 1);
MODULE_DEPEND(if_lagg, if_infiniband, 1, 1, 1);

static void
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
{

	LAGG_XLOCK_ASSERT(sc);
	KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
	    __func__, sc));

	if (sc->sc_ifflags & IFF_DEBUG)
		if_printf(sc->sc_ifp, "using proto %u\n", pr);

	if (lagg_protos[pr].pr_attach != NULL)
		lagg_protos[pr].pr_attach(sc);
	sc->sc_proto = pr;
}

static void
lagg_proto_detach(struct lagg_softc *sc)
{
	lagg_proto pr;

	LAGG_XLOCK_ASSERT(sc);
	pr = sc->sc_proto;
	sc->sc_proto = LAGG_PROTO_NONE;

	if (lagg_protos[pr].pr_detach != NULL)
		lagg_protos[pr].pr_detach(sc);
}

static inline int
lagg_proto_start(struct lagg_softc *sc, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_start(sc, m));
}
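
/*
 * The lagg_proto_*() wrappers dispatch through the lagg_protos[] table
 * using the protocol currently selected in sc->sc_proto.  Handlers that a
 * protocol leaves NULL are simply skipped; pr_start and pr_input are
 * invoked unconditionally, so every protocol that carries traffic is
 * expected to provide them.
 */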

static inline struct mbuf *
lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_input(sc, lp, m));
}

static int
lagg_proto_addport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_addport == NULL)
		return (0);
	else
		return (lagg_protos[sc->sc_proto].pr_addport(lp));
}

static void
lagg_proto_delport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_delport != NULL)
		lagg_protos[sc->sc_proto].pr_delport(lp);
}

static void
lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_linkstate != NULL)
		lagg_protos[sc->sc_proto].pr_linkstate(lp);
}

static void
lagg_proto_init(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_init != NULL)
		lagg_protos[sc->sc_proto].pr_init(sc);
}

static void
lagg_proto_stop(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_stop != NULL)
		lagg_protos[sc->sc_proto].pr_stop(sc);
}

static void
lagg_proto_lladdr(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_lladdr != NULL)
		lagg_protos[sc->sc_proto].pr_lladdr(sc);
}

static void
lagg_proto_request(struct lagg_softc *sc, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_request != NULL)
		lagg_protos[sc->sc_proto].pr_request(sc, v);
}

static void
lagg_proto_portreq(struct lagg_softc *sc, struct lagg_port *lp, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_portreq != NULL)
		lagg_protos[sc->sc_proto].pr_portreq(lp, v);
}

/*
 * This routine is run via a vlan
 * config EVENT
 */
static void
lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_XLOCK(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
	LAGG_XUNLOCK(sc);
}

/*
 * This routine is run via a vlan
 * unconfig EVENT
 */
static void
lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_XLOCK(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
	LAGG_XUNLOCK(sc);
}

static int
lagg_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
	struct iflaggparam iflp;
	struct lagg_softc *sc;
	struct ifnet *ifp;
	int if_type;
	int error;
	static const uint8_t eaddr[LAGG_ADDR_LEN];

	if (ifd->params != NULL) {
		error = ifc_copyin(ifd, &iflp, sizeof(iflp));
		if (error)
			return (error);

		switch (iflp.lagg_type) {
		case LAGG_TYPE_ETHERNET:
			if_type = IFT_ETHER;
			break;
		case LAGG_TYPE_INFINIBAND:
			if_type = IFT_INFINIBAND;
			break;
		default:
			return (EINVAL);
		}
	} else {
		if_type = IFT_ETHER;
	}

	sc = malloc(sizeof(*sc), M_LAGG, M_WAITOK | M_ZERO);
	ifp = sc->sc_ifp = if_alloc(if_type);
	LAGG_SX_INIT(sc);

	mtx_init(&sc->sc_mtx, "lagg-mtx", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	LAGG_XLOCK(sc);
	if (V_def_use_flowid)
		sc->sc_opts |= LAGG_OPT_USE_FLOWID;
	if (V_def_use_numa)
		sc->sc_opts |= LAGG_OPT_USE_NUMA;
	sc->flowid_shift = V_def_flowid_shift;

	/* Hash all layers by default */
	sc->sc_flags = MBUF_HASHFLAG_L2 | MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4;

	lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);

	CK_SLIST_INIT(&sc->sc_ports);

	switch (if_type) {
	case IFT_ETHER:
		/* Initialise pseudo media types */
		ifmedia_init(&sc->sc_media, 0, lagg_media_change,
		    lagg_media_status);
		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

		if_initname(ifp, laggname, ifd->unit);
		ifp->if_transmit = lagg_transmit_ethernet;
		break;
	case IFT_INFINIBAND:
		if_initname(ifp, laggname, ifd->unit);
		ifp->if_transmit = lagg_transmit_infiniband;
		break;
	default:
		break;
	}
	ifp->if_softc = sc;
	ifp->if_qflush = lagg_qflush;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_get_counter = lagg_get_counter;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#if defined(KERN_TLS) || defined(RATELIMIT)
	ifp->if_snd_tag_alloc = lagg_snd_tag_alloc;
	ifp->if_ratelimit_query = lagg_ratelimit_query;
#endif
	ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS;

	/*
	 * Attach as an ordinary ethernet device; children will be attached
	 * as special devices of type IFT_IEEE8023ADLAG or IFT_INFINIBANDLAG.
	 */
	switch (if_type) {
	case IFT_ETHER:
		ether_ifattach(ifp, eaddr);
		break;
	case IFT_INFINIBAND:
		infiniband_ifattach(ifp, eaddr, sc->sc_bcast_addr);
		break;
	default:
		break;
	}

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);

	/* Insert into the global list of laggs */
	LAGG_LIST_LOCK();
	SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries);
	LAGG_LIST_UNLOCK();
	LAGG_XUNLOCK(sc);
	*ifpp = ifp;

	return (0);
}

static int
lagg_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	LAGG_XLOCK(sc);
	sc->sc_destroying = 1;
	lagg_stop(sc);
	ifp->if_flags &= ~IFF_UP;

	EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	/* Shutdown and remove lagg ports */
	while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL)
		lagg_port_destroy(lp, 1);

	/* Unhook the aggregation protocol */
	lagg_proto_detach(sc);
	LAGG_XUNLOCK(sc);

	switch (ifp->if_type) {
	case IFT_ETHER:
		ether_ifdetach(ifp);
		ifmedia_removeall(&sc->sc_media);
		break;
	case IFT_INFINIBAND:
		infiniband_ifdetach(ifp);
		break;
	default:
		break;
	}
	if_free(ifp);

	LAGG_LIST_LOCK();
	SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries);
	LAGG_LIST_UNLOCK();

	mtx_destroy(&sc->sc_mtx);
	LAGG_SX_DESTROY(sc);
	free(sc, M_LAGG);

	return (0);
}

static void
lagg_capabilities(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int cap, cap2, ena, ena2, pena, pena2;
	uint64_t hwa;
	struct ifnet_hw_tsomax hw_tsomax;

	LAGG_XLOCK_ASSERT(sc);

	/* Get common enabled capabilities for the lagg ports */
	ena = ena2 = ~0;
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		ena &= lp->lp_ifp->if_capenable;
		ena2 &= lp->lp_ifp->if_capenable2;
	}
	if (CK_SLIST_FIRST(&sc->sc_ports) == NULL)
		ena = ena2 = 0;

	/*
	 * Apply common enabled capabilities back to the lagg ports.
	 * May require several iterations if they are dependent.
	 */
	do {
		pena = ena;
		pena2 = ena2;
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setcaps(lp, ena, ena2);
			ena &= lp->lp_ifp->if_capenable;
			ena2 &= lp->lp_ifp->if_capenable2;
		}
	} while (pena != ena || pena2 != ena2);
	ena2 &= ~IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD);

	/* Get other capabilities from the lagg ports */
	cap = cap2 = ~0;
	hwa = ~(uint64_t)0;
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		cap &= lp->lp_ifp->if_capabilities;
		cap2 &= lp->lp_ifp->if_capabilities2;
		hwa &= lp->lp_ifp->if_hwassist;
		if_hw_tsomax_common(lp->lp_ifp, &hw_tsomax);
	}
	cap2 &= ~IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD);
	if (CK_SLIST_FIRST(&sc->sc_ports) == NULL)
		cap = cap2 = hwa = 0;

	if (sc->sc_ifp->if_capabilities != cap ||
	    sc->sc_ifp->if_capenable != ena ||
	    sc->sc_ifp->if_capenable2 != ena2 ||
	    sc->sc_ifp->if_hwassist != hwa ||
	    if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax) != 0) {
		sc->sc_ifp->if_capabilities = cap;
		sc->sc_ifp->if_capabilities2 = cap2;
		sc->sc_ifp->if_capenable = ena;
		sc->sc_ifp->if_capenable2 = ena2;
		sc->sc_ifp->if_hwassist = hwa;
		getmicrotime(&sc->sc_ifp->if_lastchange);

		if (sc->sc_ifflags & IFF_DEBUG)
			if_printf(sc->sc_ifp,
			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
	}
}

static int
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp, *tlp;
	struct ifreq ifr;
	int error, i, oldmtu;
	int if_type;
	uint64_t *pval;

	LAGG_XLOCK_ASSERT(sc);

	if (sc->sc_ifp == ifp) {
		if_printf(sc->sc_ifp,
		    "cannot add a lagg to itself as a port\n");
		return (EINVAL);
	}

	if (sc->sc_destroying == 1)
		return (ENXIO);

	/* Limit the maximal number of lagg ports */
	if (sc->sc_count >= LAGG_MAX_PORTS)
		return (ENOSPC);

	/* Check if port has already been associated to a lagg */
	if (ifp->if_lagg != NULL) {
		/* Port is already in the current lagg? */
		lp = (struct lagg_port *)ifp->if_lagg;
		if (lp->lp_softc == sc)
			return (EEXIST);
		return (EBUSY);
	}

	switch (sc->sc_ifp->if_type) {
	case IFT_ETHER:
		/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
		if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN)
			return (EPROTONOSUPPORT);
		if_type = IFT_IEEE8023ADLAG;
		break;
	case IFT_INFINIBAND:
		/* XXX Disallow non-infiniband interfaces */
		if (ifp->if_type != IFT_INFINIBAND)
			return (EPROTONOSUPPORT);
		if_type = IFT_INFINIBANDLAG;
		break;
	default:
		break;
	}

	/* Allow the first Ethernet member to define the MTU */
	oldmtu = -1;
	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
		sc->sc_ifp->if_mtu = ifp->if_mtu;
	} else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
		if (ifp->if_ioctl == NULL) {
			if_printf(sc->sc_ifp, "cannot change MTU for %s\n",
			    ifp->if_xname);
			return (EINVAL);
		}
		oldmtu = ifp->if_mtu;
		strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name));
		ifr.ifr_mtu = sc->sc_ifp->if_mtu;
		error = (*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
		if (error != 0) {
			if_printf(sc->sc_ifp, "invalid MTU for %s\n",
			    ifp->if_xname);
			return (error);
		}
		ifr.ifr_mtu = oldmtu;
	}

	lp = malloc(sizeof(struct lagg_port), M_LAGG, M_WAITOK | M_ZERO);
	lp->lp_softc = sc;

	/* Check if port is a stacked lagg */
	LAGG_LIST_LOCK();
	SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) {
		if (ifp == sc_ptr->sc_ifp) {
			LAGG_LIST_UNLOCK();
			free(lp, M_LAGG);
			if (oldmtu != -1)
				(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
				    (caddr_t)&ifr);
			return (EINVAL);
			/* XXX disable stacking for the moment, it's untested */
#ifdef LAGG_PORT_STACKING
			lp->lp_flags |= LAGG_PORT_STACK;
			if (lagg_port_checkstacking(sc_ptr) >=
			    LAGG_MAX_STACKING) {
				LAGG_LIST_UNLOCK();
				free(lp, M_LAGG);
				if (oldmtu != -1)
					(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
					    (caddr_t)&ifr);
				return (E2BIG);
			}
#endif
		}
	}
	LAGG_LIST_UNLOCK();

	if_ref(ifp);
	lp->lp_ifp = ifp;

	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ifp->if_addrlen);
	lp->lp_ifcapenable = ifp->if_capenable;
	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
		bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
		lagg_proto_lladdr(sc);
		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
	} else {
		if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
	}
	lagg_setflags(lp, 1);

	if (CK_SLIST_EMPTY(&sc->sc_ports))
		sc->sc_primary = lp;

	/* Change the interface type */
	lp->lp_iftype = ifp->if_type;
	ifp->if_type = if_type;
	ifp->if_lagg = lp;
	lp->lp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = lagg_port_ioctl;
	lp->lp_output = ifp->if_output;
	ifp->if_output = lagg_port_output;

	/* Read port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++)
		*pval = ifp->if_get_counter(ifp, i);

	/*
	 * Insert into the list of ports.
	 * Keep ports sorted by if_index: it is handy when configuration is
	 * predictable, and the same `ifconfig laggN create ...` command
	 * will lead to the same result each time.
	 */
	CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
		if (tlp->lp_ifp->if_index < ifp->if_index && (
		    CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
		    ((struct lagg_port*)CK_SLIST_NEXT(tlp, lp_entries))->lp_ifp->if_index >
		    ifp->if_index))
			break;
	}
	if (tlp != NULL)
		CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
	else
		CK_SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
	sc->sc_count++;

	lagg_setmulti(lp);

	if ((error = lagg_proto_addport(sc, lp)) != 0) {
		/* Remove the port, without calling pr_delport. */
		lagg_port_destroy(lp, 0);
		if (oldmtu != -1)
			(*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
		return (error);
	}

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}

#ifdef LAGG_PORT_STACKING
static int
lagg_port_checkstacking(struct lagg_softc *sc)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int m = 0;

	LAGG_SXLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_flags & LAGG_PORT_STACK) {
			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
			m = MAX(m, lagg_port_checkstacking(sc_ptr));
		}
	}

	return (m + 1);
}
#endif

static void
lagg_port_destroy_cb(epoch_context_t ec)
{
	struct lagg_port *lp;
	struct ifnet *ifp;

	lp = __containerof(ec, struct lagg_port, lp_epoch_ctx);
	ifp = lp->lp_ifp;

	if_rele(ifp);
	free(lp, M_LAGG);
}

static int
lagg_port_destroy(struct lagg_port *lp, int rundelport)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct lagg_port *lp_ptr, *lp0;
	struct ifnet *ifp = lp->lp_ifp;
	uint64_t *pval, vdiff;
	int i;

	LAGG_XLOCK_ASSERT(sc);

	if (rundelport)
		lagg_proto_delport(sc, lp);

	if (lp->lp_detaching == 0)
		lagg_clrmulti(lp);

	/* Restore interface */
	ifp->if_type = lp->lp_iftype;
	ifp->if_ioctl = lp->lp_ioctl;
	ifp->if_output = lp->lp_output;
	ifp->if_lagg = NULL;

	/* Update detached port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++) {
		vdiff = ifp->if_get_counter(ifp, i) - *pval;
		sc->detached_counters.val[i] += vdiff;
	}

	/* Finally, remove the port from the lagg */
	CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
	sc->sc_count--;

	/* Update the primary interface */
	if (lp == sc->sc_primary) {
		uint8_t lladdr[LAGG_ADDR_LEN];

		if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL)
			bzero(&lladdr, LAGG_ADDR_LEN);
		else
			bcopy(lp0->lp_lladdr, lladdr, LAGG_ADDR_LEN);
		sc->sc_primary = lp0;
		if (sc->sc_destroying == 0) {
			bcopy(lladdr, IF_LLADDR(sc->sc_ifp), sc->sc_ifp->if_addrlen);
			lagg_proto_lladdr(sc);
			EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);

			/*
			 * Update the lladdr for each port (the new primary
			 * needs an update as well, to switch from the old
			 * lladdr to its 'real' one).  We can skip this if the
			 * lagg is being destroyed.
			 */
			CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
				if_setlladdr(lp_ptr->lp_ifp, lladdr,
				    lp_ptr->lp_ifp->if_addrlen);
		}
	}

	if (lp->lp_ifflags)
		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);

	if (lp->lp_detaching == 0) {
		lagg_setflags(lp, 0);
		lagg_setcaps(lp, lp->lp_ifcapenable, lp->lp_ifcapenable2);
		if_setlladdr(ifp, lp->lp_lladdr, ifp->if_addrlen);
	}

	/*
	 * Free the port and release its ifnet reference after a grace period
	 * has elapsed.
	 */
	NET_EPOCH_CALL(lagg_port_destroy_cb, &lp->lp_epoch_ctx);
	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}

static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_reqport *rp = (struct lagg_reqport *)data;
	struct lagg_softc *sc;
	struct lagg_port *lp = NULL;
	int error = 0;

	/* Should be checked by the caller */
	switch (ifp->if_type) {
	case IFT_IEEE8023ADLAG:
	case IFT_INFINIBANDLAG:
		if ((lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
			goto fallback;
		break;
	default:
		goto fallback;
	}

	switch (cmd) {
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {
			error = EINVAL;
			break;
		}

		LAGG_SLOCK(sc);
		if (__predict_true((lp = ifp->if_lagg) != NULL &&
		    lp->lp_softc == sc))
			lagg_port2req(lp, rp);
		else
			error = ENOENT;	/* XXXGL: can happen? */
		LAGG_SUNLOCK(sc);
		break;

	case SIOCSIFCAP:
	case SIOCSIFCAPNV:
		if (lp->lp_ioctl == NULL) {
			error = EINVAL;
			break;
		}
		error = (*lp->lp_ioctl)(ifp, cmd, data);
		if (error)
			break;

		/* Update lagg interface capabilities */
		LAGG_XLOCK(sc);
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(sc->sc_ifp);
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed once joined */
		error = EINVAL;
		break;

	default:
		goto fallback;
	}

	return (error);

fallback:
	if (lp != NULL && lp->lp_ioctl != NULL)
		return ((*lp->lp_ioctl)(ifp, cmd, data));

	return (EINVAL);
}

/*
 * Requests counter @cnt data.
 *
 * The counter value is calculated the following way:
 * 1) for each port, sum the difference between the current and "initial"
 *    measurements.
 * 2) add the lagg logical interface counters.
 * 3) add data from the detached_counters array.
 *
 * We also do the following things on port attach/detach:
 * 1) On port attach we store all the counters it has into the port_counters
 *    array.
 * 2) On port detach we add the difference between the "initial" and current
 *    counters data to the detached_counters array.
 */
static uint64_t
lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct epoch_tracker et;
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct ifnet *lpifp;
	uint64_t newval, oldval, vsum;

	/* Revise this when we've got non-generic counters. */
	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));

	sc = (struct lagg_softc *)ifp->if_softc;

Chernikov vsum = 0; 111187bf9b9cSGleb Smirnoff NET_EPOCH_ENTER(et); 111299031b8fSStephen Hurd CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 11137d6cc45cSAlexander V. Chernikov /* Saved attached value */ 1114112f50ffSGleb Smirnoff oldval = lp->port_counters.val[cnt]; 11157d6cc45cSAlexander V. Chernikov /* current value */ 11167d6cc45cSAlexander V. Chernikov lpifp = lp->lp_ifp; 11177d6cc45cSAlexander V. Chernikov newval = lpifp->if_get_counter(lpifp, cnt); 11187d6cc45cSAlexander V. Chernikov /* Calculate diff and save new */ 11197d6cc45cSAlexander V. Chernikov vsum += newval - oldval; 11207d6cc45cSAlexander V. Chernikov } 112187bf9b9cSGleb Smirnoff NET_EPOCH_EXIT(et); 11227d6cc45cSAlexander V. Chernikov 11237d6cc45cSAlexander V. Chernikov /* 11247d6cc45cSAlexander V. Chernikov * Add counter data which might be added by upper 11257d6cc45cSAlexander V. Chernikov * layer protocols operating on logical interface. 11267d6cc45cSAlexander V. Chernikov */ 11277d6cc45cSAlexander V. Chernikov vsum += if_get_counter_default(ifp, cnt); 11287d6cc45cSAlexander V. Chernikov 11297d6cc45cSAlexander V. Chernikov /* 11307d6cc45cSAlexander V. Chernikov * Add counter data from detached ports counters 11317d6cc45cSAlexander V. Chernikov */ 1132112f50ffSGleb Smirnoff vsum += sc->detached_counters.val[cnt]; 11337d6cc45cSAlexander V. Chernikov 11347d6cc45cSAlexander V. Chernikov return (vsum); 11357d6cc45cSAlexander V. Chernikov } 11367d6cc45cSAlexander V. Chernikov 11377d6cc45cSAlexander V. Chernikov /* 11386107adc3SEd Maste * For direct output to child ports. 11396107adc3SEd Maste */ 114018242d3bSAndrew Thompson static int 114118242d3bSAndrew Thompson lagg_port_output(struct ifnet *ifp, struct mbuf *m, 114247e8d432SGleb Smirnoff const struct sockaddr *dst, struct route *ro) 114318242d3bSAndrew Thompson { 114418242d3bSAndrew Thompson struct lagg_port *lp = ifp->if_lagg; 114518242d3bSAndrew Thompson 114618242d3bSAndrew Thompson switch (dst->sa_family) { 114718242d3bSAndrew Thompson case pseudo_AF_HDRCMPLT: 114818242d3bSAndrew Thompson case AF_UNSPEC: 11492f4ffa9fSAndrey V. Elsukov if (lp != NULL) 1150279aa3d4SKip Macy return ((*lp->lp_output)(ifp, m, dst, ro)); 115118242d3bSAndrew Thompson } 115218242d3bSAndrew Thompson 115318242d3bSAndrew Thompson /* drop any other frames */ 115418242d3bSAndrew Thompson m_freem(m); 11551d9797f1SGleb Smirnoff return (ENETDOWN); 115618242d3bSAndrew Thompson } 115718242d3bSAndrew Thompson 115818242d3bSAndrew Thompson static void 115918242d3bSAndrew Thompson lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp) 116018242d3bSAndrew Thompson { 116118242d3bSAndrew Thompson struct lagg_port *lp; 116218242d3bSAndrew Thompson struct lagg_softc *sc; 116318242d3bSAndrew Thompson 116418242d3bSAndrew Thompson if ((lp = ifp->if_lagg) == NULL) 116518242d3bSAndrew Thompson return; 116661587a84SAndrew Thompson /* If the ifnet is just being renamed, don't do anything. 
*/ 116761587a84SAndrew Thompson if (ifp->if_flags & IFF_RENAMING) 116861587a84SAndrew Thompson return; 116918242d3bSAndrew Thompson 1170ec32b37eSAndrew Thompson sc = lp->lp_softc; 117118242d3bSAndrew Thompson 11722f86d4b0SAlexander Motin LAGG_XLOCK(sc); 11732f86d4b0SAlexander Motin lp->lp_detaching = 1; 117418242d3bSAndrew Thompson lagg_port_destroy(lp, 1); 11752f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 117641cf0d54SAlexander Motin VLAN_CAPABILITIES(sc->sc_ifp); 117718242d3bSAndrew Thompson } 117818242d3bSAndrew Thompson 117918242d3bSAndrew Thompson static void 118018242d3bSAndrew Thompson lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp) 118118242d3bSAndrew Thompson { 1182ec32b37eSAndrew Thompson struct lagg_softc *sc = lp->lp_softc; 118320745551SAndrew Thompson 118418242d3bSAndrew Thompson strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname)); 118518242d3bSAndrew Thompson strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname)); 118618242d3bSAndrew Thompson rp->rp_prio = lp->lp_prio; 118718242d3bSAndrew Thompson rp->rp_flags = lp->lp_flags; 118838738d73SGleb Smirnoff lagg_proto_portreq(sc, lp, &rp->rp_psc); 118918242d3bSAndrew Thompson 119018242d3bSAndrew Thompson /* Add protocol specific flags */ 119118242d3bSAndrew Thompson switch (sc->sc_proto) { 119218242d3bSAndrew Thompson case LAGG_PROTO_FAILOVER: 119318242d3bSAndrew Thompson if (lp == sc->sc_primary) 1194139722d4SAndrew Thompson rp->rp_flags |= LAGG_PORT_MASTER; 119520745551SAndrew Thompson if (lp == lagg_link_active(sc, sc->sc_primary)) 119620745551SAndrew Thompson rp->rp_flags |= LAGG_PORT_ACTIVE; 119720745551SAndrew Thompson break; 119820745551SAndrew Thompson 119918242d3bSAndrew Thompson case LAGG_PROTO_ROUNDROBIN: 120018242d3bSAndrew Thompson case LAGG_PROTO_LOADBALANCE: 120199cdd961SMarcelo Araujo case LAGG_PROTO_BROADCAST: 120218242d3bSAndrew Thompson if (LAGG_PORTACTIVE(lp)) 120318242d3bSAndrew Thompson rp->rp_flags |= LAGG_PORT_ACTIVE; 120418242d3bSAndrew Thompson break; 120518242d3bSAndrew Thompson 120618242d3bSAndrew Thompson case LAGG_PROTO_LACP: 120718242d3bSAndrew Thompson /* LACP has a different definition of active */ 12083de18008SAndrew Thompson if (lacp_isactive(lp)) 120918242d3bSAndrew Thompson rp->rp_flags |= LAGG_PORT_ACTIVE; 12103de18008SAndrew Thompson if (lacp_iscollecting(lp)) 12113de18008SAndrew Thompson rp->rp_flags |= LAGG_PORT_COLLECTING; 12123de18008SAndrew Thompson if (lacp_isdistributing(lp)) 12133de18008SAndrew Thompson rp->rp_flags |= LAGG_PORT_DISTRIBUTING; 121418242d3bSAndrew Thompson break; 121518242d3bSAndrew Thompson } 121618242d3bSAndrew Thompson 121718242d3bSAndrew Thompson } 121818242d3bSAndrew Thompson 121918242d3bSAndrew Thompson static void 1220a92c4bb6SHans Petter Selasky lagg_watchdog_infiniband(void *arg) 1221a92c4bb6SHans Petter Selasky { 122287bf9b9cSGleb Smirnoff struct epoch_tracker et; 1223a92c4bb6SHans Petter Selasky struct lagg_softc *sc; 1224a92c4bb6SHans Petter Selasky struct lagg_port *lp; 1225a92c4bb6SHans Petter Selasky struct ifnet *ifp; 1226a92c4bb6SHans Petter Selasky struct ifnet *lp_ifp; 1227a92c4bb6SHans Petter Selasky 1228a92c4bb6SHans Petter Selasky sc = arg; 1229a92c4bb6SHans Petter Selasky 1230a92c4bb6SHans Petter Selasky /* 1231a92c4bb6SHans Petter Selasky * Because infiniband nodes have a fixed MAC address, which is 1232a92c4bb6SHans Petter Selasky * generated by the so-called GID, we need to regularly update 1233a92c4bb6SHans Petter Selasky * the link level address of the parent lagg<N> device when 1234a92c4bb6SHans Petter 
Selasky * the active port changes. Possibly we could piggy-back on 1235a92c4bb6SHans Petter Selasky * link up/down events as well, but using a timer also provides 1236a92c4bb6SHans Petter Selasky * a guarantee against too frequent events. This operation 1237a92c4bb6SHans Petter Selasky * does not have to be atomic. 1238a92c4bb6SHans Petter Selasky */ 123987bf9b9cSGleb Smirnoff NET_EPOCH_ENTER(et); 1240a92c4bb6SHans Petter Selasky lp = lagg_link_active(sc, sc->sc_primary); 1241a92c4bb6SHans Petter Selasky if (lp != NULL) { 1242a92c4bb6SHans Petter Selasky ifp = sc->sc_ifp; 1243a92c4bb6SHans Petter Selasky lp_ifp = lp->lp_ifp; 1244a92c4bb6SHans Petter Selasky 1245a92c4bb6SHans Petter Selasky if (ifp != NULL && lp_ifp != NULL && 124619ecb5e8SHans Petter Selasky (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen) != 0 || 124719ecb5e8SHans Petter Selasky memcmp(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen) != 0)) { 1248a92c4bb6SHans Petter Selasky memcpy(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen); 124919ecb5e8SHans Petter Selasky memcpy(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen); 125019ecb5e8SHans Petter Selasky 1251a92c4bb6SHans Petter Selasky CURVNET_SET(ifp->if_vnet); 1252a92c4bb6SHans Petter Selasky EVENTHANDLER_INVOKE(iflladdr_event, ifp); 1253a92c4bb6SHans Petter Selasky CURVNET_RESTORE(); 1254a92c4bb6SHans Petter Selasky } 1255a92c4bb6SHans Petter Selasky } 125687bf9b9cSGleb Smirnoff NET_EPOCH_EXIT(et); 1257a92c4bb6SHans Petter Selasky 1258a92c4bb6SHans Petter Selasky callout_reset(&sc->sc_watchdog, hz, &lagg_watchdog_infiniband, arg); 1259a92c4bb6SHans Petter Selasky } 1260a92c4bb6SHans Petter Selasky 1261a92c4bb6SHans Petter Selasky static void 126257068597SGleb Smirnoff lagg_if_updown(struct lagg_softc *sc, bool up) 126357068597SGleb Smirnoff { 126457068597SGleb Smirnoff struct ifreq ifr = {}; 126557068597SGleb Smirnoff struct lagg_port *lp; 126657068597SGleb Smirnoff 126757068597SGleb Smirnoff LAGG_XLOCK_ASSERT(sc); 126857068597SGleb Smirnoff 126957068597SGleb Smirnoff CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 127057068597SGleb Smirnoff if (up) 127157068597SGleb Smirnoff if_up(lp->lp_ifp); 127257068597SGleb Smirnoff else 127357068597SGleb Smirnoff if_down(lp->lp_ifp); 127457068597SGleb Smirnoff 127557068597SGleb Smirnoff if (lp->lp_ioctl != NULL) 127657068597SGleb Smirnoff lp->lp_ioctl(lp->lp_ifp, SIOCSIFFLAGS, (caddr_t)&ifr); 127757068597SGleb Smirnoff } 127857068597SGleb Smirnoff } 127957068597SGleb Smirnoff 128057068597SGleb Smirnoff static void 128118242d3bSAndrew Thompson lagg_init(void *xsc) 128218242d3bSAndrew Thompson { 128318242d3bSAndrew Thompson struct lagg_softc *sc = (struct lagg_softc *)xsc; 128418242d3bSAndrew Thompson struct ifnet *ifp = sc->sc_ifp; 1285bb3d23fdSAlexander V. Chernikov struct lagg_port *lp; 128618242d3bSAndrew Thompson 12872f86d4b0SAlexander Motin LAGG_XLOCK(sc); 12882f86d4b0SAlexander Motin if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 12892f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 129018242d3bSAndrew Thompson return; 12912f86d4b0SAlexander Motin } 129218242d3bSAndrew Thompson 129318242d3bSAndrew Thompson ifp->if_drv_flags |= IFF_DRV_RUNNING; 1294bb3d23fdSAlexander V. Chernikov 1295bb3d23fdSAlexander V. Chernikov /* 1296bb3d23fdSAlexander V. Chernikov * Update the port lladdrs if needed. 1297bb3d23fdSAlexander V. Chernikov * This might be an if_setlladdr() notification 1298bb3d23fdSAlexander V. Chernikov * that the lladdr has been changed. 1299bb3d23fdSAlexander V.
Chernikov */ 130099031b8fSStephen Hurd CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 13012f86d4b0SAlexander Motin if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp), 1302a92c4bb6SHans Petter Selasky ifp->if_addrlen) != 0) 1303a92c4bb6SHans Petter Selasky if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ifp->if_addrlen); 13042f86d4b0SAlexander Motin } 130518242d3bSAndrew Thompson 130657068597SGleb Smirnoff lagg_if_updown(sc, true); 130757068597SGleb Smirnoff 130838738d73SGleb Smirnoff lagg_proto_init(sc); 130918242d3bSAndrew Thompson 1310a92c4bb6SHans Petter Selasky if (ifp->if_type == IFT_INFINIBAND) { 1311a92c4bb6SHans Petter Selasky mtx_lock(&sc->sc_mtx); 1312a92c4bb6SHans Petter Selasky lagg_watchdog_infiniband(sc); 1313a92c4bb6SHans Petter Selasky mtx_unlock(&sc->sc_mtx); 1314a92c4bb6SHans Petter Selasky } 1315a92c4bb6SHans Petter Selasky 13162f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 131718242d3bSAndrew Thompson } 131818242d3bSAndrew Thompson 131918242d3bSAndrew Thompson static void 132018242d3bSAndrew Thompson lagg_stop(struct lagg_softc *sc) 132118242d3bSAndrew Thompson { 132218242d3bSAndrew Thompson struct ifnet *ifp = sc->sc_ifp; 132318242d3bSAndrew Thompson 13242f86d4b0SAlexander Motin LAGG_XLOCK_ASSERT(sc); 132518242d3bSAndrew Thompson 132618242d3bSAndrew Thompson if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 132718242d3bSAndrew Thompson return; 132818242d3bSAndrew Thompson 132918242d3bSAndrew Thompson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 133018242d3bSAndrew Thompson 133138738d73SGleb Smirnoff lagg_proto_stop(sc); 1332a92c4bb6SHans Petter Selasky 1333a92c4bb6SHans Petter Selasky mtx_lock(&sc->sc_mtx); 1334a92c4bb6SHans Petter Selasky callout_stop(&sc->sc_watchdog); 1335a92c4bb6SHans Petter Selasky mtx_unlock(&sc->sc_mtx); 1336a92c4bb6SHans Petter Selasky 133757068597SGleb Smirnoff lagg_if_updown(sc, false); 133857068597SGleb Smirnoff 1339a92c4bb6SHans Petter Selasky callout_drain(&sc->sc_watchdog); 134018242d3bSAndrew Thompson } 134118242d3bSAndrew Thompson 134218242d3bSAndrew Thompson static int 134318242d3bSAndrew Thompson lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 134418242d3bSAndrew Thompson { 134518242d3bSAndrew Thompson struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; 134618242d3bSAndrew Thompson struct lagg_reqall *ra = (struct lagg_reqall *)data; 13479732189cSHiroki Sato struct lagg_reqopts *ro = (struct lagg_reqopts *)data; 134818242d3bSAndrew Thompson struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf; 134986f67641SAndrew Thompson struct lagg_reqflags *rf = (struct lagg_reqflags *)data; 135018242d3bSAndrew Thompson struct ifreq *ifr = (struct ifreq *)data; 135118242d3bSAndrew Thompson struct lagg_port *lp; 135218242d3bSAndrew Thompson struct ifnet *tpif; 135318242d3bSAndrew Thompson struct thread *td = curthread; 135482056f42SAndrew Thompson char *buf, *outbuf; 135584becee1SAlexander Motin int count, buflen, len, error = 0, oldmtu; 135618242d3bSAndrew Thompson 135718242d3bSAndrew Thompson bzero(&rpbuf, sizeof(rpbuf)); 135818242d3bSAndrew Thompson 13593869d414SBryan Drewery /* XXX: This can race with lagg_clone_destroy. 
*/ 13603869d414SBryan Drewery 136118242d3bSAndrew Thompson switch (cmd) { 136218242d3bSAndrew Thompson case SIOCGLAGG: 136399031b8fSStephen Hurd LAGG_XLOCK(sc); 13642f86d4b0SAlexander Motin buflen = sc->sc_count * sizeof(struct lagg_reqport); 136582056f42SAndrew Thompson outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 136618242d3bSAndrew Thompson ra->ra_proto = sc->sc_proto; 136738738d73SGleb Smirnoff lagg_proto_request(sc, &ra->ra_psc); 136882056f42SAndrew Thompson count = 0; 136982056f42SAndrew Thompson buf = outbuf; 137082056f42SAndrew Thompson len = min(ra->ra_size, buflen); 137199031b8fSStephen Hurd CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 137282056f42SAndrew Thompson if (len < sizeof(rpbuf)) 137318242d3bSAndrew Thompson break; 137482056f42SAndrew Thompson 137582056f42SAndrew Thompson lagg_port2req(lp, &rpbuf); 137682056f42SAndrew Thompson memcpy(buf, &rpbuf, sizeof(rpbuf)); 137782056f42SAndrew Thompson count++; 137882056f42SAndrew Thompson buf += sizeof(rpbuf); 137982056f42SAndrew Thompson len -= sizeof(rpbuf); 138018242d3bSAndrew Thompson } 138199031b8fSStephen Hurd LAGG_XUNLOCK(sc); 138282056f42SAndrew Thompson ra->ra_ports = count; 138382056f42SAndrew Thompson ra->ra_size = count * sizeof(rpbuf); 138482056f42SAndrew Thompson error = copyout(outbuf, ra->ra_port, ra->ra_size); 138582056f42SAndrew Thompson free(outbuf, M_TEMP); 138618242d3bSAndrew Thompson break; 138718242d3bSAndrew Thompson case SIOCSLAGG: 138818242d3bSAndrew Thompson error = priv_check(td, PRIV_NET_LAGG); 138918242d3bSAndrew Thompson if (error) 139018242d3bSAndrew Thompson break; 1391d931334bSMarcelo Araujo if (ra->ra_proto >= LAGG_PROTO_MAX) { 13929732189cSHiroki Sato error = EPROTONOSUPPORT; 13939732189cSHiroki Sato break; 13949732189cSHiroki Sato } 1395a92c4bb6SHans Petter Selasky /* Infiniband only supports the failover protocol. 
*/ 1396a92c4bb6SHans Petter Selasky if (ra->ra_proto != LAGG_PROTO_FAILOVER && 1397a92c4bb6SHans Petter Selasky ifp->if_type == IFT_INFINIBAND) { 1398a92c4bb6SHans Petter Selasky error = EPROTONOSUPPORT; 1399a92c4bb6SHans Petter Selasky break; 1400a92c4bb6SHans Petter Selasky } 14012f86d4b0SAlexander Motin LAGG_XLOCK(sc); 14029732189cSHiroki Sato lagg_proto_detach(sc); 14039732189cSHiroki Sato lagg_proto_attach(sc, ra->ra_proto); 14042f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 14059732189cSHiroki Sato break; 14069732189cSHiroki Sato case SIOCGLAGGOPTS: 140799031b8fSStephen Hurd LAGG_XLOCK(sc); 14089732189cSHiroki Sato ro->ro_opts = sc->sc_opts; 14099732189cSHiroki Sato if (sc->sc_proto == LAGG_PROTO_LACP) { 14109732189cSHiroki Sato struct lacp_softc *lsc; 14119732189cSHiroki Sato 14129732189cSHiroki Sato lsc = (struct lacp_softc *)sc->sc_psc; 14139732189cSHiroki Sato if (lsc->lsc_debug.lsc_tx_test != 0) 14149732189cSHiroki Sato ro->ro_opts |= LAGG_OPT_LACP_TXTEST; 14159732189cSHiroki Sato if (lsc->lsc_debug.lsc_rx_test != 0) 14169732189cSHiroki Sato ro->ro_opts |= LAGG_OPT_LACP_RXTEST; 14179732189cSHiroki Sato if (lsc->lsc_strict_mode != 0) 14189732189cSHiroki Sato ro->ro_opts |= LAGG_OPT_LACP_STRICT; 14190e02b43aSHiren Panchasara if (lsc->lsc_fast_timeout != 0) 14202a73c8f5SRavi Pokala ro->ro_opts |= LAGG_OPT_LACP_FAST_TIMO; 14219732189cSHiroki Sato 14229732189cSHiroki Sato ro->ro_active = sc->sc_active; 14239732189cSHiroki Sato } else { 14249732189cSHiroki Sato ro->ro_active = 0; 142599031b8fSStephen Hurd CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) 14269732189cSHiroki Sato ro->ro_active += LAGG_PORTACTIVE(lp); 14279732189cSHiroki Sato } 1428c104c299SMark Johnston ro->ro_bkt = sc->sc_stride; 14299732189cSHiroki Sato ro->ro_flapping = sc->sc_flapping; 14309732189cSHiroki Sato ro->ro_flowid_shift = sc->flowid_shift; 143199031b8fSStephen Hurd LAGG_XUNLOCK(sc); 14329732189cSHiroki Sato break; 14339732189cSHiroki Sato case SIOCSLAGGOPTS: 14349732189cSHiroki Sato error = priv_check(td, PRIV_NET_LAGG); 14359732189cSHiroki Sato if (error) 14369732189cSHiroki Sato break; 1437c104c299SMark Johnston 1438c104c299SMark Johnston /* 1439c104c299SMark Johnston * The stride option was added without defining a corresponding 1440c23df8eaSMark Johnston * LAGG_OPT flag, so handle a non-zero value before checking 1441c23df8eaSMark Johnston * anything else to preserve compatibility. 1442c104c299SMark Johnston */ 1443c104c299SMark Johnston LAGG_XLOCK(sc); 1444c23df8eaSMark Johnston if (ro->ro_opts == 0 && ro->ro_bkt != 0) { 1445c104c299SMark Johnston if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN) { 1446c104c299SMark Johnston LAGG_XUNLOCK(sc); 1447c104c299SMark Johnston error = EINVAL; 14489732189cSHiroki Sato break; 1449c104c299SMark Johnston } 1450c104c299SMark Johnston sc->sc_stride = ro->ro_bkt; 1451c104c299SMark Johnston } 1452c104c299SMark Johnston if (ro->ro_opts == 0) { 1453c104c299SMark Johnston LAGG_XUNLOCK(sc); 1454c104c299SMark Johnston break; 1455c104c299SMark Johnston } 1456c104c299SMark Johnston 1457939a050aSHiroki Sato /* 1458939a050aSHiroki Sato * Set options. LACP options are stored in sc->sc_psc, 1459939a050aSHiroki Sato * not in sc_opts. 
1460939a050aSHiroki Sato */ 1461939a050aSHiroki Sato int valid, lacp; 1462939a050aSHiroki Sato 14639732189cSHiroki Sato switch (ro->ro_opts) { 1464939a050aSHiroki Sato case LAGG_OPT_USE_FLOWID: 1465939a050aSHiroki Sato case -LAGG_OPT_USE_FLOWID: 146635961dceSAndrew Gallatin case LAGG_OPT_USE_NUMA: 146735961dceSAndrew Gallatin case -LAGG_OPT_USE_NUMA: 1468939a050aSHiroki Sato case LAGG_OPT_FLOWIDSHIFT: 1469c23df8eaSMark Johnston case LAGG_OPT_RR_LIMIT: 1470939a050aSHiroki Sato valid = 1; 1471939a050aSHiroki Sato lacp = 0; 1472939a050aSHiroki Sato break; 1473939a050aSHiroki Sato case LAGG_OPT_LACP_TXTEST: 1474939a050aSHiroki Sato case -LAGG_OPT_LACP_TXTEST: 1475939a050aSHiroki Sato case LAGG_OPT_LACP_RXTEST: 1476939a050aSHiroki Sato case -LAGG_OPT_LACP_RXTEST: 1477939a050aSHiroki Sato case LAGG_OPT_LACP_STRICT: 1478939a050aSHiroki Sato case -LAGG_OPT_LACP_STRICT: 14792a73c8f5SRavi Pokala case LAGG_OPT_LACP_FAST_TIMO: 14802a73c8f5SRavi Pokala case -LAGG_OPT_LACP_FAST_TIMO: 1481939a050aSHiroki Sato valid = lacp = 1; 1482939a050aSHiroki Sato break; 1483939a050aSHiroki Sato default: 1484939a050aSHiroki Sato valid = lacp = 0; 1485939a050aSHiroki Sato break; 1486939a050aSHiroki Sato } 1487939a050aSHiroki Sato 1488939a050aSHiroki Sato if (valid == 0 || 1489939a050aSHiroki Sato (lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) { 1490939a050aSHiroki Sato /* Invalid combination of options specified. */ 1491939a050aSHiroki Sato error = EINVAL; 14922f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 14939732189cSHiroki Sato break; /* Return from SIOCSLAGGOPTS. */ 1494939a050aSHiroki Sato } 1495c23df8eaSMark Johnston 1496939a050aSHiroki Sato /* 1497939a050aSHiroki Sato * Store new options into sc->sc_opts except for 1498c23df8eaSMark Johnston * FLOWIDSHIFT, RR and LACP options. 
1499939a050aSHiroki Sato */ 1500939a050aSHiroki Sato if (lacp == 0) { 15019732189cSHiroki Sato if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT) 15029732189cSHiroki Sato sc->flowid_shift = ro->ro_flowid_shift; 1503c23df8eaSMark Johnston else if (ro->ro_opts == LAGG_OPT_RR_LIMIT) { 1504c23df8eaSMark Johnston if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN || 1505c23df8eaSMark Johnston ro->ro_bkt == 0) { 1506c23df8eaSMark Johnston error = EINVAL; 1507c23df8eaSMark Johnston LAGG_XUNLOCK(sc); 1508c23df8eaSMark Johnston break; 1509c23df8eaSMark Johnston } 1510c23df8eaSMark Johnston sc->sc_stride = ro->ro_bkt; 1511c23df8eaSMark Johnston } else if (ro->ro_opts > 0) 15129732189cSHiroki Sato sc->sc_opts |= ro->ro_opts; 1513939a050aSHiroki Sato else 15149732189cSHiroki Sato sc->sc_opts &= ~ro->ro_opts; 1515939a050aSHiroki Sato } else { 1516939a050aSHiroki Sato struct lacp_softc *lsc; 15170e02b43aSHiren Panchasara struct lacp_port *lp; 1518939a050aSHiroki Sato 1519939a050aSHiroki Sato lsc = (struct lacp_softc *)sc->sc_psc; 1520939a050aSHiroki Sato 15219732189cSHiroki Sato switch (ro->ro_opts) { 1522939a050aSHiroki Sato case LAGG_OPT_LACP_TXTEST: 1523939a050aSHiroki Sato lsc->lsc_debug.lsc_tx_test = 1; 1524939a050aSHiroki Sato break; 1525939a050aSHiroki Sato case -LAGG_OPT_LACP_TXTEST: 1526939a050aSHiroki Sato lsc->lsc_debug.lsc_tx_test = 0; 1527939a050aSHiroki Sato break; 1528939a050aSHiroki Sato case LAGG_OPT_LACP_RXTEST: 1529939a050aSHiroki Sato lsc->lsc_debug.lsc_rx_test = 1; 1530939a050aSHiroki Sato break; 1531939a050aSHiroki Sato case -LAGG_OPT_LACP_RXTEST: 1532939a050aSHiroki Sato lsc->lsc_debug.lsc_rx_test = 0; 1533939a050aSHiroki Sato break; 1534939a050aSHiroki Sato case LAGG_OPT_LACP_STRICT: 1535939a050aSHiroki Sato lsc->lsc_strict_mode = 1; 1536939a050aSHiroki Sato break; 1537939a050aSHiroki Sato case -LAGG_OPT_LACP_STRICT: 1538939a050aSHiroki Sato lsc->lsc_strict_mode = 0; 1539939a050aSHiroki Sato break; 15402a73c8f5SRavi Pokala case LAGG_OPT_LACP_FAST_TIMO: 15410e02b43aSHiren Panchasara LACP_LOCK(lsc); 15420e02b43aSHiren Panchasara LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) 15430e02b43aSHiren Panchasara lp->lp_state |= LACP_STATE_TIMEOUT; 15440e02b43aSHiren Panchasara LACP_UNLOCK(lsc); 15450e02b43aSHiren Panchasara lsc->lsc_fast_timeout = 1; 15460e02b43aSHiren Panchasara break; 15472a73c8f5SRavi Pokala case -LAGG_OPT_LACP_FAST_TIMO: 15480e02b43aSHiren Panchasara LACP_LOCK(lsc); 15490e02b43aSHiren Panchasara LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) 15500e02b43aSHiren Panchasara lp->lp_state &= ~LACP_STATE_TIMEOUT; 15510e02b43aSHiren Panchasara LACP_UNLOCK(lsc); 15520e02b43aSHiren Panchasara lsc->lsc_fast_timeout = 0; 15530e02b43aSHiren Panchasara break; 1554939a050aSHiroki Sato } 1555939a050aSHiroki Sato } 15562f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 155718242d3bSAndrew Thompson break; 155886f67641SAndrew Thompson case SIOCGLAGGFLAGS: 1559b7ba031fSHans Petter Selasky rf->rf_flags = 0; 156099031b8fSStephen Hurd LAGG_XLOCK(sc); 1561b7ba031fSHans Petter Selasky if (sc->sc_flags & MBUF_HASHFLAG_L2) 1562b7ba031fSHans Petter Selasky rf->rf_flags |= LAGG_F_HASHL2; 1563b7ba031fSHans Petter Selasky if (sc->sc_flags & MBUF_HASHFLAG_L3) 1564b7ba031fSHans Petter Selasky rf->rf_flags |= LAGG_F_HASHL3; 1565b7ba031fSHans Petter Selasky if (sc->sc_flags & MBUF_HASHFLAG_L4) 1566b7ba031fSHans Petter Selasky rf->rf_flags |= LAGG_F_HASHL4; 156799031b8fSStephen Hurd LAGG_XUNLOCK(sc); 156886f67641SAndrew Thompson break; 156986f67641SAndrew Thompson case SIOCSLAGGHASH: 157086f67641SAndrew Thompson error = priv_check(td, 
PRIV_NET_LAGG); 157186f67641SAndrew Thompson if (error) 157286f67641SAndrew Thompson break; 157386f67641SAndrew Thompson if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) { 157486f67641SAndrew Thompson error = EINVAL; 157586f67641SAndrew Thompson break; 157686f67641SAndrew Thompson } 15772f86d4b0SAlexander Motin LAGG_XLOCK(sc); 1578b7ba031fSHans Petter Selasky sc->sc_flags = 0; 1579b7ba031fSHans Petter Selasky if (rf->rf_flags & LAGG_F_HASHL2) 1580b7ba031fSHans Petter Selasky sc->sc_flags |= MBUF_HASHFLAG_L2; 1581b7ba031fSHans Petter Selasky if (rf->rf_flags & LAGG_F_HASHL3) 1582b7ba031fSHans Petter Selasky sc->sc_flags |= MBUF_HASHFLAG_L3; 1583b7ba031fSHans Petter Selasky if (rf->rf_flags & LAGG_F_HASHL4) 1584b7ba031fSHans Petter Selasky sc->sc_flags |= MBUF_HASHFLAG_L4; 15852f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 158686f67641SAndrew Thompson break; 158718242d3bSAndrew Thompson case SIOCGLAGGPORT: 158818242d3bSAndrew Thompson if (rp->rp_portname[0] == '\0' || 1589b98b5ae8SAlexander Motin (tpif = ifunit_ref(rp->rp_portname)) == NULL) { 159018242d3bSAndrew Thompson error = EINVAL; 159118242d3bSAndrew Thompson break; 159218242d3bSAndrew Thompson } 159318242d3bSAndrew Thompson 159448698eadSGleb Smirnoff LAGG_SLOCK(sc); 1595fadbb6f8SGleb Smirnoff if (__predict_true((lp = tpif->if_lagg) != NULL && 1596fadbb6f8SGleb Smirnoff lp->lp_softc == sc)) 159718242d3bSAndrew Thompson lagg_port2req(lp, rp); 1598fadbb6f8SGleb Smirnoff else 1599fadbb6f8SGleb Smirnoff error = ENOENT; /* XXXGL: can happen? */ 160048698eadSGleb Smirnoff LAGG_SUNLOCK(sc); 1601b98b5ae8SAlexander Motin if_rele(tpif); 160218242d3bSAndrew Thompson break; 1603fadbb6f8SGleb Smirnoff 160418242d3bSAndrew Thompson case SIOCSLAGGPORT: 160518242d3bSAndrew Thompson error = priv_check(td, PRIV_NET_LAGG); 160618242d3bSAndrew Thompson if (error) 160718242d3bSAndrew Thompson break; 160818242d3bSAndrew Thompson if (rp->rp_portname[0] == '\0' || 1609b98b5ae8SAlexander Motin (tpif = ifunit_ref(rp->rp_portname)) == NULL) { 161018242d3bSAndrew Thompson error = EINVAL; 161118242d3bSAndrew Thompson break; 161218242d3bSAndrew Thompson } 1613bf6d3f0cSHiroki Sato #ifdef INET6 1614bf6d3f0cSHiroki Sato /* 1615bf6d3f0cSHiroki Sato * A laggport interface should not have an inet6 address 1616bf6d3f0cSHiroki Sato * because two interfaces with a valid link-local 1617bf6d3f0cSHiroki Sato * scope zone must not be merged in any form. This 1618bf6d3f0cSHiroki Sato * restriction is needed to prevent violation of 1619bf6d3f0cSHiroki Sato * the link-local scope zone. Attempting to add a laggport 1620bf6d3f0cSHiroki Sato * interface that has inet6 addresses triggers 1621bf6d3f0cSHiroki Sato * removal of all inet6 addresses on the member 1622bf6d3f0cSHiroki Sato * interface.
1623bf6d3f0cSHiroki Sato */ 1624bf6d3f0cSHiroki Sato if (in6ifa_llaonifp(tpif)) { 1625bf6d3f0cSHiroki Sato in6_ifdetach(tpif); 1626bf6d3f0cSHiroki Sato if_printf(sc->sc_ifp, 1627bf6d3f0cSHiroki Sato "IPv6 addresses on %s have been removed " 1628bf6d3f0cSHiroki Sato "before adding it as a member to prevent " 1629bf6d3f0cSHiroki Sato "IPv6 address scope violation.\n", 1630bf6d3f0cSHiroki Sato tpif->if_xname); 1631bf6d3f0cSHiroki Sato } 1632bf6d3f0cSHiroki Sato #endif 163384becee1SAlexander Motin oldmtu = ifp->if_mtu; 16342f86d4b0SAlexander Motin LAGG_XLOCK(sc); 163518242d3bSAndrew Thompson error = lagg_port_create(sc, tpif); 16362f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 1637b98b5ae8SAlexander Motin if_rele(tpif); 163884becee1SAlexander Motin 163984becee1SAlexander Motin /* 164084becee1SAlexander Motin * LAGG MTU may change during addition of the first port. 164184becee1SAlexander Motin * If it did, do network layer specific procedure. 164284becee1SAlexander Motin */ 164366bdbcd5SAlexander V. Chernikov if (ifp->if_mtu != oldmtu) 164466bdbcd5SAlexander V. Chernikov if_notifymtu(ifp); 164584becee1SAlexander Motin 164641cf0d54SAlexander Motin VLAN_CAPABILITIES(ifp); 164718242d3bSAndrew Thompson break; 164818242d3bSAndrew Thompson case SIOCSLAGGDELPORT: 164918242d3bSAndrew Thompson error = priv_check(td, PRIV_NET_LAGG); 165018242d3bSAndrew Thompson if (error) 165118242d3bSAndrew Thompson break; 165218242d3bSAndrew Thompson if (rp->rp_portname[0] == '\0' || 1653b98b5ae8SAlexander Motin (tpif = ifunit_ref(rp->rp_portname)) == NULL) { 165418242d3bSAndrew Thompson error = EINVAL; 165518242d3bSAndrew Thompson break; 165618242d3bSAndrew Thompson } 165718242d3bSAndrew Thompson 16582f86d4b0SAlexander Motin LAGG_XLOCK(sc); 165918242d3bSAndrew Thompson if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL || 1660ec32b37eSAndrew Thompson lp->lp_softc != sc) { 166118242d3bSAndrew Thompson error = ENOENT; 16622f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 1663b98b5ae8SAlexander Motin if_rele(tpif); 166418242d3bSAndrew Thompson break; 166518242d3bSAndrew Thompson } 166618242d3bSAndrew Thompson 166718242d3bSAndrew Thompson error = lagg_port_destroy(lp, 1); 16682f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 1669b98b5ae8SAlexander Motin if_rele(tpif); 167041cf0d54SAlexander Motin VLAN_CAPABILITIES(ifp); 167118242d3bSAndrew Thompson break; 167218242d3bSAndrew Thompson case SIOCSIFFLAGS: 167318242d3bSAndrew Thompson /* Set flags on ports too */ 16742f86d4b0SAlexander Motin LAGG_XLOCK(sc); 167599031b8fSStephen Hurd CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 167618242d3bSAndrew Thompson lagg_setflags(lp, 1); 167718242d3bSAndrew Thompson } 167818242d3bSAndrew Thompson 167918242d3bSAndrew Thompson if (!(ifp->if_flags & IFF_UP) && 168018242d3bSAndrew Thompson (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 168118242d3bSAndrew Thompson /* 168218242d3bSAndrew Thompson * If interface is marked down and it is running, 168318242d3bSAndrew Thompson * then stop and disable it. 168418242d3bSAndrew Thompson */ 168518242d3bSAndrew Thompson lagg_stop(sc); 16862f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 168718242d3bSAndrew Thompson } else if ((ifp->if_flags & IFF_UP) && 168818242d3bSAndrew Thompson !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 168918242d3bSAndrew Thompson /* 169018242d3bSAndrew Thompson * If interface is marked up and it is stopped, then 169118242d3bSAndrew Thompson * start it. 
169218242d3bSAndrew Thompson */ 16932f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 169418242d3bSAndrew Thompson (*ifp->if_init)(sc); 16952f86d4b0SAlexander Motin } else 16962f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 169718242d3bSAndrew Thompson break; 169818242d3bSAndrew Thompson case SIOCADDMULTI: 169918242d3bSAndrew Thompson case SIOCDELMULTI: 170099031b8fSStephen Hurd LAGG_XLOCK(sc); 170199031b8fSStephen Hurd CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 17022f86d4b0SAlexander Motin lagg_clrmulti(lp); 17032f86d4b0SAlexander Motin lagg_setmulti(lp); 17042f86d4b0SAlexander Motin } 170599031b8fSStephen Hurd LAGG_XUNLOCK(sc); 17062f86d4b0SAlexander Motin error = 0; 170718242d3bSAndrew Thompson break; 170818242d3bSAndrew Thompson case SIOCSIFMEDIA: 170918242d3bSAndrew Thompson case SIOCGIFMEDIA: 1710a92c4bb6SHans Petter Selasky if (ifp->if_type == IFT_INFINIBAND) 1711a92c4bb6SHans Petter Selasky error = EINVAL; 1712a92c4bb6SHans Petter Selasky else 171318242d3bSAndrew Thompson error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 171418242d3bSAndrew Thompson break; 1715de75afe6SAndrew Thompson 1716de75afe6SAndrew Thompson case SIOCSIFCAP: 1717713ceb99SAndrew Gallatin case SIOCSIFCAPNV: 17182f86d4b0SAlexander Motin LAGG_XLOCK(sc); 171999031b8fSStephen Hurd CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 17202f86d4b0SAlexander Motin if (lp->lp_ioctl != NULL) 17212f86d4b0SAlexander Motin (*lp->lp_ioctl)(lp->lp_ifp, cmd, data); 17222f86d4b0SAlexander Motin } 17232f86d4b0SAlexander Motin lagg_capabilities(sc); 17242f86d4b0SAlexander Motin LAGG_XUNLOCK(sc); 172541cf0d54SAlexander Motin VLAN_CAPABILITIES(ifp); 17262f86d4b0SAlexander Motin error = 0; 17272f86d4b0SAlexander Motin break; 17282f86d4b0SAlexander Motin 1729713ceb99SAndrew Gallatin case SIOCGIFCAPNV: 1730713ceb99SAndrew Gallatin error = 0; 1731713ceb99SAndrew Gallatin break; 1732713ceb99SAndrew Gallatin 173306152bf0SRavi Pokala case SIOCSIFMTU: 1734fbd8c330SMarcelo Araujo LAGG_XLOCK(sc); 1735fbd8c330SMarcelo Araujo CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 1736fbd8c330SMarcelo Araujo if (lp->lp_ioctl != NULL) 1737fbd8c330SMarcelo Araujo error = (*lp->lp_ioctl)(lp->lp_ifp, cmd, data); 1738fbd8c330SMarcelo Araujo else 1739729a4cffSRavi Pokala error = EINVAL; 1740fbd8c330SMarcelo Araujo if (error != 0) { 1741fbd8c330SMarcelo Araujo if_printf(ifp, 1742fbd8c330SMarcelo Araujo "failed to change MTU to %d on port %s, " 1743fbd8c330SMarcelo Araujo "reverting all ports to original MTU (%d)\n", 1744fbd8c330SMarcelo Araujo ifr->ifr_mtu, lp->lp_ifp->if_xname, ifp->if_mtu); 1745fbd8c330SMarcelo Araujo break; 1746fbd8c330SMarcelo Araujo } 1747fbd8c330SMarcelo Araujo } 1748fbd8c330SMarcelo Araujo if (error == 0) { 1749fbd8c330SMarcelo Araujo ifp->if_mtu = ifr->ifr_mtu; 1750fbd8c330SMarcelo Araujo } else { 1751fbd8c330SMarcelo Araujo /* set every port back to the original MTU */ 1752fbd8c330SMarcelo Araujo ifr->ifr_mtu = ifp->if_mtu; 1753fbd8c330SMarcelo Araujo CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 1754fbd8c330SMarcelo Araujo if (lp->lp_ioctl != NULL) 1755fbd8c330SMarcelo Araujo (*lp->lp_ioctl)(lp->lp_ifp, cmd, data); 1756fbd8c330SMarcelo Araujo } 1757fbd8c330SMarcelo Araujo } 1758acdfc096SWojciech Macek lagg_capabilities(sc); 1759fbd8c330SMarcelo Araujo LAGG_XUNLOCK(sc); 1760acdfc096SWojciech Macek VLAN_CAPABILITIES(ifp); 176106152bf0SRavi Pokala break; 1762de75afe6SAndrew Thompson 176318242d3bSAndrew Thompson default: 176418242d3bSAndrew Thompson error = ether_ioctl(ifp, cmd, data); 176518242d3bSAndrew Thompson break; 
176618242d3bSAndrew Thompson } 176718242d3bSAndrew Thompson return (error); 176818242d3bSAndrew Thompson } 176918242d3bSAndrew Thompson 1770b2e60773SJohn Baldwin #if defined(KERN_TLS) || defined(RATELIMIT) 1771c782ea8bSJohn Baldwin #ifdef RATELIMIT 1772c782ea8bSJohn Baldwin static const struct if_snd_tag_sw lagg_snd_tag_ul_sw = { 1773c782ea8bSJohn Baldwin .snd_tag_modify = lagg_snd_tag_modify, 1774c782ea8bSJohn Baldwin .snd_tag_query = lagg_snd_tag_query, 1775c782ea8bSJohn Baldwin .snd_tag_free = lagg_snd_tag_free, 1776c782ea8bSJohn Baldwin .next_snd_tag = lagg_next_snd_tag, 1777c782ea8bSJohn Baldwin .type = IF_SND_TAG_TYPE_UNLIMITED 1778c782ea8bSJohn Baldwin }; 1779c782ea8bSJohn Baldwin 1780c782ea8bSJohn Baldwin static const struct if_snd_tag_sw lagg_snd_tag_rl_sw = { 1781c782ea8bSJohn Baldwin .snd_tag_modify = lagg_snd_tag_modify, 1782c782ea8bSJohn Baldwin .snd_tag_query = lagg_snd_tag_query, 1783c782ea8bSJohn Baldwin .snd_tag_free = lagg_snd_tag_free, 1784c782ea8bSJohn Baldwin .next_snd_tag = lagg_next_snd_tag, 1785c782ea8bSJohn Baldwin .type = IF_SND_TAG_TYPE_RATE_LIMIT 1786c782ea8bSJohn Baldwin }; 1787c782ea8bSJohn Baldwin #endif 1788c782ea8bSJohn Baldwin 1789c782ea8bSJohn Baldwin #ifdef KERN_TLS 1790c782ea8bSJohn Baldwin static const struct if_snd_tag_sw lagg_snd_tag_tls_sw = { 1791c782ea8bSJohn Baldwin .snd_tag_modify = lagg_snd_tag_modify, 1792c782ea8bSJohn Baldwin .snd_tag_query = lagg_snd_tag_query, 1793c782ea8bSJohn Baldwin .snd_tag_free = lagg_snd_tag_free, 1794c782ea8bSJohn Baldwin .next_snd_tag = lagg_next_snd_tag, 1795c782ea8bSJohn Baldwin .type = IF_SND_TAG_TYPE_TLS 1796c782ea8bSJohn Baldwin }; 1797c782ea8bSJohn Baldwin 1798c782ea8bSJohn Baldwin #ifdef RATELIMIT 1799c782ea8bSJohn Baldwin static const struct if_snd_tag_sw lagg_snd_tag_tls_rl_sw = { 1800c782ea8bSJohn Baldwin .snd_tag_modify = lagg_snd_tag_modify, 1801c782ea8bSJohn Baldwin .snd_tag_query = lagg_snd_tag_query, 1802c782ea8bSJohn Baldwin .snd_tag_free = lagg_snd_tag_free, 1803c782ea8bSJohn Baldwin .next_snd_tag = lagg_next_snd_tag, 1804c782ea8bSJohn Baldwin .type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT 1805c782ea8bSJohn Baldwin }; 1806c782ea8bSJohn Baldwin #endif 1807c782ea8bSJohn Baldwin #endif 1808c782ea8bSJohn Baldwin 1809fb3bc596SJohn Baldwin static inline struct lagg_snd_tag * 1810fb3bc596SJohn Baldwin mst_to_lst(struct m_snd_tag *mst) 1811fb3bc596SJohn Baldwin { 1812fb3bc596SJohn Baldwin 1813fb3bc596SJohn Baldwin return (__containerof(mst, struct lagg_snd_tag, com)); 1814fb3bc596SJohn Baldwin } 1815fb3bc596SJohn Baldwin 1816fb3bc596SJohn Baldwin /* 1817fb3bc596SJohn Baldwin * Look up the port used by a specific flow. This only works for lagg 1818fb3bc596SJohn Baldwin * protocols with deterministic port mappings (e.g. not roundrobin). 1819fb3bc596SJohn Baldwin * In addition protocols which use a hash to map flows to ports must 1820fb3bc596SJohn Baldwin * be configured to use the mbuf flowid rather than hashing packet 1821fb3bc596SJohn Baldwin * contents. 
1822fb3bc596SJohn Baldwin */ 1823fb3bc596SJohn Baldwin static struct lagg_port * 182498085baeSAndrew Gallatin lookup_snd_tag_port(struct ifnet *ifp, uint32_t flowid, uint32_t flowtype, 182598085baeSAndrew Gallatin uint8_t numa_domain) 1826fb3bc596SJohn Baldwin { 1827fb3bc596SJohn Baldwin struct lagg_softc *sc; 1828fb3bc596SJohn Baldwin struct lagg_port *lp; 1829fb3bc596SJohn Baldwin struct lagg_lb *lb; 183098085baeSAndrew Gallatin uint32_t hash, p; 18318732245dSAndrew Gallatin int err; 1832fb3bc596SJohn Baldwin 1833fb3bc596SJohn Baldwin sc = ifp->if_softc; 1834fb3bc596SJohn Baldwin 1835fb3bc596SJohn Baldwin switch (sc->sc_proto) { 1836fb3bc596SJohn Baldwin case LAGG_PROTO_FAILOVER: 1837fb3bc596SJohn Baldwin return (lagg_link_active(sc, sc->sc_primary)); 1838fb3bc596SJohn Baldwin case LAGG_PROTO_LOADBALANCE: 1839fb3bc596SJohn Baldwin if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 || 1840fb3bc596SJohn Baldwin flowtype == M_HASHTYPE_NONE) 1841fb3bc596SJohn Baldwin return (NULL); 1842fb3bc596SJohn Baldwin p = flowid >> sc->flowid_shift; 1843fb3bc596SJohn Baldwin p %= sc->sc_count; 1844fb3bc596SJohn Baldwin lb = (struct lagg_lb *)sc->sc_psc; 1845fb3bc596SJohn Baldwin lp = lb->lb_ports[p]; 1846fb3bc596SJohn Baldwin return (lagg_link_active(sc, lp)); 1847fb3bc596SJohn Baldwin case LAGG_PROTO_LACP: 1848fb3bc596SJohn Baldwin if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 || 1849fb3bc596SJohn Baldwin flowtype == M_HASHTYPE_NONE) 1850fb3bc596SJohn Baldwin return (NULL); 185198085baeSAndrew Gallatin hash = flowid >> sc->flowid_shift; 18528732245dSAndrew Gallatin return (lacp_select_tx_port_by_hash(sc, hash, numa_domain, &err)); 1853fb3bc596SJohn Baldwin default: 1854fb3bc596SJohn Baldwin return (NULL); 1855fb3bc596SJohn Baldwin } 1856fb3bc596SJohn Baldwin } 1857fb3bc596SJohn Baldwin 1858f3e7afe2SHans Petter Selasky static int 1859f3e7afe2SHans Petter Selasky lagg_snd_tag_alloc(struct ifnet *ifp, 1860f3e7afe2SHans Petter Selasky union if_snd_tag_alloc_params *params, 1861f3e7afe2SHans Petter Selasky struct m_snd_tag **ppmt) 1862f3e7afe2SHans Petter Selasky { 186387bf9b9cSGleb Smirnoff struct epoch_tracker et; 1864c782ea8bSJohn Baldwin const struct if_snd_tag_sw *sw; 1865fb3bc596SJohn Baldwin struct lagg_snd_tag *lst; 1866f3e7afe2SHans Petter Selasky struct lagg_port *lp; 1867fb3bc596SJohn Baldwin struct ifnet *lp_ifp; 18681967e313SHans Petter Selasky struct m_snd_tag *mst; 1869fb3bc596SJohn Baldwin int error; 1870fb3bc596SJohn Baldwin 1871c782ea8bSJohn Baldwin switch (params->hdr.type) { 1872c782ea8bSJohn Baldwin #ifdef RATELIMIT 1873c782ea8bSJohn Baldwin case IF_SND_TAG_TYPE_UNLIMITED: 1874c782ea8bSJohn Baldwin sw = &lagg_snd_tag_ul_sw; 1875c782ea8bSJohn Baldwin break; 1876c782ea8bSJohn Baldwin case IF_SND_TAG_TYPE_RATE_LIMIT: 1877c782ea8bSJohn Baldwin sw = &lagg_snd_tag_rl_sw; 1878c782ea8bSJohn Baldwin break; 1879c782ea8bSJohn Baldwin #endif 1880c782ea8bSJohn Baldwin #ifdef KERN_TLS 1881c782ea8bSJohn Baldwin case IF_SND_TAG_TYPE_TLS: 1882c782ea8bSJohn Baldwin sw = &lagg_snd_tag_tls_sw; 1883c782ea8bSJohn Baldwin break; 18841967e313SHans Petter Selasky case IF_SND_TAG_TYPE_TLS_RX: 18851967e313SHans Petter Selasky /* Return tag from port interface directly. 
*/ 18861967e313SHans Petter Selasky sw = NULL; 18871967e313SHans Petter Selasky break; 1888c782ea8bSJohn Baldwin #ifdef RATELIMIT 1889c782ea8bSJohn Baldwin case IF_SND_TAG_TYPE_TLS_RATE_LIMIT: 1890c782ea8bSJohn Baldwin sw = &lagg_snd_tag_tls_rl_sw; 1891c782ea8bSJohn Baldwin break; 1892c782ea8bSJohn Baldwin #endif 1893c782ea8bSJohn Baldwin #endif 1894c782ea8bSJohn Baldwin default: 1895c782ea8bSJohn Baldwin return (EOPNOTSUPP); 1896c782ea8bSJohn Baldwin } 1897c782ea8bSJohn Baldwin 189887bf9b9cSGleb Smirnoff NET_EPOCH_ENTER(et); 189998085baeSAndrew Gallatin lp = lookup_snd_tag_port(ifp, params->hdr.flowid, 190098085baeSAndrew Gallatin params->hdr.flowtype, params->hdr.numa_domain); 19012f59b04aSJohn Baldwin if (lp == NULL) { 19025ee33a90SGleb Smirnoff NET_EPOCH_EXIT(et); 1903f3e7afe2SHans Petter Selasky return (EOPNOTSUPP); 19042f59b04aSJohn Baldwin } 190536e0a362SJohn Baldwin if (lp->lp_ifp == NULL) { 19065ee33a90SGleb Smirnoff NET_EPOCH_EXIT(et); 1907f3e7afe2SHans Petter Selasky return (EOPNOTSUPP); 1908fb3bc596SJohn Baldwin } 1909fb3bc596SJohn Baldwin lp_ifp = lp->lp_ifp; 1910fb3bc596SJohn Baldwin if_ref(lp_ifp); 191187bf9b9cSGleb Smirnoff NET_EPOCH_EXIT(et); 1912f3e7afe2SHans Petter Selasky 19131967e313SHans Petter Selasky if (sw != NULL) { 1914fb3bc596SJohn Baldwin lst = malloc(sizeof(*lst), M_LAGG, M_NOWAIT); 1915fb3bc596SJohn Baldwin if (lst == NULL) { 1916fb3bc596SJohn Baldwin if_rele(lp_ifp); 1917fb3bc596SJohn Baldwin return (ENOMEM); 1918fb3bc596SJohn Baldwin } 19191967e313SHans Petter Selasky } else 19201967e313SHans Petter Selasky lst = NULL; 1921fb3bc596SJohn Baldwin 19221967e313SHans Petter Selasky error = m_snd_tag_alloc(lp_ifp, params, &mst); 1923fb3bc596SJohn Baldwin if_rele(lp_ifp); 1924fb3bc596SJohn Baldwin if (error) { 1925fb3bc596SJohn Baldwin free(lst, M_LAGG); 1926fb3bc596SJohn Baldwin return (error); 1927fb3bc596SJohn Baldwin } 1928fb3bc596SJohn Baldwin 19291967e313SHans Petter Selasky if (sw != NULL) { 1930c782ea8bSJohn Baldwin m_snd_tag_init(&lst->com, ifp, sw); 19311967e313SHans Petter Selasky lst->tag = mst; 1932fb3bc596SJohn Baldwin 1933fb3bc596SJohn Baldwin *ppmt = &lst->com; 19341967e313SHans Petter Selasky } else 19351967e313SHans Petter Selasky *ppmt = mst; 19361967e313SHans Petter Selasky 1937fb3bc596SJohn Baldwin return (0); 1938fb3bc596SJohn Baldwin } 1939fb3bc596SJohn Baldwin 19401a714ff2SRandall Stewart static struct m_snd_tag * 19411a714ff2SRandall Stewart lagg_next_snd_tag(struct m_snd_tag *mst) 19421a714ff2SRandall Stewart { 19431a714ff2SRandall Stewart struct lagg_snd_tag *lst; 19441a714ff2SRandall Stewart 19451a714ff2SRandall Stewart lst = mst_to_lst(mst); 19461a714ff2SRandall Stewart return (lst->tag); 19471a714ff2SRandall Stewart } 19481a714ff2SRandall Stewart 1949fb3bc596SJohn Baldwin static int 1950fb3bc596SJohn Baldwin lagg_snd_tag_modify(struct m_snd_tag *mst, 1951fb3bc596SJohn Baldwin union if_snd_tag_modify_params *params) 1952fb3bc596SJohn Baldwin { 1953fb3bc596SJohn Baldwin struct lagg_snd_tag *lst; 1954fb3bc596SJohn Baldwin 1955fb3bc596SJohn Baldwin lst = mst_to_lst(mst); 1956c782ea8bSJohn Baldwin return (lst->tag->sw->snd_tag_modify(lst->tag, params)); 1957fb3bc596SJohn Baldwin } 1958fb3bc596SJohn Baldwin 1959fb3bc596SJohn Baldwin static int 1960fb3bc596SJohn Baldwin lagg_snd_tag_query(struct m_snd_tag *mst, 1961fb3bc596SJohn Baldwin union if_snd_tag_query_params *params) 1962fb3bc596SJohn Baldwin { 1963fb3bc596SJohn Baldwin struct lagg_snd_tag *lst; 1964fb3bc596SJohn Baldwin 1965fb3bc596SJohn Baldwin lst = mst_to_lst(mst); 
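/* Forward the query to the send tag that was allocated on the underlying port interface. */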
1966c782ea8bSJohn Baldwin return (lst->tag->sw->snd_tag_query(lst->tag, params)); 1967f3e7afe2SHans Petter Selasky } 1968fa91f845SRandall Stewart 1969fa91f845SRandall Stewart static void 1970fb3bc596SJohn Baldwin lagg_snd_tag_free(struct m_snd_tag *mst) 1971fa91f845SRandall Stewart { 1972fb3bc596SJohn Baldwin struct lagg_snd_tag *lst; 1973fb3bc596SJohn Baldwin 1974fb3bc596SJohn Baldwin lst = mst_to_lst(mst); 1975fb3bc596SJohn Baldwin m_snd_tag_rele(lst->tag); 1976fb3bc596SJohn Baldwin free(lst, M_LAGG); 1977fa91f845SRandall Stewart } 1978fa91f845SRandall Stewart 197920abea66SRandall Stewart static void 198020abea66SRandall Stewart lagg_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q) 198120abea66SRandall Stewart { 198220abea66SRandall Stewart /* 198320abea66SRandall Stewart * For lagg, we have an indirect 198420abea66SRandall Stewart * interface. The caller needs to 198520abea66SRandall Stewart * get a ratelimit tag on the actual 198620abea66SRandall Stewart * interface the flow will go on. 198720abea66SRandall Stewart */ 198820abea66SRandall Stewart q->rate_table = NULL; 198920abea66SRandall Stewart q->flags = RT_IS_INDIRECT; 199020abea66SRandall Stewart q->max_flows = 0; 199120abea66SRandall Stewart q->number_of_rates = 0; 199220abea66SRandall Stewart } 1993f3e7afe2SHans Petter Selasky #endif 1994f3e7afe2SHans Petter Selasky 199518242d3bSAndrew Thompson static int 19962f86d4b0SAlexander Motin lagg_setmulti(struct lagg_port *lp) 199718242d3bSAndrew Thompson { 1998ec32b37eSAndrew Thompson struct lagg_softc *sc = lp->lp_softc; 1999d74fd345SAndrew Thompson struct ifnet *ifp = lp->lp_ifp; 2000ec32b37eSAndrew Thompson struct ifnet *scifp = sc->sc_ifp; 200118242d3bSAndrew Thompson struct lagg_mc *mc; 20022d222cb7SAlexander Motin struct ifmultiaddr *ifma; 200318242d3bSAndrew Thompson int error; 200418242d3bSAndrew Thompson 20052d222cb7SAlexander Motin IF_ADDR_WLOCK(scifp); 2006d7c5a620SMatt Macy CK_STAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) { 2007d74fd345SAndrew Thompson if (ifma->ifma_addr->sa_family != AF_LINK) 2008d74fd345SAndrew Thompson continue; 2009841613dcSJohn Baldwin mc = malloc(sizeof(struct lagg_mc), M_LAGG, M_NOWAIT); 20102d222cb7SAlexander Motin if (mc == NULL) { 20112d222cb7SAlexander Motin IF_ADDR_WUNLOCK(scifp); 20122d222cb7SAlexander Motin return (ENOMEM); 20132d222cb7SAlexander Motin } 2014dcd7f0bdSZhenlei Huang bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len); 20152d222cb7SAlexander Motin mc->mc_addr.sdl_index = ifp->if_index; 20162d222cb7SAlexander Motin mc->mc_ifma = NULL; 20172d222cb7SAlexander Motin SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries); 20182d222cb7SAlexander Motin } 20192d222cb7SAlexander Motin IF_ADDR_WUNLOCK(scifp); 20202d222cb7SAlexander Motin SLIST_FOREACH (mc, &lp->lp_mc_head, mc_entries) { 20212d222cb7SAlexander Motin error = if_addmulti(ifp, 20222d222cb7SAlexander Motin (struct sockaddr *)&mc->mc_addr, &mc->mc_ifma); 2023d74fd345SAndrew Thompson if (error) 202418242d3bSAndrew Thompson return (error); 2025d74fd345SAndrew Thompson } 20262f86d4b0SAlexander Motin return (0); 20272f86d4b0SAlexander Motin } 20282f86d4b0SAlexander Motin 20292f86d4b0SAlexander Motin static int 20302f86d4b0SAlexander Motin lagg_clrmulti(struct lagg_port *lp) 20312f86d4b0SAlexander Motin { 20322f86d4b0SAlexander Motin struct lagg_mc *mc; 20332f86d4b0SAlexander Motin 203499031b8fSStephen Hurd LAGG_XLOCK_ASSERT(lp->lp_softc); 2035d74fd345SAndrew Thompson while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) { 
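/* Drain the recorded multicast list; only call if_delmulti_ifma() when the port is not already detaching. */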
2036d74fd345SAndrew Thompson SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries); 203713157b2bSLuiz Otavio O Souza if (mc->mc_ifma && lp->lp_detaching == 0) 2038d74fd345SAndrew Thompson if_delmulti_ifma(mc->mc_ifma); 2039841613dcSJohn Baldwin free(mc, M_LAGG); 204018242d3bSAndrew Thompson } 204118242d3bSAndrew Thompson return (0); 204218242d3bSAndrew Thompson } 204318242d3bSAndrew Thompson 2044713ceb99SAndrew Gallatin static void 2045713ceb99SAndrew Gallatin lagg_setcaps(struct lagg_port *lp, int cap, int cap2) 20462f86d4b0SAlexander Motin { 20472f86d4b0SAlexander Motin struct ifreq ifr; 2048713ceb99SAndrew Gallatin struct siocsifcapnv_driver_data drv_ioctl_data; 20492f86d4b0SAlexander Motin 2050713ceb99SAndrew Gallatin if (lp->lp_ifp->if_capenable == cap && 2051713ceb99SAndrew Gallatin lp->lp_ifp->if_capenable2 == cap2) 2052713ceb99SAndrew Gallatin return; 20532f86d4b0SAlexander Motin if (lp->lp_ioctl == NULL) 2054713ceb99SAndrew Gallatin return; 2055713ceb99SAndrew Gallatin /* XXX */ 2056713ceb99SAndrew Gallatin if ((lp->lp_ifp->if_capabilities & IFCAP_NV) != 0) { 2057713ceb99SAndrew Gallatin drv_ioctl_data.reqcap = cap; 2058713ceb99SAndrew Gallatin drv_ioctl_data.reqcap2 = cap2; 2059713ceb99SAndrew Gallatin drv_ioctl_data.nvcap = NULL; 2060713ceb99SAndrew Gallatin (*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAPNV, 2061713ceb99SAndrew Gallatin (caddr_t)&drv_ioctl_data); 2062713ceb99SAndrew Gallatin } else { 20632f86d4b0SAlexander Motin ifr.ifr_reqcap = cap; 2064713ceb99SAndrew Gallatin (*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAP, (caddr_t)&ifr); 2065713ceb99SAndrew Gallatin } 20662f86d4b0SAlexander Motin } 20672f86d4b0SAlexander Motin 206818242d3bSAndrew Thompson /* Handle a ref counted flag that should be set on the lagg port as well */ 206918242d3bSAndrew Thompson static int 207018242d3bSAndrew Thompson lagg_setflag(struct lagg_port *lp, int flag, int status, 207118242d3bSAndrew Thompson int (*func)(struct ifnet *, int)) 207218242d3bSAndrew Thompson { 2073ec32b37eSAndrew Thompson struct lagg_softc *sc = lp->lp_softc; 2074ec32b37eSAndrew Thompson struct ifnet *scifp = sc->sc_ifp; 207518242d3bSAndrew Thompson struct ifnet *ifp = lp->lp_ifp; 207618242d3bSAndrew Thompson int error; 207718242d3bSAndrew Thompson 20782f86d4b0SAlexander Motin LAGG_XLOCK_ASSERT(sc); 207918242d3bSAndrew Thompson 2080ec32b37eSAndrew Thompson status = status ? (scifp->if_flags & flag) : 0; 208118242d3bSAndrew Thompson /* Now "status" contains the flag value or 0 */ 208218242d3bSAndrew Thompson 208318242d3bSAndrew Thompson /* 208418242d3bSAndrew Thompson * See if recorded ports status is different from what 208518242d3bSAndrew Thompson * we want it to be. If it is, flip it. We record ports 208618242d3bSAndrew Thompson * status in lp_ifflags so that we won't clear ports flag 208718242d3bSAndrew Thompson * we haven't set. In fact, we don't clear or set ports 208818242d3bSAndrew Thompson * flags directly, but get or release references to them. 208918242d3bSAndrew Thompson * That's why we can be sure that recorded flags still are 209018242d3bSAndrew Thompson * in accord with actual ports flags. 
209118242d3bSAndrew Thompson */ 209218242d3bSAndrew Thompson if (status != (lp->lp_ifflags & flag)) { 209318242d3bSAndrew Thompson error = (*func)(ifp, status); 209418242d3bSAndrew Thompson if (error) 209518242d3bSAndrew Thompson return (error); 209618242d3bSAndrew Thompson lp->lp_ifflags &= ~flag; 209718242d3bSAndrew Thompson lp->lp_ifflags |= status; 209818242d3bSAndrew Thompson } 209918242d3bSAndrew Thompson return (0); 210018242d3bSAndrew Thompson } 210118242d3bSAndrew Thompson 210218242d3bSAndrew Thompson /* 210318242d3bSAndrew Thompson * Handle IFF_* flags that require certain changes on the lagg port: 210418242d3bSAndrew Thompson * if "status" is true, update the port's flags to match the lagg's; 210518242d3bSAndrew Thompson * if "status" is false, forcibly clear the flags set on the port. 210618242d3bSAndrew Thompson */ 210718242d3bSAndrew Thompson static int 210818242d3bSAndrew Thompson lagg_setflags(struct lagg_port *lp, int status) 210918242d3bSAndrew Thompson { 211018242d3bSAndrew Thompson int error, i; 211118242d3bSAndrew Thompson 211218242d3bSAndrew Thompson for (i = 0; lagg_pflags[i].flag; i++) { 211318242d3bSAndrew Thompson error = lagg_setflag(lp, lagg_pflags[i].flag, 211418242d3bSAndrew Thompson status, lagg_pflags[i].func); 211518242d3bSAndrew Thompson if (error) 211618242d3bSAndrew Thompson return (error); 211718242d3bSAndrew Thompson } 211818242d3bSAndrew Thompson return (0); 211918242d3bSAndrew Thompson } 212018242d3bSAndrew Thompson 21213b7d677bSGleb Smirnoff static int 2122a92c4bb6SHans Petter Selasky lagg_transmit_ethernet(struct ifnet *ifp, struct mbuf *m) 212318242d3bSAndrew Thompson { 212418242d3bSAndrew Thompson struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; 212518242d3bSAndrew Thompson 2126d4a80d21SZhenlei Huang NET_EPOCH_ASSERT(); 2127b2e60773SJohn Baldwin #if defined(KERN_TLS) || defined(RATELIMIT) 2128fb3bc596SJohn Baldwin if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) 2129fb3bc596SJohn Baldwin MPASS(m->m_pkthdr.snd_tag->ifp == ifp); 2130fb3bc596SJohn Baldwin #endif 2131149bac03SAndrew Thompson /* We need a Tx algorithm and at least one port */ 2132149bac03SAndrew Thompson if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) { 21333b7d677bSGleb Smirnoff m_freem(m); 213423575437SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 21353b7d677bSGleb Smirnoff return (ENXIO); 2136149bac03SAndrew Thompson } 2137149bac03SAndrew Thompson 2138544f7141SAndrew Thompson ETHER_BPF_MTAP(ifp, m); 213918242d3bSAndrew Thompson 2140d4a80d21SZhenlei Huang return (lagg_proto_start(sc, m)); 21413b7d677bSGleb Smirnoff } 21423b7d677bSGleb Smirnoff 2143a92c4bb6SHans Petter Selasky static int 2144a92c4bb6SHans Petter Selasky lagg_transmit_infiniband(struct ifnet *ifp, struct mbuf *m) 2145a92c4bb6SHans Petter Selasky { 2146a92c4bb6SHans Petter Selasky struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; 2147a92c4bb6SHans Petter Selasky 2148d4a80d21SZhenlei Huang NET_EPOCH_ASSERT(); 2149a92c4bb6SHans Petter Selasky #if defined(KERN_TLS) || defined(RATELIMIT) 2150a92c4bb6SHans Petter Selasky if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) 2151a92c4bb6SHans Petter Selasky MPASS(m->m_pkthdr.snd_tag->ifp == ifp); 2152a92c4bb6SHans Petter Selasky #endif 2153a92c4bb6SHans Petter Selasky /* We need a Tx algorithm and at least one port */ 2154a92c4bb6SHans Petter Selasky if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) { 2155a92c4bb6SHans Petter Selasky m_freem(m); 2156a92c4bb6SHans Petter Selasky if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2157a92c4bb6SHans Petter
Selasky return (ENXIO); 2158a92c4bb6SHans Petter Selasky } 2159a92c4bb6SHans Petter Selasky 2160adf62e83SJustin Hibbits infiniband_bpf_mtap(ifp, m); 2161a92c4bb6SHans Petter Selasky 2162d4a80d21SZhenlei Huang return (lagg_proto_start(sc, m)); 2163a92c4bb6SHans Petter Selasky } 2164a92c4bb6SHans Petter Selasky 21653b7d677bSGleb Smirnoff /* 21663b7d677bSGleb Smirnoff * The ifp->if_qflush entry point for lagg(4) is no-op. 21673b7d677bSGleb Smirnoff */ 21683b7d677bSGleb Smirnoff static void 21693b7d677bSGleb Smirnoff lagg_qflush(struct ifnet *ifp __unused) 21703b7d677bSGleb Smirnoff { 217118242d3bSAndrew Thompson } 217218242d3bSAndrew Thompson 217318242d3bSAndrew Thompson static struct mbuf * 2174a92c4bb6SHans Petter Selasky lagg_input_ethernet(struct ifnet *ifp, struct mbuf *m) 217518242d3bSAndrew Thompson { 217618242d3bSAndrew Thompson struct lagg_port *lp = ifp->if_lagg; 2177ec32b37eSAndrew Thompson struct lagg_softc *sc = lp->lp_softc; 2178ec32b37eSAndrew Thompson struct ifnet *scifp = sc->sc_ifp; 217918242d3bSAndrew Thompson 2180d4a80d21SZhenlei Huang NET_EPOCH_ASSERT(); 2181ec32b37eSAndrew Thompson if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 2182db5a36bdSMark Johnston lp->lp_detaching != 0 || 218318242d3bSAndrew Thompson sc->sc_proto == LAGG_PROTO_NONE) { 218418242d3bSAndrew Thompson m_freem(m); 218518242d3bSAndrew Thompson return (NULL); 218618242d3bSAndrew Thompson } 218718242d3bSAndrew Thompson 21885f3d0399SZhenlei Huang m = lagg_proto_input(sc, lp, m); 21895f3d0399SZhenlei Huang if (m != NULL) { 2190544f7141SAndrew Thompson ETHER_BPF_MTAP(scifp, m); 219118242d3bSAndrew Thompson 21925f3d0399SZhenlei Huang if ((scifp->if_flags & IFF_MONITOR) != 0) { 2193d3b28963SAndrew Thompson m_freem(m); 2194d3b28963SAndrew Thompson m = NULL; 2195d3b28963SAndrew Thompson } 21965f3d0399SZhenlei Huang } 219718242d3bSAndrew Thompson 2198110ce09cSTom Jones #ifdef DEV_NETMAP 2199110ce09cSTom Jones if (m != NULL && scifp->if_capenable & IFCAP_NETMAP) { 2200110ce09cSTom Jones scifp->if_input(scifp, m); 2201110ce09cSTom Jones m = NULL; 2202110ce09cSTom Jones } 2203110ce09cSTom Jones #endif /* DEV_NETMAP */ 2204110ce09cSTom Jones 220518242d3bSAndrew Thompson return (m); 220618242d3bSAndrew Thompson } 220718242d3bSAndrew Thompson 2208a92c4bb6SHans Petter Selasky static struct mbuf * 2209a92c4bb6SHans Petter Selasky lagg_input_infiniband(struct ifnet *ifp, struct mbuf *m) 2210a92c4bb6SHans Petter Selasky { 2211a92c4bb6SHans Petter Selasky struct lagg_port *lp = ifp->if_lagg; 2212a92c4bb6SHans Petter Selasky struct lagg_softc *sc = lp->lp_softc; 2213a92c4bb6SHans Petter Selasky struct ifnet *scifp = sc->sc_ifp; 2214a92c4bb6SHans Petter Selasky 221590820ef1SZhenlei Huang NET_EPOCH_ASSERT(); 2216a92c4bb6SHans Petter Selasky if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 2217a92c4bb6SHans Petter Selasky lp->lp_detaching != 0 || 2218a92c4bb6SHans Petter Selasky sc->sc_proto == LAGG_PROTO_NONE) { 2219a92c4bb6SHans Petter Selasky m_freem(m); 2220a92c4bb6SHans Petter Selasky return (NULL); 2221a92c4bb6SHans Petter Selasky } 2222a92c4bb6SHans Petter Selasky 22235f3d0399SZhenlei Huang m = lagg_proto_input(sc, lp, m); 22245f3d0399SZhenlei Huang if (m != NULL) { 2225adf62e83SJustin Hibbits infiniband_bpf_mtap(scifp, m); 2226a92c4bb6SHans Petter Selasky 22275f3d0399SZhenlei Huang if ((scifp->if_flags & IFF_MONITOR) != 0) { 2228a92c4bb6SHans Petter Selasky m_freem(m); 2229a92c4bb6SHans Petter Selasky m = NULL; 2230a92c4bb6SHans Petter Selasky } 22315f3d0399SZhenlei Huang } 2232a92c4bb6SHans Petter Selasky 
2233a92c4bb6SHans Petter Selasky return (m); 2234a92c4bb6SHans Petter Selasky } 2235a92c4bb6SHans Petter Selasky 223618242d3bSAndrew Thompson static int 223718242d3bSAndrew Thompson lagg_media_change(struct ifnet *ifp) 223818242d3bSAndrew Thompson { 223918242d3bSAndrew Thompson struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; 224018242d3bSAndrew Thompson 224118242d3bSAndrew Thompson if (sc->sc_ifflags & IFF_DEBUG) 224218242d3bSAndrew Thompson printf("%s\n", __func__); 224318242d3bSAndrew Thompson 224418242d3bSAndrew Thompson /* Ignore */ 224518242d3bSAndrew Thompson return (0); 224618242d3bSAndrew Thompson } 224718242d3bSAndrew Thompson 224818242d3bSAndrew Thompson static void 224918242d3bSAndrew Thompson lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr) 225018242d3bSAndrew Thompson { 225187bf9b9cSGleb Smirnoff struct epoch_tracker et; 225218242d3bSAndrew Thompson struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc; 225318242d3bSAndrew Thompson struct lagg_port *lp; 225418242d3bSAndrew Thompson 225518242d3bSAndrew Thompson imr->ifm_status = IFM_AVALID; 225618242d3bSAndrew Thompson imr->ifm_active = IFM_ETHER | IFM_AUTO; 225718242d3bSAndrew Thompson 225887bf9b9cSGleb Smirnoff NET_EPOCH_ENTER(et); 225999031b8fSStephen Hurd CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 2260a5715cb2SAndrew Thompson if (LAGG_PORTACTIVE(lp)) 226118242d3bSAndrew Thompson imr->ifm_status |= IFM_ACTIVE; 2262a5715cb2SAndrew Thompson } 226387bf9b9cSGleb Smirnoff NET_EPOCH_EXIT(et); 226418242d3bSAndrew Thompson } 226518242d3bSAndrew Thompson 226618242d3bSAndrew Thompson static void 226780ddfb40SAndrew Thompson lagg_linkstate(struct lagg_softc *sc) 226880ddfb40SAndrew Thompson { 226987bf9b9cSGleb Smirnoff struct epoch_tracker et; 227080ddfb40SAndrew Thompson struct lagg_port *lp; 227180ddfb40SAndrew Thompson int new_link = LINK_STATE_DOWN; 2272f812e067SAndrew Thompson uint64_t speed; 227380ddfb40SAndrew Thompson 22742f86d4b0SAlexander Motin LAGG_XLOCK_ASSERT(sc); 22752f86d4b0SAlexander Motin 22765ccac9f9SAndrew Gallatin /* LACP handles link state itself */ 22775ccac9f9SAndrew Gallatin if (sc->sc_proto == LAGG_PROTO_LACP) 22785ccac9f9SAndrew Gallatin return; 22795ccac9f9SAndrew Gallatin 228080ddfb40SAndrew Thompson /* Our link is considered up if at least one of our ports is active */ 228187bf9b9cSGleb Smirnoff NET_EPOCH_ENTER(et); 228299031b8fSStephen Hurd CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) { 2283eade13f9SGleb Smirnoff if (lp->lp_ifp->if_link_state == LINK_STATE_UP) { 228480ddfb40SAndrew Thompson new_link = LINK_STATE_UP; 228580ddfb40SAndrew Thompson break; 228680ddfb40SAndrew Thompson } 228780ddfb40SAndrew Thompson } 228887bf9b9cSGleb Smirnoff NET_EPOCH_EXIT(et); 2289d6e82913SSteven Hartland if_link_state_change(sc->sc_ifp, new_link); 2290be07c180SAndrew Thompson 2291be07c180SAndrew Thompson /* Update if_baudrate to reflect the max possible speed */ 2292be07c180SAndrew Thompson switch (sc->sc_proto) { 2293be07c180SAndrew Thompson case LAGG_PROTO_FAILOVER: 2294f812e067SAndrew Thompson sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ? 

static void
lagg_linkstate(struct lagg_softc *sc)
{
	struct epoch_tracker et;
	struct lagg_port *lp;
	int new_link = LINK_STATE_DOWN;
	uint64_t speed;

	LAGG_XLOCK_ASSERT(sc);

	/* LACP handles link state itself */
	if (sc->sc_proto == LAGG_PROTO_LACP)
		return;

	/* Our link is considered up if at least one of our ports is active */
	NET_EPOCH_ENTER(et);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;
			break;
		}
	}
	NET_EPOCH_EXIT(et);
	if_link_state_change(sc->sc_ifp, new_link);

	/* Update if_baudrate to reflect the max possible speed */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
		    sc->sc_primary->lp_ifp->if_baudrate : 0;
		break;
	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_BROADCAST:
		speed = 0;
		NET_EPOCH_ENTER(et);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			speed += lp->lp_ifp->if_baudrate;
		NET_EPOCH_EXIT(et);
		sc->sc_ifp->if_baudrate = speed;
		break;
	case LAGG_PROTO_LACP:
		/* LACP updates if_baudrate itself */
		break;
	}
}

static void
lagg_port_state(struct ifnet *ifp, int state)
{
	struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
	struct lagg_softc *sc = NULL;

	if (lp != NULL)
		sc = lp->lp_softc;
	if (sc == NULL)
		return;

	LAGG_XLOCK(sc);
	lagg_linkstate(sc);
	lagg_proto_linkstate(sc, lp);
	LAGG_XUNLOCK(sc);
}
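
/*
 * Port selection helper used by the transmit paths below.  Given a
 * preferred port it falls back to the next active port in the list, or
 * to the first active port found by a full walk.  For example, with
 * three (hypothetical) ports em0, em1 and em2 where em0's link is down,
 * lagg_link_active(sc, <em0's lp>) returns em1's port (or em2's if em1
 * is also down), and NULL only when every port is down.
 */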

struct lagg_port *
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_port *lp_next, *rval = NULL;

	/*
	 * Search for a port which reports an active link state.
	 */

#ifdef INVARIANTS
	/*
	 * This is called either in the network epoch or with
	 * LAGG_XLOCK(sc) held.
	 */
	if (!in_epoch(net_epoch_preempt))
		LAGG_XLOCK_ASSERT(sc);
#endif

	if (lp == NULL)
		goto search;
	if (LAGG_PORTACTIVE(lp)) {
		rval = lp;
		goto found;
	}
	if ((lp_next = CK_SLIST_NEXT(lp, lp_entries)) != NULL &&
	    LAGG_PORTACTIVE(lp_next)) {
		rval = lp_next;
		goto found;
	}

search:
	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp_next)) {
			return (lp_next);
		}
	}
found:
	return (rval);
}

int
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
{

#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		struct lagg_snd_tag *lst;
		struct m_snd_tag *mst;

		mst = m->m_pkthdr.snd_tag;
		lst = mst_to_lst(mst);
		if (lst->tag->ifp != ifp) {
			m_freem(m);
			return (EAGAIN);
		}
		m->m_pkthdr.snd_tag = m_snd_tag_ref(lst->tag);
		m_snd_tag_rele(mst);
	}
#endif
	return (ifp->if_transmit)(ifp, m);
}

/*
 * Simple round robin aggregation
 */
static void
lagg_rr_attach(struct lagg_softc *sc)
{
	sc->sc_seq = 0;
	sc->sc_stride = 1;
}
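
/*
 * Port choice is (sc_seq / sc_stride) % sc_count, so with the default
 * stride of 1 successive packets simply cycle through the ports, e.g.
 * 0, 1, 2, 0, 1, ... on a three-port lagg.  A larger stride keeps that
 * many consecutive packets on the same port before moving on.
 */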

static int
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	uint32_t p;

	p = atomic_fetchadd_32(&sc->sc_seq, 1);
	p /= sc->sc_stride;
	p %= sc->sc_count;
	lp = CK_SLIST_FIRST(&sc->sc_ports);

	while (p--)
		lp = CK_SLIST_NEXT(lp, lp_entries);

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

/*
 * Broadcast mode
 */
static int
lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
{
	int errors = 0;
	int ret;
	struct lagg_port *lp, *last = NULL;
	struct mbuf *m0;

	NET_EPOCH_ASSERT();
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (!LAGG_PORTACTIVE(lp))
			continue;

		if (last != NULL) {
			m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (m0 == NULL) {
				ret = ENOBUFS;
				errors++;
				break;
			}
			lagg_enqueue(last->lp_ifp, m0);
		}
		last = lp;
	}

	if (last == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENOENT);
	}
	if ((last = lagg_link_active(sc, last)) == NULL) {
		errors++;
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);
		m_freem(m);
		return (ENETDOWN);
	}

	ret = lagg_enqueue(last->lp_ifp, m);
	if (errors != 0)
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);

	return (ret);
}

/*
 * Active failover
 */
static int
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	/* Use the master port if active or the next available port */
	if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
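
/*
 * Receive side of active failover.  By default only traffic arriving on
 * the primary port (or, while the primary is down, on the port currently
 * standing in for it) is accepted, and frames seen on backup ports are
 * dropped.  Setting the net.link.lagg.failover_rx_all sysctl (backing
 * V_lagg_failover_rx_all) accepts traffic on every port.
 */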

static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *tmp_tp;

	if (lp == sc->sc_primary || V_lagg_failover_rx_all) {
		m->m_pkthdr.rcvif = ifp;
		return (m);
	}

	if (!LAGG_PORTACTIVE(sc->sc_primary)) {
		tmp_tp = lagg_link_active(sc, sc->sc_primary);
		/*
		 * If tmp_tp is NULL, we've received a packet when all
		 * our links are down. Weird, but process it anyway.
		 */
		if (tmp_tp == NULL || tmp_tp == lp) {
			m->m_pkthdr.rcvif = ifp;
			return (m);
		}
	}

	m_freem(m);
	return (NULL);
}

/*
 * Loadbalancing
 */
static void
lagg_lb_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	struct lagg_lb *lb;

	LAGG_XLOCK_ASSERT(sc);
	lb = malloc(sizeof(struct lagg_lb), M_LAGG, M_WAITOK | M_ZERO);
	lb->lb_key = m_ether_tcpip_hash_init();
	sc->sc_psc = lb;

	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_lb_port_create(lp);
}

static void
lagg_lb_detach(struct lagg_softc *sc)
{
	struct lagg_lb *lb;

	lb = (struct lagg_lb *)sc->sc_psc;
	if (lb != NULL)
		free(lb, M_LAGG);
}
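
/*
 * lagg_lb_porttable() rebuilds the flat lb_ports[] array that
 * lagg_lb_start() indexes by hash.  It is driven by the port
 * create/destroy hooks below; passing the departing port as 'lp'
 * rebuilds the table without it.
 */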

static int
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp_next;
	int i = 0, rv;

	rv = 0;
	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (lp_next == lp)
			continue;
		if (i >= LAGG_MAX_PORTS) {
			rv = EINVAL;
			break;
		}
		if (sc->sc_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
		lb->lb_ports[i++] = lp_next;
	}

	return (rv);
}

static int
lagg_lb_port_create(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	return (lagg_lb_porttable(sc, NULL));
}

static void
lagg_lb_port_destroy(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	lagg_lb_porttable(sc, lp);
}
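
/*
 * Transmit hash for loadbalance: if the mbuf carries a flowid and the
 * LAGG_OPT_USE_FLOWID option is set, the (shifted) flowid selects the
 * port; otherwise a software hash over the Ethernet/IP/TCP headers is
 * used.  Either value is reduced modulo sc_count, so e.g. a flow hashing
 * to 7 on a four-port lagg (hypothetical numbers) leaves via lb_ports[3].
 */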

static int
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp = NULL;
	uint32_t p = 0;

	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		p = m->m_pkthdr.flowid >> sc->flowid_shift;
	else
		p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key);
	p %= sc->sc_count;
	lp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

/*
 * 802.3ad LACP
 */
static void
lagg_lacp_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	lacp_attach(sc);
	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

static void
lagg_lacp_detach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	void *psc;

	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	psc = sc->sc_psc;
	sc->sc_psc = NULL;
	lacp_detach(psc);
}

static void
lagg_lacp_lladdr(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	LAGG_SXLOCK_ASSERT(sc);

	/* purge all the lacp ports */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* add them back in */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

static int
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	int err;

	lp = lacp_select_tx_port(sc, m, &err);
	if (lp == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (err);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
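
/*
 * Receive side of LACP: untagged ETHERTYPE_SLOW frames are diverted to
 * lacp_input() for protocol processing, and data frames are delivered
 * only when the receiving port is collecting and belongs to the active
 * aggregator; everything else is dropped.
 */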

static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ether_header *eh;
	u_short etype;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
		m = lacp_input(lp, m);
		if (m == NULL)
			return (NULL);
	}

	/*
	 * If the port is not collecting or not in the active aggregator then
	 * free and return.
	 */
	if (!lacp_iscollecting(lp) || !lacp_isactive(lp)) {
		m_freem(m);
		return (NULL);
	}

	m->m_pkthdr.rcvif = ifp;
	return (m);
}

/* Default input */
static struct mbuf *
lagg_default_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}