/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <string.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_tcp.h>
#include <bus_vdev_driver.h>
#include <rte_kvargs.h>

#include "rte_eth_bond.h"
#include "eth_bond_private.h"
#include "eth_bond_8023ad_private.h"

int
check_for_bonding_ethdev(const struct rte_eth_dev *eth_dev)
{
	/* Check valid pointer */
	if (eth_dev == NULL ||
		eth_dev->device == NULL ||
		eth_dev->device->driver == NULL ||
		eth_dev->device->driver->name == NULL)
		return -1;

	/* return 0 if driver name matches */
	return eth_dev->device->driver->name != pmd_bond_drv.driver.name;
}

int
valid_bonding_port_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return check_for_bonding_ethdev(&rte_eth_devices[port_id]);
}

int
check_for_main_bonding_ethdev(const struct rte_eth_dev *eth_dev)
{
	int i;
	struct bond_dev_private *internals;

	if (check_for_bonding_ethdev(eth_dev) != 0)
		return 0;

	internals = eth_dev->data->dev_private;

	/* Check if any of the member devices is a bonding device */
	for (i = 0; i < internals->member_count; i++)
		if (valid_bonding_port_id(internals->members[i].port_id) == 0)
			return 1;

	return 0;
}

int
valid_member_port_id(struct bond_dev_private *internals, uint16_t member_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(member_port_id, -1);

	/* Verify that member_port_id refers to a non bonding port */
	if (check_for_bonding_ethdev(&rte_eth_devices[member_port_id]) == 0 &&
			internals->mode == BONDING_MODE_8023AD) {
		RTE_BOND_LOG(ERR, "Cannot add member to bonding device in 802.3ad"
				" mode as member is also a bonding device, only "
				"physical devices can be supported in this mode.");
		return -1;
	}

	if (internals->port_id == member_port_id) {
		RTE_BOND_LOG(ERR,
			"Cannot add the bonding device itself as its member.");
		return -1;
	}

	return 0;
}

void
activate_member(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint16_t active_count = internals->active_member_count;

	if (internals->mode == BONDING_MODE_8023AD)
		bond_mode_8023ad_activate_member(eth_dev, port_id);

	if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB) {

		internals->tlb_members_order[active_count] = port_id;
	}

	RTE_ASSERT(internals->active_member_count <
			(RTE_DIM(internals->active_members) - 1));

	internals->active_members[internals->active_member_count] = port_id;
	internals->active_member_count++;

	if (internals->mode == BONDING_MODE_TLB)
		bond_tlb_activate_member(internals);
	if (internals->mode == BONDING_MODE_ALB)
		bond_mode_alb_client_list_upd(eth_dev);
}

void
deactivate_member(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
	uint16_t member_pos;
	struct bond_dev_private *internals = eth_dev->data->dev_private;
	uint16_t active_count = internals->active_member_count;

	if (internals->mode == BONDING_MODE_8023AD) {
		bond_mode_8023ad_stop(eth_dev);
		bond_mode_8023ad_deactivate_member(eth_dev, port_id);
	} else if (internals->mode == BONDING_MODE_TLB
			|| internals->mode == BONDING_MODE_ALB)
		bond_tlb_disable(internals);

	member_pos = find_member_by_id(internals->active_members, active_count,
			port_id);

	/*
	 * If the member was not at the end of the list,
	 * shift the remaining active members up the active array.
	 */
	if (member_pos < active_count) {
		active_count--;
		memmove(internals->active_members + member_pos,
				internals->active_members + member_pos + 1,
				(active_count - member_pos) *
				sizeof(internals->active_members[0]));
	}

	RTE_ASSERT(active_count < RTE_DIM(internals->active_members));
	internals->active_member_count = active_count;

	if (eth_dev->data->dev_started) {
		if (internals->mode == BONDING_MODE_8023AD) {
			bond_mode_8023ad_start(eth_dev);
		} else if (internals->mode == BONDING_MODE_TLB) {
			bond_tlb_enable(internals);
		} else if (internals->mode == BONDING_MODE_ALB) {
			bond_tlb_enable(internals);
			bond_mode_alb_client_list_upd(eth_dev);
		}
	}
}

int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
	struct bond_dev_private *internals;
	struct rte_eth_dev *bond_dev;
	char devargs[52];
	int ret;

	if (name == NULL) {
		RTE_BOND_LOG(ERR, "Invalid name specified");
		return -EINVAL;
	}

	ret = snprintf(devargs, sizeof(devargs),
		"driver=net_bonding,mode=%d,socket_id=%d", mode, socket_id);
	if (ret < 0 || ret >= (int)sizeof(devargs))
		return -ENOMEM;

	ret = rte_vdev_init(name, devargs);
	if (ret)
		return ret;

	bond_dev = rte_eth_dev_get_by_name(name);
	RTE_ASSERT(bond_dev);

	/*
	 * To make bond_ethdev_configure() happy we need to free the
	 * internals->kvlist here.
	 *
	 * Also see comment in bond_ethdev_configure().
	 */
	internals = bond_dev->data->dev_private;
	rte_kvargs_free(internals->kvlist);
	internals->kvlist = NULL;

	return bond_dev->data->port_id;
}
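
/*
 * Illustrative usage sketch (not part of the driver logic): create a
 * bonding device at run time and release it again.  The device name
 * "net_bonding0" and the active-backup mode below are arbitrary example
 * values, and error handling is reduced to the essentials.
 *
 *	int bond_port;
 *
 *	bond_port = rte_eth_bond_create("net_bonding0",
 *			BONDING_MODE_ACTIVE_BACKUP, rte_socket_id());
 *	if (bond_port < 0)
 *		rte_exit(EXIT_FAILURE, "Failed to create bonding device\n");
 *
 *	... configure the port, add members, start it ...
 *
 *	rte_eth_bond_free("net_bonding0");
 */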

int
rte_eth_bond_free(const char *name)
{
	return rte_vdev_uninit(name);
}

static int
member_vlan_filter_set(uint16_t bonding_port_id, uint16_t member_port_id)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;
	int found;
	int res = 0;
	uint64_t slab = 0;
	uint32_t pos = 0;
	uint16_t first;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	if ((bonding_eth_dev->data->dev_conf.rxmode.offloads &
			RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
		return 0;

	internals = bonding_eth_dev->data->dev_private;
	found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab);
	first = pos;

	if (!found)
		return 0;

	do {
		uint32_t i;
		uint64_t mask;

		for (i = 0, mask = 1;
		     i < RTE_BITMAP_SLAB_BIT_SIZE;
		     i++, mask <<= 1) {
			if (unlikely(slab & mask)) {
				uint16_t vlan_id = pos + i;

				res = rte_eth_dev_vlan_filter(member_port_id,
						vlan_id, 1);
			}
		}
		found = rte_bitmap_scan(internals->vlan_filter_bmp,
				&pos, &slab);
	} while (found && first != pos && res == 0);

	return res;
}

static int
member_rte_flow_prepare(uint16_t member_id, struct bond_dev_private *internals)
{
	struct rte_flow *flow;
	struct rte_flow_error ferror;
	uint16_t member_port_id = internals->members[member_id].port_id;

	if (internals->flow_isolated_valid != 0) {
		if (rte_eth_dev_stop(member_port_id) != 0) {
			RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
					member_port_id);
			return -1;
		}

		if (rte_flow_isolate(member_port_id, internals->flow_isolated,
				&ferror)) {
			RTE_BOND_LOG(ERR, "rte_flow_isolate failed for member"
					" %d: %s", member_id, ferror.message ?
					ferror.message : "(no stated reason)");
			return -1;
		}
	}
	TAILQ_FOREACH(flow, &internals->flow_list, next) {
		flow->flows[member_id] = rte_flow_create(member_port_id,
				flow->rule.attr,
				flow->rule.pattern,
				flow->rule.actions,
				&ferror);
		if (flow->flows[member_id] == NULL) {
			RTE_BOND_LOG(ERR, "Cannot create flow for member"
					" %d: %s", member_id,
					ferror.message ? ferror.message :
					"(no stated reason)");
			/* Destroy successful bond flows from the member */
			TAILQ_FOREACH(flow, &internals->flow_list, next) {
				if (flow->flows[member_id] != NULL) {
					rte_flow_destroy(member_port_id,
							flow->flows[member_id],
							&ferror);
					flow->flows[member_id] = NULL;
				}
			}
			return -1;
		}
	}
	return 0;
}

static void
eth_bond_member_inherit_dev_info_rx_first(struct bond_dev_private *internals,
		const struct rte_eth_dev_info *di)
{
	struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;

	internals->reta_size = di->reta_size;
	internals->rss_key_len = di->hash_key_size;

	/* Inherit Rx offload capabilities from the first member device */
	internals->rx_offload_capa = di->rx_offload_capa;
	internals->rx_queue_offload_capa = di->rx_queue_offload_capa;
	internals->flow_type_rss_offloads = di->flow_type_rss_offloads;

	/* Inherit maximum Rx packet size from the first member device */
	internals->candidate_max_rx_pktlen = di->max_rx_pktlen;

	/* Inherit default Rx queue settings from the first member device */
	memcpy(rxconf_i, &di->default_rxconf, sizeof(*rxconf_i));

	/*
	 * Turn off descriptor prefetch and writeback by default for all
	 * member devices. Applications may tweak this setting if need be.
	 */
	rxconf_i->rx_thresh.pthresh = 0;
	rxconf_i->rx_thresh.hthresh = 0;
	rxconf_i->rx_thresh.wthresh = 0;

	/* Setting this to zero should effectively enable default values */
	rxconf_i->rx_free_thresh = 0;

	/* Disable deferred start by default for all member devices */
	rxconf_i->rx_deferred_start = 0;
}

static void
eth_bond_member_inherit_dev_info_tx_first(struct bond_dev_private *internals,
		const struct rte_eth_dev_info *di)
{
	struct rte_eth_txconf *txconf_i = &internals->default_txconf;

	/* Inherit Tx offload capabilities from the first member device */
	internals->tx_offload_capa = di->tx_offload_capa;
	internals->tx_queue_offload_capa = di->tx_queue_offload_capa;

	/* Inherit default Tx queue settings from the first member device */
	memcpy(txconf_i, &di->default_txconf, sizeof(*txconf_i));

	/*
	 * Turn off descriptor prefetch and writeback by default for all
	 * member devices. Applications may tweak this setting if need be.
	 */
	txconf_i->tx_thresh.pthresh = 0;
	txconf_i->tx_thresh.hthresh = 0;
	txconf_i->tx_thresh.wthresh = 0;

	/*
	 * Setting these parameters to zero assumes that default
	 * values will be configured implicitly by member devices.
	 */
	txconf_i->tx_free_thresh = 0;
	txconf_i->tx_rs_thresh = 0;

	/* Disable deferred start by default for all member devices */
	txconf_i->tx_deferred_start = 0;
}

static void
eth_bond_member_inherit_dev_info_rx_next(struct bond_dev_private *internals,
		const struct rte_eth_dev_info *di)
{
	struct rte_eth_rxconf *rxconf_i = &internals->default_rxconf;
	const struct rte_eth_rxconf *rxconf = &di->default_rxconf;

	internals->rx_offload_capa &= di->rx_offload_capa;
	internals->rx_queue_offload_capa &= di->rx_queue_offload_capa;
	internals->flow_type_rss_offloads &= di->flow_type_rss_offloads;

	/*
	 * If at least one member device suggests enabling this
	 * setting by default, enable it for all member devices
	 * since disabling it may not be necessarily supported.
	 */
	if (rxconf->rx_drop_en == 1)
		rxconf_i->rx_drop_en = 1;

	/*
	 * Adding a new member device may cause some of previously inherited
	 * offloads to be withdrawn from the internal rx_queue_offload_capa
	 * value. Thus, the new internal value of default Rx queue offloads
	 * has to be masked by rx_queue_offload_capa to make sure that only
	 * commonly supported offloads are preserved from both the previous
	 * value and the value being inherited from the new member device.
	 */
	rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
			internals->rx_queue_offload_capa;

	/*
	 * RETA size is the GCD of all member RETA sizes; if all sizes are
	 * powers of 2, the smaller one is the GCD.
	 */
	if (internals->reta_size > di->reta_size)
		internals->reta_size = di->reta_size;
	if (internals->rss_key_len > di->hash_key_size) {
		RTE_BOND_LOG(WARNING, "member has different rss key size, "
				"configuring rss may fail");
		internals->rss_key_len = di->hash_key_size;
	}

	if (!internals->max_rx_pktlen &&
	    di->max_rx_pktlen < internals->candidate_max_rx_pktlen)
		internals->candidate_max_rx_pktlen = di->max_rx_pktlen;
}

static void
eth_bond_member_inherit_dev_info_tx_next(struct bond_dev_private *internals,
		const struct rte_eth_dev_info *di)
{
	struct rte_eth_txconf *txconf_i = &internals->default_txconf;
	const struct rte_eth_txconf *txconf = &di->default_txconf;

	internals->tx_offload_capa &= di->tx_offload_capa;
	internals->tx_queue_offload_capa &= di->tx_queue_offload_capa;

	/*
	 * Adding a new member device may cause some of previously inherited
	 * offloads to be withdrawn from the internal tx_queue_offload_capa
	 * value. Thus, the new internal value of default Tx queue offloads
	 * has to be masked by tx_queue_offload_capa to make sure that only
	 * commonly supported offloads are preserved from both the previous
	 * value and the value being inherited from the new member device.
	 */
	txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
			internals->tx_queue_offload_capa;
}

static void
eth_bond_member_inherit_desc_lim_first(struct rte_eth_desc_lim *bond_desc_lim,
		const struct rte_eth_desc_lim *member_desc_lim)
{
	memcpy(bond_desc_lim, member_desc_lim, sizeof(*bond_desc_lim));
}

static int
eth_bond_member_inherit_desc_lim_next(struct rte_eth_desc_lim *bond_desc_lim,
		const struct rte_eth_desc_lim *member_desc_lim)
{
	bond_desc_lim->nb_max = RTE_MIN(bond_desc_lim->nb_max,
			member_desc_lim->nb_max);
	bond_desc_lim->nb_min = RTE_MAX(bond_desc_lim->nb_min,
			member_desc_lim->nb_min);
	bond_desc_lim->nb_align = RTE_MAX(bond_desc_lim->nb_align,
			member_desc_lim->nb_align);

	if (bond_desc_lim->nb_min > bond_desc_lim->nb_max ||
	    bond_desc_lim->nb_align > bond_desc_lim->nb_max) {
		RTE_BOND_LOG(ERR, "Failed to inherit descriptor limits");
		return -EINVAL;
	}

	/* Treat maximum number of segments equal to 0 as unspecified */
	if (member_desc_lim->nb_seg_max != 0 &&
	    (bond_desc_lim->nb_seg_max == 0 ||
	     member_desc_lim->nb_seg_max < bond_desc_lim->nb_seg_max))
		bond_desc_lim->nb_seg_max = member_desc_lim->nb_seg_max;
	if (member_desc_lim->nb_mtu_seg_max != 0 &&
	    (bond_desc_lim->nb_mtu_seg_max == 0 ||
	     member_desc_lim->nb_mtu_seg_max < bond_desc_lim->nb_mtu_seg_max))
		bond_desc_lim->nb_mtu_seg_max = member_desc_lim->nb_mtu_seg_max;

	return 0;
}

static int
__eth_bond_member_add_lock_free(uint16_t bonding_port_id, uint16_t member_port_id)
{
	struct rte_eth_dev *bonding_eth_dev, *member_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link_props;
	struct rte_eth_dev_info dev_info;
	int ret;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	if (valid_member_port_id(internals, member_port_id) != 0)
		return -1;

	member_eth_dev = &rte_eth_devices[member_port_id];
	if (member_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDING_MEMBER) {
		RTE_BOND_LOG(ERR, "Member device is already a member of a bonding device");
		return -1;
	}

	ret = rte_eth_dev_info_get(member_port_id, &dev_info);
	if (ret != 0) {
		RTE_BOND_LOG(ERR,
			"%s: Error during getting device (port %u) info: %s",
			__func__, member_port_id, strerror(-ret));

		return ret;
	}
	if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
		RTE_BOND_LOG(ERR, "Member (port %u) max_rx_pktlen too small",
				member_port_id);
		return -1;
	}

	member_add(internals, member_eth_dev);

	/* We need to store the members' reta_size to be able to synchronize
	 * RETA for all member devices even if their sizes are different.
	 */
	internals->members[internals->member_count].reta_size = dev_info.reta_size;

	if (internals->member_count < 1) {
		/*
		 * If MAC is not user defined then use the MAC of the first
		 * member added to the bonding device.
		 */
		if (!internals->user_defined_mac) {
			if (mac_address_set(bonding_eth_dev,
					member_eth_dev->data->mac_addrs)) {
				RTE_BOND_LOG(ERR, "Failed to set MAC address");
				return -1;
			}
		}

		/* Make primary member */
		internals->primary_port = member_port_id;
		internals->current_primary_port = member_port_id;

		internals->speed_capa = dev_info.speed_capa;

		/* Inherit queues settings from first member */
		internals->nb_rx_queues = member_eth_dev->data->nb_rx_queues;
		internals->nb_tx_queues = member_eth_dev->data->nb_tx_queues;

		eth_bond_member_inherit_dev_info_rx_first(internals, &dev_info);
		eth_bond_member_inherit_dev_info_tx_first(internals, &dev_info);

		eth_bond_member_inherit_desc_lim_first(&internals->rx_desc_lim,
				&dev_info.rx_desc_lim);
		eth_bond_member_inherit_desc_lim_first(&internals->tx_desc_lim,
				&dev_info.tx_desc_lim);
	} else {
		int ret;

		internals->speed_capa &= dev_info.speed_capa;
		eth_bond_member_inherit_dev_info_rx_next(internals, &dev_info);
		eth_bond_member_inherit_dev_info_tx_next(internals, &dev_info);

		ret = eth_bond_member_inherit_desc_lim_next(&internals->rx_desc_lim,
				&dev_info.rx_desc_lim);
		if (ret != 0)
			return ret;

		ret = eth_bond_member_inherit_desc_lim_next(&internals->tx_desc_lim,
				&dev_info.tx_desc_lim);
		if (ret != 0)
			return ret;
	}

	/* Bond mode Broadcast & 8023AD don't support MBUF_FAST_FREE offload. */
	if (internals->mode == BONDING_MODE_8023AD ||
	    internals->mode == BONDING_MODE_BROADCAST)
		internals->tx_offload_capa &= ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	bonding_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
			internals->flow_type_rss_offloads;

	if (member_rte_flow_prepare(internals->member_count, internals) != 0) {
		RTE_BOND_LOG(ERR, "Failed to prepare new member flows: port=%d",
				member_port_id);
		return -1;
	}

	/* Add additional MAC addresses to the member */
	if (member_add_mac_addresses(bonding_eth_dev, member_port_id) != 0) {
		RTE_BOND_LOG(ERR, "Failed to add mac address(es) to member %hu",
				member_port_id);
		return -1;
	}

	internals->member_count++;

	if (bonding_eth_dev->data->dev_started) {
		if (member_configure(bonding_eth_dev, member_eth_dev) != 0) {
			internals->member_count--;
			RTE_BOND_LOG(ERR, "rte_bond_members_configure: port=%d",
					member_port_id);
			return -1;
		}
		if (member_start(bonding_eth_dev, member_eth_dev) != 0) {
			internals->member_count--;
			RTE_BOND_LOG(ERR, "rte_bond_members_start: port=%d",
					member_port_id);
			return -1;
		}
	}

	/* Update all member devices MACs */
	mac_address_members_update(bonding_eth_dev);

	/*
	 * Register link status change callback with bonding device pointer as
	 * argument.
	 */
	rte_eth_dev_callback_register(member_port_id, RTE_ETH_EVENT_INTR_LSC,
		bond_ethdev_lsc_event_callback, &bonding_eth_dev->data->port_id);

	/*
	 * If bonding device is started then we can add the member to our active
	 * member array.
	 */
	if (bonding_eth_dev->data->dev_started) {
		ret = rte_eth_link_get_nowait(member_port_id, &link_props);
		if (ret < 0) {
			rte_eth_dev_callback_unregister(member_port_id,
					RTE_ETH_EVENT_INTR_LSC,
					bond_ethdev_lsc_event_callback,
					&bonding_eth_dev->data->port_id);
			internals->member_count--;
			RTE_BOND_LOG(ERR,
				"Member (port %u) link get failed: %s",
				member_port_id, rte_strerror(-ret));
			return -1;
		}

		if (link_props.link_status == RTE_ETH_LINK_UP) {
			if (internals->active_member_count == 0 &&
					!internals->user_defined_primary_port)
				bond_ethdev_primary_set(internals,
						member_port_id);
		}
	}

	/* Add member details to bonding device */
	member_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDING_MEMBER;

	member_vlan_filter_set(bonding_port_id, member_port_id);

	return 0;
}

int
rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t member_port_id)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;

	int retval;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	if (valid_member_port_id(internals, member_port_id) != 0)
		return -1;

	rte_spinlock_lock(&internals->lock);

	retval = __eth_bond_member_add_lock_free(bonding_port_id, member_port_id);

	rte_spinlock_unlock(&internals->lock);

	return retval;
}
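
/*
 * Illustrative usage sketch (not part of the driver logic): attach two
 * already-probed ports to a bonding port created earlier.  The port numbers
 * and the "bond_port" variable are example values from the caller; each call
 * takes the bonding device lock internally.
 *
 *	uint16_t member_ports[] = { 0, 1 };
 *	unsigned int i;
 *
 *	for (i = 0; i < RTE_DIM(member_ports); i++) {
 *		if (rte_eth_bond_member_add(bond_port, member_ports[i]) != 0)
 *			rte_exit(EXIT_FAILURE, "Failed to add member %u\n",
 *					member_ports[i]);
 *	}
 */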

static int
__eth_bond_member_remove_lock_free(uint16_t bonding_port_id,
		uint16_t member_port_id)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_dev *member_eth_dev;
	struct rte_flow_error flow_error;
	struct rte_flow *flow;
	int i, member_idx;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	if (valid_member_port_id(internals, member_port_id) < 0)
		return -1;

	/* first remove from active member list */
	member_idx = find_member_by_id(internals->active_members,
		internals->active_member_count, member_port_id);

	if (member_idx < internals->active_member_count)
		deactivate_member(bonding_eth_dev, member_port_id);

	member_idx = -1;
	/* now find in member list */
	for (i = 0; i < internals->member_count; i++)
		if (internals->members[i].port_id == member_port_id) {
			member_idx = i;
			break;
		}

	if (member_idx < 0) {
		RTE_BOND_LOG(ERR, "Could not find member in port list, member count %u",
				internals->member_count);
		return -1;
	}

	/* Un-register link status change callback with bonding device pointer as
	 * argument */
	rte_eth_dev_callback_unregister(member_port_id, RTE_ETH_EVENT_INTR_LSC,
			bond_ethdev_lsc_event_callback,
			&rte_eth_devices[bonding_port_id].data->port_id);

	/* Restore original MAC address of member device */
	rte_eth_dev_default_mac_addr_set(member_port_id,
			&internals->members[member_idx].persisted_mac_addr);

	/* remove additional MAC addresses from the member */
	member_remove_mac_addresses(bonding_eth_dev, member_port_id);

	/*
	 * Remove bond device flows from member device.
	 * Note: don't restore flow isolate mode.
	 */
	TAILQ_FOREACH(flow, &internals->flow_list, next) {
		if (flow->flows[member_idx] != NULL) {
			rte_flow_destroy(member_port_id, flow->flows[member_idx],
					&flow_error);
			flow->flows[member_idx] = NULL;
		}
	}

	/* Remove the dedicated queues flow */
	if (internals->mode == BONDING_MODE_8023AD &&
	    internals->mode4.dedicated_queues.enabled == 1 &&
	    internals->mode4.dedicated_queues.flow[member_port_id] != NULL) {
		rte_flow_destroy(member_port_id,
				internals->mode4.dedicated_queues.flow[member_port_id],
				&flow_error);
		internals->mode4.dedicated_queues.flow[member_port_id] = NULL;
	}

	member_eth_dev = &rte_eth_devices[member_port_id];
	member_remove(internals, member_eth_dev);
	member_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDING_MEMBER);

	/* first member in the active list will be the primary by default,
	 * otherwise use first device in list */
	if (internals->current_primary_port == member_port_id) {
		if (internals->active_member_count > 0)
			internals->current_primary_port = internals->active_members[0];
		else if (internals->member_count > 0)
			internals->current_primary_port = internals->members[0].port_id;
		else
			internals->primary_port = 0;
		mac_address_members_update(bonding_eth_dev);
	}

	if (internals->active_member_count < 1) {
		/*
		 * If no members remain attached to the bonding device and the
		 * MAC is not user defined, clear the MAC of the bonding device
		 * as it will be reset when a new member is added.
		 */
		if (internals->member_count < 1 && !internals->user_defined_mac)
			memset(rte_eth_devices[bonding_port_id].data->mac_addrs, 0,
					sizeof(*rte_eth_devices[bonding_port_id].data->mac_addrs));
	}
	if (internals->member_count == 0) {
		internals->rx_offload_capa = 0;
		internals->tx_offload_capa = 0;
		internals->rx_queue_offload_capa = 0;
		internals->tx_queue_offload_capa = 0;
		internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
		internals->reta_size = 0;
		internals->candidate_max_rx_pktlen = 0;
		internals->max_rx_pktlen = 0;
	}
	return 0;
}

int
rte_eth_bond_member_remove(uint16_t bonding_port_id, uint16_t member_port_id)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;
	int retval;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	rte_spinlock_lock(&internals->lock);

	retval = __eth_bond_member_remove_lock_free(bonding_port_id, member_port_id);

	rte_spinlock_unlock(&internals->lock);

	return retval;
}

int
rte_eth_bond_mode_set(uint16_t bonding_port_id, uint8_t mode)
{
	struct rte_eth_dev *bonding_eth_dev;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];

	if (check_for_main_bonding_ethdev(bonding_eth_dev) != 0 &&
			mode == BONDING_MODE_8023AD)
		return -1;

	return bond_ethdev_mode_set(bonding_eth_dev, mode);
}

int
rte_eth_bond_mode_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	return internals->mode;
}

int
rte_eth_bond_primary_set(uint16_t bonding_port_id, uint16_t member_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	if (valid_member_port_id(internals, member_port_id) != 0)
		return -1;

	internals->user_defined_primary_port = 1;
	internals->primary_port = member_port_id;

	bond_ethdev_primary_set(internals, member_port_id);

	return 0;
}

int
rte_eth_bond_primary_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	if (internals->member_count < 1)
		return -1;

	return internals->current_primary_port;
}

int
rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t members[],
		uint16_t len)
{
	struct bond_dev_private *internals;
	uint16_t i;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	if (members == NULL)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	if (internals->member_count > len)
		return -1;

	for (i = 0; i < internals->member_count; i++)
		members[i] = internals->members[i].port_id;

	return internals->member_count;
}
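
/*
 * Illustrative usage sketch (not part of the driver logic): query the
 * configured members of a bonding port.  The call returns -1 when the port
 * is not a bonding device or the supplied array is too small, otherwise the
 * number of members written.  "bond_port" is assumed to be a valid bonding
 * port id.
 *
 *	uint16_t members[RTE_MAX_ETHPORTS];
 *	int count;
 *
 *	count = rte_eth_bond_members_get(bond_port, members, RTE_DIM(members));
 *	if (count < 0)
 *		printf("not a bonding port, or members[] too small\n");
 *	else
 *		printf("%d member(s) configured\n", count);
 */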

int
rte_eth_bond_active_members_get(uint16_t bonding_port_id, uint16_t members[],
		uint16_t len)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	if (members == NULL)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	if (internals->active_member_count > len)
		return -1;

	memcpy(members, internals->active_members,
		internals->active_member_count * sizeof(internals->active_members[0]));

	return internals->active_member_count;
}

int
rte_eth_bond_mac_address_set(uint16_t bonding_port_id,
		struct rte_ether_addr *mac_addr)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	/* Set MAC Address of Bonding Device */
	if (mac_address_set(bonding_eth_dev, mac_addr))
		return -1;

	internals->user_defined_mac = 1;

	/* Update all member devices MACs */
	if (internals->member_count > 0)
		return mac_address_members_update(bonding_eth_dev);

	return 0;
}

int
rte_eth_bond_mac_address_reset(uint16_t bonding_port_id)
{
	struct rte_eth_dev *bonding_eth_dev;
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	bonding_eth_dev = &rte_eth_devices[bonding_port_id];
	internals = bonding_eth_dev->data->dev_private;

	internals->user_defined_mac = 0;

	if (internals->member_count > 0) {
		int member_port;
		/* Locate the primary member by its port number: member_add()
		 * orders the members[] array by member_count, not by the
		 * primary port.
		 */
		for (member_port = 0; member_port < internals->member_count;
				member_port++) {
			if (internals->members[member_port].port_id ==
					internals->primary_port)
				break;
		}

		/* Set MAC Address of Bonding Device */
		if (mac_address_set(bonding_eth_dev,
				&internals->members[member_port].persisted_mac_addr)
				!= 0) {
			RTE_BOND_LOG(ERR, "Failed to set MAC address on bonding device");
			return -1;
		}
		/* Update all member devices MAC addresses */
		return mac_address_members_update(bonding_eth_dev);
	}
	/* No need to update anything as no members present */
	return 0;
}

int
rte_eth_bond_xmit_policy_set(uint16_t bonding_port_id, uint8_t policy)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	switch (policy) {
	case BALANCE_XMIT_POLICY_LAYER2:
		internals->balance_xmit_policy = policy;
		internals->burst_xmit_hash = burst_xmit_l2_hash;
		break;
	case BALANCE_XMIT_POLICY_LAYER23:
		internals->balance_xmit_policy = policy;
		internals->burst_xmit_hash = burst_xmit_l23_hash;
		break;
	case BALANCE_XMIT_POLICY_LAYER34:
		internals->balance_xmit_policy = policy;
		internals->burst_xmit_hash = burst_xmit_l34_hash;
		break;

	default:
		return -1;
	}
	return 0;
}

int
rte_eth_bond_xmit_policy_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	return internals->balance_xmit_policy;
}
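
/*
 * Illustrative usage sketch (not part of the driver logic): select the
 * layer 3+4 transmit hashing policy for a balance or 802.3AD bonding port.
 * The policy value is an example choice and "bond_port" is assumed to be a
 * valid bonding port id.
 *
 *	if (rte_eth_bond_xmit_policy_set(bond_port,
 *			BALANCE_XMIT_POLICY_LAYER34) != 0)
 *		printf("failed to set xmit policy\n");
 *
 *	int policy = rte_eth_bond_xmit_policy_get(bond_port);
 */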

int
rte_eth_bond_link_monitoring_set(uint16_t bonding_port_id, uint32_t internal_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;
	internals->link_status_polling_interval_ms = internal_ms;

	return 0;
}

int
rte_eth_bond_link_monitoring_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	return internals->link_status_polling_interval_ms;
}

int
rte_eth_bond_link_down_prop_delay_set(uint16_t bonding_port_id,
		uint32_t delay_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;
	internals->link_down_delay_ms = delay_ms;

	return 0;
}

int
rte_eth_bond_link_down_prop_delay_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	return internals->link_down_delay_ms;
}

int
rte_eth_bond_link_up_prop_delay_set(uint16_t bonding_port_id, uint32_t delay_ms)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;
	internals->link_up_delay_ms = delay_ms;

	return 0;
}

int
rte_eth_bond_link_up_prop_delay_get(uint16_t bonding_port_id)
{
	struct bond_dev_private *internals;

	if (valid_bonding_port_id(bonding_port_id) != 0)
		return -1;

	internals = rte_eth_devices[bonding_port_id].data->dev_private;

	return internals->link_up_delay_ms;
}
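
/*
 * Illustrative teardown sketch (not part of the driver logic): detach all
 * members and release a bonding device created with rte_eth_bond_create().
 * "bond_port" and the device name "net_bonding0" are example values carried
 * over from the creation sketch earlier in this file.
 *
 *	uint16_t members[RTE_MAX_ETHPORTS];
 *	int i, count;
 *
 *	if (rte_eth_dev_stop(bond_port) != 0)
 *		printf("failed to stop bonding port\n");
 *	count = rte_eth_bond_members_get(bond_port, members, RTE_DIM(members));
 *	for (i = 0; i < count; i++)
 *		rte_eth_bond_member_remove(bond_port, members[i]);
 *	rte_eth_bond_free("net_bonding0");
 */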