1009c327cSOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 2ebea83f8SGaetan Rivet * Copyright 2017 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2017 Mellanox Technologies, Ltd 4ebea83f8SGaetan Rivet */ 5ebea83f8SGaetan Rivet 6ebea83f8SGaetan Rivet #include <unistd.h> 7ebea83f8SGaetan Rivet 8b737a1eeSGaetan Rivet #include <rte_flow.h> 9b737a1eeSGaetan Rivet #include <rte_flow_driver.h> 109dda3e33SMatan Azrad #include <rte_cycles.h> 11b737a1eeSGaetan Rivet 12ebea83f8SGaetan Rivet #include "failsafe_private.h" 13ebea83f8SGaetan Rivet 14b737a1eeSGaetan Rivet /** Print a message out of a flow error. */ 15b737a1eeSGaetan Rivet static int 16b737a1eeSGaetan Rivet fs_flow_complain(struct rte_flow_error *error) 17b737a1eeSGaetan Rivet { 18b737a1eeSGaetan Rivet static const char *const errstrlist[] = { 19b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_NONE] = "no error", 20b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", 21b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", 22b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", 23b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", 24b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", 25b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", 26b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", 27b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", 28b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", 29b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", 30b737a1eeSGaetan Rivet [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", 31b737a1eeSGaetan Rivet }; 32b737a1eeSGaetan Rivet const char *errstr; 33b737a1eeSGaetan Rivet char buf[32]; 34b737a1eeSGaetan Rivet int err = rte_errno; 35b737a1eeSGaetan Rivet 36b737a1eeSGaetan Rivet if ((unsigned int)error->type >= RTE_DIM(errstrlist) || 37b737a1eeSGaetan Rivet !errstrlist[error->type]) 38b737a1eeSGaetan Rivet errstr = "unknown type"; 39b737a1eeSGaetan Rivet else 40b737a1eeSGaetan Rivet errstr = errstrlist[error->type]; 41b737a1eeSGaetan Rivet ERROR("Caught error type %d (%s): %s%s\n", 42b737a1eeSGaetan Rivet error->type, errstr, 43b737a1eeSGaetan Rivet error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", 44b737a1eeSGaetan Rivet error->cause), buf) : "", 45b737a1eeSGaetan Rivet error->message ? error->message : "(no stated reason)"); 46b737a1eeSGaetan Rivet return -err; 47b737a1eeSGaetan Rivet } 48b737a1eeSGaetan Rivet 49ebea83f8SGaetan Rivet static int 502cc52cd7SGaetan Rivet eth_dev_flow_isolate_set(struct rte_eth_dev *dev, 512cc52cd7SGaetan Rivet struct sub_device *sdev) 522cc52cd7SGaetan Rivet { 532cc52cd7SGaetan Rivet struct rte_flow_error ferror; 542cc52cd7SGaetan Rivet int ret; 552cc52cd7SGaetan Rivet 562cc52cd7SGaetan Rivet if (!PRIV(dev)->flow_isolated) { 572cc52cd7SGaetan Rivet DEBUG("Flow isolation already disabled"); 582cc52cd7SGaetan Rivet } else { 592cc52cd7SGaetan Rivet DEBUG("Enabling flow isolation"); 602cc52cd7SGaetan Rivet ret = rte_flow_isolate(PORT_ID(sdev), 612cc52cd7SGaetan Rivet PRIV(dev)->flow_isolated, 622cc52cd7SGaetan Rivet &ferror); 632cc52cd7SGaetan Rivet if (ret) { 642cc52cd7SGaetan Rivet fs_flow_complain(&ferror); 652cc52cd7SGaetan Rivet return ret; 662cc52cd7SGaetan Rivet } 672cc52cd7SGaetan Rivet } 682cc52cd7SGaetan Rivet return 0; 692cc52cd7SGaetan Rivet } 702cc52cd7SGaetan Rivet 712cc52cd7SGaetan Rivet static int 72ebea83f8SGaetan Rivet fs_eth_dev_conf_apply(struct rte_eth_dev *dev, 73ebea83f8SGaetan Rivet struct sub_device *sdev) 74ebea83f8SGaetan Rivet { 75ebea83f8SGaetan Rivet struct rte_eth_dev *edev; 76ebea83f8SGaetan Rivet struct rte_vlan_filter_conf *vfc1; 77ebea83f8SGaetan Rivet struct rte_vlan_filter_conf *vfc2; 78b737a1eeSGaetan Rivet struct rte_flow *flow; 79b737a1eeSGaetan Rivet struct rte_flow_error ferror; 80ebea83f8SGaetan Rivet uint32_t i; 81ebea83f8SGaetan Rivet int ret; 82ebea83f8SGaetan Rivet 83ebea83f8SGaetan Rivet edev = ETH(sdev); 84ebea83f8SGaetan Rivet /* RX queue setup */ 85ebea83f8SGaetan Rivet for (i = 0; i < dev->data->nb_rx_queues; i++) { 86ebea83f8SGaetan Rivet struct rxq *rxq; 87ebea83f8SGaetan Rivet 88ebea83f8SGaetan Rivet rxq = dev->data->rx_queues[i]; 89ebea83f8SGaetan Rivet ret = rte_eth_rx_queue_setup(PORT_ID(sdev), i, 90ebea83f8SGaetan Rivet rxq->info.nb_desc, rxq->socket_id, 91ebea83f8SGaetan Rivet &rxq->info.conf, rxq->info.mp); 92ebea83f8SGaetan Rivet if (ret) { 93ebea83f8SGaetan Rivet ERROR("rx_queue_setup failed"); 94ebea83f8SGaetan Rivet return ret; 95ebea83f8SGaetan Rivet } 96ebea83f8SGaetan Rivet } 97ebea83f8SGaetan Rivet /* TX queue setup */ 98ebea83f8SGaetan Rivet for (i = 0; i < dev->data->nb_tx_queues; i++) { 99ebea83f8SGaetan Rivet struct txq *txq; 100ebea83f8SGaetan Rivet 101ebea83f8SGaetan Rivet txq = dev->data->tx_queues[i]; 102ebea83f8SGaetan Rivet ret = rte_eth_tx_queue_setup(PORT_ID(sdev), i, 103ebea83f8SGaetan Rivet txq->info.nb_desc, txq->socket_id, 104ebea83f8SGaetan Rivet &txq->info.conf); 105ebea83f8SGaetan Rivet if (ret) { 106ebea83f8SGaetan Rivet ERROR("tx_queue_setup failed"); 107ebea83f8SGaetan Rivet return ret; 108ebea83f8SGaetan Rivet } 109ebea83f8SGaetan Rivet } 110ebea83f8SGaetan Rivet /* dev_link.link_status */ 111ebea83f8SGaetan Rivet if (dev->data->dev_link.link_status != 112ebea83f8SGaetan Rivet edev->data->dev_link.link_status) { 113ebea83f8SGaetan Rivet DEBUG("Configuring link_status"); 114ebea83f8SGaetan Rivet if (dev->data->dev_link.link_status) 115ebea83f8SGaetan Rivet ret = rte_eth_dev_set_link_up(PORT_ID(sdev)); 116ebea83f8SGaetan Rivet else 117ebea83f8SGaetan Rivet ret = rte_eth_dev_set_link_down(PORT_ID(sdev)); 118ebea83f8SGaetan Rivet if (ret) { 119ebea83f8SGaetan Rivet ERROR("Failed to apply link_status"); 120ebea83f8SGaetan Rivet return ret; 121ebea83f8SGaetan Rivet } 122ebea83f8SGaetan Rivet } else { 123ebea83f8SGaetan Rivet DEBUG("link_status already set"); 124ebea83f8SGaetan Rivet } 125ebea83f8SGaetan Rivet /* promiscuous */ 126ebea83f8SGaetan Rivet if (dev->data->promiscuous != edev->data->promiscuous) { 127ebea83f8SGaetan Rivet DEBUG("Configuring promiscuous"); 128ebea83f8SGaetan Rivet if (dev->data->promiscuous) 129ebea83f8SGaetan Rivet rte_eth_promiscuous_enable(PORT_ID(sdev)); 130ebea83f8SGaetan Rivet else 131ebea83f8SGaetan Rivet rte_eth_promiscuous_disable(PORT_ID(sdev)); 132ebea83f8SGaetan Rivet } else { 133ebea83f8SGaetan Rivet DEBUG("promiscuous already set"); 134ebea83f8SGaetan Rivet } 135ebea83f8SGaetan Rivet /* all_multicast */ 136ebea83f8SGaetan Rivet if (dev->data->all_multicast != edev->data->all_multicast) { 137ebea83f8SGaetan Rivet DEBUG("Configuring all_multicast"); 138ebea83f8SGaetan Rivet if (dev->data->all_multicast) 139ebea83f8SGaetan Rivet rte_eth_allmulticast_enable(PORT_ID(sdev)); 140ebea83f8SGaetan Rivet else 141ebea83f8SGaetan Rivet rte_eth_allmulticast_disable(PORT_ID(sdev)); 142ebea83f8SGaetan Rivet } else { 143ebea83f8SGaetan Rivet DEBUG("all_multicast already set"); 144ebea83f8SGaetan Rivet } 145ebea83f8SGaetan Rivet /* MTU */ 146ebea83f8SGaetan Rivet if (dev->data->mtu != edev->data->mtu) { 147ebea83f8SGaetan Rivet DEBUG("Configuring MTU"); 148ebea83f8SGaetan Rivet ret = rte_eth_dev_set_mtu(PORT_ID(sdev), dev->data->mtu); 149ebea83f8SGaetan Rivet if (ret) { 150ebea83f8SGaetan Rivet ERROR("Failed to apply MTU"); 151ebea83f8SGaetan Rivet return ret; 152ebea83f8SGaetan Rivet } 153ebea83f8SGaetan Rivet } else { 154ebea83f8SGaetan Rivet DEBUG("MTU already set"); 155ebea83f8SGaetan Rivet } 156ebea83f8SGaetan Rivet /* default MAC */ 157ebea83f8SGaetan Rivet DEBUG("Configuring default MAC address"); 158ebea83f8SGaetan Rivet ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), 159ebea83f8SGaetan Rivet &dev->data->mac_addrs[0]); 160ebea83f8SGaetan Rivet if (ret) { 161ebea83f8SGaetan Rivet ERROR("Setting default MAC address failed"); 162ebea83f8SGaetan Rivet return ret; 163ebea83f8SGaetan Rivet } 164ebea83f8SGaetan Rivet /* additional MAC */ 165ebea83f8SGaetan Rivet if (PRIV(dev)->nb_mac_addr > 1) 166ebea83f8SGaetan Rivet DEBUG("Configure additional MAC address%s", 167ebea83f8SGaetan Rivet (PRIV(dev)->nb_mac_addr > 2 ? "es" : "")); 168ebea83f8SGaetan Rivet for (i = 1; i < PRIV(dev)->nb_mac_addr; i++) { 169ebea83f8SGaetan Rivet struct ether_addr *ea; 170ebea83f8SGaetan Rivet 171ebea83f8SGaetan Rivet ea = &dev->data->mac_addrs[i]; 172ebea83f8SGaetan Rivet ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea, 173ebea83f8SGaetan Rivet PRIV(dev)->mac_addr_pool[i]); 174ebea83f8SGaetan Rivet if (ret) { 175ebea83f8SGaetan Rivet char ea_fmt[ETHER_ADDR_FMT_SIZE]; 176ebea83f8SGaetan Rivet 177ebea83f8SGaetan Rivet ether_format_addr(ea_fmt, ETHER_ADDR_FMT_SIZE, ea); 178ebea83f8SGaetan Rivet ERROR("Adding MAC address %s failed", ea_fmt); 179ae7cb246SMatan Azrad return ret; 180ebea83f8SGaetan Rivet } 181ebea83f8SGaetan Rivet } 182901efc0dSEvgeny Im /* 183901efc0dSEvgeny Im * Propagate multicast MAC addresses to sub-devices, 184901efc0dSEvgeny Im * if non zero number of addresses is set. 185901efc0dSEvgeny Im * The condition is required to avoid breakage of failsafe 186901efc0dSEvgeny Im * for sub-devices which do not support the operation 187901efc0dSEvgeny Im * if the feature is really not used. 188901efc0dSEvgeny Im */ 189901efc0dSEvgeny Im if (PRIV(dev)->nb_mcast_addr > 0) { 190901efc0dSEvgeny Im DEBUG("Configuring multicast MAC addresses"); 191901efc0dSEvgeny Im ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev), 192901efc0dSEvgeny Im PRIV(dev)->mcast_addrs, 193901efc0dSEvgeny Im PRIV(dev)->nb_mcast_addr); 194901efc0dSEvgeny Im if (ret) { 195901efc0dSEvgeny Im ERROR("Failed to apply multicast MAC addresses"); 196901efc0dSEvgeny Im return ret; 197901efc0dSEvgeny Im } 198901efc0dSEvgeny Im } 199ebea83f8SGaetan Rivet /* VLAN filter */ 200ebea83f8SGaetan Rivet vfc1 = &dev->data->vlan_filter_conf; 201ebea83f8SGaetan Rivet vfc2 = &edev->data->vlan_filter_conf; 202ebea83f8SGaetan Rivet if (memcmp(vfc1, vfc2, sizeof(struct rte_vlan_filter_conf))) { 203ebea83f8SGaetan Rivet uint64_t vbit; 204ebea83f8SGaetan Rivet uint64_t ids; 205ebea83f8SGaetan Rivet size_t i; 206ebea83f8SGaetan Rivet uint16_t vlan_id; 207ebea83f8SGaetan Rivet 208ebea83f8SGaetan Rivet DEBUG("Configuring VLAN filter"); 209ebea83f8SGaetan Rivet for (i = 0; i < RTE_DIM(vfc1->ids); i++) { 210ebea83f8SGaetan Rivet if (vfc1->ids[i] == 0) 211ebea83f8SGaetan Rivet continue; 212ebea83f8SGaetan Rivet ids = vfc1->ids[i]; 213ebea83f8SGaetan Rivet while (ids) { 214ebea83f8SGaetan Rivet vlan_id = 64 * i; 215ebea83f8SGaetan Rivet /* count trailing zeroes */ 216ebea83f8SGaetan Rivet vbit = ~ids & (ids - 1); 217ebea83f8SGaetan Rivet /* clear least significant bit set */ 218ebea83f8SGaetan Rivet ids ^= (ids ^ (ids - 1)) ^ vbit; 219ebea83f8SGaetan Rivet for (; vbit; vlan_id++) 220ebea83f8SGaetan Rivet vbit >>= 1; 221ebea83f8SGaetan Rivet ret = rte_eth_dev_vlan_filter( 222ebea83f8SGaetan Rivet PORT_ID(sdev), vlan_id, 1); 223ebea83f8SGaetan Rivet if (ret) { 224ebea83f8SGaetan Rivet ERROR("Failed to apply VLAN filter %hu", 225ebea83f8SGaetan Rivet vlan_id); 226ebea83f8SGaetan Rivet return ret; 227ebea83f8SGaetan Rivet } 228ebea83f8SGaetan Rivet } 229ebea83f8SGaetan Rivet } 230ebea83f8SGaetan Rivet } else { 231ebea83f8SGaetan Rivet DEBUG("VLAN filter already set"); 232ebea83f8SGaetan Rivet } 233b737a1eeSGaetan Rivet /* rte_flow */ 234b737a1eeSGaetan Rivet if (TAILQ_EMPTY(&PRIV(dev)->flow_list)) { 235b737a1eeSGaetan Rivet DEBUG("rte_flow already set"); 236b737a1eeSGaetan Rivet } else { 237b737a1eeSGaetan Rivet DEBUG("Resetting rte_flow configuration"); 238b737a1eeSGaetan Rivet ret = rte_flow_flush(PORT_ID(sdev), &ferror); 239b737a1eeSGaetan Rivet if (ret) { 240b737a1eeSGaetan Rivet fs_flow_complain(&ferror); 241b737a1eeSGaetan Rivet return ret; 242b737a1eeSGaetan Rivet } 243b737a1eeSGaetan Rivet i = 0; 244b737a1eeSGaetan Rivet rte_errno = 0; 245b737a1eeSGaetan Rivet DEBUG("Configuring rte_flow"); 246b737a1eeSGaetan Rivet TAILQ_FOREACH(flow, &PRIV(dev)->flow_list, next) { 247b737a1eeSGaetan Rivet DEBUG("Creating flow #%" PRIu32, i++); 248b737a1eeSGaetan Rivet flow->flows[SUB_ID(sdev)] = 249b737a1eeSGaetan Rivet rte_flow_create(PORT_ID(sdev), 250*33fcf207SAdrien Mazarguil flow->rule.attr, 251*33fcf207SAdrien Mazarguil flow->rule.pattern, 252*33fcf207SAdrien Mazarguil flow->rule.actions, 253b737a1eeSGaetan Rivet &ferror); 254b737a1eeSGaetan Rivet ret = rte_errno; 255b737a1eeSGaetan Rivet if (ret) 256b737a1eeSGaetan Rivet break; 257b737a1eeSGaetan Rivet } 258b737a1eeSGaetan Rivet if (ret) { 259b737a1eeSGaetan Rivet fs_flow_complain(&ferror); 260b737a1eeSGaetan Rivet return ret; 261b737a1eeSGaetan Rivet } 262b737a1eeSGaetan Rivet } 263ebea83f8SGaetan Rivet return 0; 264ebea83f8SGaetan Rivet } 265ebea83f8SGaetan Rivet 266598fb8aeSGaetan Rivet static void 267598fb8aeSGaetan Rivet fs_dev_remove(struct sub_device *sdev) 268598fb8aeSGaetan Rivet { 269598fb8aeSGaetan Rivet int ret; 270598fb8aeSGaetan Rivet 271598fb8aeSGaetan Rivet if (sdev == NULL) 272598fb8aeSGaetan Rivet return; 273598fb8aeSGaetan Rivet switch (sdev->state) { 274598fb8aeSGaetan Rivet case DEV_STARTED: 275f234e5bdSMoti Haimovsky failsafe_rx_intr_uninstall_subdevice(sdev); 276598fb8aeSGaetan Rivet rte_eth_dev_stop(PORT_ID(sdev)); 277598fb8aeSGaetan Rivet sdev->state = DEV_ACTIVE; 278598fb8aeSGaetan Rivet /* fallthrough */ 279598fb8aeSGaetan Rivet case DEV_ACTIVE: 2800545c580SMatan Azrad failsafe_eth_dev_unregister_callbacks(sdev); 281598fb8aeSGaetan Rivet rte_eth_dev_close(PORT_ID(sdev)); 282598fb8aeSGaetan Rivet sdev->state = DEV_PROBED; 283598fb8aeSGaetan Rivet /* fallthrough */ 284598fb8aeSGaetan Rivet case DEV_PROBED: 285911462ebSThomas Monjalon ret = rte_dev_remove(sdev->dev); 286598fb8aeSGaetan Rivet if (ret) { 287598fb8aeSGaetan Rivet ERROR("Bus detach failed for sub_device %u", 288598fb8aeSGaetan Rivet SUB_ID(sdev)); 289598fb8aeSGaetan Rivet } else { 290fac0ae54SMatan Azrad rte_eth_dev_release_port(ETH(sdev)); 291598fb8aeSGaetan Rivet } 292598fb8aeSGaetan Rivet sdev->state = DEV_PARSED; 293598fb8aeSGaetan Rivet /* fallthrough */ 294598fb8aeSGaetan Rivet case DEV_PARSED: 295598fb8aeSGaetan Rivet case DEV_UNDEFINED: 296598fb8aeSGaetan Rivet sdev->state = DEV_UNDEFINED; 297598fb8aeSGaetan Rivet /* the end */ 298598fb8aeSGaetan Rivet break; 299598fb8aeSGaetan Rivet } 30082bae1eaSMatan Azrad sdev->remove = 0; 301598fb8aeSGaetan Rivet failsafe_hotplug_alarm_install(sdev->fs_dev); 302598fb8aeSGaetan Rivet } 303598fb8aeSGaetan Rivet 3046265ab51SMatan Azrad static void 3056265ab51SMatan Azrad fs_dev_stats_save(struct sub_device *sdev) 3066265ab51SMatan Azrad { 307321809bbSMatan Azrad struct rte_eth_stats stats; 308321809bbSMatan Azrad int err; 309321809bbSMatan Azrad 310321809bbSMatan Azrad /* Attempt to read current stats. */ 311321809bbSMatan Azrad err = rte_eth_stats_get(PORT_ID(sdev), &stats); 3129dda3e33SMatan Azrad if (err) { 3139dda3e33SMatan Azrad uint64_t timestamp = sdev->stats_snapshot.timestamp; 3149dda3e33SMatan Azrad 3159dda3e33SMatan Azrad WARN("Could not access latest statistics from sub-device %d.\n", 3169dda3e33SMatan Azrad SUB_ID(sdev)); 3179dda3e33SMatan Azrad if (timestamp != 0) 3189dda3e33SMatan Azrad WARN("Using latest snapshot taken before %"PRIu64" seconds.\n", 3199dda3e33SMatan Azrad (rte_rdtsc() - timestamp) / rte_get_tsc_hz()); 3209dda3e33SMatan Azrad } 3216265ab51SMatan Azrad failsafe_stats_increment(&PRIV(sdev->fs_dev)->stats_accumulator, 3229dda3e33SMatan Azrad err ? &sdev->stats_snapshot.stats : &stats); 3239dda3e33SMatan Azrad memset(&sdev->stats_snapshot, 0, sizeof(sdev->stats_snapshot)); 3246265ab51SMatan Azrad } 3256265ab51SMatan Azrad 326598fb8aeSGaetan Rivet static inline int 327598fb8aeSGaetan Rivet fs_rxtx_clean(struct sub_device *sdev) 328598fb8aeSGaetan Rivet { 329598fb8aeSGaetan Rivet uint16_t i; 330598fb8aeSGaetan Rivet 331598fb8aeSGaetan Rivet for (i = 0; i < ETH(sdev)->data->nb_rx_queues; i++) 332598fb8aeSGaetan Rivet if (FS_ATOMIC_RX(sdev, i)) 333598fb8aeSGaetan Rivet return 0; 334598fb8aeSGaetan Rivet for (i = 0; i < ETH(sdev)->data->nb_tx_queues; i++) 335598fb8aeSGaetan Rivet if (FS_ATOMIC_TX(sdev, i)) 336598fb8aeSGaetan Rivet return 0; 337598fb8aeSGaetan Rivet return 1; 338598fb8aeSGaetan Rivet } 339598fb8aeSGaetan Rivet 340598fb8aeSGaetan Rivet void 3410545c580SMatan Azrad failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev) 3420545c580SMatan Azrad { 3430545c580SMatan Azrad int ret; 3440545c580SMatan Azrad 3450545c580SMatan Azrad if (sdev == NULL) 3460545c580SMatan Azrad return; 3470545c580SMatan Azrad if (sdev->rmv_callback) { 3480545c580SMatan Azrad ret = rte_eth_dev_callback_unregister(PORT_ID(sdev), 3490545c580SMatan Azrad RTE_ETH_EVENT_INTR_RMV, 3500545c580SMatan Azrad failsafe_eth_rmv_event_callback, 3510545c580SMatan Azrad sdev); 3520545c580SMatan Azrad if (ret) 3530545c580SMatan Azrad WARN("Failed to unregister RMV callback for sub_device" 3540545c580SMatan Azrad " %d", SUB_ID(sdev)); 3550545c580SMatan Azrad sdev->rmv_callback = 0; 3560545c580SMatan Azrad } 3570545c580SMatan Azrad if (sdev->lsc_callback) { 3580545c580SMatan Azrad ret = rte_eth_dev_callback_unregister(PORT_ID(sdev), 3590545c580SMatan Azrad RTE_ETH_EVENT_INTR_LSC, 3600545c580SMatan Azrad failsafe_eth_lsc_event_callback, 3610545c580SMatan Azrad sdev); 3620545c580SMatan Azrad if (ret) 3630545c580SMatan Azrad WARN("Failed to unregister LSC callback for sub_device" 3640545c580SMatan Azrad " %d", SUB_ID(sdev)); 3650545c580SMatan Azrad sdev->lsc_callback = 0; 3660545c580SMatan Azrad } 3670545c580SMatan Azrad } 3680545c580SMatan Azrad 3690545c580SMatan Azrad void 370598fb8aeSGaetan Rivet failsafe_dev_remove(struct rte_eth_dev *dev) 371598fb8aeSGaetan Rivet { 372598fb8aeSGaetan Rivet struct sub_device *sdev; 373598fb8aeSGaetan Rivet uint8_t i; 374598fb8aeSGaetan Rivet 375598fb8aeSGaetan Rivet FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) 3766265ab51SMatan Azrad if (sdev->remove && fs_rxtx_clean(sdev)) { 377655fcd68SMatan Azrad if (fs_lock(dev, 1) != 0) 378655fcd68SMatan Azrad return; 3796265ab51SMatan Azrad fs_dev_stats_save(sdev); 380598fb8aeSGaetan Rivet fs_dev_remove(sdev); 381655fcd68SMatan Azrad fs_unlock(dev, 1); 382598fb8aeSGaetan Rivet } 3836265ab51SMatan Azrad } 384598fb8aeSGaetan Rivet 3853db7001eSIan Dolzhansky static int 3863db7001eSIan Dolzhansky failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev) 3873db7001eSIan Dolzhansky { 3883db7001eSIan Dolzhansky struct rxq *rxq; 3893db7001eSIan Dolzhansky int ret; 3903db7001eSIan Dolzhansky uint16_t i; 3913db7001eSIan Dolzhansky 3923db7001eSIan Dolzhansky for (i = 0; i < dev->data->nb_rx_queues; i++) { 3933db7001eSIan Dolzhansky rxq = dev->data->rx_queues[i]; 3943db7001eSIan Dolzhansky 3953db7001eSIan Dolzhansky if (rxq->info.conf.rx_deferred_start && 3963db7001eSIan Dolzhansky dev->data->rx_queue_state[i] == 3973db7001eSIan Dolzhansky RTE_ETH_QUEUE_STATE_STARTED) { 3983db7001eSIan Dolzhansky /* 3993db7001eSIan Dolzhansky * The subdevice Rx queue does not launch on device 4003db7001eSIan Dolzhansky * start if deferred start flag is set. It needs to be 4013db7001eSIan Dolzhansky * started manually in case an appropriate failsafe Rx 4023db7001eSIan Dolzhansky * queue has been started earlier. 4033db7001eSIan Dolzhansky */ 4043db7001eSIan Dolzhansky ret = dev->dev_ops->rx_queue_start(dev, i); 4053db7001eSIan Dolzhansky if (ret) { 4063db7001eSIan Dolzhansky ERROR("Could not synchronize Rx queue %d", i); 4073db7001eSIan Dolzhansky return ret; 4083db7001eSIan Dolzhansky } 4093db7001eSIan Dolzhansky } else if (dev->data->rx_queue_state[i] == 4103db7001eSIan Dolzhansky RTE_ETH_QUEUE_STATE_STOPPED) { 4113db7001eSIan Dolzhansky /* 4123db7001eSIan Dolzhansky * The subdevice Rx queue needs to be stopped manually 4133db7001eSIan Dolzhansky * in case an appropriate failsafe Rx queue has been 4143db7001eSIan Dolzhansky * stopped earlier. 4153db7001eSIan Dolzhansky */ 4163db7001eSIan Dolzhansky ret = dev->dev_ops->rx_queue_stop(dev, i); 4173db7001eSIan Dolzhansky if (ret) { 4183db7001eSIan Dolzhansky ERROR("Could not synchronize Rx queue %d", i); 4193db7001eSIan Dolzhansky return ret; 4203db7001eSIan Dolzhansky } 4213db7001eSIan Dolzhansky } 4223db7001eSIan Dolzhansky } 4233db7001eSIan Dolzhansky return 0; 4243db7001eSIan Dolzhansky } 4253db7001eSIan Dolzhansky 426b32c9075SIan Dolzhansky static int 427b32c9075SIan Dolzhansky failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev) 428b32c9075SIan Dolzhansky { 429b32c9075SIan Dolzhansky struct txq *txq; 430b32c9075SIan Dolzhansky int ret; 431b32c9075SIan Dolzhansky uint16_t i; 432b32c9075SIan Dolzhansky 433b32c9075SIan Dolzhansky for (i = 0; i < dev->data->nb_tx_queues; i++) { 434b32c9075SIan Dolzhansky txq = dev->data->tx_queues[i]; 435b32c9075SIan Dolzhansky 436b32c9075SIan Dolzhansky if (txq->info.conf.tx_deferred_start && 437b32c9075SIan Dolzhansky dev->data->tx_queue_state[i] == 438b32c9075SIan Dolzhansky RTE_ETH_QUEUE_STATE_STARTED) { 439b32c9075SIan Dolzhansky /* 440b32c9075SIan Dolzhansky * The subdevice Tx queue does not launch on device 441b32c9075SIan Dolzhansky * start if deferred start flag is set. It needs to be 442b32c9075SIan Dolzhansky * started manually in case an appropriate failsafe Tx 443b32c9075SIan Dolzhansky * queue has been started earlier. 444b32c9075SIan Dolzhansky */ 445b32c9075SIan Dolzhansky ret = dev->dev_ops->tx_queue_start(dev, i); 446b32c9075SIan Dolzhansky if (ret) { 447b32c9075SIan Dolzhansky ERROR("Could not synchronize Tx queue %d", i); 448b32c9075SIan Dolzhansky return ret; 449b32c9075SIan Dolzhansky } 450b32c9075SIan Dolzhansky } else if (dev->data->tx_queue_state[i] == 451b32c9075SIan Dolzhansky RTE_ETH_QUEUE_STATE_STOPPED) { 452b32c9075SIan Dolzhansky /* 453b32c9075SIan Dolzhansky * The subdevice Tx queue needs to be stopped manually 454b32c9075SIan Dolzhansky * in case an appropriate failsafe Tx queue has been 455b32c9075SIan Dolzhansky * stopped earlier. 456b32c9075SIan Dolzhansky */ 457b32c9075SIan Dolzhansky ret = dev->dev_ops->tx_queue_stop(dev, i); 458b32c9075SIan Dolzhansky if (ret) { 459b32c9075SIan Dolzhansky ERROR("Could not synchronize Tx queue %d", i); 460b32c9075SIan Dolzhansky return ret; 461b32c9075SIan Dolzhansky } 462b32c9075SIan Dolzhansky } 463b32c9075SIan Dolzhansky } 464b32c9075SIan Dolzhansky return 0; 465b32c9075SIan Dolzhansky } 466b32c9075SIan Dolzhansky 467ebea83f8SGaetan Rivet int 468ebea83f8SGaetan Rivet failsafe_eth_dev_state_sync(struct rte_eth_dev *dev) 469ebea83f8SGaetan Rivet { 470ebea83f8SGaetan Rivet struct sub_device *sdev; 471ebea83f8SGaetan Rivet uint32_t inactive; 472ebea83f8SGaetan Rivet int ret; 473ebea83f8SGaetan Rivet uint8_t i; 474ebea83f8SGaetan Rivet 475a0194d82SGaetan Rivet if (PRIV(dev)->state < DEV_PARSED) 476a0194d82SGaetan Rivet return 0; 477a0194d82SGaetan Rivet 478a0194d82SGaetan Rivet ret = failsafe_args_parse_subs(dev); 479a0194d82SGaetan Rivet if (ret) 480598fb8aeSGaetan Rivet goto err_remove; 481a0194d82SGaetan Rivet 482ebea83f8SGaetan Rivet if (PRIV(dev)->state < DEV_PROBED) 483ebea83f8SGaetan Rivet return 0; 484ebea83f8SGaetan Rivet ret = failsafe_eal_init(dev); 485ebea83f8SGaetan Rivet if (ret) 486598fb8aeSGaetan Rivet goto err_remove; 487ebea83f8SGaetan Rivet if (PRIV(dev)->state < DEV_ACTIVE) 488ebea83f8SGaetan Rivet return 0; 489ebea83f8SGaetan Rivet inactive = 0; 4902cc52cd7SGaetan Rivet FOREACH_SUBDEV(sdev, i, dev) { 4912cc52cd7SGaetan Rivet if (sdev->state == DEV_PROBED) { 492ebea83f8SGaetan Rivet inactive |= UINT32_C(1) << i; 4932cc52cd7SGaetan Rivet ret = eth_dev_flow_isolate_set(dev, sdev); 4942cc52cd7SGaetan Rivet if (ret) { 4952cc52cd7SGaetan Rivet ERROR("Could not apply configuration to sub_device %d", 4962cc52cd7SGaetan Rivet i); 4972cc52cd7SGaetan Rivet goto err_remove; 4982cc52cd7SGaetan Rivet } 4992cc52cd7SGaetan Rivet } 5002cc52cd7SGaetan Rivet } 501ebea83f8SGaetan Rivet ret = dev->dev_ops->dev_configure(dev); 502ebea83f8SGaetan Rivet if (ret) 503598fb8aeSGaetan Rivet goto err_remove; 504ebea83f8SGaetan Rivet FOREACH_SUBDEV(sdev, i, dev) { 505ebea83f8SGaetan Rivet if (inactive & (UINT32_C(1) << i)) { 506ebea83f8SGaetan Rivet ret = fs_eth_dev_conf_apply(dev, sdev); 507ebea83f8SGaetan Rivet if (ret) { 508ebea83f8SGaetan Rivet ERROR("Could not apply configuration to sub_device %d", 509ebea83f8SGaetan Rivet i); 510598fb8aeSGaetan Rivet goto err_remove; 511ebea83f8SGaetan Rivet } 512ebea83f8SGaetan Rivet } 513ebea83f8SGaetan Rivet } 514ebea83f8SGaetan Rivet /* 515ebea83f8SGaetan Rivet * If new devices have been configured, check if 516ebea83f8SGaetan Rivet * the link state has changed. 517ebea83f8SGaetan Rivet */ 518ebea83f8SGaetan Rivet if (inactive) 519ebea83f8SGaetan Rivet dev->dev_ops->link_update(dev, 1); 520ebea83f8SGaetan Rivet if (PRIV(dev)->state < DEV_STARTED) 521ebea83f8SGaetan Rivet return 0; 522ebea83f8SGaetan Rivet ret = dev->dev_ops->dev_start(dev); 523ebea83f8SGaetan Rivet if (ret) 524598fb8aeSGaetan Rivet goto err_remove; 5253db7001eSIan Dolzhansky ret = failsafe_eth_dev_rx_queues_sync(dev); 5263db7001eSIan Dolzhansky if (ret) 5273db7001eSIan Dolzhansky goto err_remove; 528b32c9075SIan Dolzhansky ret = failsafe_eth_dev_tx_queues_sync(dev); 529b32c9075SIan Dolzhansky if (ret) 530b32c9075SIan Dolzhansky goto err_remove; 531598fb8aeSGaetan Rivet return 0; 532598fb8aeSGaetan Rivet err_remove: 533598fb8aeSGaetan Rivet FOREACH_SUBDEV(sdev, i, dev) 534598fb8aeSGaetan Rivet if (sdev->state != PRIV(dev)->state) 535598fb8aeSGaetan Rivet sdev->remove = 1; 536ebea83f8SGaetan Rivet return ret; 537598fb8aeSGaetan Rivet } 538598fb8aeSGaetan Rivet 5396265ab51SMatan Azrad void 5406265ab51SMatan Azrad failsafe_stats_increment(struct rte_eth_stats *to, struct rte_eth_stats *from) 5416265ab51SMatan Azrad { 5426265ab51SMatan Azrad uint32_t i; 5436265ab51SMatan Azrad 5446265ab51SMatan Azrad RTE_ASSERT(to != NULL && from != NULL); 5456265ab51SMatan Azrad to->ipackets += from->ipackets; 5466265ab51SMatan Azrad to->opackets += from->opackets; 5476265ab51SMatan Azrad to->ibytes += from->ibytes; 5486265ab51SMatan Azrad to->obytes += from->obytes; 5496265ab51SMatan Azrad to->imissed += from->imissed; 5506265ab51SMatan Azrad to->ierrors += from->ierrors; 5516265ab51SMatan Azrad to->oerrors += from->oerrors; 5526265ab51SMatan Azrad to->rx_nombuf += from->rx_nombuf; 5536265ab51SMatan Azrad for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { 5546265ab51SMatan Azrad to->q_ipackets[i] += from->q_ipackets[i]; 5556265ab51SMatan Azrad to->q_opackets[i] += from->q_opackets[i]; 5566265ab51SMatan Azrad to->q_ibytes[i] += from->q_ibytes[i]; 5576265ab51SMatan Azrad to->q_obytes[i] += from->q_obytes[i]; 5586265ab51SMatan Azrad to->q_errors[i] += from->q_errors[i]; 5596265ab51SMatan Azrad } 5606265ab51SMatan Azrad } 5616265ab51SMatan Azrad 562598fb8aeSGaetan Rivet int 563f8244c63SZhiyong Yang failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused, 564598fb8aeSGaetan Rivet enum rte_eth_event_type event __rte_unused, 565598fb8aeSGaetan Rivet void *cb_arg, void *out __rte_unused) 566598fb8aeSGaetan Rivet { 567598fb8aeSGaetan Rivet struct sub_device *sdev = cb_arg; 568598fb8aeSGaetan Rivet 569655fcd68SMatan Azrad fs_lock(sdev->fs_dev, 0); 570598fb8aeSGaetan Rivet /* Switch as soon as possible tx_dev. */ 571598fb8aeSGaetan Rivet fs_switch_dev(sdev->fs_dev, sdev); 572598fb8aeSGaetan Rivet /* Use safe bursts in any case. */ 573598fb8aeSGaetan Rivet set_burst_fn(sdev->fs_dev, 1); 574598fb8aeSGaetan Rivet /* 575598fb8aeSGaetan Rivet * Async removal, the sub-PMD will try to unregister 576598fb8aeSGaetan Rivet * the callback at the source of the current thread context. 577598fb8aeSGaetan Rivet */ 578598fb8aeSGaetan Rivet sdev->remove = 1; 579655fcd68SMatan Azrad fs_unlock(sdev->fs_dev, 0); 580ebea83f8SGaetan Rivet return 0; 581ebea83f8SGaetan Rivet } 582ad7d6a35SGaetan Rivet 583ad7d6a35SGaetan Rivet int 584f8244c63SZhiyong Yang failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused, 585ad7d6a35SGaetan Rivet enum rte_eth_event_type event __rte_unused, 586ad7d6a35SGaetan Rivet void *cb_arg, void *out __rte_unused) 587ad7d6a35SGaetan Rivet { 588ad7d6a35SGaetan Rivet struct rte_eth_dev *dev = cb_arg; 589ad7d6a35SGaetan Rivet int ret; 590ad7d6a35SGaetan Rivet 591ad7d6a35SGaetan Rivet ret = dev->dev_ops->link_update(dev, 0); 592ad7d6a35SGaetan Rivet /* We must pass on the LSC event */ 593ad7d6a35SGaetan Rivet if (ret) 594ad7d6a35SGaetan Rivet return _rte_eth_dev_callback_process(dev, 595ad7d6a35SGaetan Rivet RTE_ETH_EVENT_INTR_LSC, 596cebe3d7bSThomas Monjalon NULL); 597ad7d6a35SGaetan Rivet else 598ad7d6a35SGaetan Rivet return 0; 599ad7d6a35SGaetan Rivet } 6007fda13d3SMatan Azrad 6017fda13d3SMatan Azrad /* Take sub-device ownership before it becomes exposed to the application. */ 6027fda13d3SMatan Azrad int 6037fda13d3SMatan Azrad failsafe_eth_new_event_callback(uint16_t port_id, 6047fda13d3SMatan Azrad enum rte_eth_event_type event __rte_unused, 6057fda13d3SMatan Azrad void *cb_arg, void *out __rte_unused) 6067fda13d3SMatan Azrad { 6077fda13d3SMatan Azrad struct rte_eth_dev *fs_dev = cb_arg; 6087fda13d3SMatan Azrad struct sub_device *sdev; 6097fda13d3SMatan Azrad struct rte_eth_dev *dev = &rte_eth_devices[port_id]; 6107fda13d3SMatan Azrad uint8_t i; 6117fda13d3SMatan Azrad 6127fda13d3SMatan Azrad FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) { 6137fda13d3SMatan Azrad if (sdev->state >= DEV_PROBED) 6147fda13d3SMatan Azrad continue; 6157fda13d3SMatan Azrad if (strcmp(sdev->devargs.name, dev->device->name) != 0) 6167fda13d3SMatan Azrad continue; 6177fda13d3SMatan Azrad rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner); 6187fda13d3SMatan Azrad /* The actual owner will be checked after the port probing. */ 6197fda13d3SMatan Azrad break; 6207fda13d3SMatan Azrad } 6217fda13d3SMatan Azrad return 0; 6227fda13d3SMatan Azrad } 623