/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>
#ifdef RTE_EXEC_ENV_LINUX
#include <sys/eventfd.h>
#endif

#include <rte_debug.h>
#include <rte_atomic.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "failsafe_private.h"

static int
fs_dev_configure(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV(sdev, i, dev) {
		int rmv_interrupt = 0;
		int lsc_interrupt = 0;
		int lsc_enabled;

		if (sdev->state != DEV_PROBED &&
		    !(PRIV(dev)->alarm_lock == 0 && sdev->state == DEV_ACTIVE))
			continue;

		rmv_interrupt = ETH(sdev)->data->dev_flags &
				RTE_ETH_DEV_INTR_RMV;
		if (rmv_interrupt) {
			DEBUG("Enabling RMV interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.rmv = 1;
		} else {
			DEBUG("sub_device %d does not support RMV event", i);
		}
		lsc_enabled = dev->data->dev_conf.intr_conf.lsc;
		lsc_interrupt = lsc_enabled &&
			(ETH(sdev)->data->dev_flags &
			 RTE_ETH_DEV_INTR_LSC);
		if (lsc_interrupt) {
			DEBUG("Enabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 1;
		} else if (lsc_enabled && !lsc_interrupt) {
			DEBUG("Disabling LSC interrupts for sub_device %d", i);
			dev->data->dev_conf.intr_conf.lsc = 0;
		}
		DEBUG("Configuring sub-device %d", i);
		ret = rte_eth_dev_configure(PORT_ID(sdev),
					dev->data->nb_rx_queues,
					dev->data->nb_tx_queues,
					&dev->data->dev_conf);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			ERROR("Could not configure sub_device %d", i);
			fs_unlock(dev, 0);
			return ret;
		}
		if (rmv_interrupt && sdev->rmv_callback == 0) {
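			/*
			 * Register the RMV callback once per sub-device,
			 * so that its removal events feed the fail-safe
			 * hot-plug handling.
			 */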
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_RMV,
					failsafe_eth_rmv_event_callback,
					sdev);
			if (ret)
				WARN("Failed to register RMV callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->rmv_callback = 1;
		}
		dev->data->dev_conf.intr_conf.rmv = 0;
		if (lsc_interrupt && sdev->lsc_callback == 0) {
			ret = rte_eth_dev_callback_register(PORT_ID(sdev),
					RTE_ETH_EVENT_INTR_LSC,
					failsafe_eth_lsc_event_callback,
					dev);
			if (ret)
				WARN("Failed to register LSC callback for sub_device %d",
				     SUB_ID(sdev));
			else
				sdev->lsc_callback = 1;
		}
		dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
		sdev->state = DEV_ACTIVE;
	}
	if (PRIV(dev)->state < DEV_ACTIVE)
		PRIV(dev)->state = DEV_ACTIVE;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	struct txq *txq;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		__rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		__rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
		txq = dev->data->tx_queues[i];
		if (txq != NULL && !txq->info.conf.tx_deferred_start)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STARTED;
	}
}

static int
fs_dev_start(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	ret = failsafe_rx_intr_install(dev);
	if (ret) {
		fs_unlock(dev, 0);
		return ret;
	}
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state != DEV_ACTIVE)
			continue;
		DEBUG("Starting sub_device %d", i);
		ret = rte_eth_dev_start(PORT_ID(sdev));
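		/*
		 * An error reported by a removed sub-device is not
		 * fatal: fs_err() filters it out and the next
		 * sub-device is tried.
		 */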
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			fs_unlock(dev, 0);
			return ret;
		}
		ret = failsafe_rx_intr_install_subdevice(sdev);
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;
			if (fs_err(sdev, rte_eth_dev_stop(PORT_ID(sdev))) < 0)
				ERROR("Failed to stop sub-device %u",
				      SUB_ID(sdev));
			fs_unlock(dev, 0);
			return ret;
		}
		sdev->state = DEV_STARTED;
	}
	if (PRIV(dev)->state < DEV_STARTED) {
		PRIV(dev)->state = DEV_STARTED;
		fs_set_queues_state_start(dev);
	}
	fs_switch_dev(dev, NULL);
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		__rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
		if (dev->data->rx_queues[i] != NULL)
			dev->data->rx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		__rte_assume(i < RTE_MAX_QUEUES_PER_PORT);
		if (dev->data->tx_queues[i] != NULL)
			dev->data->tx_queue_state[i] =
						RTE_ETH_QUEUE_STATE_STOPPED;
	}
}

static int
fs_dev_stop(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	PRIV(dev)->state = DEV_STARTED - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
		ret = rte_eth_dev_stop(PORT_ID(sdev));
		if (fs_err(sdev, ret) < 0) {
			ERROR("Failed to stop device %u",
			      PORT_ID(sdev));
			PRIV(dev)->state = DEV_STARTED + 1;
			fs_unlock(dev, 0);
			return ret;
		}
		failsafe_rx_intr_uninstall_subdevice(sdev);
		sdev->state = DEV_STARTED - 1;
	}
	failsafe_rx_intr_uninstall(dev);
	fs_set_queues_state_stop(dev);
	fs_unlock(dev, 0);

	return 0;
}
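/*
 * Apply the link-up request to every active sub-device and fail on
 * the first real (non-removal) error.
 */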
static int
fs_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_up failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_link_down failed for sub_device %d"
			      " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}
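/*
 * Start the Rx queue on every active sub-device; on failure, roll
 * back by stopping the queue on all of them.
 */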
static int
fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Rx queue start failed for subdevice %d", i);
			fs_rx_queue_stop(dev, rx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int err = 0;
	bool failure = true;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue stop failed for subdevice %d", i);
			err = ret;
		} else {
			failure = false;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	fs_unlock(dev, 0);
	/* Return 0 in case of at least one successful queue stop */
	return (failure) ? err : 0;
}
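/*
 * Start the Tx queue on every active sub-device; on failure, roll
 * back by stopping the queue on all of them.
 */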
static int
fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		uint16_t port_id = ETH(sdev)->data->port_id;

		ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Tx queue start failed for subdevice %d", i);
			fs_tx_queue_stop(dev, tx_queue_id);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	fs_unlock(dev, 0);
	return 0;
}

static void
fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct rxq *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	if (fs_lock(dev, 0) != 0)
		return;
	if (rxq->event_fd >= 0)
		close(rxq->event_fd);
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->rx_queues != NULL &&
		    ETH(sdev)->data->rx_queues[rxq->qid] != NULL)
			SUBOPS(sdev, rx_queue_release)(ETH(sdev), rxq->qid);
	}
	dev->data->rx_queues[rxq->qid] = NULL;
	rte_free(rxq);
	fs_unlock(dev, 0);
}
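/*
 * Allocate the fail-safe Rx queue context, create its eventfd on
 * Linux for Rx interrupt proxying, then set up the queue on every
 * active sub-device.
 */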
static int
fs_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (rx_conf->rx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, rx_queue_start) == NULL) {
				ERROR("Rx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	rxq = dev->data->rx_queues[rx_queue_id];
	if (rxq != NULL) {
		fs_rx_queue_release(dev, rx_queue_id);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}
	rxq = rte_zmalloc(NULL,
			  sizeof(*rxq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&rxq->refcnt[i]);
	rxq->qid = rx_queue_id;
	rxq->socket_id = socket_id;
	rxq->info.mp = mb_pool;
	rxq->info.conf = *rx_conf;
	rxq->info.nb_desc = nb_rx_desc;
	rxq->priv = PRIV(dev);
	rxq->sdev = PRIV(dev)->subs;
#ifdef RTE_EXEC_ENV_LINUX
	rxq->event_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	if (rxq->event_fd < 0) {
		ERROR("Failed to create an eventfd: %s", strerror(errno));
		fs_unlock(dev, 0);
		return -errno;
	}
#else
	rxq->event_fd = -1;
#endif
	dev->data->rx_queues[rx_queue_id] = rxq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev),
				rx_queue_id,
				nb_rx_desc, socket_id,
				rx_conf, mb_pool);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("RX queue setup failed for sub_device %d", i);
			goto free_rxq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_rxq:
	fs_rx_queue_release(dev, rx_queue_id);
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	int rc = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	/* Fail if proxy service is not running. */
	if (PRIV(dev)->rxp.sstate != SS_RUNNING) {
		ERROR("failsafe interrupt services are not running");
		rc = -EAGAIN;
		goto unlock;
	}
	rxq->enable_events = 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_enable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static int
fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq;
	struct sub_device *sdev;
	uint64_t u64;
	uint8_t i;
	int rc = 0;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (idx >= dev->data->nb_rx_queues) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq = dev->data->rx_queues[idx];
	if (rxq == NULL || rxq->event_fd <= 0) {
		rc = -EINVAL;
		goto unlock;
	}
	rxq->enable_events = 0;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rx_intr_disable(PORT_ID(sdev), idx);
		ret = fs_err(sdev, ret);
		if (ret)
			rc = ret;
	}
	/* Clear pending events */
	while (read(rxq->event_fd, &u64, sizeof(uint64_t)) > 0)
		;
unlock:
	fs_unlock(dev, 0);
	if (rc)
		rte_errno = -rc;
	return rc;
}

static void
fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct sub_device *sdev;
	uint8_t i;
	struct txq *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;
	if (fs_lock(dev, 0) != 0)
		return;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		if (ETH(sdev)->data->tx_queues != NULL &&
		    ETH(sdev)->data->tx_queues[txq->qid] != NULL)
			SUBOPS(sdev, tx_queue_release)(ETH(sdev), txq->qid);
	}
	dev->data->tx_queues[txq->qid] = NULL;
	rte_free(txq);
	fs_unlock(dev, 0);
}
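/*
 * Allocate the fail-safe Tx queue context and set up the queue on
 * every active sub-device.
 */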
static int
fs_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct sub_device *sdev;
	struct txq *txq;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	if (tx_conf->tx_deferred_start) {
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
			if (SUBOPS(sdev, tx_queue_start) == NULL) {
				ERROR("Tx queue deferred start is not "
					"supported for subdevice %d", i);
				fs_unlock(dev, 0);
				return -EINVAL;
			}
		}
	}
	txq = dev->data->tx_queues[tx_queue_id];
	if (txq != NULL) {
		fs_tx_queue_release(dev, tx_queue_id);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}
	txq = rte_zmalloc("ethdev TX queue",
			  sizeof(*txq) +
			  sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		fs_unlock(dev, 0);
		return -ENOMEM;
	}
	FOREACH_SUBDEV(sdev, i, dev)
		rte_atomic64_init(&txq->refcnt[i]);
	txq->qid = tx_queue_id;
	txq->socket_id = socket_id;
	txq->info.conf = *tx_conf;
	txq->info.nb_desc = nb_tx_desc;
	txq->priv = PRIV(dev);
	dev->data->tx_queues[tx_queue_id] = txq;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev),
				tx_queue_id,
				nb_tx_desc, socket_id,
				tx_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("TX queue setup failed for sub_device %d", i);
			goto free_txq;
		}
	}
	fs_unlock(dev, 0);
	return 0;
free_txq:
	fs_tx_queue_release(dev, tx_queue_id);
	fs_unlock(dev, 0);
	return ret;
}

static void
fs_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		fs_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		fs_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
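/*
 * Stop and close every sub-device, then release the fail-safe
 * resources themselves (queues, sub-EAL, arguments, hotplug mutex).
 */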
int
failsafe_eth_dev_close(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int err, ret = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	failsafe_hotplug_alarm_cancel(dev);
	if (PRIV(dev)->state == DEV_STARTED) {
		ret = dev->dev_ops->dev_stop(dev);
		if (ret != 0) {
			fs_unlock(dev, 0);
			return ret;
		}
	}
	PRIV(dev)->state = DEV_ACTIVE - 1;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Closing sub_device %d", i);
		failsafe_eth_dev_unregister_callbacks(sdev);
		err = rte_eth_dev_close(PORT_ID(sdev));
		if (err) {
			ret = ret ? ret : err;
			ERROR("Error while closing sub-device %u",
				PORT_ID(sdev));
		}
		sdev->state = DEV_ACTIVE - 1;
	}
	rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
					failsafe_eth_new_event_callback, dev);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		fs_unlock(dev, 0);
		return ret;
	}
	fs_dev_free_queues(dev);
	err = failsafe_eal_uninit(dev);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while uninitializing sub-EAL");
	}
	failsafe_args_free(dev);
	rte_free(PRIV(dev)->subs);
	rte_free(PRIV(dev)->mcast_addrs);
	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;
	fs_unlock(dev, 0);
	err = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
	if (err) {
		ret = ret ? ret : err;
		ERROR("Error while destroying hotplug mutex");
	}
	return ret;
}
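/*
 * Promiscuous mode is kept consistent across sub-devices: if
 * enabling it fails on one of them, it is rolled back on all.
 */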
static int
fs_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("Promiscuous mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("Promiscuous mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}
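/*
 * All-multicast mode follows the same enable/rollback scheme as
 * promiscuous mode to keep all sub-devices consistent.
 */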
static int
fs_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode enable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode disable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret = 0;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		ret = fs_err(sdev, ret);
		if (ret != 0) {
			ERROR("All-multicast mode disable failed for subdevice %d",
				PORT_ID(sdev));
			break;
		}
	}
	if (ret != 0) {
		/* Rollback in the case of failure */
		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
			ret = fs_err(sdev, ret);
			if (ret != 0)
				ERROR("All-multicast mode enable during rollback failed for subdevice %d",
					PORT_ID(sdev));
		}
	}
	fs_unlock(dev, 0);

	return ret;
}
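/*
 * Query the link status of every active sub-device and mirror the
 * link of the current Tx sub-device into the fail-safe port.
 */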
static int
fs_link_update(struct rte_eth_dev *dev,
		int wait_to_complete)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling link_update on sub_device %d", i);
		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
		if (ret && ret != -1 && sdev->remove == 0 &&
		    rte_eth_dev_is_removed(PORT_ID(sdev)) == 0) {
			ERROR("Link update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (TX_SUBDEV(dev)) {
		struct rte_eth_link *l1;
		struct rte_eth_link *l2;

		l1 = &dev->data->dev_link;
		l2 = &ETH(TX_SUBDEV(dev))->data->dev_link;
		if (memcmp(l1, l2, sizeof(*l1))) {
			*l1 = *l2;
			fs_unlock(dev, 0);
			return 0;
		}
	}
	fs_unlock(dev, 0);
	return -1;
}
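/*
 * Statistics are accumulated: each sub-device keeps a snapshot of
 * its last reading so that counters survive sub-device removal.
 */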
static int
fs_stats_get(struct rte_eth_dev *dev,
	     struct rte_eth_stats *stats)
{
	struct rte_eth_stats backup;
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
		uint64_t *timestamp = &sdev->stats_snapshot.timestamp;

		rte_memcpy(&backup, snapshot, sizeof(backup));
		ret = rte_eth_stats_get(PORT_ID(sdev), snapshot);
		if (ret) {
			if (!fs_err(sdev, ret)) {
				rte_memcpy(snapshot, &backup, sizeof(backup));
				goto inc;
			}
			ERROR("Operation rte_eth_stats_get failed for sub_device %d with error %d",
			      i, ret);
			*timestamp = 0;
			fs_unlock(dev, 0);
			return ret;
		}
		*timestamp = rte_rdtsc();
inc:
		failsafe_stats_increment(stats, snapshot);
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_stats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_stats_reset(PORT_ID(sdev));
		if (ret) {
			if (!fs_err(sdev, ret))
				continue;

			ERROR("Operation rte_eth_stats_reset failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
		memset(&sdev->stats_snapshot, 0, sizeof(struct rte_eth_stats));
	}
	memset(&PRIV(dev)->stats_accumulator, 0, sizeof(struct rte_eth_stats));
	fs_unlock(dev, 0);

	return 0;
}

static int
__fs_xstats_count(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	int count = 0;
	uint8_t i;
	int ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get_names(PORT_ID(sdev), NULL, 0);
		if (ret < 0)
			return ret;
		count += ret;
	}

	return count;
}
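/*
 * Report the xstats names of all sub-devices, each prefixed with
 * "subN_" (inserted after the "rx_"/"tx_" prefix when present).
 */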
static int
__fs_xstats_get_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	struct sub_device *sdev;
	unsigned int count = 0;
	uint8_t i;

	/* Caller only cares about count */
	if (!xstats_names)
		return __fs_xstats_count(dev);

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		struct rte_eth_xstat_name *sub_names = xstats_names + count;
		int j, r;

		if (count >= limit)
			break;

		r = rte_eth_xstats_get_names(PORT_ID(sdev),
					     sub_names, limit - count);
		if (r < 0)
			return r;

		/* add subN_ prefix to names */
		for (j = 0; j < r; j++) {
			char *xname = sub_names[j].name;
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			if ((xname[0] == 't' || xname[0] == 'r') &&
			    xname[1] == 'x' && xname[2] == '_')
				snprintf(tmp, sizeof(tmp), "%.3ssub%u_%s",
					 xname, i, xname + 3);
			else
				snprintf(tmp, sizeof(tmp), "sub%u_%s",
					 i, xname);

			strlcpy(xname, tmp, RTE_ETH_XSTATS_NAME_SIZE);
		}
		count += r;
	}
	return count;
}

static int
fs_xstats_get_names(struct rte_eth_dev *dev,
		    struct rte_eth_xstat_name *xstats_names,
		    unsigned int limit)
{
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	ret = __fs_xstats_get_names(dev, xstats_names, limit);
	fs_unlock(dev, 0);
	return ret;
}

static int
__fs_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats,
		unsigned int n)
{
	unsigned int count = 0;
	struct sub_device *sdev;
	uint8_t i;
	int j, ret;

	ret = __fs_xstats_count(dev);
	/*
	 * if error
	 * or caller did not give enough space
	 * or just querying
	 */
	if (ret < 0 || ret > (int)n || xstats == NULL)
		return ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_xstats_get(PORT_ID(sdev), xstats, n);
		if (ret < 0)
			return ret;

		if (ret > (int)n)
			return n + count;

		/* add offset to id's from sub-device */
		for (j = 0; j < ret; j++)
			xstats[j].id += count;

		xstats += ret;
		n -= ret;
		count += ret;
	}

	return count;
}

static int
fs_xstats_get(struct rte_eth_dev *dev,
	      struct rte_eth_xstat *xstats,
	      unsigned int n)
{
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	ret = __fs_xstats_get(dev, xstats, n);
	fs_unlock(dev, 0);

	return ret;
}

static int
fs_xstats_reset(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;
	int r;

	r = fs_lock(dev, 0);
	if (r != 0)
		return r;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		r = rte_eth_xstats_reset(PORT_ID(sdev));
		if (r < 0)
			break;
	}
	fs_unlock(dev, 0);

	return r;
}

static void
fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to,
		      const struct rte_eth_desc_lim *from)
{
	to->nb_max = RTE_MIN(to->nb_max, from->nb_max);
	to->nb_min = RTE_MAX(to->nb_min, from->nb_min);
	to->nb_align = RTE_MAX(to->nb_align, from->nb_align);

	to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max);
	to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max);
}

/*
 * Merge the information from sub-devices.
 *
 * The reported values must be the common subset of all sub-devices.
 */
static void
fs_dev_merge_info(struct rte_eth_dev_info *info,
		  const struct rte_eth_dev_info *sinfo)
{
	info->min_mtu = RTE_MAX(info->min_mtu, sinfo->min_mtu);
	info->max_mtu = RTE_MIN(info->max_mtu, sinfo->max_mtu);
	info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen);
	info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues);
	info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues);
	info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs);
	info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs,
					   sinfo->max_hash_mac_addrs);
	info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools);
	info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs);

	fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim);
	fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim);

	info->rx_offload_capa &= sinfo->rx_offload_capa;
	info->tx_offload_capa &= sinfo->tx_offload_capa;
	info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa;
	info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa;
	info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads;

	/*
	 * RETA size is a GCD of RETA sizes indicated by sub-devices.
	 * Each of these sizes is a power of 2, so use the lower one.
	 */
	info->reta_size = RTE_MIN(info->reta_size, sinfo->reta_size);

	info->hash_key_size = RTE_MIN(info->hash_key_size,
				      sinfo->hash_key_size);
}

/**
 * Fail-safe dev_infos_get rules:
 *
 * No sub_device:
 *   Numerables:
 *      Use the maximum possible values for any field, so as not
 *      to impede any further configuration effort.
 *   Capabilities:
 *      Limits capabilities to those that are understood by the
 *      fail-safe PMD. This understanding stems from the fail-safe
 *      being capable of verifying that the related capability is
 *      expressed within the device configuration (struct rte_eth_conf).
 *
 * At least one probed sub_device:
 *   Numerables:
 *      Uses values from the active probed sub_device.
 *      The rationale here is that if any sub_device is less capable
 *      (for example concerning the number of queues) than the active
 *      sub_device, then its subsequent configuration will fail.
 *      It is impossible to foresee this failure when the failing sub_device
 *      is supposed to be plugged-in later on, so the configuration process
 *      is the single point of failure and error reporting.
 *   Capabilities:
 *      Uses a logical AND of RX capabilities among
 *      all sub_devices and the default capabilities.
 *      Uses a logical AND of TX capabilities among
 *      the active probed sub_device and the default capabilities.
 *      Uses a logical AND of device capabilities among
 *      all sub_devices and the default capabilities.
 */
static int
fs_dev_infos_get(struct rte_eth_dev *dev,
		 struct rte_eth_dev_info *infos)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	/* Use maximum upper bounds by default */
	infos->min_mtu = RTE_ETHER_MIN_MTU;
	infos->max_mtu = UINT16_MAX;
	infos->max_rx_pktlen = UINT32_MAX;
	infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
	infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR;
	infos->max_hash_mac_addrs = UINT32_MAX;
	infos->max_vfs = UINT16_MAX;
	infos->max_vmdq_pools = UINT16_MAX;
	infos->reta_size = UINT16_MAX;
	infos->hash_key_size = UINT8_MAX;
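
	/*
	 * With no probed sub-device, these maxima are what
	 * rte_eth_dev_info_get() reports for the fail-safe port; each
	 * fs_dev_merge_info() call below can only tighten them toward the
	 * common subset.
	 */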

	/*
	 * Set of capabilities that can be verified upon
	 * configuring a sub-device.
	 */
	infos->rx_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_LRO |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
		RTE_ETH_RX_OFFLOAD_SECURITY |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	infos->rx_queue_offload_capa =
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_RX_OFFLOAD_TCP_LRO |
		RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		RTE_ETH_RX_OFFLOAD_MACSEC_STRIP |
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
		RTE_ETH_RX_OFFLOAD_SCATTER |
		RTE_ETH_RX_OFFLOAD_TIMESTAMP |
		RTE_ETH_RX_OFFLOAD_SECURITY |
		RTE_ETH_RX_OFFLOAD_RSS_HASH;

	infos->tx_offload_capa =
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
		RTE_ETH_TX_OFFLOAD_TCP_TSO;

	infos->flow_type_rss_offloads =
		RTE_ETH_RSS_IP |
		RTE_ETH_RSS_UDP |
		RTE_ETH_RSS_TCP;
	infos->dev_capa =
		RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
		RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
	infos->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
		struct rte_eth_dev_info sub_info;

		ret = rte_eth_dev_info_get(PORT_ID(sdev), &sub_info);
		ret = fs_err(sdev, ret);
		if (ret != 0)
			return ret;

		fs_dev_merge_info(infos, &sub_info);
	}

	return 0;
}
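
/*
 * Illustrative sketch, not part of the upstream driver: what an application
 * sees when it queries a fail-safe port (port_id is assumed valid). The
 * merged report can be used verbatim for configuration.
 */
static __rte_unused int
fs_example_query_infos(uint16_t port_id)
{
	struct rte_eth_dev_info infos;
	struct rte_eth_conf conf = { 0 };
	int ret;

	ret = rte_eth_dev_info_get(port_id, &infos);
	if (ret != 0)
		return ret;
	/*
	 * The counters are the common subset of all probed sub-devices, so
	 * a configuration within these bounds cannot be rejected by any of
	 * them for capacity reasons.
	 */
	RTE_ASSERT(infos.max_rx_queues >= 1 && infos.max_tx_queues >= 1);
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}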

static const uint32_t *
fs_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
	struct sub_device *sdev;
	struct rte_eth_dev *edev;
	const uint32_t *ret;

	if (fs_lock(dev, 0) != 0)
		return NULL;
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = NULL;
		goto unlock;
	}
	edev = ETH(sdev);
	/* ENOTSUP: counts as no supported ptypes */
	if (SUBOPS(sdev, dev_supported_ptypes_get) == NULL) {
		ret = NULL;
		goto unlock;
	}
	/*
	 * The API does not permit a clean AND of all ptypes. It is also
	 * incomplete by design, and we do not really care to have the best
	 * possible value in this context.
	 * We just return the ptypes of the device of highest priority,
	 * usually the PREFERRED device.
	 */
	ret = SUBOPS(sdev, dev_supported_ptypes_get)(edev, no_of_elements);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_set_mtu failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}
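
/*
 * Illustrative sketch, not part of the upstream driver: the fail-safe port
 * is driven through the normal ethdev API; fs_mtu_set() above then fans the
 * call out to every active sub-device (port_id is assumed valid).
 */
static __rte_unused int
fs_example_set_mtu(uint16_t port_id)
{
	/* One call on the fail-safe port updates all active sub-devices. */
	return rte_eth_dev_set_mtu(port_id, 1500);
}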

static int
fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_vlan_filter failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_flow_ctrl_get(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	sdev = TX_SUBDEV(dev);
	if (sdev == NULL) {
		ret = 0;
		goto unlock;
	}
	if (SUBOPS(sdev, flow_ctrl_get) == NULL) {
		ret = -ENOTSUP;
		goto unlock;
	}
	ret = SUBOPS(sdev, flow_ctrl_get)(ETH(sdev), fc_conf);
unlock:
	fs_unlock(dev, 0);
	return ret;
}

static int
fs_flow_ctrl_set(struct rte_eth_dev *dev,
		 struct rte_eth_fc_conf *fc_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_flow_ctrl_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);
	return 0;
}
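
/*
 * Illustrative sketch, not part of the upstream driver: flow-control reads
 * come from the single highest-priority (TX) sub-device, while writes are
 * propagated to all active sub-devices, keeping their state consistent
 * (port_id is assumed valid).
 */
static __rte_unused int
fs_example_flow_ctrl_roundtrip(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf); /* one device */
	if (ret != 0)
		return ret;
	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf); /* all devices */
}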

static void
fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct sub_device *sdev;
	uint8_t i;

	if (fs_lock(dev, 0) != 0)
		return;
	/* No check: already done within the rte_eth_dev_mac_addr_remove
	 * call for the fail-safe device.
	 */
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		rte_eth_dev_mac_addr_remove(PORT_ID(sdev),
					    &dev->data->mac_addrs[index]);
	PRIV(dev)->mac_addr_pool[index] = 0;
	fs_unlock(dev, 0);
}

static int
fs_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq)
{
	struct sub_device *sdev;
	int ret;
	uint8_t i;

	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
		if ((ret = fs_err(sdev, ret))) {
			ERROR("Operation rte_eth_dev_mac_addr_add failed for sub_device %"
			      PRIu8 " with error %d", i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	if (index >= PRIV(dev)->nb_mac_addr) {
		DEBUG("Growing mac_addrs array");
		PRIV(dev)->nb_mac_addr = index;
	}
	PRIV(dev)->mac_addr_pool[index] = vmdq;
	fs_unlock(dev, 0);
	return 0;
}

static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}
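
/*
 * Illustrative sketch, not part of the upstream driver: secondary MAC
 * addresses added through the ethdev API are recorded in mac_addr_pool[]
 * above, so they can be replayed on a sub-device that is plugged in later.
 * The address below is a hypothetical locally administered one.
 */
static __rte_unused int
fs_example_add_mac(uint16_t port_id)
{
	struct rte_ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	/* Pool 0: no VMDq steering for this address. */
	return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}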

static int
fs_set_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;
	void *mcast_addrs;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   mc_addr_set, nb_mc_addr);
		if (ret != 0) {
			ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
			      i, ret);
			goto rollback;
		}
	}

	mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
		nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
	if (mcast_addrs == NULL && nb_mc_addr > 0) {
		ret = -ENOMEM;
		goto rollback;
	}
	rte_memcpy(mcast_addrs, mc_addr_set,
		   nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
	PRIV(dev)->nb_mcast_addr = nb_mc_addr;
	PRIV(dev)->mcast_addrs = mcast_addrs;

	fs_unlock(dev, 0);
	return 0;

rollback:
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
			PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
		if (rc != 0) {
			ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
			      i, rc);
		}
	}

	fs_unlock(dev, 0);
	return ret;
}

static int
fs_rss_hash_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_conf *rss_conf)
{
	struct sub_device *sdev;
	uint8_t i;
	int ret;

	ret = fs_lock(dev, 0);
	if (ret != 0)
		return ret;
	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
		ret = fs_err(sdev, ret);
		if (ret) {
			ERROR("Operation rte_eth_dev_rss_hash_update failed for sub_device %d with error %d",
			      i, ret);
			fs_unlock(dev, 0);
			return ret;
		}
	}
	fs_unlock(dev, 0);

	return 0;
}

static int
fs_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		const struct rte_flow_ops **ops)
{
	*ops = &fs_flow_ops;
	return 0;
}
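
/*
 * Illustrative sketch, not part of the upstream driver: exercising the
 * all-or-nothing semantics of fs_set_mc_addr_list() above. Either every
 * active sub-device accepts the list, or the driver rolls all of them back
 * to the previously committed list and returns the error (port_id is
 * assumed valid).
 */
static __rte_unused int
fs_example_set_mc_list(uint16_t port_id)
{
	/* MAC mapping of the IPv4 multicast group 224.0.0.251. */
	struct rte_ether_addr mc[1] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
}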

const struct eth_dev_ops failsafe_ops = {
	.dev_configure = fs_dev_configure,
	.dev_start = fs_dev_start,
	.dev_stop = fs_dev_stop,
	.dev_set_link_down = fs_dev_set_link_down,
	.dev_set_link_up = fs_dev_set_link_up,
	.dev_close = failsafe_eth_dev_close,
	.promiscuous_enable = fs_promiscuous_enable,
	.promiscuous_disable = fs_promiscuous_disable,
	.allmulticast_enable = fs_allmulticast_enable,
	.allmulticast_disable = fs_allmulticast_disable,
	.link_update = fs_link_update,
	.stats_get = fs_stats_get,
	.stats_reset = fs_stats_reset,
	.xstats_get = fs_xstats_get,
	.xstats_get_names = fs_xstats_get_names,
	.xstats_reset = fs_xstats_reset,
	.dev_infos_get = fs_dev_infos_get,
	.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
	.mtu_set = fs_mtu_set,
	.vlan_filter_set = fs_vlan_filter_set,
	.rx_queue_start = fs_rx_queue_start,
	.rx_queue_stop = fs_rx_queue_stop,
	.tx_queue_start = fs_tx_queue_start,
	.tx_queue_stop = fs_tx_queue_stop,
	.rx_queue_setup = fs_rx_queue_setup,
	.tx_queue_setup = fs_tx_queue_setup,
	.rx_queue_release = fs_rx_queue_release,
	.tx_queue_release = fs_tx_queue_release,
	.rx_queue_intr_enable = fs_rx_intr_enable,
	.rx_queue_intr_disable = fs_rx_intr_disable,
	.flow_ctrl_get = fs_flow_ctrl_get,
	.flow_ctrl_set = fs_flow_ctrl_set,
	.mac_addr_remove = fs_mac_addr_remove,
	.mac_addr_add = fs_mac_addr_add,
	.mac_addr_set = fs_mac_addr_set,
	.set_mc_addr_list = fs_set_mc_addr_list,
	.rss_hash_update = fs_rss_hash_update,
	.flow_ops_get = fs_flow_ops_get,
};
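
/*
 * Illustrative sketch, not part of the upstream driver: an ethdev PMD
 * exposes its callbacks by pointing the generic device at an ops table like
 * the one above; ethdev then dispatches, e.g., rte_eth_dev_set_mtu() on the
 * fail-safe port to failsafe_ops.mtu_set (fs_mtu_set).
 */
static __rte_unused void
fs_example_wire_ops(struct rte_eth_dev *edev)
{
	edev->dev_ops = &failsafe_ops;
}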