/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
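		/*
		 * Hairpin Tx queues are backed by a DevX object only;
		 * regular Tx queues also allocate their element array
		 * before creating the Verbs queue object.
		 */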
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN);
		} else {
			txq_alloc_elts(txq_ctrl);
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_IBV);
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;
	enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;
	struct mlx5_rxq_data *rxq = NULL;

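	/*
	 * If any Rx queue is configured with LRO, the Rx queue objects must
	 * be created through DevX (DevX RQ); otherwise Verbs objects are used.
	 */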
	for (i = 0; i < priv->rxqs_n; ++i) {
		rxq = (*priv->rxqs)[i];
		if (rxq && rxq->lro) {
			obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
			break;
		}
	}
	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately. */
		return -rte_errno;
	}
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
		struct rte_mempool *mp;

		if (!rxq_ctrl)
			continue;
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
			rxq_ctrl->obj = mlx5_rxq_obj_new
				(dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
			if (!rxq_ctrl->obj)
				goto error;
			continue;
		}
		/* Pre-register Rx mempool. */
		mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
		DRV_LOG(DEBUG,
			"port %u Rx queue %u registering"
			" mp %s having %u chunks",
			dev->data->port_id, rxq_ctrl->rxq.idx,
			mp->name, mp->nb_mem_chunks);
		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
		if (!rxq_ctrl->obj)
			goto error;
		if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
			rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
		else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
			rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Binds Tx queues to Rx queues for hairpin.
 *
 * Binds Tx queues to the target Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;

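	/*
	 * For every hairpin Tx queue, look up the peer Rx queue and program
	 * both DevX objects: the SQ gets the peer RQ id and VHCA id, the RQ
	 * gets the peer SQ id, and both are moved from RST to RDY state.
	 */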
	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %d",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq_ctrl = mlx5_rxq_get(dev,
					txq_ctrl->hairpin_conf.peers[0].queue);
		if (!rxq_ctrl) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d cannot be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		mlx5_txq_release(dev, i);
		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	}
	return 0;
error:
	mlx5_txq_release(dev, i);
	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	int ret;
	int fine_inline;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
	fine_inline = rte_mbuf_dynflag_lookup
		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
	if (fine_inline > 0)
		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
	else
		rte_net_mlx5_dynf_inline_mask = 0;
	if (dev->data->nb_rx_queues > 0) {
		ret = mlx5_dev_configure_rss_reta(dev);
		if (ret) {
			DRV_LOG(ERR, "port %u reta config failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return -rte_errno;
		}
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return -rte_errno;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		mlx5_txq_stop(dev);
		return -rte_errno;
	}
	ret = mlx5_hairpin_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		mlx5_txq_stop(dev);
		return -rte_errno;
	}
	/* Set started flag here for the following steps like control flow. */
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_stats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u failed to set default flows",
			dev->data->port_id);
		goto error;
	}
	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
	mlx5_flow_rxq_dynf_metadata_set(dev);
	/*
	 * In non-cached mode, only the default mreg copy action needs to be
	 * started, and no flow created by the application exists anymore.
	 * But it is worth wrapping the interface for further usage.
	 */
	ret = mlx5_flow_start_default(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/* Make the configuration above visible before enabling the datapath. */
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_req_start_rxtx(dev);
	mlx5_dev_interrupt_handler_install(dev);
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
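	/* Give in-flight Rx bursts time to drain: roughly 1 ms per Rx queue. */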
	usleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop_default(dev);
	/* Control flows for default traffic can be removed first. */
	mlx5_traffic_disable(dev);
	/* All Rx queue flags will be cleared in the flush interface. */
	mlx5_flow_list_flush(dev, &priv->flows, true);
	mlx5_rx_intr_vec_disable(dev);
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * Hairpin Tx queue default flows must be created whether or not
	 * isolated mode is enabled; otherwise all packets to be sent go out
	 * directly, without the Tx flow actions, e.g. encapsulation.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
	if (priv->config.dv_esw_en && !priv->config.vf) {
		if (mlx5_flow_create_esw_table_zero_flow(dev))
			priv->fdb_def_rule = 1;
		else
			DRV_LOG(INFO, "port %u FDB default rule cannot be"
				" configured - only Eswitch group 0 flows are"
				" supported.", dev->data->port_id);
	}
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}