/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN);
		} else {
			txq_alloc_elts(txq_ctrl);
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, priv->txpp_en ?
				 MLX5_TXQ_OBJ_TYPE_DEVX_SQ :
				 MLX5_TXQ_OBJ_TYPE_IBV);
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;
	enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;
	struct mlx5_rxq_data *rxq = NULL;

	/* Use DevX RQ objects if any Rx queue has LRO enabled. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		rxq = (*priv->rxqs)[i];
		if (rxq && rxq->lro) {
			obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
			break;
		}
	}
	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately. */
		return -rte_errno;
	}
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
		struct rte_mempool *mp;

		if (!rxq_ctrl)
			continue;
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
			rxq_ctrl->obj = mlx5_rxq_obj_new
				(dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
			if (!rxq_ctrl->obj)
				goto error;
			continue;
		}
		/* Pre-register Rx mempool. */
		mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
		DRV_LOG(DEBUG,
			"port %u Rx queue %u registering"
			" mp %s having %u chunks",
			dev->data->port_id, rxq_ctrl->rxq.idx,
			mp->name, mp->nb_mem_chunks);
		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
		if (!rxq_ctrl->obj)
			goto error;
		if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
			rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
		else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
			rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Binds Tx queues to Rx queues for hairpin.
 *
 * Binds each hairpin Tx queue to its target Rx queue and moves both
 * to the ready state.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;

	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %d",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq_ctrl = mlx5_rxq_get(dev,
					txq_ctrl->hairpin_conf.peers[0].queue);
		if (!rxq_ctrl) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d cannot be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		/* Bind the SQ to the peer RQ and move it to ready state. */
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
		/* Bind the RQ to the peer SQ and move it to ready state. */
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		mlx5_txq_release(dev, i);
		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	}
	return 0;
error:
	mlx5_txq_release(dev, i);
	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;
	int fine_inline;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
	fine_inline = rte_mbuf_dynflag_lookup
		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
	if (fine_inline > 0)
		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
	else
		rte_net_mlx5_dynf_inline_mask = 0;
	if (dev->data->nb_rx_queues > 0) {
		ret = mlx5_dev_configure_rss_reta(dev);
		if (ret) {
			DRV_LOG(ERR, "port %u reta config failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return -rte_errno;
		}
	}
	ret = mlx5_txpp_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_hairpin_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/* Set started flag here for the following steps like control flow. */
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_os_stats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u failed to set default flows",
			dev->data->port_id);
		goto error;
	}
	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
	mlx5_flow_rxq_dynf_metadata_set(dev);
	/* Set flags and context to convert Rx timestamps. */
	mlx5_rxq_timestamp_set(dev);
	/* Set a mask and offset of scheduling on timestamp into Tx queues. */
	mlx5_txq_dynf_timestamp_set(dev);
	/*
	 * In non-cached mode, only the default mreg copy action needs to be
	 * started here since no application-created flow exists anymore.
	 * But it is worth wrapping the interface for further usage.
	 */
	ret = mlx5_flow_start_default(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_req_start_rxtx(dev);
	if (priv->sh->intr_handle.fd >= 0) {
		priv->sh->port[priv->dev_port - 1].ih_port_id =
					(uint32_t)dev->data->port_id;
	} else {
		DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
			dev->data->port_id);
		dev->data->dev_conf.intr_conf.lsc = 0;
		dev->data->dev_conf.intr_conf.rmv = 0;
	}
	if (priv->sh->intr_handle_devx.fd >= 0)
		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
					(uint32_t)dev->data->port_id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_txpp_stop(dev); /* Stop last. */
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_req_stop_rxtx(dev);
	/* Let in-flight Rx bursts drain before releasing the queues. */
	usleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop_default(dev);
	/* Control flows for default traffic can be removed first. */
	mlx5_traffic_disable(dev);
	/* All Rx queue flags will be cleared in the flush interface. */
	mlx5_flow_list_flush(dev, &priv->flows, true);
	mlx5_rx_intr_vec_disable(dev);
	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_txpp_stop(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * The hairpin Tx queue default flow must be created regardless of
	 * isolated mode; otherwise all packets to be sent would go out
	 * directly without the Tx flow actions, e.g. encapsulation.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
	if (priv->config.dv_esw_en && !priv->config.vf) {
		if (mlx5_flow_create_esw_table_zero_flow(dev))
			priv->fdb_def_rule = 1;
		else
			DRV_LOG(INFO, "port %u FDB default rule cannot be"
				" configured - only Eswitch group 0 flows are"
				" supported.", dev->data->port_id);
	}
	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
		ret = mlx5_flow_lacp_miss(dev);
		if (ret)
			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
				"forward LACP to kernel.", dev->data->port_id);
		else
			DRV_LOG(INFO, "LACP traffic will be missed in port %u.",
				dev->data->port_id);
	}
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}