18fd92a66SOlivier Matz /* SPDX-License-Identifier: BSD-3-Clause 2e60fbd5bSAdrien Mazarguil * Copyright 2015 6WIND S.A. 35feecc57SShahaf Shuler * Copyright 2015 Mellanox Technologies, Ltd 4e60fbd5bSAdrien Mazarguil */ 58fd92a66SOlivier Matz 63f2fe392SNélio Laranjeiro #include <unistd.h> 7e60fbd5bSAdrien Mazarguil 8e60fbd5bSAdrien Mazarguil #include <rte_ether.h> 9df96fd0dSBruce Richardson #include <ethdev_driver.h> 10198a3c33SNelio Laranjeiro #include <rte_interrupts.h> 11198a3c33SNelio Laranjeiro #include <rte_alarm.h> 1220698c9fSOphir Munk #include <rte_cycles.h> 13e60fbd5bSAdrien Mazarguil 141260a87bSMichael Baum #include <mlx5_malloc.h> 151260a87bSMichael Baum 16e60fbd5bSAdrien Mazarguil #include "mlx5.h" 17ec4e11d4SDmitry Kozlyuk #include "mlx5_flow.h" 18151cbe3aSMichael Baum #include "mlx5_rx.h" 19377b69fbSMichael Baum #include "mlx5_tx.h" 20e60fbd5bSAdrien Mazarguil #include "mlx5_utils.h" 21efa79e68SOri Kam #include "rte_pmd_mlx5.h" 22e60fbd5bSAdrien Mazarguil 23fb732b0aSNélio Laranjeiro /** 24fb732b0aSNélio Laranjeiro * Stop traffic on Tx queues. 25fb732b0aSNélio Laranjeiro * 26fb732b0aSNélio Laranjeiro * @param dev 27fb732b0aSNélio Laranjeiro * Pointer to Ethernet device structure. 28fb732b0aSNélio Laranjeiro */ 296e78005aSNélio Laranjeiro static void 30af4f09f2SNélio Laranjeiro mlx5_txq_stop(struct rte_eth_dev *dev) 316e78005aSNélio Laranjeiro { 32dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 336e78005aSNélio Laranjeiro unsigned int i; 346e78005aSNélio Laranjeiro 356e78005aSNélio Laranjeiro for (i = 0; i != priv->txqs_n; ++i) 36af4f09f2SNélio Laranjeiro mlx5_txq_release(dev, i); 376e78005aSNélio Laranjeiro } 386e78005aSNélio Laranjeiro 39fb732b0aSNélio Laranjeiro /** 40fb732b0aSNélio Laranjeiro * Start traffic on Tx queues. 41fb732b0aSNélio Laranjeiro * 42fb732b0aSNélio Laranjeiro * @param dev 43fb732b0aSNélio Laranjeiro * Pointer to Ethernet device structure. 44fb732b0aSNélio Laranjeiro * 45fb732b0aSNélio Laranjeiro * @return 46a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 47fb732b0aSNélio Laranjeiro */ 486e78005aSNélio Laranjeiro static int 49af4f09f2SNélio Laranjeiro mlx5_txq_start(struct rte_eth_dev *dev) 506e78005aSNélio Laranjeiro { 51dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 526e78005aSNélio Laranjeiro unsigned int i; 53a6d83b6aSNélio Laranjeiro int ret; 546e78005aSNélio Laranjeiro 556e78005aSNélio Laranjeiro for (i = 0; i != priv->txqs_n; ++i) { 56af4f09f2SNélio Laranjeiro struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); 57f49f4483SMichael Baum struct mlx5_txq_data *txq_data = &txq_ctrl->txq; 58f49f4483SMichael Baum uint32_t flags = MLX5_MEM_RTE | MLX5_MEM_ZERO; 596e78005aSNélio Laranjeiro 606e78005aSNélio Laranjeiro if (!txq_ctrl) 616e78005aSNélio Laranjeiro continue; 62c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin) 636e78005aSNélio Laranjeiro txq_alloc_elts(txq_ctrl); 64f49f4483SMichael Baum MLX5_ASSERT(!txq_ctrl->obj); 65f49f4483SMichael Baum txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj), 66f49f4483SMichael Baum 0, txq_ctrl->socket); 67894c4a8eSOri Kam if (!txq_ctrl->obj) { 68f49f4483SMichael Baum DRV_LOG(ERR, "Port %u Tx queue %u cannot allocate " 69f49f4483SMichael Baum "memory resources.", dev->data->port_id, 70f49f4483SMichael Baum txq_data->idx); 71a6d83b6aSNélio Laranjeiro rte_errno = ENOMEM; 726e78005aSNélio Laranjeiro goto error; 736e78005aSNélio Laranjeiro } 74f49f4483SMichael Baum ret = priv->obj_ops.txq_obj_new(dev, i); 75f49f4483SMichael Baum if (ret < 0) { 76f49f4483SMichael Baum mlx5_free(txq_ctrl->obj); 77f49f4483SMichael Baum txq_ctrl->obj = NULL; 78f49f4483SMichael Baum goto error; 79f49f4483SMichael Baum } 80c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin) { 81f49f4483SMichael Baum size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs); 82876b5d52SMatan Azrad 83f49f4483SMichael Baum txq_data->fcqs = mlx5_malloc(flags, size, 84f49f4483SMichael Baum RTE_CACHE_LINE_SIZE, 85f49f4483SMichael Baum txq_ctrl->socket); 86f49f4483SMichael Baum if (!txq_data->fcqs) { 87f49f4483SMichael Baum DRV_LOG(ERR, "Port %u Tx queue %u cannot " 88f49f4483SMichael Baum "allocate memory (FCQ).", 89f49f4483SMichael Baum dev->data->port_id, i); 90f49f4483SMichael Baum rte_errno = ENOMEM; 91f49f4483SMichael Baum goto error; 92f49f4483SMichael Baum } 93f49f4483SMichael Baum } 94f49f4483SMichael Baum DRV_LOG(DEBUG, "Port %u txq %u updated with %p.", 95f49f4483SMichael Baum dev->data->port_id, i, (void *)&txq_ctrl->obj); 96f49f4483SMichael Baum LIST_INSERT_HEAD(&priv->txqsobj, txq_ctrl->obj, next); 976e78005aSNélio Laranjeiro } 98a6d83b6aSNélio Laranjeiro return 0; 996e78005aSNélio Laranjeiro error: 100a6d83b6aSNélio Laranjeiro ret = rte_errno; /* Save rte_errno before cleanup. */ 10124f653a7SYongseok Koh do { 10224f653a7SYongseok Koh mlx5_txq_release(dev, i); 10324f653a7SYongseok Koh } while (i-- != 0); 104a6d83b6aSNélio Laranjeiro rte_errno = ret; /* Restore rte_errno. */ 105a6d83b6aSNélio Laranjeiro return -rte_errno; 1066e78005aSNélio Laranjeiro } 1076e78005aSNélio Laranjeiro 108fb732b0aSNélio Laranjeiro /** 109fec28ca0SDmitry Kozlyuk * Register Rx queue mempools and fill the Rx queue cache. 110fec28ca0SDmitry Kozlyuk * This function tolerates repeated mempool registration. 111fec28ca0SDmitry Kozlyuk * 112fec28ca0SDmitry Kozlyuk * @param[in] rxq_ctrl 113fec28ca0SDmitry Kozlyuk * Rx queue control data. 114fec28ca0SDmitry Kozlyuk * 115fec28ca0SDmitry Kozlyuk * @return 116fec28ca0SDmitry Kozlyuk * 0 on success, (-1) on failure and rte_errno is set. 117fec28ca0SDmitry Kozlyuk */ 118fec28ca0SDmitry Kozlyuk static int 11920489176SMichael Baum mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl) 120fec28ca0SDmitry Kozlyuk { 121fec28ca0SDmitry Kozlyuk struct rte_mempool *mp; 122fec28ca0SDmitry Kozlyuk uint32_t s; 123fec28ca0SDmitry Kozlyuk int ret = 0; 124fec28ca0SDmitry Kozlyuk 125fec28ca0SDmitry Kozlyuk mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl); 126fec28ca0SDmitry Kozlyuk /* MPRQ mempool is registered on creation, just fill the cache. */ 12708ac0358SDmitry Kozlyuk if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) 12808ac0358SDmitry Kozlyuk return mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl, 12908ac0358SDmitry Kozlyuk rxq_ctrl->rxq.mprq_mp); 130fec28ca0SDmitry Kozlyuk for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) { 13108ac0358SDmitry Kozlyuk bool is_extmem; 1327297d2cdSDmitry Kozlyuk 133fec28ca0SDmitry Kozlyuk mp = rxq_ctrl->rxq.rxseg[s].mp; 13408ac0358SDmitry Kozlyuk is_extmem = (rte_pktmbuf_priv_flags(mp) & 13508ac0358SDmitry Kozlyuk RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0; 13608ac0358SDmitry Kozlyuk ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp, 13708ac0358SDmitry Kozlyuk is_extmem); 138fec28ca0SDmitry Kozlyuk if (ret < 0 && rte_errno != EEXIST) 139fec28ca0SDmitry Kozlyuk return ret; 14008ac0358SDmitry Kozlyuk ret = mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl, 14108ac0358SDmitry Kozlyuk mp); 14208ac0358SDmitry Kozlyuk if (ret < 0) 14308ac0358SDmitry Kozlyuk return ret; 144fec28ca0SDmitry Kozlyuk } 145fec28ca0SDmitry Kozlyuk return 0; 146fec28ca0SDmitry Kozlyuk } 147fec28ca0SDmitry Kozlyuk 148fec28ca0SDmitry Kozlyuk /** 149fb732b0aSNélio Laranjeiro * Stop traffic on Rx queues. 150fb732b0aSNélio Laranjeiro * 151fb732b0aSNélio Laranjeiro * @param dev 152fb732b0aSNélio Laranjeiro * Pointer to Ethernet device structure. 153fb732b0aSNélio Laranjeiro */ 154a1366b1aSNélio Laranjeiro static void 155af4f09f2SNélio Laranjeiro mlx5_rxq_stop(struct rte_eth_dev *dev) 156a1366b1aSNélio Laranjeiro { 157dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 158a1366b1aSNélio Laranjeiro unsigned int i; 159a1366b1aSNélio Laranjeiro 160a1366b1aSNélio Laranjeiro for (i = 0; i != priv->rxqs_n; ++i) 161af4f09f2SNélio Laranjeiro mlx5_rxq_release(dev, i); 162a1366b1aSNélio Laranjeiro } 163a1366b1aSNélio Laranjeiro 16409c25553SXueming Li static int 16509c25553SXueming Li mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl, 16609c25553SXueming Li unsigned int idx) 16709c25553SXueming Li { 16809c25553SXueming Li int ret = 0; 16909c25553SXueming Li 170c06f77aeSMichael Baum if (!rxq_ctrl->is_hairpin) { 17109c25553SXueming Li /* 17209c25553SXueming Li * Pre-register the mempools. Regardless of whether 17309c25553SXueming Li * the implicit registration is enabled or not, 17409c25553SXueming Li * Rx mempool destruction is tracked to free MRs. 17509c25553SXueming Li */ 17620489176SMichael Baum if (mlx5_rxq_mempool_register(rxq_ctrl) < 0) 17709c25553SXueming Li return -rte_errno; 17809c25553SXueming Li ret = rxq_alloc_elts(rxq_ctrl); 17909c25553SXueming Li if (ret) 18009c25553SXueming Li return ret; 18109c25553SXueming Li } 18209c25553SXueming Li MLX5_ASSERT(!rxq_ctrl->obj); 18309c25553SXueming Li rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, 18409c25553SXueming Li sizeof(*rxq_ctrl->obj), 0, 18509c25553SXueming Li rxq_ctrl->socket); 18609c25553SXueming Li if (!rxq_ctrl->obj) { 18709c25553SXueming Li DRV_LOG(ERR, "Port %u Rx queue %u can't allocate resources.", 18809c25553SXueming Li dev->data->port_id, idx); 18909c25553SXueming Li rte_errno = ENOMEM; 19009c25553SXueming Li return -rte_errno; 19109c25553SXueming Li } 19209c25553SXueming Li DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.", dev->data->port_id, 19309c25553SXueming Li idx, (void *)&rxq_ctrl->obj); 19409c25553SXueming Li return 0; 19509c25553SXueming Li } 19609c25553SXueming Li 197fb732b0aSNélio Laranjeiro /** 198fb732b0aSNélio Laranjeiro * Start traffic on Rx queues. 199fb732b0aSNélio Laranjeiro * 200fb732b0aSNélio Laranjeiro * @param dev 201fb732b0aSNélio Laranjeiro * Pointer to Ethernet device structure. 202fb732b0aSNélio Laranjeiro * 203fb732b0aSNélio Laranjeiro * @return 204a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 205fb732b0aSNélio Laranjeiro */ 206a1366b1aSNélio Laranjeiro static int 207af4f09f2SNélio Laranjeiro mlx5_rxq_start(struct rte_eth_dev *dev) 208a1366b1aSNélio Laranjeiro { 209dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 210a1366b1aSNélio Laranjeiro unsigned int i; 211a1366b1aSNélio Laranjeiro int ret = 0; 212a1366b1aSNélio Laranjeiro 2137d6bf6b8SYongseok Koh /* Allocate/reuse/resize mempool for Multi-Packet RQ. */ 21424f653a7SYongseok Koh if (mlx5_mprq_alloc_mp(dev)) { 21524f653a7SYongseok Koh /* Should not release Rx queues but return immediately. */ 21624f653a7SYongseok Koh return -rte_errno; 21724f653a7SYongseok Koh } 21891d1cfafSMichael Baum DRV_LOG(DEBUG, "Port %u dev_cap.max_qp_wr is %d.", 21991d1cfafSMichael Baum dev->data->port_id, priv->sh->dev_cap.max_qp_wr); 22091d1cfafSMichael Baum DRV_LOG(DEBUG, "Port %u dev_cap.max_sge is %d.", 22191d1cfafSMichael Baum dev->data->port_id, priv->sh->dev_cap.max_sge); 222a1366b1aSNélio Laranjeiro for (i = 0; i != priv->rxqs_n; ++i) { 2230cedf34dSXueming Li struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i); 2240cedf34dSXueming Li struct mlx5_rxq_ctrl *rxq_ctrl; 225a1366b1aSNélio Laranjeiro 2260cedf34dSXueming Li if (rxq == NULL) 227a1366b1aSNélio Laranjeiro continue; 2280cedf34dSXueming Li rxq_ctrl = rxq->ctrl; 229c93943c5SDariusz Sosnowski if (!rxq_ctrl->started) 23009c25553SXueming Li if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0) 231fec28ca0SDmitry Kozlyuk goto error; 2325ceb3a02SXueming Li ret = priv->obj_ops.rxq_obj_new(rxq); 2331260a87bSMichael Baum if (ret) { 2341260a87bSMichael Baum mlx5_free(rxq_ctrl->obj); 2359ec1ceabSDmitry Kozlyuk rxq_ctrl->obj = NULL; 2361260a87bSMichael Baum goto error; 2371260a87bSMichael Baum } 238c93943c5SDariusz Sosnowski if (!rxq_ctrl->started) 239c93943c5SDariusz Sosnowski LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next); 24009c25553SXueming Li rxq_ctrl->started = true; 2411260a87bSMichael Baum } 242a6d83b6aSNélio Laranjeiro return 0; 243a1366b1aSNélio Laranjeiro error: 244a6d83b6aSNélio Laranjeiro ret = rte_errno; /* Save rte_errno before cleanup. */ 24524f653a7SYongseok Koh do { 24624f653a7SYongseok Koh mlx5_rxq_release(dev, i); 24724f653a7SYongseok Koh } while (i-- != 0); 248a6d83b6aSNélio Laranjeiro rte_errno = ret; /* Restore rte_errno. */ 249a6d83b6aSNélio Laranjeiro return -rte_errno; 250a1366b1aSNélio Laranjeiro } 251a1366b1aSNélio Laranjeiro 252e60fbd5bSAdrien Mazarguil /** 2536a338ad4SOri Kam * Binds Tx queues to Rx queues for hairpin. 2546a338ad4SOri Kam * 2556a338ad4SOri Kam * Binds Tx queues to the target Rx queues. 2566a338ad4SOri Kam * 2576a338ad4SOri Kam * @param dev 2586a338ad4SOri Kam * Pointer to Ethernet device structure. 2596a338ad4SOri Kam * 2606a338ad4SOri Kam * @return 2616a338ad4SOri Kam * 0 on success, a negative errno value otherwise and rte_errno is set. 2626a338ad4SOri Kam */ 2636a338ad4SOri Kam static int 26437cd4501SBing Zhao mlx5_hairpin_auto_bind(struct rte_eth_dev *dev) 2656a338ad4SOri Kam { 2666a338ad4SOri Kam struct mlx5_priv *priv = dev->data->dev_private; 2676a338ad4SOri Kam struct mlx5_devx_modify_sq_attr sq_attr = { 0 }; 2686a338ad4SOri Kam struct mlx5_devx_modify_rq_attr rq_attr = { 0 }; 2696a338ad4SOri Kam struct mlx5_txq_ctrl *txq_ctrl; 2700cedf34dSXueming Li struct mlx5_rxq_priv *rxq; 2716a338ad4SOri Kam struct mlx5_rxq_ctrl *rxq_ctrl; 2726a338ad4SOri Kam struct mlx5_devx_obj *sq; 2736a338ad4SOri Kam struct mlx5_devx_obj *rq; 2746a338ad4SOri Kam unsigned int i; 2756a338ad4SOri Kam int ret = 0; 276aa8bea0eSBing Zhao bool need_auto = false; 277aa8bea0eSBing Zhao uint16_t self_port = dev->data->port_id; 2786a338ad4SOri Kam 2796a338ad4SOri Kam for (i = 0; i != priv->txqs_n; ++i) { 2806a338ad4SOri Kam txq_ctrl = mlx5_txq_get(dev, i); 2816a338ad4SOri Kam if (!txq_ctrl) 2826a338ad4SOri Kam continue; 283c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin || 28475f166c2SBing Zhao txq_ctrl->hairpin_conf.peers[0].port != self_port) { 2856a338ad4SOri Kam mlx5_txq_release(dev, i); 2866a338ad4SOri Kam continue; 2876a338ad4SOri Kam } 288aa8bea0eSBing Zhao if (txq_ctrl->hairpin_conf.manual_bind) { 289aa8bea0eSBing Zhao mlx5_txq_release(dev, i); 290aa8bea0eSBing Zhao return 0; 291aa8bea0eSBing Zhao } 292aa8bea0eSBing Zhao need_auto = true; 293aa8bea0eSBing Zhao mlx5_txq_release(dev, i); 294aa8bea0eSBing Zhao } 295aa8bea0eSBing Zhao if (!need_auto) 296aa8bea0eSBing Zhao return 0; 297aa8bea0eSBing Zhao for (i = 0; i != priv->txqs_n; ++i) { 298aa8bea0eSBing Zhao txq_ctrl = mlx5_txq_get(dev, i); 299aa8bea0eSBing Zhao if (!txq_ctrl) 300aa8bea0eSBing Zhao continue; 30175f166c2SBing Zhao /* Skip hairpin queues with other peer ports. */ 302c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin || 30375f166c2SBing Zhao txq_ctrl->hairpin_conf.peers[0].port != self_port) { 304aa8bea0eSBing Zhao mlx5_txq_release(dev, i); 305aa8bea0eSBing Zhao continue; 306aa8bea0eSBing Zhao } 3076a338ad4SOri Kam if (!txq_ctrl->obj) { 3086a338ad4SOri Kam rte_errno = ENOMEM; 3096a338ad4SOri Kam DRV_LOG(ERR, "port %u no txq object found: %d", 3106a338ad4SOri Kam dev->data->port_id, i); 3116a338ad4SOri Kam mlx5_txq_release(dev, i); 3126a338ad4SOri Kam return -rte_errno; 3136a338ad4SOri Kam } 3146a338ad4SOri Kam sq = txq_ctrl->obj->sq; 3150cedf34dSXueming Li rxq = mlx5_rxq_get(dev, txq_ctrl->hairpin_conf.peers[0].queue); 3160cedf34dSXueming Li if (rxq == NULL) { 3176a338ad4SOri Kam mlx5_txq_release(dev, i); 3186a338ad4SOri Kam rte_errno = EINVAL; 3196a338ad4SOri Kam DRV_LOG(ERR, "port %u no rxq object found: %d", 3206a338ad4SOri Kam dev->data->port_id, 3216a338ad4SOri Kam txq_ctrl->hairpin_conf.peers[0].queue); 3226a338ad4SOri Kam return -rte_errno; 3236a338ad4SOri Kam } 3240cedf34dSXueming Li rxq_ctrl = rxq->ctrl; 325c06f77aeSMichael Baum if (!rxq_ctrl->is_hairpin || 32644126bd9SXueming Li rxq->hairpin_conf.peers[0].queue != i) { 3276a338ad4SOri Kam rte_errno = ENOMEM; 3286a338ad4SOri Kam DRV_LOG(ERR, "port %u Tx queue %d can't be binded to " 3296a338ad4SOri Kam "Rx queue %d", dev->data->port_id, 3306a338ad4SOri Kam i, txq_ctrl->hairpin_conf.peers[0].queue); 3316a338ad4SOri Kam goto error; 3326a338ad4SOri Kam } 3336a338ad4SOri Kam rq = rxq_ctrl->obj->rq; 3346a338ad4SOri Kam if (!rq) { 3356a338ad4SOri Kam rte_errno = ENOMEM; 3366a338ad4SOri Kam DRV_LOG(ERR, "port %u hairpin no matching rxq: %d", 3376a338ad4SOri Kam dev->data->port_id, 3386a338ad4SOri Kam txq_ctrl->hairpin_conf.peers[0].queue); 3396a338ad4SOri Kam goto error; 3406a338ad4SOri Kam } 3416a338ad4SOri Kam sq_attr.state = MLX5_SQC_STATE_RDY; 3426a338ad4SOri Kam sq_attr.sq_state = MLX5_SQC_STATE_RST; 3436a338ad4SOri Kam sq_attr.hairpin_peer_rq = rq->id; 34453820561SMichael Baum sq_attr.hairpin_peer_vhca = 34553820561SMichael Baum priv->sh->cdev->config.hca_attr.vhca_id; 3466a338ad4SOri Kam ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr); 3476a338ad4SOri Kam if (ret) 3486a338ad4SOri Kam goto error; 349ca638c49SDariusz Sosnowski rq_attr.state = MLX5_RQC_STATE_RDY; 350ca638c49SDariusz Sosnowski rq_attr.rq_state = MLX5_RQC_STATE_RST; 3516a338ad4SOri Kam rq_attr.hairpin_peer_sq = sq->id; 35253820561SMichael Baum rq_attr.hairpin_peer_vhca = 35353820561SMichael Baum priv->sh->cdev->config.hca_attr.vhca_id; 3546a338ad4SOri Kam ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr); 3556a338ad4SOri Kam if (ret) 3566a338ad4SOri Kam goto error; 357aa8bea0eSBing Zhao /* Qs with auto-bind will be destroyed directly. */ 35844126bd9SXueming Li rxq->hairpin_status = 1; 359aa8bea0eSBing Zhao txq_ctrl->hairpin_status = 1; 3606a338ad4SOri Kam mlx5_txq_release(dev, i); 3616a338ad4SOri Kam } 3626a338ad4SOri Kam return 0; 3636a338ad4SOri Kam error: 3646a338ad4SOri Kam mlx5_txq_release(dev, i); 3656a338ad4SOri Kam return -rte_errno; 3666a338ad4SOri Kam } 3676a338ad4SOri Kam 36837cd4501SBing Zhao /* 36937cd4501SBing Zhao * Fetch the peer queue's SW & HW information. 37037cd4501SBing Zhao * 37137cd4501SBing Zhao * @param dev 37237cd4501SBing Zhao * Pointer to Ethernet device structure. 37337cd4501SBing Zhao * @param peer_queue 37437cd4501SBing Zhao * Index of the queue to fetch the information. 37537cd4501SBing Zhao * @param current_info 37637cd4501SBing Zhao * Pointer to the input peer information, not used currently. 37737cd4501SBing Zhao * @param peer_info 37837cd4501SBing Zhao * Pointer to the structure to store the information, output. 37937cd4501SBing Zhao * @param direction 38037cd4501SBing Zhao * Positive to get the RxQ information, zero to get the TxQ information. 38137cd4501SBing Zhao * 38237cd4501SBing Zhao * @return 38337cd4501SBing Zhao * 0 on success, a negative errno value otherwise and rte_errno is set. 38437cd4501SBing Zhao */ 38537cd4501SBing Zhao int 38637cd4501SBing Zhao mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue, 38737cd4501SBing Zhao struct rte_hairpin_peer_info *current_info, 38837cd4501SBing Zhao struct rte_hairpin_peer_info *peer_info, 38937cd4501SBing Zhao uint32_t direction) 39037cd4501SBing Zhao { 39137cd4501SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 39237cd4501SBing Zhao RTE_SET_USED(current_info); 39337cd4501SBing Zhao 39437cd4501SBing Zhao if (dev->data->dev_started == 0) { 39537cd4501SBing Zhao rte_errno = EBUSY; 39637cd4501SBing Zhao DRV_LOG(ERR, "peer port %u is not started", 39737cd4501SBing Zhao dev->data->port_id); 39837cd4501SBing Zhao return -rte_errno; 39937cd4501SBing Zhao } 40037cd4501SBing Zhao /* 40137cd4501SBing Zhao * Peer port used as egress. In the current design, hairpin Tx queue 40237cd4501SBing Zhao * will be bound to the peer Rx queue. Indeed, only the information of 40337cd4501SBing Zhao * peer Rx queue needs to be fetched. 40437cd4501SBing Zhao */ 40537cd4501SBing Zhao if (direction == 0) { 40637cd4501SBing Zhao struct mlx5_txq_ctrl *txq_ctrl; 40737cd4501SBing Zhao 40837cd4501SBing Zhao txq_ctrl = mlx5_txq_get(dev, peer_queue); 40937cd4501SBing Zhao if (txq_ctrl == NULL) { 41037cd4501SBing Zhao rte_errno = EINVAL; 41137cd4501SBing Zhao DRV_LOG(ERR, "Failed to get port %u Tx queue %d", 41237cd4501SBing Zhao dev->data->port_id, peer_queue); 41337cd4501SBing Zhao return -rte_errno; 41437cd4501SBing Zhao } 415c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin) { 41637cd4501SBing Zhao rte_errno = EINVAL; 41737cd4501SBing Zhao DRV_LOG(ERR, "port %u queue %d is not a hairpin Txq", 41837cd4501SBing Zhao dev->data->port_id, peer_queue); 41937cd4501SBing Zhao mlx5_txq_release(dev, peer_queue); 42037cd4501SBing Zhao return -rte_errno; 42137cd4501SBing Zhao } 42237cd4501SBing Zhao if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) { 42337cd4501SBing Zhao rte_errno = ENOMEM; 42437cd4501SBing Zhao DRV_LOG(ERR, "port %u no Txq object found: %d", 42537cd4501SBing Zhao dev->data->port_id, peer_queue); 42637cd4501SBing Zhao mlx5_txq_release(dev, peer_queue); 42737cd4501SBing Zhao return -rte_errno; 42837cd4501SBing Zhao } 42926e1eaf2SDariusz Sosnowski peer_info->qp_id = mlx5_txq_get_sqn(txq_ctrl); 43053820561SMichael Baum peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id; 43137cd4501SBing Zhao /* 1-to-1 mapping, only the first one is used. */ 43237cd4501SBing Zhao peer_info->peer_q = txq_ctrl->hairpin_conf.peers[0].queue; 43337cd4501SBing Zhao peer_info->tx_explicit = txq_ctrl->hairpin_conf.tx_explicit; 43437cd4501SBing Zhao peer_info->manual_bind = txq_ctrl->hairpin_conf.manual_bind; 43537cd4501SBing Zhao mlx5_txq_release(dev, peer_queue); 43637cd4501SBing Zhao } else { /* Peer port used as ingress. */ 4370cedf34dSXueming Li struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, peer_queue); 43837cd4501SBing Zhao struct mlx5_rxq_ctrl *rxq_ctrl; 43937cd4501SBing Zhao 4400cedf34dSXueming Li if (rxq == NULL) { 44137cd4501SBing Zhao rte_errno = EINVAL; 44237cd4501SBing Zhao DRV_LOG(ERR, "Failed to get port %u Rx queue %d", 44337cd4501SBing Zhao dev->data->port_id, peer_queue); 44437cd4501SBing Zhao return -rte_errno; 44537cd4501SBing Zhao } 4460cedf34dSXueming Li rxq_ctrl = rxq->ctrl; 447c06f77aeSMichael Baum if (!rxq_ctrl->is_hairpin) { 44837cd4501SBing Zhao rte_errno = EINVAL; 44937cd4501SBing Zhao DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq", 45037cd4501SBing Zhao dev->data->port_id, peer_queue); 45137cd4501SBing Zhao return -rte_errno; 45237cd4501SBing Zhao } 45337cd4501SBing Zhao if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) { 45437cd4501SBing Zhao rte_errno = ENOMEM; 45537cd4501SBing Zhao DRV_LOG(ERR, "port %u no Rxq object found: %d", 45637cd4501SBing Zhao dev->data->port_id, peer_queue); 45737cd4501SBing Zhao return -rte_errno; 45837cd4501SBing Zhao } 45937cd4501SBing Zhao peer_info->qp_id = rxq_ctrl->obj->rq->id; 46053820561SMichael Baum peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id; 46144126bd9SXueming Li peer_info->peer_q = rxq->hairpin_conf.peers[0].queue; 46244126bd9SXueming Li peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit; 46344126bd9SXueming Li peer_info->manual_bind = rxq->hairpin_conf.manual_bind; 46437cd4501SBing Zhao } 46537cd4501SBing Zhao return 0; 46637cd4501SBing Zhao } 46737cd4501SBing Zhao 46837cd4501SBing Zhao /* 46937cd4501SBing Zhao * Bind the hairpin queue with the peer HW information. 47037cd4501SBing Zhao * This needs to be called twice both for Tx and Rx queues of a pair. 47137cd4501SBing Zhao * If the queue is already bound, it is considered successful. 47237cd4501SBing Zhao * 47337cd4501SBing Zhao * @param dev 47437cd4501SBing Zhao * Pointer to Ethernet device structure. 47537cd4501SBing Zhao * @param cur_queue 47637cd4501SBing Zhao * Index of the queue to change the HW configuration to bind. 47737cd4501SBing Zhao * @param peer_info 47837cd4501SBing Zhao * Pointer to information of the peer queue. 47937cd4501SBing Zhao * @param direction 48037cd4501SBing Zhao * Positive to configure the TxQ, zero to configure the RxQ. 48137cd4501SBing Zhao * 48237cd4501SBing Zhao * @return 48337cd4501SBing Zhao * 0 on success, a negative errno value otherwise and rte_errno is set. 48437cd4501SBing Zhao */ 48537cd4501SBing Zhao int 48637cd4501SBing Zhao mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue, 48737cd4501SBing Zhao struct rte_hairpin_peer_info *peer_info, 48837cd4501SBing Zhao uint32_t direction) 48937cd4501SBing Zhao { 49037cd4501SBing Zhao int ret = 0; 49137cd4501SBing Zhao 49237cd4501SBing Zhao /* 49337cd4501SBing Zhao * Consistency checking of the peer queue: opposite direction is used 49437cd4501SBing Zhao * to get the peer queue info with ethdev port ID, no need to check. 49537cd4501SBing Zhao */ 49637cd4501SBing Zhao if (peer_info->peer_q != cur_queue) { 49737cd4501SBing Zhao rte_errno = EINVAL; 49837cd4501SBing Zhao DRV_LOG(ERR, "port %u queue %d and peer queue %d mismatch", 49937cd4501SBing Zhao dev->data->port_id, cur_queue, peer_info->peer_q); 50037cd4501SBing Zhao return -rte_errno; 50137cd4501SBing Zhao } 50237cd4501SBing Zhao if (direction != 0) { 50337cd4501SBing Zhao struct mlx5_txq_ctrl *txq_ctrl; 50437cd4501SBing Zhao struct mlx5_devx_modify_sq_attr sq_attr = { 0 }; 50537cd4501SBing Zhao 50637cd4501SBing Zhao txq_ctrl = mlx5_txq_get(dev, cur_queue); 50737cd4501SBing Zhao if (txq_ctrl == NULL) { 50837cd4501SBing Zhao rte_errno = EINVAL; 50937cd4501SBing Zhao DRV_LOG(ERR, "Failed to get port %u Tx queue %d", 51037cd4501SBing Zhao dev->data->port_id, cur_queue); 51137cd4501SBing Zhao return -rte_errno; 51237cd4501SBing Zhao } 513c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin) { 51437cd4501SBing Zhao rte_errno = EINVAL; 51537cd4501SBing Zhao DRV_LOG(ERR, "port %u queue %d not a hairpin Txq", 51637cd4501SBing Zhao dev->data->port_id, cur_queue); 51737cd4501SBing Zhao mlx5_txq_release(dev, cur_queue); 51837cd4501SBing Zhao return -rte_errno; 51937cd4501SBing Zhao } 52037cd4501SBing Zhao if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) { 52137cd4501SBing Zhao rte_errno = ENOMEM; 52237cd4501SBing Zhao DRV_LOG(ERR, "port %u no Txq object found: %d", 52337cd4501SBing Zhao dev->data->port_id, cur_queue); 52437cd4501SBing Zhao mlx5_txq_release(dev, cur_queue); 52537cd4501SBing Zhao return -rte_errno; 52637cd4501SBing Zhao } 52737cd4501SBing Zhao if (txq_ctrl->hairpin_status != 0) { 52837cd4501SBing Zhao DRV_LOG(DEBUG, "port %u Tx queue %d is already bound", 52937cd4501SBing Zhao dev->data->port_id, cur_queue); 53037cd4501SBing Zhao mlx5_txq_release(dev, cur_queue); 53137cd4501SBing Zhao return 0; 53237cd4501SBing Zhao } 53337cd4501SBing Zhao /* 53437cd4501SBing Zhao * All queues' of one port consistency checking is done in the 53537cd4501SBing Zhao * bind() function, and that is optional. 53637cd4501SBing Zhao */ 53737cd4501SBing Zhao if (peer_info->tx_explicit != 53837cd4501SBing Zhao txq_ctrl->hairpin_conf.tx_explicit) { 53937cd4501SBing Zhao rte_errno = EINVAL; 54037cd4501SBing Zhao DRV_LOG(ERR, "port %u Tx queue %d and peer Tx rule mode" 54137cd4501SBing Zhao " mismatch", dev->data->port_id, cur_queue); 54237cd4501SBing Zhao mlx5_txq_release(dev, cur_queue); 54337cd4501SBing Zhao return -rte_errno; 54437cd4501SBing Zhao } 54537cd4501SBing Zhao if (peer_info->manual_bind != 54637cd4501SBing Zhao txq_ctrl->hairpin_conf.manual_bind) { 54737cd4501SBing Zhao rte_errno = EINVAL; 54837cd4501SBing Zhao DRV_LOG(ERR, "port %u Tx queue %d and peer binding mode" 54937cd4501SBing Zhao " mismatch", dev->data->port_id, cur_queue); 55037cd4501SBing Zhao mlx5_txq_release(dev, cur_queue); 55137cd4501SBing Zhao return -rte_errno; 55237cd4501SBing Zhao } 55337cd4501SBing Zhao sq_attr.state = MLX5_SQC_STATE_RDY; 55437cd4501SBing Zhao sq_attr.sq_state = MLX5_SQC_STATE_RST; 55537cd4501SBing Zhao sq_attr.hairpin_peer_rq = peer_info->qp_id; 55637cd4501SBing Zhao sq_attr.hairpin_peer_vhca = peer_info->vhca_id; 55737cd4501SBing Zhao ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr); 55837cd4501SBing Zhao if (ret == 0) 55937cd4501SBing Zhao txq_ctrl->hairpin_status = 1; 56037cd4501SBing Zhao mlx5_txq_release(dev, cur_queue); 56137cd4501SBing Zhao } else { 5620cedf34dSXueming Li struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue); 56337cd4501SBing Zhao struct mlx5_rxq_ctrl *rxq_ctrl; 56437cd4501SBing Zhao struct mlx5_devx_modify_rq_attr rq_attr = { 0 }; 56537cd4501SBing Zhao 5660cedf34dSXueming Li if (rxq == NULL) { 56737cd4501SBing Zhao rte_errno = EINVAL; 56837cd4501SBing Zhao DRV_LOG(ERR, "Failed to get port %u Rx queue %d", 56937cd4501SBing Zhao dev->data->port_id, cur_queue); 57037cd4501SBing Zhao return -rte_errno; 57137cd4501SBing Zhao } 5720cedf34dSXueming Li rxq_ctrl = rxq->ctrl; 573c06f77aeSMichael Baum if (!rxq_ctrl->is_hairpin) { 57437cd4501SBing Zhao rte_errno = EINVAL; 57537cd4501SBing Zhao DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq", 57637cd4501SBing Zhao dev->data->port_id, cur_queue); 57737cd4501SBing Zhao return -rte_errno; 57837cd4501SBing Zhao } 57937cd4501SBing Zhao if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) { 58037cd4501SBing Zhao rte_errno = ENOMEM; 58137cd4501SBing Zhao DRV_LOG(ERR, "port %u no Rxq object found: %d", 58237cd4501SBing Zhao dev->data->port_id, cur_queue); 58337cd4501SBing Zhao return -rte_errno; 58437cd4501SBing Zhao } 58544126bd9SXueming Li if (rxq->hairpin_status != 0) { 58637cd4501SBing Zhao DRV_LOG(DEBUG, "port %u Rx queue %d is already bound", 58737cd4501SBing Zhao dev->data->port_id, cur_queue); 58837cd4501SBing Zhao return 0; 58937cd4501SBing Zhao } 59037cd4501SBing Zhao if (peer_info->tx_explicit != 59144126bd9SXueming Li rxq->hairpin_conf.tx_explicit) { 59237cd4501SBing Zhao rte_errno = EINVAL; 59337cd4501SBing Zhao DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode" 59437cd4501SBing Zhao " mismatch", dev->data->port_id, cur_queue); 59537cd4501SBing Zhao return -rte_errno; 59637cd4501SBing Zhao } 59737cd4501SBing Zhao if (peer_info->manual_bind != 59844126bd9SXueming Li rxq->hairpin_conf.manual_bind) { 59937cd4501SBing Zhao rte_errno = EINVAL; 60037cd4501SBing Zhao DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode" 60137cd4501SBing Zhao " mismatch", dev->data->port_id, cur_queue); 60237cd4501SBing Zhao return -rte_errno; 60337cd4501SBing Zhao } 604ca638c49SDariusz Sosnowski rq_attr.state = MLX5_RQC_STATE_RDY; 605ca638c49SDariusz Sosnowski rq_attr.rq_state = MLX5_RQC_STATE_RST; 60637cd4501SBing Zhao rq_attr.hairpin_peer_sq = peer_info->qp_id; 60737cd4501SBing Zhao rq_attr.hairpin_peer_vhca = peer_info->vhca_id; 60837cd4501SBing Zhao ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr); 60937cd4501SBing Zhao if (ret == 0) 61044126bd9SXueming Li rxq->hairpin_status = 1; 61137cd4501SBing Zhao } 61237cd4501SBing Zhao return ret; 61337cd4501SBing Zhao } 61437cd4501SBing Zhao 61537cd4501SBing Zhao /* 61637cd4501SBing Zhao * Unbind the hairpin queue and reset its HW configuration. 61737cd4501SBing Zhao * This needs to be called twice both for Tx and Rx queues of a pair. 61837cd4501SBing Zhao * If the queue is already unbound, it is considered successful. 61937cd4501SBing Zhao * 62037cd4501SBing Zhao * @param dev 62137cd4501SBing Zhao * Pointer to Ethernet device structure. 62237cd4501SBing Zhao * @param cur_queue 62337cd4501SBing Zhao * Index of the queue to change the HW configuration to unbind. 62437cd4501SBing Zhao * @param direction 62537cd4501SBing Zhao * Positive to reset the TxQ, zero to reset the RxQ. 62637cd4501SBing Zhao * 62737cd4501SBing Zhao * @return 62837cd4501SBing Zhao * 0 on success, a negative errno value otherwise and rte_errno is set. 62937cd4501SBing Zhao */ 63037cd4501SBing Zhao int 63137cd4501SBing Zhao mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue, 63237cd4501SBing Zhao uint32_t direction) 63337cd4501SBing Zhao { 63437cd4501SBing Zhao int ret = 0; 63537cd4501SBing Zhao 63637cd4501SBing Zhao if (direction != 0) { 63737cd4501SBing Zhao struct mlx5_txq_ctrl *txq_ctrl; 63837cd4501SBing Zhao struct mlx5_devx_modify_sq_attr sq_attr = { 0 }; 63937cd4501SBing Zhao 64037cd4501SBing Zhao txq_ctrl = mlx5_txq_get(dev, cur_queue); 64137cd4501SBing Zhao if (txq_ctrl == NULL) { 64237cd4501SBing Zhao rte_errno = EINVAL; 64337cd4501SBing Zhao DRV_LOG(ERR, "Failed to get port %u Tx queue %d", 64437cd4501SBing Zhao dev->data->port_id, cur_queue); 64537cd4501SBing Zhao return -rte_errno; 64637cd4501SBing Zhao } 647c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin) { 64837cd4501SBing Zhao rte_errno = EINVAL; 64937cd4501SBing Zhao DRV_LOG(ERR, "port %u queue %d not a hairpin Txq", 65037cd4501SBing Zhao dev->data->port_id, cur_queue); 65137cd4501SBing Zhao mlx5_txq_release(dev, cur_queue); 65237cd4501SBing Zhao return -rte_errno; 65337cd4501SBing Zhao } 65437cd4501SBing Zhao /* Already unbound, return success before obj checking. */ 65537cd4501SBing Zhao if (txq_ctrl->hairpin_status == 0) { 65637cd4501SBing Zhao DRV_LOG(DEBUG, "port %u Tx queue %d is already unbound", 65737cd4501SBing Zhao dev->data->port_id, cur_queue); 65837cd4501SBing Zhao mlx5_txq_release(dev, cur_queue); 65937cd4501SBing Zhao return 0; 66037cd4501SBing Zhao } 66137cd4501SBing Zhao if (!txq_ctrl->obj || !txq_ctrl->obj->sq) { 66237cd4501SBing Zhao rte_errno = ENOMEM; 66337cd4501SBing Zhao DRV_LOG(ERR, "port %u no Txq object found: %d", 66437cd4501SBing Zhao dev->data->port_id, cur_queue); 66537cd4501SBing Zhao mlx5_txq_release(dev, cur_queue); 66637cd4501SBing Zhao return -rte_errno; 66737cd4501SBing Zhao } 66837cd4501SBing Zhao sq_attr.state = MLX5_SQC_STATE_RST; 669ca638c49SDariusz Sosnowski sq_attr.sq_state = MLX5_SQC_STATE_RDY; 67037cd4501SBing Zhao ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr); 67137cd4501SBing Zhao if (ret == 0) 67237cd4501SBing Zhao txq_ctrl->hairpin_status = 0; 67337cd4501SBing Zhao mlx5_txq_release(dev, cur_queue); 67437cd4501SBing Zhao } else { 6750cedf34dSXueming Li struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue); 67637cd4501SBing Zhao struct mlx5_rxq_ctrl *rxq_ctrl; 67737cd4501SBing Zhao struct mlx5_devx_modify_rq_attr rq_attr = { 0 }; 67837cd4501SBing Zhao 6790cedf34dSXueming Li if (rxq == NULL) { 68037cd4501SBing Zhao rte_errno = EINVAL; 68137cd4501SBing Zhao DRV_LOG(ERR, "Failed to get port %u Rx queue %d", 68237cd4501SBing Zhao dev->data->port_id, cur_queue); 68337cd4501SBing Zhao return -rte_errno; 68437cd4501SBing Zhao } 6850cedf34dSXueming Li rxq_ctrl = rxq->ctrl; 686c06f77aeSMichael Baum if (!rxq_ctrl->is_hairpin) { 68737cd4501SBing Zhao rte_errno = EINVAL; 68837cd4501SBing Zhao DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq", 68937cd4501SBing Zhao dev->data->port_id, cur_queue); 69037cd4501SBing Zhao return -rte_errno; 69137cd4501SBing Zhao } 69244126bd9SXueming Li if (rxq->hairpin_status == 0) { 69337cd4501SBing Zhao DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound", 69437cd4501SBing Zhao dev->data->port_id, cur_queue); 69537cd4501SBing Zhao return 0; 69637cd4501SBing Zhao } 69737cd4501SBing Zhao if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) { 69837cd4501SBing Zhao rte_errno = ENOMEM; 69937cd4501SBing Zhao DRV_LOG(ERR, "port %u no Rxq object found: %d", 70037cd4501SBing Zhao dev->data->port_id, cur_queue); 70137cd4501SBing Zhao return -rte_errno; 70237cd4501SBing Zhao } 703ca638c49SDariusz Sosnowski rq_attr.state = MLX5_RQC_STATE_RST; 704ca638c49SDariusz Sosnowski rq_attr.rq_state = MLX5_RQC_STATE_RDY; 70537cd4501SBing Zhao ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr); 70637cd4501SBing Zhao if (ret == 0) 70744126bd9SXueming Li rxq->hairpin_status = 0; 70837cd4501SBing Zhao } 70937cd4501SBing Zhao return ret; 71037cd4501SBing Zhao } 71137cd4501SBing Zhao 71237cd4501SBing Zhao /* 71337cd4501SBing Zhao * Bind the hairpin port pairs, from the Tx to the peer Rx. 71437cd4501SBing Zhao * This function only supports to bind the Tx to one Rx. 71537cd4501SBing Zhao * 71637cd4501SBing Zhao * @param dev 71737cd4501SBing Zhao * Pointer to Ethernet device structure. 71837cd4501SBing Zhao * @param rx_port 71937cd4501SBing Zhao * Port identifier of the Rx port. 72037cd4501SBing Zhao * 72137cd4501SBing Zhao * @return 72237cd4501SBing Zhao * 0 on success, a negative errno value otherwise and rte_errno is set. 72337cd4501SBing Zhao */ 72437cd4501SBing Zhao static int 72537cd4501SBing Zhao mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port) 72637cd4501SBing Zhao { 72737cd4501SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 72837cd4501SBing Zhao int ret = 0; 72937cd4501SBing Zhao struct mlx5_txq_ctrl *txq_ctrl; 73037cd4501SBing Zhao uint32_t i; 73137cd4501SBing Zhao struct rte_hairpin_peer_info peer = {0xffffff}; 73237cd4501SBing Zhao struct rte_hairpin_peer_info cur; 73337cd4501SBing Zhao const struct rte_eth_hairpin_conf *conf; 73437cd4501SBing Zhao uint16_t num_q = 0; 73537cd4501SBing Zhao uint16_t local_port = priv->dev_data->port_id; 73637cd4501SBing Zhao uint32_t manual; 73737cd4501SBing Zhao uint32_t explicit; 73837cd4501SBing Zhao uint16_t rx_queue; 73937cd4501SBing Zhao 74056bb3c84SXueming Li if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) { 74137cd4501SBing Zhao rte_errno = ENODEV; 74237cd4501SBing Zhao DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port); 74337cd4501SBing Zhao return -rte_errno; 74437cd4501SBing Zhao } 74537cd4501SBing Zhao /* 74637cd4501SBing Zhao * Before binding TxQ to peer RxQ, first round loop will be used for 74737cd4501SBing Zhao * checking the queues' configuration consistency. This would be a 74837cd4501SBing Zhao * little time consuming but better than doing the rollback. 74937cd4501SBing Zhao */ 75037cd4501SBing Zhao for (i = 0; i != priv->txqs_n; i++) { 75137cd4501SBing Zhao txq_ctrl = mlx5_txq_get(dev, i); 75237cd4501SBing Zhao if (txq_ctrl == NULL) 75337cd4501SBing Zhao continue; 754c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin) { 75537cd4501SBing Zhao mlx5_txq_release(dev, i); 75637cd4501SBing Zhao continue; 75737cd4501SBing Zhao } 75837cd4501SBing Zhao /* 75937cd4501SBing Zhao * All hairpin Tx queues of a single port that connected to the 76037cd4501SBing Zhao * same peer Rx port should have the same "auto binding" and 76137cd4501SBing Zhao * "implicit Tx flow" modes. 76237cd4501SBing Zhao * Peer consistency checking will be done in per queue binding. 76337cd4501SBing Zhao */ 76437cd4501SBing Zhao conf = &txq_ctrl->hairpin_conf; 76537cd4501SBing Zhao if (conf->peers[0].port == rx_port) { 76637cd4501SBing Zhao if (num_q == 0) { 76737cd4501SBing Zhao manual = conf->manual_bind; 76837cd4501SBing Zhao explicit = conf->tx_explicit; 76937cd4501SBing Zhao } else { 77037cd4501SBing Zhao if (manual != conf->manual_bind || 77137cd4501SBing Zhao explicit != conf->tx_explicit) { 77237cd4501SBing Zhao rte_errno = EINVAL; 77337cd4501SBing Zhao DRV_LOG(ERR, "port %u queue %d mode" 77437cd4501SBing Zhao " mismatch: %u %u, %u %u", 77537cd4501SBing Zhao local_port, i, manual, 77637cd4501SBing Zhao conf->manual_bind, explicit, 77737cd4501SBing Zhao conf->tx_explicit); 77837cd4501SBing Zhao mlx5_txq_release(dev, i); 77937cd4501SBing Zhao return -rte_errno; 78037cd4501SBing Zhao } 78137cd4501SBing Zhao } 78237cd4501SBing Zhao num_q++; 78337cd4501SBing Zhao } 78437cd4501SBing Zhao mlx5_txq_release(dev, i); 78537cd4501SBing Zhao } 78637cd4501SBing Zhao /* Once no queue is configured, success is returned directly. */ 78737cd4501SBing Zhao if (num_q == 0) 78837cd4501SBing Zhao return ret; 78937cd4501SBing Zhao /* All the hairpin TX queues need to be traversed again. */ 79037cd4501SBing Zhao for (i = 0; i != priv->txqs_n; i++) { 79137cd4501SBing Zhao txq_ctrl = mlx5_txq_get(dev, i); 79237cd4501SBing Zhao if (txq_ctrl == NULL) 79337cd4501SBing Zhao continue; 794c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin) { 79537cd4501SBing Zhao mlx5_txq_release(dev, i); 79637cd4501SBing Zhao continue; 79737cd4501SBing Zhao } 79837cd4501SBing Zhao if (txq_ctrl->hairpin_conf.peers[0].port != rx_port) { 79937cd4501SBing Zhao mlx5_txq_release(dev, i); 80037cd4501SBing Zhao continue; 80137cd4501SBing Zhao } 80237cd4501SBing Zhao rx_queue = txq_ctrl->hairpin_conf.peers[0].queue; 80337cd4501SBing Zhao /* 80437cd4501SBing Zhao * Fetch peer RxQ's information. 80537cd4501SBing Zhao * No need to pass the information of the current queue. 80637cd4501SBing Zhao */ 80737cd4501SBing Zhao ret = rte_eth_hairpin_queue_peer_update(rx_port, rx_queue, 80837cd4501SBing Zhao NULL, &peer, 1); 80937cd4501SBing Zhao if (ret != 0) { 81037cd4501SBing Zhao mlx5_txq_release(dev, i); 81137cd4501SBing Zhao goto error; 81237cd4501SBing Zhao } 81337cd4501SBing Zhao /* Accessing its own device, inside mlx5 PMD. */ 81437cd4501SBing Zhao ret = mlx5_hairpin_queue_peer_bind(dev, i, &peer, 1); 81537cd4501SBing Zhao if (ret != 0) { 81637cd4501SBing Zhao mlx5_txq_release(dev, i); 81737cd4501SBing Zhao goto error; 81837cd4501SBing Zhao } 81937cd4501SBing Zhao /* Pass TxQ's information to peer RxQ and try binding. */ 82037cd4501SBing Zhao cur.peer_q = rx_queue; 82126e1eaf2SDariusz Sosnowski cur.qp_id = mlx5_txq_get_sqn(txq_ctrl); 82253820561SMichael Baum cur.vhca_id = priv->sh->cdev->config.hca_attr.vhca_id; 82337cd4501SBing Zhao cur.tx_explicit = txq_ctrl->hairpin_conf.tx_explicit; 82437cd4501SBing Zhao cur.manual_bind = txq_ctrl->hairpin_conf.manual_bind; 82537cd4501SBing Zhao /* 82637cd4501SBing Zhao * In order to access another device in a proper way, RTE level 82737cd4501SBing Zhao * private function is needed. 82837cd4501SBing Zhao */ 82937cd4501SBing Zhao ret = rte_eth_hairpin_queue_peer_bind(rx_port, rx_queue, 83037cd4501SBing Zhao &cur, 0); 83137cd4501SBing Zhao if (ret != 0) { 83237cd4501SBing Zhao mlx5_txq_release(dev, i); 83337cd4501SBing Zhao goto error; 83437cd4501SBing Zhao } 83537cd4501SBing Zhao mlx5_txq_release(dev, i); 83637cd4501SBing Zhao } 83737cd4501SBing Zhao return 0; 83837cd4501SBing Zhao error: 83937cd4501SBing Zhao /* 84037cd4501SBing Zhao * Do roll-back process for the queues already bound. 84137cd4501SBing Zhao * No need to check the return value of the queue unbind function. 84237cd4501SBing Zhao */ 84337cd4501SBing Zhao do { 84437cd4501SBing Zhao /* No validation is needed here. */ 84537cd4501SBing Zhao txq_ctrl = mlx5_txq_get(dev, i); 84637cd4501SBing Zhao if (txq_ctrl == NULL) 84737cd4501SBing Zhao continue; 848ab2439f8SDariusz Sosnowski if (!txq_ctrl->is_hairpin || 849ab2439f8SDariusz Sosnowski txq_ctrl->hairpin_conf.peers[0].port != rx_port) { 850ab2439f8SDariusz Sosnowski mlx5_txq_release(dev, i); 851ab2439f8SDariusz Sosnowski continue; 852ab2439f8SDariusz Sosnowski } 85337cd4501SBing Zhao rx_queue = txq_ctrl->hairpin_conf.peers[0].queue; 85437cd4501SBing Zhao rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0); 85537cd4501SBing Zhao mlx5_hairpin_queue_peer_unbind(dev, i, 1); 85637cd4501SBing Zhao mlx5_txq_release(dev, i); 85737cd4501SBing Zhao } while (i--); 85837cd4501SBing Zhao return ret; 85937cd4501SBing Zhao } 86037cd4501SBing Zhao 86137cd4501SBing Zhao /* 86237cd4501SBing Zhao * Unbind the hairpin port pair, HW configuration of both devices will be clear 863b53d106dSSean Morrissey * and status will be reset for all the queues used between them. 86437cd4501SBing Zhao * This function only supports to unbind the Tx from one Rx. 86537cd4501SBing Zhao * 86637cd4501SBing Zhao * @param dev 86737cd4501SBing Zhao * Pointer to Ethernet device structure. 86837cd4501SBing Zhao * @param rx_port 86937cd4501SBing Zhao * Port identifier of the Rx port. 87037cd4501SBing Zhao * 87137cd4501SBing Zhao * @return 87237cd4501SBing Zhao * 0 on success, a negative errno value otherwise and rte_errno is set. 87337cd4501SBing Zhao */ 87437cd4501SBing Zhao static int 87537cd4501SBing Zhao mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port) 87637cd4501SBing Zhao { 87737cd4501SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 87837cd4501SBing Zhao struct mlx5_txq_ctrl *txq_ctrl; 87937cd4501SBing Zhao uint32_t i; 88037cd4501SBing Zhao int ret; 88137cd4501SBing Zhao uint16_t cur_port = priv->dev_data->port_id; 88237cd4501SBing Zhao 88356bb3c84SXueming Li if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) { 88437cd4501SBing Zhao rte_errno = ENODEV; 88537cd4501SBing Zhao DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port); 88637cd4501SBing Zhao return -rte_errno; 88737cd4501SBing Zhao } 88837cd4501SBing Zhao for (i = 0; i != priv->txqs_n; i++) { 88937cd4501SBing Zhao uint16_t rx_queue; 89037cd4501SBing Zhao 89137cd4501SBing Zhao txq_ctrl = mlx5_txq_get(dev, i); 89237cd4501SBing Zhao if (txq_ctrl == NULL) 89337cd4501SBing Zhao continue; 894c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin) { 89537cd4501SBing Zhao mlx5_txq_release(dev, i); 89637cd4501SBing Zhao continue; 89737cd4501SBing Zhao } 89837cd4501SBing Zhao if (txq_ctrl->hairpin_conf.peers[0].port != rx_port) { 89937cd4501SBing Zhao mlx5_txq_release(dev, i); 90037cd4501SBing Zhao continue; 90137cd4501SBing Zhao } 90237cd4501SBing Zhao /* Indeed, only the first used queue needs to be checked. */ 90337cd4501SBing Zhao if (txq_ctrl->hairpin_conf.manual_bind == 0) { 9049284987aSBing Zhao mlx5_txq_release(dev, i); 90537cd4501SBing Zhao if (cur_port != rx_port) { 90637cd4501SBing Zhao rte_errno = EINVAL; 90737cd4501SBing Zhao DRV_LOG(ERR, "port %u and port %u are in" 90837cd4501SBing Zhao " auto-bind mode", cur_port, rx_port); 90937cd4501SBing Zhao return -rte_errno; 91037cd4501SBing Zhao } else { 91137cd4501SBing Zhao return 0; 91237cd4501SBing Zhao } 91337cd4501SBing Zhao } 91437cd4501SBing Zhao rx_queue = txq_ctrl->hairpin_conf.peers[0].queue; 91537cd4501SBing Zhao mlx5_txq_release(dev, i); 91637cd4501SBing Zhao ret = rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0); 91737cd4501SBing Zhao if (ret) { 91837cd4501SBing Zhao DRV_LOG(ERR, "port %u Rx queue %d unbind - failure", 91937cd4501SBing Zhao rx_port, rx_queue); 92037cd4501SBing Zhao return ret; 92137cd4501SBing Zhao } 92237cd4501SBing Zhao ret = mlx5_hairpin_queue_peer_unbind(dev, i, 1); 92337cd4501SBing Zhao if (ret) { 92437cd4501SBing Zhao DRV_LOG(ERR, "port %u Tx queue %d unbind - failure", 92537cd4501SBing Zhao cur_port, i); 92637cd4501SBing Zhao return ret; 92737cd4501SBing Zhao } 92837cd4501SBing Zhao } 92937cd4501SBing Zhao return 0; 93037cd4501SBing Zhao } 93137cd4501SBing Zhao 93237cd4501SBing Zhao /* 93337cd4501SBing Zhao * Bind hairpin ports, Rx could be all ports when using RTE_MAX_ETHPORTS. 93437cd4501SBing Zhao * @see mlx5_hairpin_bind_single_port() 93537cd4501SBing Zhao */ 93637cd4501SBing Zhao int 93737cd4501SBing Zhao mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port) 93837cd4501SBing Zhao { 93937cd4501SBing Zhao int ret = 0; 94037cd4501SBing Zhao uint16_t p, pp; 94137cd4501SBing Zhao 94237cd4501SBing Zhao /* 94337cd4501SBing Zhao * If the Rx port has no hairpin configuration with the current port, 94437cd4501SBing Zhao * the binding will be skipped in the called function of single port. 94537cd4501SBing Zhao * Device started status will be checked only before the queue 94637cd4501SBing Zhao * information updating. 94737cd4501SBing Zhao */ 94837cd4501SBing Zhao if (rx_port == RTE_MAX_ETHPORTS) { 94956bb3c84SXueming Li MLX5_ETH_FOREACH_DEV(p, dev->device) { 95037cd4501SBing Zhao ret = mlx5_hairpin_bind_single_port(dev, p); 95137cd4501SBing Zhao if (ret != 0) 95237cd4501SBing Zhao goto unbind; 95337cd4501SBing Zhao } 95437cd4501SBing Zhao return ret; 95537cd4501SBing Zhao } else { 95637cd4501SBing Zhao return mlx5_hairpin_bind_single_port(dev, rx_port); 95737cd4501SBing Zhao } 95837cd4501SBing Zhao unbind: 95956bb3c84SXueming Li MLX5_ETH_FOREACH_DEV(pp, dev->device) 96037cd4501SBing Zhao if (pp < p) 96137cd4501SBing Zhao mlx5_hairpin_unbind_single_port(dev, pp); 96237cd4501SBing Zhao return ret; 96337cd4501SBing Zhao } 96437cd4501SBing Zhao 96537cd4501SBing Zhao /* 96637cd4501SBing Zhao * Unbind hairpin ports, Rx could be all ports when using RTE_MAX_ETHPORTS. 96737cd4501SBing Zhao * @see mlx5_hairpin_unbind_single_port() 96837cd4501SBing Zhao */ 96937cd4501SBing Zhao int 97037cd4501SBing Zhao mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port) 97137cd4501SBing Zhao { 97237cd4501SBing Zhao int ret = 0; 97337cd4501SBing Zhao uint16_t p; 97437cd4501SBing Zhao 97537cd4501SBing Zhao if (rx_port == RTE_MAX_ETHPORTS) 97656bb3c84SXueming Li MLX5_ETH_FOREACH_DEV(p, dev->device) { 97737cd4501SBing Zhao ret = mlx5_hairpin_unbind_single_port(dev, p); 97837cd4501SBing Zhao if (ret != 0) 97937cd4501SBing Zhao return ret; 98037cd4501SBing Zhao } 98137cd4501SBing Zhao else 9820746dcabSBing Zhao ret = mlx5_hairpin_unbind_single_port(dev, rx_port); 98337cd4501SBing Zhao return ret; 98437cd4501SBing Zhao } 98537cd4501SBing Zhao 98602109eaeSBing Zhao /* 98702109eaeSBing Zhao * DPDK callback to get the hairpin peer ports list. 98802109eaeSBing Zhao * This will return the actual number of peer ports and save the identifiers 98902109eaeSBing Zhao * into the array (sorted, may be different from that when setting up the 99002109eaeSBing Zhao * hairpin peer queues). 99102109eaeSBing Zhao * The peer port ID could be the same as the port ID of the current device. 99202109eaeSBing Zhao * 99302109eaeSBing Zhao * @param dev 99402109eaeSBing Zhao * Pointer to Ethernet device structure. 99502109eaeSBing Zhao * @param peer_ports 99602109eaeSBing Zhao * Pointer to array to save the port identifiers. 99702109eaeSBing Zhao * @param len 99802109eaeSBing Zhao * The length of the array. 99902109eaeSBing Zhao * @param direction 100002109eaeSBing Zhao * Current port to peer port direction. 100102109eaeSBing Zhao * positive - current used as Tx to get all peer Rx ports. 100202109eaeSBing Zhao * zero - current used as Rx to get all peer Tx ports. 100302109eaeSBing Zhao * 100402109eaeSBing Zhao * @return 100502109eaeSBing Zhao * 0 or positive value on success, actual number of peer ports. 100602109eaeSBing Zhao * a negative errno value otherwise and rte_errno is set. 100702109eaeSBing Zhao */ 100802109eaeSBing Zhao int 100902109eaeSBing Zhao mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports, 101002109eaeSBing Zhao size_t len, uint32_t direction) 101102109eaeSBing Zhao { 101202109eaeSBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 101302109eaeSBing Zhao struct mlx5_txq_ctrl *txq_ctrl; 101402109eaeSBing Zhao uint32_t i; 101502109eaeSBing Zhao uint16_t pp; 101602109eaeSBing Zhao uint32_t bits[(RTE_MAX_ETHPORTS + 31) / 32] = {0}; 101702109eaeSBing Zhao int ret = 0; 101802109eaeSBing Zhao 101902109eaeSBing Zhao if (direction) { 102002109eaeSBing Zhao for (i = 0; i < priv->txqs_n; i++) { 102102109eaeSBing Zhao txq_ctrl = mlx5_txq_get(dev, i); 102202109eaeSBing Zhao if (!txq_ctrl) 102302109eaeSBing Zhao continue; 1024c06f77aeSMichael Baum if (!txq_ctrl->is_hairpin) { 102502109eaeSBing Zhao mlx5_txq_release(dev, i); 102602109eaeSBing Zhao continue; 102702109eaeSBing Zhao } 102802109eaeSBing Zhao pp = txq_ctrl->hairpin_conf.peers[0].port; 102902109eaeSBing Zhao if (pp >= RTE_MAX_ETHPORTS) { 103002109eaeSBing Zhao rte_errno = ERANGE; 103102109eaeSBing Zhao mlx5_txq_release(dev, i); 103202109eaeSBing Zhao DRV_LOG(ERR, "port %hu queue %u peer port " 103302109eaeSBing Zhao "out of range %hu", 103402109eaeSBing Zhao priv->dev_data->port_id, i, pp); 103502109eaeSBing Zhao return -rte_errno; 103602109eaeSBing Zhao } 103702109eaeSBing Zhao bits[pp / 32] |= 1 << (pp % 32); 103802109eaeSBing Zhao mlx5_txq_release(dev, i); 103902109eaeSBing Zhao } 104002109eaeSBing Zhao } else { 104102109eaeSBing Zhao for (i = 0; i < priv->rxqs_n; i++) { 10420cedf34dSXueming Li struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); 10430cedf34dSXueming Li struct mlx5_rxq_ctrl *rxq_ctrl; 10440cedf34dSXueming Li 10450cedf34dSXueming Li if (rxq == NULL) 104602109eaeSBing Zhao continue; 10470cedf34dSXueming Li rxq_ctrl = rxq->ctrl; 1048c06f77aeSMichael Baum if (!rxq_ctrl->is_hairpin) 104902109eaeSBing Zhao continue; 105044126bd9SXueming Li pp = rxq->hairpin_conf.peers[0].port; 105102109eaeSBing Zhao if (pp >= RTE_MAX_ETHPORTS) { 105202109eaeSBing Zhao rte_errno = ERANGE; 105302109eaeSBing Zhao DRV_LOG(ERR, "port %hu queue %u peer port " 105402109eaeSBing Zhao "out of range %hu", 105502109eaeSBing Zhao priv->dev_data->port_id, i, pp); 105602109eaeSBing Zhao return -rte_errno; 105702109eaeSBing Zhao } 105802109eaeSBing Zhao bits[pp / 32] |= 1 << (pp % 32); 105902109eaeSBing Zhao } 106002109eaeSBing Zhao } 106102109eaeSBing Zhao for (i = 0; i < RTE_MAX_ETHPORTS; i++) { 106202109eaeSBing Zhao if (bits[i / 32] & (1 << (i % 32))) { 106302109eaeSBing Zhao if ((size_t)ret >= len) { 106402109eaeSBing Zhao rte_errno = E2BIG; 106502109eaeSBing Zhao return -rte_errno; 106602109eaeSBing Zhao } 106702109eaeSBing Zhao peer_ports[ret++] = i; 106802109eaeSBing Zhao } 106902109eaeSBing Zhao } 107002109eaeSBing Zhao return ret; 107102109eaeSBing Zhao } 107202109eaeSBing Zhao 1073483181f7SDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT 1074483181f7SDariusz Sosnowski 1075483181f7SDariusz Sosnowski /** 1076483181f7SDariusz Sosnowski * Check if starting representor port is allowed. 1077483181f7SDariusz Sosnowski * 1078483181f7SDariusz Sosnowski * If transfer proxy port is configured for HWS, then starting representor port 1079483181f7SDariusz Sosnowski * is allowed if and only if transfer proxy port is started as well. 1080483181f7SDariusz Sosnowski * 1081483181f7SDariusz Sosnowski * @param dev 1082483181f7SDariusz Sosnowski * Pointer to Ethernet device structure. 1083483181f7SDariusz Sosnowski * 1084483181f7SDariusz Sosnowski * @return 1085483181f7SDariusz Sosnowski * If stopping representor port is allowed, then 0 is returned. 1086483181f7SDariusz Sosnowski * Otherwise rte_errno is set, and negative errno value is returned. 1087483181f7SDariusz Sosnowski */ 1088483181f7SDariusz Sosnowski static int 1089483181f7SDariusz Sosnowski mlx5_hw_representor_port_allowed_start(struct rte_eth_dev *dev) 1090483181f7SDariusz Sosnowski { 1091483181f7SDariusz Sosnowski struct mlx5_priv *priv = dev->data->dev_private; 1092483181f7SDariusz Sosnowski struct rte_eth_dev *proxy_dev; 1093483181f7SDariusz Sosnowski struct mlx5_priv *proxy_priv; 1094483181f7SDariusz Sosnowski uint16_t proxy_port_id = UINT16_MAX; 1095483181f7SDariusz Sosnowski int ret; 1096483181f7SDariusz Sosnowski 1097483181f7SDariusz Sosnowski MLX5_ASSERT(priv->sh->config.dv_flow_en == 2); 1098483181f7SDariusz Sosnowski MLX5_ASSERT(priv->sh->config.dv_esw_en); 1099483181f7SDariusz Sosnowski MLX5_ASSERT(priv->representor); 1100483181f7SDariusz Sosnowski ret = rte_flow_pick_transfer_proxy(dev->data->port_id, &proxy_port_id, NULL); 1101483181f7SDariusz Sosnowski if (ret) { 1102483181f7SDariusz Sosnowski if (ret == -ENODEV) 1103483181f7SDariusz Sosnowski DRV_LOG(ERR, "Starting representor port %u is not allowed. Transfer " 1104483181f7SDariusz Sosnowski "proxy port is not available.", dev->data->port_id); 1105483181f7SDariusz Sosnowski else 1106483181f7SDariusz Sosnowski DRV_LOG(ERR, "Failed to pick transfer proxy for port %u (ret = %d)", 1107483181f7SDariusz Sosnowski dev->data->port_id, ret); 1108483181f7SDariusz Sosnowski return ret; 1109483181f7SDariusz Sosnowski } 1110483181f7SDariusz Sosnowski proxy_dev = &rte_eth_devices[proxy_port_id]; 1111483181f7SDariusz Sosnowski proxy_priv = proxy_dev->data->dev_private; 1112483181f7SDariusz Sosnowski if (proxy_priv->dr_ctx == NULL) { 1113483181f7SDariusz Sosnowski DRV_LOG(DEBUG, "Starting representor port %u is allowed, but default traffic flows" 1114483181f7SDariusz Sosnowski " will not be created. Transfer proxy port must be configured" 1115483181f7SDariusz Sosnowski " for HWS and started.", 1116483181f7SDariusz Sosnowski dev->data->port_id); 1117483181f7SDariusz Sosnowski return 0; 1118483181f7SDariusz Sosnowski } 1119483181f7SDariusz Sosnowski if (!proxy_dev->data->dev_started) { 1120483181f7SDariusz Sosnowski DRV_LOG(ERR, "Failed to start port %u: transfer proxy (port %u) must be started", 1121483181f7SDariusz Sosnowski dev->data->port_id, proxy_port_id); 1122483181f7SDariusz Sosnowski rte_errno = EAGAIN; 1123483181f7SDariusz Sosnowski return -rte_errno; 1124483181f7SDariusz Sosnowski } 1125483181f7SDariusz Sosnowski if (priv->sh->config.repr_matching && !priv->dr_ctx) { 1126483181f7SDariusz Sosnowski DRV_LOG(ERR, "Failed to start port %u: with representor matching enabled, port " 1127483181f7SDariusz Sosnowski "must be configured for HWS", dev->data->port_id); 1128483181f7SDariusz Sosnowski rte_errno = EINVAL; 1129483181f7SDariusz Sosnowski return -rte_errno; 1130483181f7SDariusz Sosnowski } 1131483181f7SDariusz Sosnowski return 0; 1132483181f7SDariusz Sosnowski } 1133483181f7SDariusz Sosnowski 1134483181f7SDariusz Sosnowski #endif 1135483181f7SDariusz Sosnowski 11366a338ad4SOri Kam /** 1137e60fbd5bSAdrien Mazarguil * DPDK callback to start the device. 1138e60fbd5bSAdrien Mazarguil * 1139e60fbd5bSAdrien Mazarguil * Simulate device start by attaching all configured flows. 1140e60fbd5bSAdrien Mazarguil * 1141e60fbd5bSAdrien Mazarguil * @param dev 1142e60fbd5bSAdrien Mazarguil * Pointer to Ethernet device structure. 1143e60fbd5bSAdrien Mazarguil * 1144e60fbd5bSAdrien Mazarguil * @return 1145a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 11468e82ebe2SDariusz Sosnowski * The following error values are defined: 11478e82ebe2SDariusz Sosnowski * 11488e82ebe2SDariusz Sosnowski * - -EAGAIN: If port representor cannot be started, 11498e82ebe2SDariusz Sosnowski * because transfer proxy port is not started. 1150e60fbd5bSAdrien Mazarguil */ 1151e60fbd5bSAdrien Mazarguil int 1152e60fbd5bSAdrien Mazarguil mlx5_dev_start(struct rte_eth_dev *dev) 1153e60fbd5bSAdrien Mazarguil { 115433860cfaSSuanming Mou struct mlx5_priv *priv = dev->data->dev_private; 1155a6d83b6aSNélio Laranjeiro int ret; 1156efa79e68SOri Kam int fine_inline; 1157e60fbd5bSAdrien Mazarguil 115824f653a7SYongseok Koh DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id); 1159483181f7SDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT 1160483181f7SDariusz Sosnowski if (priv->sh->config.dv_flow_en == 2) { 1161e38776c3SMaayan Kashani /*If previous configuration does not exist. */ 1162e38776c3SMaayan Kashani if (!(priv->dr_ctx)) { 1163e38776c3SMaayan Kashani ret = flow_hw_init(dev, NULL); 1164e38776c3SMaayan Kashani if (ret) 1165e38776c3SMaayan Kashani return ret; 1166e38776c3SMaayan Kashani } 1167483181f7SDariusz Sosnowski /* If there is no E-Switch, then there are no start/stop order limitations. */ 1168483181f7SDariusz Sosnowski if (!priv->sh->config.dv_esw_en) 1169483181f7SDariusz Sosnowski goto continue_dev_start; 1170483181f7SDariusz Sosnowski /* If master is being started, then it is always allowed. */ 1171483181f7SDariusz Sosnowski if (priv->master) 1172483181f7SDariusz Sosnowski goto continue_dev_start; 1173483181f7SDariusz Sosnowski if (mlx5_hw_representor_port_allowed_start(dev)) 1174483181f7SDariusz Sosnowski return -rte_errno; 1175483181f7SDariusz Sosnowski } 1176483181f7SDariusz Sosnowski continue_dev_start: 1177483181f7SDariusz Sosnowski #endif 1178efa79e68SOri Kam fine_inline = rte_mbuf_dynflag_lookup 1179efa79e68SOri Kam (RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL); 1180042540e4SThomas Monjalon if (fine_inline >= 0) 1181efa79e68SOri Kam rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline; 1182efa79e68SOri Kam else 1183efa79e68SOri Kam rte_net_mlx5_dynf_inline_mask = 0; 1184606d6905SShiri Kuzin if (dev->data->nb_rx_queues > 0) { 1185b9f1f4c2SGregory Etelson uint32_t max_lro_msg_size = priv->max_lro_msg_size; 1186b9f1f4c2SGregory Etelson 1187b9f1f4c2SGregory Etelson if (max_lro_msg_size < MLX5_LRO_SEG_CHUNK_SIZE) { 1188b9f1f4c2SGregory Etelson uint32_t i; 1189b9f1f4c2SGregory Etelson struct mlx5_rxq_priv *rxq; 1190b9f1f4c2SGregory Etelson 1191b9f1f4c2SGregory Etelson for (i = 0; i != priv->rxqs_n; ++i) { 1192b9f1f4c2SGregory Etelson rxq = mlx5_rxq_get(dev, i); 1193b9f1f4c2SGregory Etelson if (rxq && rxq->ctrl && rxq->ctrl->rxq.lro) { 1194b9f1f4c2SGregory Etelson DRV_LOG(ERR, "port %u invalid max LRO size", 1195b9f1f4c2SGregory Etelson dev->data->port_id); 1196b9f1f4c2SGregory Etelson rte_errno = EINVAL; 1197b9f1f4c2SGregory Etelson return -rte_errno; 1198b9f1f4c2SGregory Etelson } 1199b9f1f4c2SGregory Etelson } 1200b9f1f4c2SGregory Etelson } 120163bd1629SOri Kam ret = mlx5_dev_configure_rss_reta(dev); 120263bd1629SOri Kam if (ret) { 120363bd1629SOri Kam DRV_LOG(ERR, "port %u reta config failed: %s", 120463bd1629SOri Kam dev->data->port_id, strerror(rte_errno)); 120563bd1629SOri Kam return -rte_errno; 120663bd1629SOri Kam } 1207606d6905SShiri Kuzin } 1208d133f4cdSViacheslav Ovsiienko ret = mlx5_txpp_start(dev); 1209d133f4cdSViacheslav Ovsiienko if (ret) { 1210d133f4cdSViacheslav Ovsiienko DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s", 1211d133f4cdSViacheslav Ovsiienko dev->data->port_id, strerror(rte_errno)); 1212d133f4cdSViacheslav Ovsiienko goto error; 1213d133f4cdSViacheslav Ovsiienko } 1214c4b86201SMichael Baum if (mlx5_devx_obj_ops_en(priv->sh) && 121587af0d1eSMichael Baum priv->obj_ops.lb_dummy_queue_create) { 121623233fd6SBing Zhao ret = priv->obj_ops.lb_dummy_queue_create(dev); 121723233fd6SBing Zhao if (ret) 121823233fd6SBing Zhao goto error; 121923233fd6SBing Zhao } 1220a6d83b6aSNélio Laranjeiro ret = mlx5_txq_start(dev); 1221a6d83b6aSNélio Laranjeiro if (ret) { 1222a170a30dSNélio Laranjeiro DRV_LOG(ERR, "port %u Tx queue allocation failed: %s", 12230f99970bSNélio Laranjeiro dev->data->port_id, strerror(rte_errno)); 1224d133f4cdSViacheslav Ovsiienko goto error; 12256e78005aSNélio Laranjeiro } 1226e8482187SBing Zhao if (priv->config.std_delay_drop || priv->config.hp_delay_drop) { 122787af0d1eSMichael Baum if (!priv->sh->dev_cap.vf && !priv->sh->dev_cap.sf && 1228e8482187SBing Zhao !priv->representor) { 1229e8482187SBing Zhao ret = mlx5_get_flag_dropless_rq(dev); 1230e8482187SBing Zhao if (ret < 0) 1231e8482187SBing Zhao DRV_LOG(WARNING, 1232e8482187SBing Zhao "port %u cannot query dropless flag", 1233e8482187SBing Zhao dev->data->port_id); 1234e8482187SBing Zhao else if (!ret) 1235e8482187SBing Zhao DRV_LOG(WARNING, 1236e8482187SBing Zhao "port %u dropless_rq OFF, no rearming", 1237e8482187SBing Zhao dev->data->port_id); 1238e8482187SBing Zhao } else { 1239e8482187SBing Zhao DRV_LOG(DEBUG, 1240e8482187SBing Zhao "port %u doesn't support dropless_rq flag", 1241e8482187SBing Zhao dev->data->port_id); 1242e8482187SBing Zhao } 1243e8482187SBing Zhao } 1244a6d83b6aSNélio Laranjeiro ret = mlx5_rxq_start(dev); 1245a6d83b6aSNélio Laranjeiro if (ret) { 1246a170a30dSNélio Laranjeiro DRV_LOG(ERR, "port %u Rx queue allocation failed: %s", 12470f99970bSNélio Laranjeiro dev->data->port_id, strerror(rte_errno)); 1248d133f4cdSViacheslav Ovsiienko goto error; 1249a1366b1aSNélio Laranjeiro } 1250aa8bea0eSBing Zhao /* 1251aa8bea0eSBing Zhao * Such step will be skipped if there is no hairpin TX queue configured 1252aa8bea0eSBing Zhao * with RX peer queue from the same device. 1253aa8bea0eSBing Zhao */ 125437cd4501SBing Zhao ret = mlx5_hairpin_auto_bind(dev); 12556a338ad4SOri Kam if (ret) { 1256aa8bea0eSBing Zhao DRV_LOG(ERR, "port %u hairpin auto binding failed: %s", 12576a338ad4SOri Kam dev->data->port_id, strerror(rte_errno)); 1258d133f4cdSViacheslav Ovsiienko goto error; 12596a338ad4SOri Kam } 1260e7bfa359SBing Zhao /* Set started flag here for the following steps like control flow. */ 126124f653a7SYongseok Koh dev->data->dev_started = 1; 1262a6d83b6aSNélio Laranjeiro ret = mlx5_rx_intr_vec_enable(dev); 1263a6d83b6aSNélio Laranjeiro if (ret) { 1264a170a30dSNélio Laranjeiro DRV_LOG(ERR, "port %u Rx interrupt vector creation failed", 12650f99970bSNélio Laranjeiro dev->data->port_id); 1266e1016cb7SAdrien Mazarguil goto error; 12673c7d44afSShahaf Shuler } 126873bf9235SOphir Munk mlx5_os_stats_init(dev); 12695c078fceSDmitry Kozlyuk /* 12705c078fceSDmitry Kozlyuk * Attach indirection table objects detached on port stop. 12715c078fceSDmitry Kozlyuk * They may be needed to create RSS in non-isolated mode. 12725c078fceSDmitry Kozlyuk */ 12735c078fceSDmitry Kozlyuk ret = mlx5_action_handle_attach(dev); 12745c078fceSDmitry Kozlyuk if (ret) { 12755c078fceSDmitry Kozlyuk DRV_LOG(ERR, 12765c078fceSDmitry Kozlyuk "port %u failed to attach indirect actions: %s", 12775c078fceSDmitry Kozlyuk dev->data->port_id, rte_strerror(rte_errno)); 12785c078fceSDmitry Kozlyuk goto error; 12795c078fceSDmitry Kozlyuk } 1280f1fecffaSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT 1281f1fecffaSDariusz Sosnowski if (priv->sh->config.dv_flow_en == 2) { 1282f1fecffaSDariusz Sosnowski ret = flow_hw_table_update(dev, NULL); 1283f1fecffaSDariusz Sosnowski if (ret) { 1284f1fecffaSDariusz Sosnowski DRV_LOG(ERR, "port %u failed to update HWS tables", 1285f1fecffaSDariusz Sosnowski dev->data->port_id); 1286f1fecffaSDariusz Sosnowski goto error; 1287f1fecffaSDariusz Sosnowski } 1288f1fecffaSDariusz Sosnowski } 1289f1fecffaSDariusz Sosnowski #endif 12907ba5320bSNélio Laranjeiro ret = mlx5_traffic_enable(dev); 1291a6d83b6aSNélio Laranjeiro if (ret) { 12928db7e3b6SBing Zhao DRV_LOG(ERR, "port %u failed to set defaults flows", 1293e313ef4cSShahaf Shuler dev->data->port_id); 1294e313ef4cSShahaf Shuler goto error; 1295e313ef4cSShahaf Shuler } 1296fca8cba4SDavid Marchand /* Set dynamic fields and flags into Rx queues. */ 1297fca8cba4SDavid Marchand mlx5_flow_rxq_dynf_set(dev); 1298a2854c4dSViacheslav Ovsiienko /* Set flags and context to convert Rx timestamps. */ 1299a2854c4dSViacheslav Ovsiienko mlx5_rxq_timestamp_set(dev); 1300a2854c4dSViacheslav Ovsiienko /* Set a mask and offset of scheduling on timestamp into Tx queues. */ 13013172c471SViacheslav Ovsiienko mlx5_txq_dynf_timestamp_set(dev); 13028db7e3b6SBing Zhao /* 13038db7e3b6SBing Zhao * In non-cached mode, it only needs to start the default mreg copy 13048db7e3b6SBing Zhao * action and no flow created by application exists anymore. 13058db7e3b6SBing Zhao * But it is worth wrapping the interface for further usage. 13068db7e3b6SBing Zhao */ 13078db7e3b6SBing Zhao ret = mlx5_flow_start_default(dev); 13087ba5320bSNélio Laranjeiro if (ret) { 13098db7e3b6SBing Zhao DRV_LOG(DEBUG, "port %u failed to start default actions: %s", 13108db7e3b6SBing Zhao dev->data->port_id, strerror(rte_errno)); 13117ba5320bSNélio Laranjeiro goto error; 13127ba5320bSNélio Laranjeiro } 1313fec28ca0SDmitry Kozlyuk if (mlx5_dev_ctx_shared_mempool_subscribe(dev) != 0) { 1314fec28ca0SDmitry Kozlyuk DRV_LOG(ERR, "port %u failed to subscribe for mempool life cycle: %s", 1315fec28ca0SDmitry Kozlyuk dev->data->port_id, rte_strerror(rte_errno)); 1316fec28ca0SDmitry Kozlyuk goto error; 1317fec28ca0SDmitry Kozlyuk } 13182aac5b5dSYongseok Koh rte_wmb(); 13197ba5320bSNélio Laranjeiro dev->tx_pkt_burst = mlx5_select_tx_function(dev); 13207ba5320bSNélio Laranjeiro dev->rx_pkt_burst = mlx5_select_rx_function(dev); 13212aac5b5dSYongseok Koh /* Enable datapath on secondary process. */ 13222e86c4e5SOphir Munk mlx5_mp_os_req_start_rxtx(dev); 1323d61138d4SHarman Kalra if (rte_intr_fd_get(priv->sh->intr_handle) >= 0) { 132491389890SOphir Munk priv->sh->port[priv->dev_port - 1].ih_port_id = 132533860cfaSSuanming Mou (uint32_t)dev->data->port_id; 132633860cfaSSuanming Mou } else { 132717f95513SDmitry Kozlyuk DRV_LOG(INFO, "port %u starts without RMV interrupts.", 132817f95513SDmitry Kozlyuk dev->data->port_id); 132917f95513SDmitry Kozlyuk dev->data->dev_conf.intr_conf.rmv = 0; 133017f95513SDmitry Kozlyuk } 133117f95513SDmitry Kozlyuk if (rte_intr_fd_get(priv->sh->intr_handle_nl) >= 0) { 133217f95513SDmitry Kozlyuk priv->sh->port[priv->dev_port - 1].nl_ih_port_id = 133317f95513SDmitry Kozlyuk (uint32_t)dev->data->port_id; 133417f95513SDmitry Kozlyuk } else { 133517f95513SDmitry Kozlyuk DRV_LOG(INFO, "port %u starts without LSC interrupts.", 133633860cfaSSuanming Mou dev->data->port_id); 133733860cfaSSuanming Mou dev->data->dev_conf.intr_conf.lsc = 0; 133833860cfaSSuanming Mou } 1339d61138d4SHarman Kalra if (rte_intr_fd_get(priv->sh->intr_handle_devx) >= 0) 134091389890SOphir Munk priv->sh->port[priv->dev_port - 1].devx_ih_port_id = 134133860cfaSSuanming Mou (uint32_t)dev->data->port_id; 1342c8d4ee50SNélio Laranjeiro return 0; 1343c8d4ee50SNélio Laranjeiro error: 1344a6d83b6aSNélio Laranjeiro ret = rte_errno; /* Save rte_errno before cleanup. */ 1345e60fbd5bSAdrien Mazarguil /* Rollback. */ 1346272733b5SNélio Laranjeiro dev->data->dev_started = 0; 13478db7e3b6SBing Zhao mlx5_flow_stop_default(dev); 1348af4f09f2SNélio Laranjeiro mlx5_traffic_disable(dev); 1349af4f09f2SNélio Laranjeiro mlx5_txq_stop(dev); 1350af4f09f2SNélio Laranjeiro mlx5_rxq_stop(dev); 135123233fd6SBing Zhao if (priv->obj_ops.lb_dummy_queue_release) 135223233fd6SBing Zhao priv->obj_ops.lb_dummy_queue_release(dev); 1353d133f4cdSViacheslav Ovsiienko mlx5_txpp_stop(dev); /* Stop last. */ 1354a6d83b6aSNélio Laranjeiro rte_errno = ret; /* Restore rte_errno. */ 1355a6d83b6aSNélio Laranjeiro return -rte_errno; 1356e60fbd5bSAdrien Mazarguil } 1357e60fbd5bSAdrien Mazarguil 1358483181f7SDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT 1359483181f7SDariusz Sosnowski /** 1360483181f7SDariusz Sosnowski * Check if stopping transfer proxy port is allowed. 1361483181f7SDariusz Sosnowski * 1362483181f7SDariusz Sosnowski * If transfer proxy port is configured for HWS, then it is allowed to stop it 1363483181f7SDariusz Sosnowski * if and only if all other representor ports are stopped. 1364483181f7SDariusz Sosnowski * 1365483181f7SDariusz Sosnowski * @param dev 1366483181f7SDariusz Sosnowski * Pointer to Ethernet device structure. 1367483181f7SDariusz Sosnowski * 1368483181f7SDariusz Sosnowski * @return 1369483181f7SDariusz Sosnowski * If stopping transfer proxy port is allowed, then 0 is returned. 1370483181f7SDariusz Sosnowski * Otherwise rte_errno is set, and negative errno value is returned. 1371483181f7SDariusz Sosnowski */ 1372483181f7SDariusz Sosnowski static int 1373483181f7SDariusz Sosnowski mlx5_hw_proxy_port_allowed_stop(struct rte_eth_dev *dev) 1374483181f7SDariusz Sosnowski { 1375483181f7SDariusz Sosnowski struct mlx5_priv *priv = dev->data->dev_private; 1376483181f7SDariusz Sosnowski bool representor_started = false; 1377483181f7SDariusz Sosnowski uint16_t port_id; 1378483181f7SDariusz Sosnowski 1379483181f7SDariusz Sosnowski MLX5_ASSERT(priv->sh->config.dv_flow_en == 2); 1380483181f7SDariusz Sosnowski MLX5_ASSERT(priv->sh->config.dv_esw_en); 1381483181f7SDariusz Sosnowski MLX5_ASSERT(priv->master); 1382483181f7SDariusz Sosnowski /* If transfer proxy port was not configured for HWS, then stopping it is allowed. */ 1383483181f7SDariusz Sosnowski if (!priv->dr_ctx) 1384483181f7SDariusz Sosnowski return 0; 1385483181f7SDariusz Sosnowski MLX5_ETH_FOREACH_DEV(port_id, dev->device) { 1386483181f7SDariusz Sosnowski const struct rte_eth_dev *port_dev = &rte_eth_devices[port_id]; 1387483181f7SDariusz Sosnowski const struct mlx5_priv *port_priv = port_dev->data->dev_private; 1388483181f7SDariusz Sosnowski 1389483181f7SDariusz Sosnowski if (port_id != dev->data->port_id && 1390483181f7SDariusz Sosnowski port_priv->domain_id == priv->domain_id && 1391483181f7SDariusz Sosnowski port_dev->data->dev_started) 1392483181f7SDariusz Sosnowski representor_started = true; 1393483181f7SDariusz Sosnowski } 1394483181f7SDariusz Sosnowski if (representor_started) { 1395f359b715SDariusz Sosnowski DRV_LOG(ERR, "Failed to stop port %u: attached representor ports" 1396483181f7SDariusz Sosnowski " must be stopped before stopping transfer proxy port", 1397483181f7SDariusz Sosnowski dev->data->port_id); 1398483181f7SDariusz Sosnowski rte_errno = EBUSY; 1399483181f7SDariusz Sosnowski return -rte_errno; 1400483181f7SDariusz Sosnowski } 1401483181f7SDariusz Sosnowski return 0; 1402483181f7SDariusz Sosnowski } 1403483181f7SDariusz Sosnowski #endif 1404483181f7SDariusz Sosnowski 1405e60fbd5bSAdrien Mazarguil /** 1406e60fbd5bSAdrien Mazarguil * DPDK callback to stop the device. 1407e60fbd5bSAdrien Mazarguil * 1408e60fbd5bSAdrien Mazarguil * Simulate device stop by detaching all configured flows. 1409e60fbd5bSAdrien Mazarguil * 1410e60fbd5bSAdrien Mazarguil * @param dev 1411e60fbd5bSAdrien Mazarguil * Pointer to Ethernet device structure. 14128e82ebe2SDariusz Sosnowski * 14138e82ebe2SDariusz Sosnowski * @return 14148e82ebe2SDariusz Sosnowski * 0 on success, a negative errno value otherwise and rte_errno is set. 14158e82ebe2SDariusz Sosnowski * The following error values are defined: 14168e82ebe2SDariusz Sosnowski * 14178e82ebe2SDariusz Sosnowski * - -EBUSY: If transfer proxy port cannot be stopped, 14188e82ebe2SDariusz Sosnowski * because other port representors are still running. 1419e60fbd5bSAdrien Mazarguil */ 142062024eb8SIvan Ilchenko int 1421e60fbd5bSAdrien Mazarguil mlx5_dev_stop(struct rte_eth_dev *dev) 1422e60fbd5bSAdrien Mazarguil { 1423dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 1424e60fbd5bSAdrien Mazarguil 1425483181f7SDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT 1426483181f7SDariusz Sosnowski if (priv->sh->config.dv_flow_en == 2) { 1427483181f7SDariusz Sosnowski /* If there is no E-Switch, then there are no start/stop order limitations. */ 1428483181f7SDariusz Sosnowski if (!priv->sh->config.dv_esw_en) 1429483181f7SDariusz Sosnowski goto continue_dev_stop; 1430483181f7SDariusz Sosnowski /* If representor is being stopped, then it is always allowed. */ 1431483181f7SDariusz Sosnowski if (priv->representor) 1432483181f7SDariusz Sosnowski goto continue_dev_stop; 1433483181f7SDariusz Sosnowski if (mlx5_hw_proxy_port_allowed_stop(dev)) { 1434483181f7SDariusz Sosnowski dev->data->dev_started = 1; 1435483181f7SDariusz Sosnowski return -rte_errno; 1436483181f7SDariusz Sosnowski } 1437483181f7SDariusz Sosnowski } 1438483181f7SDariusz Sosnowski continue_dev_stop: 1439483181f7SDariusz Sosnowski #endif 14403f2fe392SNélio Laranjeiro dev->data->dev_started = 0; 14413f2fe392SNélio Laranjeiro /* Prevent crashes when queues are still in use. */ 1442a41f593fSFerruh Yigit dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; 1443a41f593fSFerruh Yigit dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 14443f2fe392SNélio Laranjeiro rte_wmb(); 14452aac5b5dSYongseok Koh /* Disable datapath on secondary process. */ 14462e86c4e5SOphir Munk mlx5_mp_os_req_stop_rxtx(dev); 144720698c9fSOphir Munk rte_delay_us_sleep(1000 * priv->rxqs_n); 144824f653a7SYongseok Koh DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id); 1449f64a7946SRongwei Liu if (priv->sh->config.dv_flow_en == 2) { 1450e12a0166STyler Retzlaff if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed)) 1451f64a7946SRongwei Liu flow_hw_rxq_flag_set(dev, false); 1452f64a7946SRongwei Liu } else { 14538db7e3b6SBing Zhao mlx5_flow_stop_default(dev); 1454f64a7946SRongwei Liu } 14558db7e3b6SBing Zhao /* Control flows for default traffic can be removed firstly. */ 1456af4f09f2SNélio Laranjeiro mlx5_traffic_disable(dev); 14578db7e3b6SBing Zhao /* All RX queue flags will be cleared in the flush interface. */ 1458b4edeaf3SSuanming Mou mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true); 1459ec962badSLi Zhang mlx5_flow_meter_rxq_flush(dev); 1460ec4e11d4SDmitry Kozlyuk mlx5_action_handle_detach(dev); 14619fa7c1cdSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT 14629fa7c1cdSDariusz Sosnowski mlx5_flow_hw_cleanup_ctrl_rx_templates(dev); 14639fa7c1cdSDariusz Sosnowski #endif 1464af4f09f2SNélio Laranjeiro mlx5_rx_intr_vec_disable(dev); 146591389890SOphir Munk priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS; 146691389890SOphir Munk priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS; 146717f95513SDmitry Kozlyuk priv->sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS; 1468af4f09f2SNélio Laranjeiro mlx5_txq_stop(dev); 1469af4f09f2SNélio Laranjeiro mlx5_rxq_stop(dev); 147023233fd6SBing Zhao if (priv->obj_ops.lb_dummy_queue_release) 147123233fd6SBing Zhao priv->obj_ops.lb_dummy_queue_release(dev); 1472d133f4cdSViacheslav Ovsiienko mlx5_txpp_stop(dev); 147362024eb8SIvan Ilchenko 147462024eb8SIvan Ilchenko return 0; 1475e60fbd5bSAdrien Mazarguil } 1476272733b5SNélio Laranjeiro 14771939eb6fSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT 14781939eb6fSDariusz Sosnowski 14791939eb6fSDariusz Sosnowski static int 14801939eb6fSDariusz Sosnowski mlx5_traffic_enable_hws(struct rte_eth_dev *dev) 14811939eb6fSDariusz Sosnowski { 14821939eb6fSDariusz Sosnowski struct mlx5_priv *priv = dev->data->dev_private; 1483483181f7SDariusz Sosnowski struct mlx5_sh_config *config = &priv->sh->config; 14849fa7c1cdSDariusz Sosnowski uint64_t flags = 0; 14851939eb6fSDariusz Sosnowski unsigned int i; 14861939eb6fSDariusz Sosnowski int ret; 14871939eb6fSDariusz Sosnowski 1488483181f7SDariusz Sosnowski /* 1489483181f7SDariusz Sosnowski * With extended metadata enabled, the Tx metadata copy is handled by default 1490483181f7SDariusz Sosnowski * Tx tagging flow rules, so default Tx flow rule is not needed. It is only 1491483181f7SDariusz Sosnowski * required when representor matching is disabled. 1492483181f7SDariusz Sosnowski */ 1493483181f7SDariusz Sosnowski if (config->dv_esw_en && 1494483181f7SDariusz Sosnowski !config->repr_matching && 1495483181f7SDariusz Sosnowski config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS && 1496483181f7SDariusz Sosnowski priv->master) { 1497ddb68e47SBing Zhao if (mlx5_flow_hw_create_tx_default_mreg_copy_flow(dev)) 1498ddb68e47SBing Zhao goto error; 14991939eb6fSDariusz Sosnowski } 15001939eb6fSDariusz Sosnowski for (i = 0; i < priv->txqs_n; ++i) { 15011939eb6fSDariusz Sosnowski struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i); 15021939eb6fSDariusz Sosnowski uint32_t queue; 15031939eb6fSDariusz Sosnowski 15041939eb6fSDariusz Sosnowski if (!txq) 15051939eb6fSDariusz Sosnowski continue; 150626e1eaf2SDariusz Sosnowski queue = mlx5_txq_get_sqn(txq); 1507*cf9a91c6SDariusz Sosnowski if ((priv->representor || priv->master) && 1508*cf9a91c6SDariusz Sosnowski config->dv_esw_en && 1509*cf9a91c6SDariusz Sosnowski config->fdb_def_rule) { 1510f37c184aSSuanming Mou if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue, false)) { 15111939eb6fSDariusz Sosnowski mlx5_txq_release(dev, i); 15121939eb6fSDariusz Sosnowski goto error; 15131939eb6fSDariusz Sosnowski } 15141939eb6fSDariusz Sosnowski } 1515483181f7SDariusz Sosnowski if (config->dv_esw_en && config->repr_matching) { 1516f37c184aSSuanming Mou if (mlx5_flow_hw_tx_repr_matching_flow(dev, queue, false)) { 1517483181f7SDariusz Sosnowski mlx5_txq_release(dev, i); 1518483181f7SDariusz Sosnowski goto error; 1519483181f7SDariusz Sosnowski } 1520483181f7SDariusz Sosnowski } 15211939eb6fSDariusz Sosnowski mlx5_txq_release(dev, i); 15221939eb6fSDariusz Sosnowski } 1523483181f7SDariusz Sosnowski if (config->fdb_def_rule) { 1524483181f7SDariusz Sosnowski if ((priv->master || priv->representor) && config->dv_esw_en) { 152526e1eaf2SDariusz Sosnowski if (!mlx5_flow_hw_esw_create_default_jump_flow(dev)) 152626e1eaf2SDariusz Sosnowski priv->fdb_def_rule = 1; 152726e1eaf2SDariusz Sosnowski else 15281939eb6fSDariusz Sosnowski goto error; 15291939eb6fSDariusz Sosnowski } 153026e1eaf2SDariusz Sosnowski } else { 153126e1eaf2SDariusz Sosnowski DRV_LOG(INFO, "port %u FDB default rule is disabled", dev->data->port_id); 153226e1eaf2SDariusz Sosnowski } 15339fa7c1cdSDariusz Sosnowski if (priv->isolated) 15341939eb6fSDariusz Sosnowski return 0; 153587e4384dSBing Zhao if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) 153649dffadfSBing Zhao if (mlx5_flow_hw_lacp_rx_flow(dev)) 153749dffadfSBing Zhao goto error; 15389fa7c1cdSDariusz Sosnowski if (dev->data->promiscuous) 15399fa7c1cdSDariusz Sosnowski flags |= MLX5_CTRL_PROMISCUOUS; 15409fa7c1cdSDariusz Sosnowski if (dev->data->all_multicast) 15419fa7c1cdSDariusz Sosnowski flags |= MLX5_CTRL_ALL_MULTICAST; 15429fa7c1cdSDariusz Sosnowski else 15439fa7c1cdSDariusz Sosnowski flags |= MLX5_CTRL_BROADCAST | MLX5_CTRL_IPV4_MULTICAST | MLX5_CTRL_IPV6_MULTICAST; 15449fa7c1cdSDariusz Sosnowski flags |= MLX5_CTRL_DMAC; 15459fa7c1cdSDariusz Sosnowski if (priv->vlan_filter_n) 15469fa7c1cdSDariusz Sosnowski flags |= MLX5_CTRL_VLAN_FILTER; 15479fa7c1cdSDariusz Sosnowski return mlx5_flow_hw_ctrl_flows(dev, flags); 15481939eb6fSDariusz Sosnowski error: 15491939eb6fSDariusz Sosnowski ret = rte_errno; 15501939eb6fSDariusz Sosnowski mlx5_flow_hw_flush_ctrl_flows(dev); 15511939eb6fSDariusz Sosnowski rte_errno = ret; 15521939eb6fSDariusz Sosnowski return -rte_errno; 15531939eb6fSDariusz Sosnowski } 15541939eb6fSDariusz Sosnowski 15551939eb6fSDariusz Sosnowski #endif 15561939eb6fSDariusz Sosnowski 1557272733b5SNélio Laranjeiro /** 1558272733b5SNélio Laranjeiro * Enable traffic flows configured by control plane 1559272733b5SNélio Laranjeiro * 1560af4f09f2SNélio Laranjeiro * @param dev 1561272733b5SNélio Laranjeiro * Pointer to Ethernet device structure. 1562272733b5SNélio Laranjeiro * 1563272733b5SNélio Laranjeiro * @return 1564a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 1565272733b5SNélio Laranjeiro */ 1566272733b5SNélio Laranjeiro int 1567af4f09f2SNélio Laranjeiro mlx5_traffic_enable(struct rte_eth_dev *dev) 1568272733b5SNélio Laranjeiro { 1569dbeba4cfSThomas Monjalon struct mlx5_priv *priv = dev->data->dev_private; 1570272733b5SNélio Laranjeiro struct rte_flow_item_eth bcast = { 15718275d5fcSThomas Monjalon .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1572272733b5SNélio Laranjeiro }; 1573272733b5SNélio Laranjeiro struct rte_flow_item_eth ipv6_multi_spec = { 15748275d5fcSThomas Monjalon .hdr.dst_addr.addr_bytes = "\x33\x33\x00\x00\x00\x00", 1575272733b5SNélio Laranjeiro }; 1576272733b5SNélio Laranjeiro struct rte_flow_item_eth ipv6_multi_mask = { 15778275d5fcSThomas Monjalon .hdr.dst_addr.addr_bytes = "\xff\xff\x00\x00\x00\x00", 1578272733b5SNélio Laranjeiro }; 1579272733b5SNélio Laranjeiro struct rte_flow_item_eth unicast = { 15808275d5fcSThomas Monjalon .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00", 1581272733b5SNélio Laranjeiro }; 1582272733b5SNélio Laranjeiro struct rte_flow_item_eth unicast_mask = { 15838275d5fcSThomas Monjalon .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff", 1584272733b5SNélio Laranjeiro }; 1585272733b5SNélio Laranjeiro const unsigned int vlan_filter_n = priv->vlan_filter_n; 15866d13ea8eSOlivier Matz const struct rte_ether_addr cmp = { 1587272733b5SNélio Laranjeiro .addr_bytes = "\x00\x00\x00\x00\x00\x00", 1588272733b5SNélio Laranjeiro }; 1589272733b5SNélio Laranjeiro unsigned int i; 1590272733b5SNélio Laranjeiro unsigned int j; 1591272733b5SNélio Laranjeiro int ret; 1592272733b5SNélio Laranjeiro 15931939eb6fSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT 15941939eb6fSDariusz Sosnowski if (priv->sh->config.dv_flow_en == 2) 15951939eb6fSDariusz Sosnowski return mlx5_traffic_enable_hws(dev); 15961939eb6fSDariusz Sosnowski #endif 15973c84f34eSOri Kam /* 15983c84f34eSOri Kam * Hairpin txq default flow should be created no matter if it is 15993c84f34eSOri Kam * isolation mode. Or else all the packets to be sent will be sent 16003c84f34eSOri Kam * out directly without the TX flow actions, e.g. encapsulation. 16013c84f34eSOri Kam */ 16023c84f34eSOri Kam for (i = 0; i != priv->txqs_n; ++i) { 16033c84f34eSOri Kam struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); 16043c84f34eSOri Kam if (!txq_ctrl) 16053c84f34eSOri Kam continue; 1606aa8bea0eSBing Zhao /* Only Tx implicit mode requires the default Tx flow. */ 1607c06f77aeSMichael Baum if (txq_ctrl->is_hairpin && 1608aa8bea0eSBing Zhao txq_ctrl->hairpin_conf.tx_explicit == 0 && 1609aa8bea0eSBing Zhao txq_ctrl->hairpin_conf.peers[0].port == 1610aa8bea0eSBing Zhao priv->dev_data->port_id) { 161126e1eaf2SDariusz Sosnowski ret = mlx5_ctrl_flow_source_queue(dev, 161226e1eaf2SDariusz Sosnowski mlx5_txq_get_sqn(txq_ctrl)); 16133c84f34eSOri Kam if (ret) { 16143c84f34eSOri Kam mlx5_txq_release(dev, i); 16153c84f34eSOri Kam goto error; 16163c84f34eSOri Kam } 16173c84f34eSOri Kam } 1618a13ec19cSMichael Baum if (priv->sh->config.dv_esw_en) { 161926e1eaf2SDariusz Sosnowski uint32_t q = mlx5_txq_get_sqn(txq_ctrl); 162026e1eaf2SDariusz Sosnowski 162126e1eaf2SDariusz Sosnowski if (mlx5_flow_create_devx_sq_miss_flow(dev, q) == 0) { 162226e1eaf2SDariusz Sosnowski mlx5_txq_release(dev, i); 1623686d05b6SXueming Li DRV_LOG(ERR, 1624686d05b6SXueming Li "Port %u Tx queue %u SQ create representor devx default miss rule failed.", 1625686d05b6SXueming Li dev->data->port_id, i); 1626686d05b6SXueming Li goto error; 1627686d05b6SXueming Li } 1628686d05b6SXueming Li } 16293c84f34eSOri Kam mlx5_txq_release(dev, i); 16303c84f34eSOri Kam } 16311939eb6fSDariusz Sosnowski if (priv->sh->config.fdb_def_rule) { 1632a13ec19cSMichael Baum if (priv->sh->config.dv_esw_en) { 1633fbde4331SMatan Azrad if (mlx5_flow_create_esw_table_zero_flow(dev)) 1634fbde4331SMatan Azrad priv->fdb_def_rule = 1; 1635fbde4331SMatan Azrad else 16361939eb6fSDariusz Sosnowski DRV_LOG(INFO, "port %u FDB default rule cannot be configured - only Eswitch group 0 flows are supported.", 16371939eb6fSDariusz Sosnowski dev->data->port_id); 16381939eb6fSDariusz Sosnowski } 16391939eb6fSDariusz Sosnowski } else { 16401939eb6fSDariusz Sosnowski DRV_LOG(INFO, "port %u FDB default rule is disabled", 16411939eb6fSDariusz Sosnowski dev->data->port_id); 1642fbde4331SMatan Azrad } 164387e4384dSBing Zhao if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) { 16440f0ae73aSShiri Kuzin ret = mlx5_flow_lacp_miss(dev); 16450f0ae73aSShiri Kuzin if (ret) 16460f0ae73aSShiri Kuzin DRV_LOG(INFO, "port %u LACP rule cannot be created - " 16470f0ae73aSShiri Kuzin "forward LACP to kernel.", dev->data->port_id); 16480f0ae73aSShiri Kuzin else 164987e4384dSBing Zhao DRV_LOG(INFO, "LACP traffic will be missed in port %u.", 165087e4384dSBing Zhao dev->data->port_id); 16510f0ae73aSShiri Kuzin } 1652f8cb4b57SNélio Laranjeiro if (priv->isolated) 1653f8cb4b57SNélio Laranjeiro return 0; 1654f8cb4b57SNélio Laranjeiro if (dev->data->promiscuous) { 1655f8cb4b57SNélio Laranjeiro struct rte_flow_item_eth promisc = { 16568275d5fcSThomas Monjalon .hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00", 16578275d5fcSThomas Monjalon .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00", 16588275d5fcSThomas Monjalon .hdr.ether_type = 0, 1659f8cb4b57SNélio Laranjeiro }; 1660f8cb4b57SNélio Laranjeiro 1661a6d83b6aSNélio Laranjeiro ret = mlx5_ctrl_flow(dev, &promisc, &promisc); 1662a6d83b6aSNélio Laranjeiro if (ret) 1663a6d83b6aSNélio Laranjeiro goto error; 1664f8cb4b57SNélio Laranjeiro } 1665f8cb4b57SNélio Laranjeiro if (dev->data->all_multicast) { 1666f8cb4b57SNélio Laranjeiro struct rte_flow_item_eth multicast = { 16678275d5fcSThomas Monjalon .hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00", 16688275d5fcSThomas Monjalon .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00", 16698275d5fcSThomas Monjalon .hdr.ether_type = 0, 1670f8cb4b57SNélio Laranjeiro }; 1671f8cb4b57SNélio Laranjeiro 1672a6d83b6aSNélio Laranjeiro ret = mlx5_ctrl_flow(dev, &multicast, &multicast); 1673a6d83b6aSNélio Laranjeiro if (ret) 1674a6d83b6aSNélio Laranjeiro goto error; 1675f8cb4b57SNélio Laranjeiro } else { 1676f8cb4b57SNélio Laranjeiro /* Add broadcast/multicast flows. */ 1677f8cb4b57SNélio Laranjeiro for (i = 0; i != vlan_filter_n; ++i) { 1678f8cb4b57SNélio Laranjeiro uint16_t vlan = priv->vlan_filter[i]; 1679f8cb4b57SNélio Laranjeiro 1680f8cb4b57SNélio Laranjeiro struct rte_flow_item_vlan vlan_spec = { 16818275d5fcSThomas Monjalon .hdr.vlan_tci = rte_cpu_to_be_16(vlan), 1682f8cb4b57SNélio Laranjeiro }; 16832bc98393SNelio Laranjeiro struct rte_flow_item_vlan vlan_mask = 16842bc98393SNelio Laranjeiro rte_flow_item_vlan_mask; 1685f8cb4b57SNélio Laranjeiro 1686f8cb4b57SNélio Laranjeiro ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast, 1687f8cb4b57SNélio Laranjeiro &vlan_spec, &vlan_mask); 1688f8cb4b57SNélio Laranjeiro if (ret) 1689f8cb4b57SNélio Laranjeiro goto error; 1690f8cb4b57SNélio Laranjeiro ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec, 1691f8cb4b57SNélio Laranjeiro &ipv6_multi_mask, 1692f8cb4b57SNélio Laranjeiro &vlan_spec, &vlan_mask); 1693f8cb4b57SNélio Laranjeiro if (ret) 1694f8cb4b57SNélio Laranjeiro goto error; 1695f8cb4b57SNélio Laranjeiro } 1696f8cb4b57SNélio Laranjeiro if (!vlan_filter_n) { 1697f8cb4b57SNélio Laranjeiro ret = mlx5_ctrl_flow(dev, &bcast, &bcast); 1698f8cb4b57SNélio Laranjeiro if (ret) 1699f8cb4b57SNélio Laranjeiro goto error; 1700f8cb4b57SNélio Laranjeiro ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec, 1701f8cb4b57SNélio Laranjeiro &ipv6_multi_mask); 1702084de7a1STal Shnaiderman if (ret) { 1703084de7a1STal Shnaiderman /* Do not fail on IPv6 broadcast creation failure. */ 1704084de7a1STal Shnaiderman DRV_LOG(WARNING, 1705084de7a1STal Shnaiderman "IPv6 broadcast is not supported"); 1706084de7a1STal Shnaiderman ret = 0; 1707084de7a1STal Shnaiderman } 1708f8cb4b57SNélio Laranjeiro } 1709f8cb4b57SNélio Laranjeiro } 1710f8cb4b57SNélio Laranjeiro /* Add MAC address flows. */ 1711272733b5SNélio Laranjeiro for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) { 17126d13ea8eSOlivier Matz struct rte_ether_addr *mac = &dev->data->mac_addrs[i]; 1713272733b5SNélio Laranjeiro 1714272733b5SNélio Laranjeiro if (!memcmp(mac, &cmp, sizeof(*mac))) 1715272733b5SNélio Laranjeiro continue; 17168275d5fcSThomas Monjalon memcpy(&unicast.hdr.dst_addr.addr_bytes, 1717272733b5SNélio Laranjeiro mac->addr_bytes, 171835b2d13fSOlivier Matz RTE_ETHER_ADDR_LEN); 1719272733b5SNélio Laranjeiro for (j = 0; j != vlan_filter_n; ++j) { 1720272733b5SNélio Laranjeiro uint16_t vlan = priv->vlan_filter[j]; 1721272733b5SNélio Laranjeiro 1722272733b5SNélio Laranjeiro struct rte_flow_item_vlan vlan_spec = { 17238275d5fcSThomas Monjalon .hdr.vlan_tci = rte_cpu_to_be_16(vlan), 1724272733b5SNélio Laranjeiro }; 17252bc98393SNelio Laranjeiro struct rte_flow_item_vlan vlan_mask = 17262bc98393SNelio Laranjeiro rte_flow_item_vlan_mask; 1727272733b5SNélio Laranjeiro 1728272733b5SNélio Laranjeiro ret = mlx5_ctrl_flow_vlan(dev, &unicast, 1729272733b5SNélio Laranjeiro &unicast_mask, 1730272733b5SNélio Laranjeiro &vlan_spec, 1731272733b5SNélio Laranjeiro &vlan_mask); 1732272733b5SNélio Laranjeiro if (ret) 1733272733b5SNélio Laranjeiro goto error; 1734272733b5SNélio Laranjeiro } 1735272733b5SNélio Laranjeiro if (!vlan_filter_n) { 1736a6d83b6aSNélio Laranjeiro ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask); 1737272733b5SNélio Laranjeiro if (ret) 1738272733b5SNélio Laranjeiro goto error; 1739272733b5SNélio Laranjeiro } 1740272733b5SNélio Laranjeiro } 1741272733b5SNélio Laranjeiro return 0; 1742272733b5SNélio Laranjeiro error: 1743a6d83b6aSNélio Laranjeiro ret = rte_errno; /* Save rte_errno before cleanup. */ 1744b4edeaf3SSuanming Mou mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false); 1745a6d83b6aSNélio Laranjeiro rte_errno = ret; /* Restore rte_errno. */ 1746a6d83b6aSNélio Laranjeiro return -rte_errno; 1747272733b5SNélio Laranjeiro } 1748272733b5SNélio Laranjeiro 1749272733b5SNélio Laranjeiro 1750272733b5SNélio Laranjeiro /** 1751272733b5SNélio Laranjeiro * Disable traffic flows configured by control plane 1752272733b5SNélio Laranjeiro * 1753272733b5SNélio Laranjeiro * @param dev 1754af4f09f2SNélio Laranjeiro * Pointer to Ethernet device private data. 1755272733b5SNélio Laranjeiro */ 1756925061b5SNélio Laranjeiro void 1757af4f09f2SNélio Laranjeiro mlx5_traffic_disable(struct rte_eth_dev *dev) 1758272733b5SNélio Laranjeiro { 17591939eb6fSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT 17601939eb6fSDariusz Sosnowski struct mlx5_priv *priv = dev->data->dev_private; 17611939eb6fSDariusz Sosnowski 17621939eb6fSDariusz Sosnowski if (priv->sh->config.dv_flow_en == 2) 17631939eb6fSDariusz Sosnowski mlx5_flow_hw_flush_ctrl_flows(dev); 17641939eb6fSDariusz Sosnowski else 17651939eb6fSDariusz Sosnowski #endif 1766b4edeaf3SSuanming Mou mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false); 1767272733b5SNélio Laranjeiro } 1768272733b5SNélio Laranjeiro 1769272733b5SNélio Laranjeiro /** 1770272733b5SNélio Laranjeiro * Restart traffic flows configured by control plane 1771272733b5SNélio Laranjeiro * 1772272733b5SNélio Laranjeiro * @param dev 1773af4f09f2SNélio Laranjeiro * Pointer to Ethernet device private data. 1774272733b5SNélio Laranjeiro * 1775272733b5SNélio Laranjeiro * @return 1776a6d83b6aSNélio Laranjeiro * 0 on success, a negative errno value otherwise and rte_errno is set. 1777272733b5SNélio Laranjeiro */ 1778272733b5SNélio Laranjeiro int 1779272733b5SNélio Laranjeiro mlx5_traffic_restart(struct rte_eth_dev *dev) 1780272733b5SNélio Laranjeiro { 1781af4f09f2SNélio Laranjeiro if (dev->data->dev_started) { 1782af4f09f2SNélio Laranjeiro mlx5_traffic_disable(dev); 17839fa7c1cdSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT 17849fa7c1cdSDariusz Sosnowski mlx5_flow_hw_cleanup_ctrl_rx_templates(dev); 17859fa7c1cdSDariusz Sosnowski #endif 1786a6d83b6aSNélio Laranjeiro return mlx5_traffic_enable(dev); 1787af4f09f2SNélio Laranjeiro } 1788272733b5SNélio Laranjeiro return 0; 1789272733b5SNélio Laranjeiro } 1790