/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"

/**
 * Stop traffic on Tx queues.
 *
 * Drops the reference on every Tx queue control structure; the queue
 * resources are torn down once their reference count allows it.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * For each configured Tx queue, takes a reference and creates the
 * underlying queue object.  On failure all queues started so far
 * (including the failing one) are released.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		/* Unconfigured slot: nothing to start. */
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			/* Hairpin queues carry no mbuf elements. */
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN);
		} else {
			txq_alloc_elts(txq_ctrl);
			/*
			 * Tx packet pacing requires a DevX SQ object,
			 * otherwise a Verbs (IBV) object is used.
			 */
			txq_ctrl->obj = mlx5_txq_obj_new
				(dev, i, priv->txpp_en ?
				MLX5_TXQ_OBJ_TYPE_DEVX_SQ :
				MLX5_TXQ_OBJ_TYPE_IBV);
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Release queues [0..i] - the failing index included. */
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;

	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately.
 */
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
	DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
		dev->data->port_id, priv->sh->device_attr.max_sge);
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
		struct rte_mempool *mp;

		/* Unconfigured slot: nothing to start. */
		if (!rxq_ctrl)
			continue;
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
			/* Pre-register Rx mempool. */
			mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
			     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
			DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
				" having %u chunks.", dev->data->port_id,
				rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
			mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
			ret = rxq_alloc_elts(rxq_ctrl);
			if (ret)
				goto error;
		}
		/* The queue object must not exist yet; allocate it zeroed. */
		MLX5_ASSERT(!rxq_ctrl->obj);
		rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
					    sizeof(*rxq_ctrl->obj), 0,
					    rxq_ctrl->socket);
		if (!rxq_ctrl->obj) {
			DRV_LOG(ERR,
				"Port %u Rx queue %u can't allocate resources.",
				dev->data->port_id, (*priv->rxqs)[i]->idx);
			rte_errno = ENOMEM;
			goto error;
		}
		/* Create the HW object through the per-device ops table. */
		ret = priv->obj_ops.rxq_obj_new(dev, i);
		if (ret) {
			/* Free the container; HW creation did not succeed. */
			mlx5_free(rxq_ctrl->obj);
			goto error;
		}
		DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
			dev->data->port_id, i, (void *)&rxq_ctrl->obj);
		LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Release queues [0..i] - the failing index included. */
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Binds Tx queues to Rx queues for hairpin.
 *
 * Binds Tx queues to the target Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;

	/*
	 * For every hairpin Tx queue, cross-wire its SQ with the peer Rx
	 * queue's RQ via DevX modify commands.  References taken with
	 * mlx5_txq_get()/mlx5_rxq_get() are dropped at the end of each
	 * iteration and on every error path.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %d",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq_ctrl = mlx5_rxq_get(dev,
					txq_ctrl->hairpin_conf.peers[0].queue);
		if (!rxq_ctrl) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		/* The peer Rx queue must point back at this Tx queue. */
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d can't be binded to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		/* Move the SQ RST->RDY, wiring the peer RQ id/vhca. */
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
		/* Move the RQ RST->RDY, wiring the peer SQ id/vhca. */
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		mlx5_txq_release(dev, i);
		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	}
	return 0;
error:
	/* Both references are held whenever this label is reached. */
	mlx5_txq_release(dev, i);
	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;
	int fine_inline;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
	/* Resolve the fine-granularity-inline dynamic mbuf flag, if any. */
	fine_inline = rte_mbuf_dynflag_lookup
		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
	if (fine_inline > 0)
		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
	else
		rte_net_mlx5_dynf_inline_mask = 0;
	if (dev->data->nb_rx_queues > 0) {
		ret = mlx5_dev_configure_rss_reta(dev);
		if (ret) {
			DRV_LOG(ERR, "port %u reta config failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return -rte_errno;
		}
	}
	/* Tx packet pacing must be started before the Tx queues. */
	ret = mlx5_txpp_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_hairpin_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/* Set started flag here for the following steps like control flow. */
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_os_stats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u failed to set defaults flows",
			dev->data->port_id);
		goto error;
	}
	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
	mlx5_flow_rxq_dynf_metadata_set(dev);
	/* Set flags and context to convert Rx timestamps. */
	mlx5_rxq_timestamp_set(dev);
	/* Set a mask and offset of scheduling on timestamp into Tx queues. */
	mlx5_txq_dynf_timestamp_set(dev);
	/*
	 * In non-cached mode, it only needs to start the default mreg copy
	 * action and no flow created by application exists anymore.
	 * But it is worth wrapping the interface for further usage.
	 */
	ret = mlx5_flow_start_default(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/*
	 * NOTE(review): rte_wmb() presumably makes all queue setup above
	 * globally visible before the burst callbacks are published and
	 * secondary processes are told to start - confirm intent.
	 */
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_os_req_start_rxtx(dev);
	if (priv->sh->intr_handle.fd >= 0) {
		priv->sh->port[priv->dev_port - 1].ih_port_id =
					(uint32_t)dev->data->port_id;
	} else {
		DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
			dev->data->port_id);
		dev->data->dev_conf.intr_conf.lsc = 0;
		dev->data->dev_conf.intr_conf.rmv = 0;
	}
	if (priv->sh->intr_handle_devx.fd >= 0)
		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
					(uint32_t)dev->data->port_id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_txpp_stop(dev); /* Stop last. */
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_os_req_stop_rxtx(dev);
	/*
	 * NOTE(review): the sleep appears to let in-flight bursts drain
	 * before resources are torn down - confirm the 1 ms/queue scaling.
	 */
	usleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop_default(dev);
	/* Control flows for default traffic can be removed firstly. */
	mlx5_traffic_disable(dev);
	/* All RX queue flags will be cleared in the flush interface. */
	mlx5_flow_list_flush(dev, &priv->flows, true);
	mlx5_rx_intr_vec_disable(dev);
	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_txpp_stop(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Match all-ones destination MAC: broadcast. */
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	/* 33:33:xx:xx:xx:xx - IPv6 multicast MAC prefix. */
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	/* Destination filled per MAC address in the loop below. */
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	/* All-zero address used to skip empty MAC table slots. */
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * Hairpin txq default flow should be created no matter if it is
	 * isolation mode. Or else all the packets to be sent will be sent
	 * out directly without the TX flow actions, e.g. encapsulation.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
	/* FDB default jump rule - best effort, failure is only logged. */
	if (priv->config.dv_esw_en && !priv->config.vf) {
		if (mlx5_flow_create_esw_table_zero_flow(dev))
			priv->fdb_def_rule = 1;
		else
			DRV_LOG(INFO, "port %u FDB default rule cannot be"
				" configured - only Eswitch group 0 flows are"
				" supported.", dev->data->port_id);
	}
	/* LACP miss rule for bonded PF - best effort as well. */
	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
		ret = mlx5_flow_lacp_miss(dev);
		if (ret)
			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
				"forward LACP to kernel.", dev->data->port_id);
		else
			DRV_LOG(INFO, "LACP traffic will be missed in port %u."
				, dev->data->port_id);
	}
	/* In isolated mode no implicit control flows are installed. */
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		/* Zero spec/mask: match everything. */
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		/* Match the multicast bit of the destination MAC. */
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

		/* Skip unused (all-zero) MAC table entries. */
		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Remove every control flow installed so far. */
	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}


/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * No-op unless the device has been started.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}