/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 */

#include <stddef.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/queue.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_io.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"

/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

/* Length of the default RSS hash key. */
const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);

/**
 * Allocate RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
	unsigned int i;
	int ret = 0;

	/* Iterate on segments. */
	for (i = 0; (i != elts_n); ++i) {
		struct rte_mbuf *buf;

		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
		if (buf == NULL) {
			ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
			ret = ENOMEM;
			goto error;
		}
		/* Headroom is reserved by rte_pktmbuf_alloc(). */
		assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		assert(rte_pktmbuf_data_len(buf) == 0);
		assert(rte_pktmbuf_pkt_len(buf) == 0);
		assert(!buf->next);
		/* Only the first segment keeps headroom. */
		if (i % sges_n)
			SET_DATA_OFF(buf, 0);
		PORT(buf) = rxq_ctrl->rxq.port_id;
		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
		PKT_LEN(buf) = DATA_LEN(buf);
		NB_SEGS(buf) = 1;
		(*rxq_ctrl->rxq.elts)[i] = buf;
	}
	/* If Rx vector is activated. */
	if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
		int j;

		/* Initialize default rearm_data for vPMD. */
		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
		rte_mbuf_refcnt_set(mbuf_init, 1);
		mbuf_init->nb_segs = 1;
		mbuf_init->port = rxq->port_id;
		/*
		 * prevent compiler reordering:
		 * rearm_data covers previous fields.
		 */
		rte_compiler_barrier();
		rxq->mbuf_initializer =
			*(uint64_t *)&mbuf_init->rearm_data;
		/* Padding with a fake mbuf for vectorized Rx. */
		for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
			(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
	}
	DEBUG("%p: allocated and configured %u segments (max %u packets)",
	      (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
	assert(ret == 0);
	return 0;
error:
	elts_n = i;
	for (i = 0; (i != elts_n); ++i) {
		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
		(*rxq_ctrl->rxq.elts)[i] = NULL;
	}
	DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
	assert(ret > 0);
	return ret;
}
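
/*
 * Note on mbuf_initializer (illustrative, not part of the Rx path itself):
 * in struct rte_mbuf, data_off, refcnt, nb_segs and port share the single
 * 8-byte rearm_data region, so the vectorized Rx burst can reset all four
 * fields of a received mbuf with one 64-bit store. A minimal sketch of how
 * a vPMD loop consumes the value saved above:
 *
 *	uint64_t init = rxq->mbuf_initializer;
 *
 *	*(uint64_t *)&mbuf->rearm_data = init;
 */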

/**
 * Free RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	const uint16_t q_n = (1 << rxq->elts_n);
	const uint16_t q_mask = q_n - 1;
	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
	uint16_t i;

	DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
	if (rxq->elts == NULL)
		return;
	/**
	 * Some mbufs in the ring belong to the application.
	 * They cannot be freed.
	 */
	if (mlx5_rxq_check_vec_support(rxq) > 0) {
		for (i = 0; i < used; ++i)
			(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
		rxq->rq_pi = rxq->rq_ci;
	}
	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
		if ((*rxq->elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq->elts)[i]);
		(*rxq->elts)[i] = NULL;
	}
}
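
/*
 * Worked example for the "used" computation above (illustrative numbers):
 * with elts_n = 8, q_n = 256; if rq_ci = 300 and rq_pi = 172, then
 * rq_ci - rq_pi = 128 mbufs in the ring are still owned by the driver and
 * used = 256 - 128 = 128 slots hold mbufs already handed to the
 * application. Those slots are cleared first (masked with q_mask to stay
 * inside the ring) so the final loop does not free mbufs the application
 * still owns; the uint16_t arithmetic keeps the subtraction correct across
 * index wrap-around.
 */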

/**
 * Clean up an RX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
void
mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	DEBUG("cleaning up %p", (void *)rxq_ctrl);
	if (rxq_ctrl->ibv)
		mlx5_rxq_ibv_release(rxq_ctrl->ibv);
	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
}

/**
 * Returns the per-queue supported offloads.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
			     DEV_RX_OFFLOAD_TIMESTAMP |
			     DEV_RX_OFFLOAD_JUMBO_FRAME);

	if (config->hw_fcs_strip)
		offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
	if (config->hw_csum)
		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
			     DEV_RX_OFFLOAD_UDP_CKSUM |
			     DEV_RX_OFFLOAD_TCP_CKSUM);
	if (config->hw_vlan_strip)
		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	return offloads;
}

/**
 * Returns the per-port supported offloads.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx5_get_rx_port_offloads(void)
{
	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;

	return offloads;
}

/**
 * Checks if the per-queue offload configuration is valid.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param offloads
 *   Per-queue offloads configuration.
 *
 * @return
 *   1 if the configuration is valid, 0 otherwise.
 */
static int
mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
	uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev);
	uint64_t port_supp_offloads = mlx5_get_rx_port_offloads();

	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	    offloads)
		return 0;
	if (((port_offloads ^ offloads) & port_supp_offloads))
		return 0;
	return 1;
}
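
/*
 * Worked example for the two checks above (illustrative): requesting
 * offloads = DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_VLAN_FILTER passes
 * the first test only if both bits appear in the union of queue- and
 * port-level capabilities. The second test rejects any mismatch on
 * port-level offloads: VLAN_FILTER belongs to port_supp_offloads, so the
 * XOR keeps its bit whenever the queue requests it while the port
 * configuration does not (or vice versa); a queue cannot toggle a
 * port-wide offload on its own.
 */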

/**
 * DPDK callback to configure a RX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	int ret = 0;

	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
		WARN("%p: increased number of descriptors in RX queue %u"
		     " to the next power of two (%d)",
		     (void *)dev, idx, desc);
	}
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->rxqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->rxqs_n);
		return -EOVERFLOW;
	}
	if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
		ret = ENOTSUP;
		ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
		      "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
		      (void *)dev, conf->offloads,
		      dev->data->dev_conf.rxmode.offloads,
		      (mlx5_get_rx_port_offloads() |
		       mlx5_get_rx_queue_offloads(dev)));
		goto out;
	}
	if (!mlx5_rxq_releasable(dev, idx)) {
		ret = EBUSY;
		ERROR("%p: unable to release queue index %u",
		      (void *)dev, idx);
		goto out;
	}
	mlx5_rxq_release(dev, idx);
	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
	if (!rxq_ctrl) {
		ERROR("%p: unable to allocate queue index %u",
		      (void *)dev, idx);
		ret = ENOMEM;
		goto out;
	}
	DEBUG("%p: adding RX queue %p to list",
	      (void *)dev, (void *)rxq_ctrl);
	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
out:
	return -ret;
}

/**
 * DPDK callback to release an RX queue.
 *
 * @param dpdk_rxq
 *   Generic RX queue pointer.
 */
void
mlx5_rx_queue_release(void *dpdk_rxq)
{
	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct priv *priv;

	if (rxq == NULL)
		return;
	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	priv = rxq_ctrl->priv;
	if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx))
		rte_panic("Rx queue %p is still used by a flow and cannot be"
			  " removed\n", (void *)rxq_ctrl);
	mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx);
}
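
/*
 * Application-side sketch (illustrative, not part of the driver): the two
 * callbacks above are reached through the generic ethdev API. Assuming an
 * already configured port "port_id" and a mempool "mbuf_pool":
 *
 *	struct rte_eth_rxconf rxconf = {
 *		.offloads = DEV_RX_OFFLOAD_SCATTER |
 *			    DEV_RX_OFFLOAD_VLAN_STRIP,
 *	};
 *	int ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *					 &rxconf, mbuf_pool);
 *
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "Rx queue setup failed: %d\n", ret);
 */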

/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, negative on failure.
 */
int
mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int count = 0;
	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;

	if (!priv->dev->data->dev_conf.intr_conf.rxq)
		return 0;
	mlx5_rx_intr_vec_disable(dev);
	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
	if (intr_handle->intr_vec == NULL) {
		ERROR("failed to allocate memory for interrupt vector,"
		      " Rx interrupts will not be supported");
		return -ENOMEM;
	}
	intr_handle->type = RTE_INTR_HANDLE_EXT;
	for (i = 0; i != n; ++i) {
		/* This rxq ibv must not be released in this function. */
		struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
		int fd;
		int flags;
		int rc;

		/* Skip queues that cannot request interrupts. */
		if (!rxq_ibv || !rxq_ibv->channel) {
			/* Use invalid intr_vec[] index to disable entry. */
			intr_handle->intr_vec[i] =
				RTE_INTR_VEC_RXTX_OFFSET +
				RTE_MAX_RXTX_INTR_VEC_ID;
			continue;
		}
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			ERROR("too many Rx queues for interrupt vector size"
			      " (%d), Rx interrupts cannot be enabled",
			      RTE_MAX_RXTX_INTR_VEC_ID);
			mlx5_rx_intr_vec_disable(dev);
			return -1;
		}
		fd = rxq_ibv->channel->fd;
		flags = fcntl(fd, F_GETFL);
		rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
		if (rc < 0) {
			ERROR("failed to make Rx interrupt file descriptor"
			      " %d non-blocking for queue index %d", fd, i);
			mlx5_rx_intr_vec_disable(dev);
			return -1;
		}
		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
		intr_handle->efds[count] = fd;
		count++;
	}
	if (!count)
		mlx5_rx_intr_vec_disable(dev);
	else
		intr_handle->nb_efd = count;
	return 0;
}

/**
 * Clean up Rx interrupts handler.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	if (!priv->dev->data->dev_conf.intr_conf.rxq)
		return;
	if (!intr_handle->intr_vec)
		goto free;
	for (i = 0; i != n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		struct mlx5_rxq_data *rxq_data;

		if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
		    RTE_MAX_RXTX_INTR_VEC_ID)
			continue;
		/**
		 * Need to access directly the queue to release the reference
		 * kept in mlx5_rx_intr_vec_enable().
		 */
		rxq_data = (*priv->rxqs)[i];
		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
		mlx5_rxq_ibv_release(rxq_ctrl->ibv);
	}
free:
	rte_intr_free_epoll_fd(intr_handle);
	if (intr_handle->intr_vec)
		free(intr_handle->intr_vec);
	intr_handle->nb_efd = 0;
	intr_handle->intr_vec = NULL;
}
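
/*
 * Application-side sketch (illustrative, not part of the driver): once
 * mlx5_rx_intr_vec_enable() has populated the interrupt vector, an
 * application typically registers a queue with an epoll instance and
 * sleeps until traffic arrives:
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	struct rte_epoll_event event;
 *	int n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, timeout_ms);
 *
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	...drain the queue with rte_eth_rx_burst(), then re-enable...
 */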

/**
 * MLX5 CQ notification.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 * @param sq_n_rxq
 *   Sequence number per receive queue.
 */
static inline void
mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
{
	int sq_n = 0;
	uint32_t doorbell_hi;
	uint64_t doorbell;
	void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;

	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
	doorbell = (uint64_t)doorbell_hi << 32;
	doorbell |= rxq->cqn;
	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
}
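
/*
 * Note on the doorbell layout (illustrative): the 64-bit value written to
 * the UAR register packs the arm request as
 *
 *	doorbell = ((uint64_t)((sq_n << MLX5_CQ_SQN_OFFSET) |
 *			       (cq_ci & MLX5_CI_MASK)) << 32) | cqn;
 *
 * i.e. the high word repeats what was stored in the arm doorbell record
 * (sequence number plus current consumer index) and the low word carries
 * the CQ number, letting the device match the arm request against the CQ
 * state and raise a single event for completions past cq_ci.
 */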

/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, negative on failure.
 */
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	int ret = 0;

	rxq_data = (*priv->rxqs)[rx_queue_id];
	if (!rxq_data) {
		ret = EINVAL;
		goto exit;
	}
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (rxq_ctrl->irq) {
		struct mlx5_rxq_ibv *rxq_ibv;

		rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
		if (!rxq_ibv) {
			ret = EINVAL;
			goto exit;
		}
		mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
		mlx5_rxq_ibv_release(rxq_ibv);
	}
exit:
	if (ret)
		WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
	return -ret;
}

/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Rx queue number.
 *
 * @return
 *   0 on success, negative on failure.
 */
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_rxq_ibv *rxq_ibv = NULL;
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret = 0;

	rxq_data = (*priv->rxqs)[rx_queue_id];
	if (!rxq_data) {
		ret = EINVAL;
		goto exit;
	}
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (!rxq_ctrl->irq)
		goto exit;
	rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
	if (!rxq_ibv) {
		ret = EINVAL;
		goto exit;
	}
	ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
	if (ret || ev_cq != rxq_ibv->cq) {
		ret = EINVAL;
		goto exit;
	}
	rxq_data->cq_arm_sn++;
	mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
exit:
	if (rxq_ibv)
		mlx5_rxq_ibv_release(rxq_ibv);
	if (ret)
		WARN("unable to disable interrupt on rx queue %d",
		     rx_queue_id);
	return -ret;
}
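
/*
 * Note (illustrative): the enable/disable pair above follows the canonical
 * libibverbs completion-channel pattern, with the glue layer standing in
 * for direct calls and mlx5_arm_cq() taking the place of
 * ibv_req_notify_cq():
 *
 *	struct ibv_cq *ev_cq;
 *	void *ev_ctx;
 *
 *	ibv_req_notify_cq(cq, 0);                   // arm for one event
 *	ibv_get_cq_event(channel, &ev_cq, &ev_ctx); // blocks on channel fd
 *	ibv_ack_cq_events(ev_cq, 1);                // every event needs an ack
 */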

/**
 * Create the Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs object initialised if it can be created.
 */
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct ibv_wq_attr mod;
	union {
		struct {
			struct ibv_cq_init_attr_ex ibv;
			struct mlx5dv_cq_init_attr mlx5;
		} cq;
		struct ibv_wq_init_attr wq;
		struct ibv_cq_ex cq_attr;
	} attr;
	unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
	struct mlx5_rxq_ibv *tmpl;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	unsigned int i;
	int ret = 0;
	struct mlx5dv_obj obj;
	struct mlx5_dev_config *config = &priv->config;

	assert(rxq_data);
	assert(!rxq_ctrl->ibv);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
	priv->verbs_alloc_ctx.obj = rxq_ctrl;
	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
				 rxq_ctrl->socket);
	if (!tmpl) {
		ERROR("%p: cannot allocate verbs resources",
		      (void *)rxq_ctrl);
		goto error;
	}
	tmpl->rxq_ctrl = rxq_ctrl;
	/* Use the entire RX mempool as the memory region. */
	tmpl->mr = mlx5_mr_get(dev, rxq_data->mp);
	if (!tmpl->mr) {
		tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
		if (!tmpl->mr) {
			ERROR("%p: MR creation failure", (void *)rxq_ctrl);
			goto error;
		}
	}
	if (rxq_ctrl->irq) {
		tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
		if (!tmpl->channel) {
			ERROR("%p: Comp Channel creation failure",
			      (void *)rxq_ctrl);
			goto error;
		}
	}
	attr.cq.ibv = (struct ibv_cq_init_attr_ex){
		.cqe = cqe_n,
		.channel = tmpl->channel,
		.comp_mask = 0,
	};
	attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
		.comp_mask = 0,
	};
	if (config->cqe_comp && !rxq_data->hw_timestamp) {
		attr.cq.mlx5.comp_mask |=
			MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
		attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			attr.cq.ibv.cqe *= 2;
	} else if (config->cqe_comp && rxq_data->hw_timestamp) {
		DEBUG("Rx CQE compression is disabled for HW timestamp");
	}
	tmpl->cq = mlx5_glue->cq_ex_to_cq
		(mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
					 &attr.cq.mlx5));
	if (tmpl->cq == NULL) {
		ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
		goto error;
	}
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.orig_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.orig_attr.max_sge);
	attr.wq = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << rxq_data->sges_n,
		.pd = priv->pd,
		.cq = tmpl->cq,
		.comp_mask =
			IBV_WQ_FLAGS_CVLAN_STRIPPING |
			0,
		.create_flags = (rxq_data->vlan_strip ?
				 IBV_WQ_FLAGS_CVLAN_STRIPPING :
				 0),
	};
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq_data->crc_present) {
		attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
	if (config->hw_padding) {
		attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
#endif
	tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
	if (tmpl->wq == NULL) {
		ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
		goto error;
	}
	/*
	 * Make sure number of WRs*SGEs match expectations since a queue
	 * cannot allocate more than "desc" buffers.
	 */
	if (((int)attr.wq.max_wr !=
	     ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
	    ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
		ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
		      (void *)rxq_ctrl,
		      ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
		      (1 << rxq_data->sges_n),
		      attr.wq.max_wr, attr.wq.max_sge);
		goto error;
	}
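	/*
	 * Worked example for the check above (illustrative numbers): with
	 * desc = 512 and sges_n = 2, elts_n = 9, so the WQ must report
	 * max_wr = (1 << 9) >> 2 = 128 work requests of
	 * max_sge = 1 << 2 = 4 segments each, i.e. exactly 512 buffers;
	 * any other combination would break the 1:1 mapping between ring
	 * slots and allocated mbufs.
	 */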
	/* Change queue state to ready. */
	mod = (struct ibv_wq_attr){
		.attr_mask = IBV_WQ_ATTR_STATE,
		.wq_state = IBV_WQS_RDY,
	};
	ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
	if (ret) {
		ERROR("%p: WQ state to IBV_WQS_RDY failed",
		      (void *)rxq_ctrl);
		goto error;
	}
	obj.cq.in = tmpl->cq;
	obj.cq.out = &cq_info;
	obj.rwq.in = tmpl->wq;
	obj.rwq.out = &rwq;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
	if (ret != 0)
		goto error;
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
		goto error;
	}
	/* Fill the rings. */
	rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
		(uintptr_t)rwq.buf;
	for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
		struct rte_mbuf *buf = (*rxq_data->elts)[i];
		volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];

		/* scat->addr must be able to store a pointer. */
		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
		*scat = (struct mlx5_wqe_data_seg){
			.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
								  uintptr_t)),
			.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
			.lkey = tmpl->mr->lkey,
		};
	}
	rxq_data->rq_db = rwq.dbrec;
	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
	rxq_data->cq_ci = 0;
	rxq_data->rq_ci = 0;
	rxq_data->rq_pi = 0;
	rxq_data->zip = (struct rxq_zip){
		.ai = 0,
	};
	rxq_data->cq_db = cq_info.dbrec;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
	rxq_data->cq_uar = cq_info.cq_uar;
	rxq_data->cqn = cq_info.cqn;
	rxq_data->cq_arm_sn = 0;
	/* Update doorbell counter. */
	rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
	rte_wmb();
	*rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
	DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
	rte_atomic32_inc(&tmpl->refcnt);
	DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)dev,
	      (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
	LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return tmpl;
error:
	if (tmpl->wq)
		claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
	if (tmpl->cq)
		claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
	if (tmpl->channel)
		claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
	if (tmpl->mr)
		mlx5_mr_release(tmpl->mr);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return NULL;
}
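
/*
 * Reference-counting contract for the Verbs object (illustrative sketch):
 * creation leaves refcnt at 1, every additional user takes a reference
 * through mlx5_rxq_ibv_get() and drops it with mlx5_rxq_ibv_release(),
 * which only destroys the CQ/WQ/channel once the count reaches zero:
 *
 *	struct mlx5_rxq_ibv *ibv = mlx5_rxq_ibv_get(dev, idx);
 *
 *	if (ibv) {
 *		...use ibv->cq, ibv->wq, ibv->channel...
 *		mlx5_rxq_ibv_release(ibv);
 *	}
 */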

/**
 * Get an Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs object if it exists.
 */
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl;

	if (idx >= priv->rxqs_n)
		return NULL;
	if (!rxq_data)
		return NULL;
	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	if (rxq_ctrl->ibv) {
		mlx5_mr_get(dev, rxq_data->mp);
		rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
		DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)dev,
		      (void *)rxq_ctrl->ibv,
		      rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
	}
	return rxq_ctrl->ibv;
}

/**
 * Release an Rx verbs queue object.
 *
 * @param rxq_ibv
 *   Verbs Rx queue object.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
{
	int ret;

	assert(rxq_ibv);
	assert(rxq_ibv->wq);
	assert(rxq_ibv->cq);
	assert(rxq_ibv->mr);
	ret = mlx5_mr_release(rxq_ibv->mr);
	if (!ret)
		rxq_ibv->mr = NULL;
	DEBUG("Verbs Rx queue %p: refcnt %d",
	      (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
	if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
		rxq_free_elts(rxq_ibv->rxq_ctrl);
		claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
		claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
		if (rxq_ibv->channel)
			claim_zero(mlx5_glue->destroy_comp_channel
				   (rxq_ibv->channel));
		LIST_REMOVE(rxq_ibv, next);
		rte_free(rxq_ibv);
		return 0;
	}
	return EBUSY;
}

/**
 * Verify the Verbs Rx queue list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_rxq_ibv *rxq_ibv;

	LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
		DEBUG("%p: Verbs Rx queue %p still referenced", (void *)dev,
		      (void *)rxq_ibv);
		++ret;
	}
	return ret;
}

/**
 * Return true if a single reference exists on the object.
 *
 * @param rxq_ibv
 *   Verbs Rx queue object.
 */
int
mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
{
	assert(rxq_ibv);
	return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
}

/**
 * Create a DPDK Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   A DPDK queue object on success.
 */
struct mlx5_rxq_ctrl *
mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
	     unsigned int socket, const struct rte_eth_rxconf *conf,
	     struct rte_mempool *mp)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *tmpl;
	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
	struct mlx5_dev_config *config = &priv->config;
	/*
	 * Always allocate extra slots, even if eventually
	 * the vector Rx will not be used.
	 */
	const uint16_t desc_n =
		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;

	tmpl = rte_calloc_socket("RXQ", 1,
				 sizeof(*tmpl) +
				 desc_n * sizeof(struct rte_mbuf *),
				 0, socket);
	if (!tmpl)
		return NULL;
	tmpl->socket = socket;
	if (priv->dev->data->dev_conf.intr_conf.rxq)
		tmpl->irq = 1;
	/* Enable scattered packets support for this queue if necessary. */
	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
	    (mb_len - RTE_PKTMBUF_HEADROOM)) {
		tmpl->rxq.sges_n = 0;
	} else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
		unsigned int size =
			RTE_PKTMBUF_HEADROOM +
			dev->data->dev_conf.rxmode.max_rx_pkt_len;
		unsigned int sges_n;

		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = log2above((size / mb_len) + !!(size % mb_len));
		tmpl->rxq.sges_n = sges_n;
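		/*
		 * Worked example (illustrative numbers): assuming the
		 * default 128-byte RTE_PKTMBUF_HEADROOM, a 2176-byte mbuf
		 * data room and max_rx_pkt_len = 9000, size = 9128 and
		 * 9128 / 2176 = 4 with a remainder, so 5 segments are
		 * needed and sges_n = log2above(5) = 3, i.e. 8 SGEs per
		 * packet after rounding up to the next power of two.
		 */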
		/* Make sure rxq.sges_n did not overflow. */
		size = mb_len * (1 << tmpl->rxq.sges_n);
		size -= RTE_PKTMBUF_HEADROOM;
		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
			ERROR("%p: too many SGEs (%u) needed to handle"
			      " requested maximum packet size %u",
			      (void *)dev,
			      1 << sges_n,
			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
			goto error;
		}
	} else {
		WARN("%p: the requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     (void *)dev,
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     mb_len - RTE_PKTMBUF_HEADROOM);
	}
	DEBUG("%p: maximum number of segments per packet: %u",
	      (void *)dev, 1 << tmpl->rxq.sges_n);
	if (desc % (1 << tmpl->rxq.sges_n)) {
		ERROR("%p: number of RX queue descriptors (%u) is not a"
		      " multiple of SGEs per packet (%u)",
		      (void *)dev,
		      desc,
		      1 << tmpl->rxq.sges_n);
		goto error;
	}
	/* Toggle RX checksum offload if hardware supports it. */
	tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
	tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
				priv->config.tunnel_en);
	tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
	/* Configure VLAN stripping. */
	tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
	/* By default, FCS (CRC) is stripped by hardware. */
	if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
		tmpl->rxq.crc_present = 0;
	} else if (config->hw_fcs_strip) {
		tmpl->rxq.crc_present = 1;
	} else {
		WARN("%p: CRC stripping has been disabled but will still"
		     " be performed by hardware, make sure MLNX_OFED and"
		     " firmware are up to date",
		     (void *)dev);
		tmpl->rxq.crc_present = 0;
	}
	DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
	      " incoming frames to hide it",
	      (void *)dev,
	      tmpl->rxq.crc_present ? "disabled" : "enabled",
	      tmpl->rxq.crc_present << 2);
	tmpl->rxq.rss_hash = priv->rxqs_n > 1;
	/* Save port ID. */
	tmpl->rxq.port_id = dev->data->port_id;
	tmpl->priv = priv;
	tmpl->rxq.mp = mp;
	tmpl->rxq.stats.idx = idx;
	tmpl->rxq.elts_n = log2above(desc);
	tmpl->rxq.elts =
		(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
	rte_atomic32_inc(&tmpl->refcnt);
	DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev,
	      (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
	return tmpl;
error:
	rte_free(tmpl);
	return NULL;
}

/**
 * Get an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   A pointer to the queue if it exists.
 */
struct mlx5_rxq_ctrl *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;

	if ((*priv->rxqs)[idx]) {
		rxq_ctrl = container_of((*priv->rxqs)[idx],
					struct mlx5_rxq_ctrl,
					rxq);
		mlx5_rxq_ibv_get(dev, idx);
		rte_atomic32_inc(&rxq_ctrl->refcnt);
		DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev,
		      (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
	}
	return rxq_ctrl;
}
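
/*
 * Note (illustrative): mlx5_rxq_get() takes a reference on the control
 * structure and, through mlx5_rxq_ibv_get(), on the underlying Verbs
 * object, so each successful call must be balanced by mlx5_rxq_release(),
 * which returns EBUSY while other references remain:
 *
 *	struct mlx5_rxq_ctrl *ctrl = mlx5_rxq_get(dev, idx);
 *
 *	if (ctrl && mlx5_rxq_release(dev, idx) == EBUSY)
 *		...the queue is still in use elsewhere...
 */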
1068a1366b1aSNélio Laranjeiro /**
1069a1366b1aSNélio Laranjeiro  * Release an Rx queue.
1070a1366b1aSNélio Laranjeiro  *
1071*af4f09f2SNélio Laranjeiro  * @param dev
1072*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
1073a1366b1aSNélio Laranjeiro  * @param idx
1074a1366b1aSNélio Laranjeiro  *   RX queue index.
1075a1366b1aSNélio Laranjeiro  *
1076a1366b1aSNélio Laranjeiro  * @return
1077a1366b1aSNélio Laranjeiro  *   0 on success, errno value on failure.
1078a1366b1aSNélio Laranjeiro  */
1079a1366b1aSNélio Laranjeiro int
1080*af4f09f2SNélio Laranjeiro mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1081a1366b1aSNélio Laranjeiro {
1082*af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
1083a1366b1aSNélio Laranjeiro 	struct mlx5_rxq_ctrl *rxq_ctrl;
1084a1366b1aSNélio Laranjeiro 
1085a1366b1aSNélio Laranjeiro 	if (!(*priv->rxqs)[idx])
1086a1366b1aSNélio Laranjeiro 		return 0;
1087a1366b1aSNélio Laranjeiro 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1088a1366b1aSNélio Laranjeiro 	assert(rxq_ctrl->priv);
1089a1366b1aSNélio Laranjeiro 	if (rxq_ctrl->ibv) {
1090a1366b1aSNélio Laranjeiro 		int ret;
1091a1366b1aSNélio Laranjeiro 
1092*af4f09f2SNélio Laranjeiro 		ret = mlx5_rxq_ibv_release(rxq_ctrl->ibv);
1093a1366b1aSNélio Laranjeiro 		if (!ret)
1094a1366b1aSNélio Laranjeiro 			rxq_ctrl->ibv = NULL;
1095a1366b1aSNélio Laranjeiro 	}
1096*af4f09f2SNélio Laranjeiro 	DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev,
1097a1366b1aSNélio Laranjeiro 	      (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
1098a1366b1aSNélio Laranjeiro 	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
1099a1366b1aSNélio Laranjeiro 		LIST_REMOVE(rxq_ctrl, next);
1100a1366b1aSNélio Laranjeiro 		rte_free(rxq_ctrl);
1101a1366b1aSNélio Laranjeiro 		(*priv->rxqs)[idx] = NULL;
1102a1366b1aSNélio Laranjeiro 		return 0;
1103a1366b1aSNélio Laranjeiro 	}
1104a1366b1aSNélio Laranjeiro 	return EBUSY;
1105a1366b1aSNélio Laranjeiro }
1106a1366b1aSNélio Laranjeiro 
1107a1366b1aSNélio Laranjeiro /**
1108a1366b1aSNélio Laranjeiro  * Verify if the queue can be released.
1109a1366b1aSNélio Laranjeiro  *
1110*af4f09f2SNélio Laranjeiro  * @param dev
1111*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
1112a1366b1aSNélio Laranjeiro  * @param idx
1113a1366b1aSNélio Laranjeiro  *   RX queue index.
1114a1366b1aSNélio Laranjeiro  *
1115a1366b1aSNélio Laranjeiro  * @return
1116a1366b1aSNélio Laranjeiro  *   1 if the queue can be released, 0 otherwise, -1 if it does not exist.
1117a1366b1aSNélio Laranjeiro  */
1118a1366b1aSNélio Laranjeiro int
1119*af4f09f2SNélio Laranjeiro mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1120a1366b1aSNélio Laranjeiro {
1121*af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
1122a1366b1aSNélio Laranjeiro 	struct mlx5_rxq_ctrl *rxq_ctrl;
1123a1366b1aSNélio Laranjeiro 
1124a1366b1aSNélio Laranjeiro 	if (!(*priv->rxqs)[idx])
1125a1366b1aSNélio Laranjeiro 		return -1;
1126a1366b1aSNélio Laranjeiro 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1127a1366b1aSNélio Laranjeiro 	return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
1128a1366b1aSNélio Laranjeiro }
1129a1366b1aSNélio Laranjeiro 
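/*
 * Reference counting in short: mlx5_rxq_new() creates a queue with
 * refcnt == 1, each mlx5_rxq_get() adds a reference and each
 * mlx5_rxq_release() drops one, the queue being freed when the count
 * reaches zero.  An illustrative teardown loop (hypothetical caller,
 * not part of this file) pairs with mlx5_rxq_verify() below to detect
 * leaked references on device close:
 *
 *	uint16_t i;
 *
 *	for (i = 0; i != priv->rxqs_n; ++i)
 *		mlx5_rxq_release(dev, i);
 *	if (mlx5_rxq_verify(dev))
 *		WARN("%p: some Rx queues still remain", (void *)dev);
 */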
1130a1366b1aSNélio Laranjeiro /**
1131a1366b1aSNélio Laranjeiro  * Verify the Rx queue list is empty.
1132a1366b1aSNélio Laranjeiro  *
1133*af4f09f2SNélio Laranjeiro  * @param dev
1134*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
1135a1366b1aSNélio Laranjeiro  *
1136fb732b0aSNélio Laranjeiro  * @return
1137fb732b0aSNélio Laranjeiro  *   The number of objects not released.
1138a1366b1aSNélio Laranjeiro  */
1139a1366b1aSNélio Laranjeiro int
1140*af4f09f2SNélio Laranjeiro mlx5_rxq_verify(struct rte_eth_dev *dev)
1141a1366b1aSNélio Laranjeiro {
1142*af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
1143a1366b1aSNélio Laranjeiro 	struct mlx5_rxq_ctrl *rxq_ctrl;
1144a1366b1aSNélio Laranjeiro 	int ret = 0;
1145a1366b1aSNélio Laranjeiro 
1146a1366b1aSNélio Laranjeiro 	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1147*af4f09f2SNélio Laranjeiro 		DEBUG("%p: Rx Queue %p still referenced", (void *)dev,
1148a1366b1aSNélio Laranjeiro 		      (void *)rxq_ctrl);
1149a1366b1aSNélio Laranjeiro 		++ret;
1150a1366b1aSNélio Laranjeiro 	}
1151a1366b1aSNélio Laranjeiro 	return ret;
1152a1366b1aSNélio Laranjeiro }
1153a1366b1aSNélio Laranjeiro 
11544c7a0f5fSNélio Laranjeiro /**
11554c7a0f5fSNélio Laranjeiro  * Create an indirection table.
11564c7a0f5fSNélio Laranjeiro  *
1157*af4f09f2SNélio Laranjeiro  * @param dev
1158*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
11594c7a0f5fSNélio Laranjeiro  * @param queues
11604c7a0f5fSNélio Laranjeiro  *   Queues entering the indirection table.
11614c7a0f5fSNélio Laranjeiro  * @param queues_n
11624c7a0f5fSNélio Laranjeiro  *   Number of queues in the array.
11634c7a0f5fSNélio Laranjeiro  *
11644c7a0f5fSNélio Laranjeiro  * @return
11654c7a0f5fSNélio Laranjeiro  *   A new indirection table.
11664c7a0f5fSNélio Laranjeiro  */
11674c7a0f5fSNélio Laranjeiro struct mlx5_ind_table_ibv *
1168*af4f09f2SNélio Laranjeiro mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
11694c7a0f5fSNélio Laranjeiro 		       uint16_t queues_n)
11704c7a0f5fSNélio Laranjeiro {
1171*af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
11724c7a0f5fSNélio Laranjeiro 	struct mlx5_ind_table_ibv *ind_tbl;
11734c7a0f5fSNélio Laranjeiro 	const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
11744c7a0f5fSNélio Laranjeiro 				  log2above(queues_n) :
1175c1236ad7SNélio Laranjeiro 				  log2above(priv->config.ind_table_max_size);
11764c7a0f5fSNélio Laranjeiro 	struct ibv_wq *wq[1 << wq_n];
11774c7a0f5fSNélio Laranjeiro 	unsigned int i;
11784c7a0f5fSNélio Laranjeiro 	unsigned int j;
11794c7a0f5fSNélio Laranjeiro 
11804c7a0f5fSNélio Laranjeiro 	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
11814c7a0f5fSNélio Laranjeiro 			     queues_n * sizeof(uint16_t), 0);
11824c7a0f5fSNélio Laranjeiro 	if (!ind_tbl)
11834c7a0f5fSNélio Laranjeiro 		return NULL;
11844c7a0f5fSNélio Laranjeiro 	for (i = 0; i != queues_n; ++i) {
1185*af4f09f2SNélio Laranjeiro 		struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
11864c7a0f5fSNélio Laranjeiro 
11874c7a0f5fSNélio Laranjeiro 		if (!rxq)
11884c7a0f5fSNélio Laranjeiro 			goto error;
11894c7a0f5fSNélio Laranjeiro 		wq[i] = rxq->ibv->wq;
11904c7a0f5fSNélio Laranjeiro 		ind_tbl->queues[i] = queues[i];
11914c7a0f5fSNélio Laranjeiro 	}
11924c7a0f5fSNélio Laranjeiro 	ind_tbl->queues_n = queues_n;
11934c7a0f5fSNélio Laranjeiro 	/* Finalise indirection table: pad it by wrapping the initial WQs around. */
11944c7a0f5fSNélio Laranjeiro 	for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
11954c7a0f5fSNélio Laranjeiro 		wq[i] = wq[j];
11960e83b8e5SNelio Laranjeiro 	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
11970e83b8e5SNelio Laranjeiro 		(priv->ctx,
11984c7a0f5fSNélio Laranjeiro 		 &(struct ibv_rwq_ind_table_init_attr){
11994c7a0f5fSNélio Laranjeiro 			.log_ind_tbl_size = wq_n,
12004c7a0f5fSNélio Laranjeiro 			.ind_tbl = wq,
12014c7a0f5fSNélio Laranjeiro 			.comp_mask = 0,
12024c7a0f5fSNélio Laranjeiro 		 });
12034c7a0f5fSNélio Laranjeiro 	if (!ind_tbl->ind_table)
12044c7a0f5fSNélio Laranjeiro 		goto error;
12054c7a0f5fSNélio Laranjeiro 	rte_atomic32_inc(&ind_tbl->refcnt);
12064c7a0f5fSNélio Laranjeiro 	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
1207*af4f09f2SNélio Laranjeiro 	DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
12084c7a0f5fSNélio Laranjeiro 	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
12094c7a0f5fSNélio Laranjeiro 	return ind_tbl;
12104c7a0f5fSNélio Laranjeiro error:
12114c7a0f5fSNélio Laranjeiro 	rte_free(ind_tbl);
1212*af4f09f2SNélio Laranjeiro 	DEBUG("%p: cannot create indirection table", (void *)dev);
12134c7a0f5fSNélio Laranjeiro 	return NULL;
12144c7a0f5fSNélio Laranjeiro }
12154c7a0f5fSNélio Laranjeiro 
12164c7a0f5fSNélio Laranjeiro /**
12174c7a0f5fSNélio Laranjeiro  * Get an indirection table.
12184c7a0f5fSNélio Laranjeiro  *
1219*af4f09f2SNélio Laranjeiro  * @param dev
1220*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
12214c7a0f5fSNélio Laranjeiro  * @param queues
12224c7a0f5fSNélio Laranjeiro  *   Queues entering the indirection table.
12234c7a0f5fSNélio Laranjeiro  * @param queues_n
12244c7a0f5fSNélio Laranjeiro  *   Number of queues in the array.
12254c7a0f5fSNélio Laranjeiro  *
12264c7a0f5fSNélio Laranjeiro  * @return
12274c7a0f5fSNélio Laranjeiro  *   An indirection table if found.
12284c7a0f5fSNélio Laranjeiro  */
12294c7a0f5fSNélio Laranjeiro struct mlx5_ind_table_ibv *
1230*af4f09f2SNélio Laranjeiro mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
12314c7a0f5fSNélio Laranjeiro 		       uint16_t queues_n)
12324c7a0f5fSNélio Laranjeiro {
1233*af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
12344c7a0f5fSNélio Laranjeiro 	struct mlx5_ind_table_ibv *ind_tbl;
12354c7a0f5fSNélio Laranjeiro 
12364c7a0f5fSNélio Laranjeiro 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
12374c7a0f5fSNélio Laranjeiro 		if ((ind_tbl->queues_n == queues_n) &&
12384c7a0f5fSNélio Laranjeiro 		    (memcmp(ind_tbl->queues, queues,
12394c7a0f5fSNélio Laranjeiro 			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
12404c7a0f5fSNélio Laranjeiro 		     == 0))
12414c7a0f5fSNélio Laranjeiro 			break;
12424c7a0f5fSNélio Laranjeiro 	}
12434c7a0f5fSNélio Laranjeiro 	if (ind_tbl) {
12444c7a0f5fSNélio Laranjeiro 		unsigned int i;
12454c7a0f5fSNélio Laranjeiro 
12464c7a0f5fSNélio Laranjeiro 		rte_atomic32_inc(&ind_tbl->refcnt);
1247*af4f09f2SNélio Laranjeiro 		DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
12484c7a0f5fSNélio Laranjeiro 		      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
12494c7a0f5fSNélio Laranjeiro 		for (i = 0; i != ind_tbl->queues_n; ++i)
1250*af4f09f2SNélio Laranjeiro 			mlx5_rxq_get(dev, ind_tbl->queues[i]);
12514c7a0f5fSNélio Laranjeiro 	}
12524c7a0f5fSNélio Laranjeiro 	return ind_tbl;
12534c7a0f5fSNélio Laranjeiro }
12544c7a0f5fSNélio Laranjeiro 
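/*
 * Illustrative layout (not from the driver): assuming a hypothetical
 * ind_table_max_size of 8, a non power-of-two list queues[] = { 0, 1, 2 }
 * yields wq_n == 3 and the wrap-around loop above fills the 8 entries as:
 *
 *	index: 0  1  2  3  4  5  6  7
 *	queue: 0  1  2  0  1  2  0  1
 *
 * RSS then spreads flows over the 1 << wq_n entries, which maps traffic
 * back onto the three real queues in an almost even round-robin.
 */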
12554c7a0f5fSNélio Laranjeiro /**
12564c7a0f5fSNélio Laranjeiro  * Release an indirection table.
12574c7a0f5fSNélio Laranjeiro  *
1258*af4f09f2SNélio Laranjeiro  * @param dev
1259*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
12604c7a0f5fSNélio Laranjeiro  * @param ind_tbl
12614c7a0f5fSNélio Laranjeiro  *   Indirection table to release.
12624c7a0f5fSNélio Laranjeiro  *
12634c7a0f5fSNélio Laranjeiro  * @return
12644c7a0f5fSNélio Laranjeiro  *   0 on success, errno value on failure.
12654c7a0f5fSNélio Laranjeiro  */
12664c7a0f5fSNélio Laranjeiro int
1267*af4f09f2SNélio Laranjeiro mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
12684c7a0f5fSNélio Laranjeiro 			   struct mlx5_ind_table_ibv *ind_tbl)
12694c7a0f5fSNélio Laranjeiro {
12704c7a0f5fSNélio Laranjeiro 	unsigned int i;
12714c7a0f5fSNélio Laranjeiro 
1272*af4f09f2SNélio Laranjeiro 	DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
12734c7a0f5fSNélio Laranjeiro 	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
12744c7a0f5fSNélio Laranjeiro 	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
12750e83b8e5SNelio Laranjeiro 		claim_zero(mlx5_glue->destroy_rwq_ind_table
12760e83b8e5SNelio Laranjeiro 			   (ind_tbl->ind_table));
12774c7a0f5fSNélio Laranjeiro 	for (i = 0; i != ind_tbl->queues_n; ++i)
1278*af4f09f2SNélio Laranjeiro 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
12794c7a0f5fSNélio Laranjeiro 	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
12804c7a0f5fSNélio Laranjeiro 		LIST_REMOVE(ind_tbl, next);
12814c7a0f5fSNélio Laranjeiro 		rte_free(ind_tbl);
12824c7a0f5fSNélio Laranjeiro 		return 0;
12834c7a0f5fSNélio Laranjeiro 	}
12844c7a0f5fSNélio Laranjeiro 	return EBUSY;
12854c7a0f5fSNélio Laranjeiro }
12864c7a0f5fSNélio Laranjeiro 
12874c7a0f5fSNélio Laranjeiro /**
12884c7a0f5fSNélio Laranjeiro  * Verify the Verbs indirection table list is empty.
12894c7a0f5fSNélio Laranjeiro  *
1290*af4f09f2SNélio Laranjeiro  * @param dev
1291*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
12924c7a0f5fSNélio Laranjeiro  *
1293fb732b0aSNélio Laranjeiro  * @return
1294fb732b0aSNélio Laranjeiro  *   The number of objects not released.
12954c7a0f5fSNélio Laranjeiro  */
12964c7a0f5fSNélio Laranjeiro int
1297*af4f09f2SNélio Laranjeiro mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
12984c7a0f5fSNélio Laranjeiro {
1299*af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
13004c7a0f5fSNélio Laranjeiro 	struct mlx5_ind_table_ibv *ind_tbl;
13014c7a0f5fSNélio Laranjeiro 	int ret = 0;
13024c7a0f5fSNélio Laranjeiro 
13034c7a0f5fSNélio Laranjeiro 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
13044c7a0f5fSNélio Laranjeiro 		DEBUG("%p: Verbs indirection table %p still referenced",
1305*af4f09f2SNélio Laranjeiro 		      (void *)dev, (void *)ind_tbl);
13064c7a0f5fSNélio Laranjeiro 		++ret;
13074c7a0f5fSNélio Laranjeiro 	}
13084c7a0f5fSNélio Laranjeiro 	return ret;
13094c7a0f5fSNélio Laranjeiro }
1310f5479b68SNélio Laranjeiro 
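/*
 * hash_fields sketch (illustrative): the mask handed to mlx5_hrxq_new()
 * and mlx5_hrxq_get() below is a bitwise OR of IBV_RX_HASH_* flags from
 * <infiniband/verbs.h>, e.g. to spread TCPv4 flows:
 *
 *	uint64_t hash_fields = IBV_RX_HASH_SRC_IPV4 |
 *			       IBV_RX_HASH_DST_IPV4 |
 *			       IBV_RX_HASH_SRC_PORT_TCP |
 *			       IBV_RX_HASH_DST_PORT_TCP;
 *
 * A zero mask disables RSS entirely: both functions then shrink the
 * queue list to its first entry only.
 */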
1311f5479b68SNélio Laranjeiro /**
1312f5479b68SNélio Laranjeiro  * Create an Rx Hash queue.
1313f5479b68SNélio Laranjeiro  *
1314*af4f09f2SNélio Laranjeiro  * @param dev
1315*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
1316f5479b68SNélio Laranjeiro  * @param rss_key
1317f5479b68SNélio Laranjeiro  *   RSS key for the Rx hash queue.
1318f5479b68SNélio Laranjeiro  * @param rss_key_len
1319f5479b68SNélio Laranjeiro  *   RSS key length.
1320f5479b68SNélio Laranjeiro  * @param hash_fields
1321f5479b68SNélio Laranjeiro  *   Verbs protocol hash fields to apply RSS on.
1322f5479b68SNélio Laranjeiro  * @param queues
132331b3e2b8SShahaf Shuler  *   Queues entering the hash queue. In case of empty hash_fields only the
132431b3e2b8SShahaf Shuler  *   first queue index will be taken for the indirection table.
1325f5479b68SNélio Laranjeiro  * @param queues_n
1326f5479b68SNélio Laranjeiro  *   Number of queues.
1327f5479b68SNélio Laranjeiro  *
1328f5479b68SNélio Laranjeiro  * @return
1329f5479b68SNélio Laranjeiro  *   A hash Rx queue on success, NULL otherwise.
1330f5479b68SNélio Laranjeiro  */
1331f5479b68SNélio Laranjeiro struct mlx5_hrxq *
1332*af4f09f2SNélio Laranjeiro mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
1333f5479b68SNélio Laranjeiro 	      uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
1334f5479b68SNélio Laranjeiro {
1335*af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
1336f5479b68SNélio Laranjeiro 	struct mlx5_hrxq *hrxq;
1337f5479b68SNélio Laranjeiro 	struct mlx5_ind_table_ibv *ind_tbl;
1338f5479b68SNélio Laranjeiro 	struct ibv_qp *qp;
1339f5479b68SNélio Laranjeiro 
134031b3e2b8SShahaf Shuler 	queues_n = hash_fields ? queues_n : 1;
1341*af4f09f2SNélio Laranjeiro 	ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1342f5479b68SNélio Laranjeiro 	if (!ind_tbl)
1343*af4f09f2SNélio Laranjeiro 		ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
1344f5479b68SNélio Laranjeiro 	if (!ind_tbl)
1345f5479b68SNélio Laranjeiro 		return NULL;
13460e83b8e5SNelio Laranjeiro 	qp = mlx5_glue->create_qp_ex
13470e83b8e5SNelio Laranjeiro 		(priv->ctx,
1348f5479b68SNélio Laranjeiro 		 &(struct ibv_qp_init_attr_ex){
1349f5479b68SNélio Laranjeiro 			.qp_type = IBV_QPT_RAW_PACKET,
1350f5479b68SNélio Laranjeiro 			.comp_mask =
1351f5479b68SNélio Laranjeiro 				IBV_QP_INIT_ATTR_PD |
1352f5479b68SNélio Laranjeiro 				IBV_QP_INIT_ATTR_IND_TABLE |
1353f5479b68SNélio Laranjeiro 				IBV_QP_INIT_ATTR_RX_HASH,
1354f5479b68SNélio Laranjeiro 			.rx_hash_conf = (struct ibv_rx_hash_conf){
1355f5479b68SNélio Laranjeiro 				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
1356f5479b68SNélio Laranjeiro 				.rx_hash_key_len = rss_key_len,
1357f5479b68SNélio Laranjeiro 				.rx_hash_key = rss_key,
1358f5479b68SNélio Laranjeiro 				.rx_hash_fields_mask = hash_fields,
1359f5479b68SNélio Laranjeiro 			},
1360f5479b68SNélio Laranjeiro 			.rwq_ind_tbl = ind_tbl->ind_table,
1361f5479b68SNélio Laranjeiro 			.pd = priv->pd,
1362f5479b68SNélio Laranjeiro 		 });
1363f5479b68SNélio Laranjeiro 	if (!qp)
1364f5479b68SNélio Laranjeiro 		goto error;
1365f5479b68SNélio Laranjeiro 	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
1366f5479b68SNélio Laranjeiro 	if (!hrxq)
1367f5479b68SNélio Laranjeiro 		goto error;
1368f5479b68SNélio Laranjeiro 	hrxq->ind_table = ind_tbl;
1369f5479b68SNélio Laranjeiro 	hrxq->qp = qp;
1370f5479b68SNélio Laranjeiro 	hrxq->rss_key_len = rss_key_len;
1371f5479b68SNélio Laranjeiro 	hrxq->hash_fields = hash_fields;
1372f5479b68SNélio Laranjeiro 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
1373f5479b68SNélio Laranjeiro 	rte_atomic32_inc(&hrxq->refcnt);
1374f5479b68SNélio Laranjeiro 	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
1375*af4f09f2SNélio Laranjeiro 	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev,
1376f5479b68SNélio Laranjeiro 	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
1377f5479b68SNélio Laranjeiro 	return hrxq;
1378f5479b68SNélio Laranjeiro error:
1379*af4f09f2SNélio Laranjeiro 	mlx5_ind_table_ibv_release(dev, ind_tbl);
1380f5479b68SNélio Laranjeiro 	if (qp)
13810e83b8e5SNelio Laranjeiro 		claim_zero(mlx5_glue->destroy_qp(qp));
1382f5479b68SNélio Laranjeiro 	return NULL;
1383f5479b68SNélio Laranjeiro }
1384f5479b68SNélio Laranjeiro 
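/*
 * Typical get-or-create sequence (illustrative, hypothetical caller
 * such as the flow engine): look up a matching hash Rx queue first and
 * only create a new one on a miss, here using the default RSS key
 * defined at the top of this file:
 *
 *	struct mlx5_hrxq *hrxq;
 *
 *	hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
 *			     rss_hash_default_key_len, hash_fields,
 *			     queues, queues_n);
 *	if (!hrxq)
 *		hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
 *				     rss_hash_default_key_len, hash_fields,
 *				     queues, queues_n);
 *	if (!hrxq)
 *		return ENOMEM;
 *
 * Each successful call takes one reference that a later
 * mlx5_hrxq_release() must drop.
 */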
1385f5479b68SNélio Laranjeiro /**
1386f5479b68SNélio Laranjeiro  * Get an Rx Hash queue.
1387f5479b68SNélio Laranjeiro  *
1388*af4f09f2SNélio Laranjeiro  * @param dev
1389*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
1390f5479b68SNélio Laranjeiro  * @param rss_key
1391f5479b68SNélio Laranjeiro  *   RSS key (rss_key_len bytes) hashed over the hash_fields mask.
1392f5479b68SNélio Laranjeiro  * @param queues
139331b3e2b8SShahaf Shuler  *   Queues entering the hash queue. In case of empty hash_fields only the
139431b3e2b8SShahaf Shuler  *   first queue index will be taken for the indirection table.
1395f5479b68SNélio Laranjeiro  * @param queues_n
1396f5479b68SNélio Laranjeiro  *   Number of queues.
1397f5479b68SNélio Laranjeiro  *
1398f5479b68SNélio Laranjeiro  * @return
1399f5479b68SNélio Laranjeiro  *   A hash Rx queue on success, NULL if none matches.
1400f5479b68SNélio Laranjeiro  */
1401f5479b68SNélio Laranjeiro struct mlx5_hrxq *
1402*af4f09f2SNélio Laranjeiro mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
1403f5479b68SNélio Laranjeiro 	      uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
1404f5479b68SNélio Laranjeiro {
1405*af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
1406f5479b68SNélio Laranjeiro 	struct mlx5_hrxq *hrxq;
1407f5479b68SNélio Laranjeiro 
140831b3e2b8SShahaf Shuler 	queues_n = hash_fields ? queues_n : 1;
1409f5479b68SNélio Laranjeiro 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1410f5479b68SNélio Laranjeiro 		struct mlx5_ind_table_ibv *ind_tbl;
1411f5479b68SNélio Laranjeiro 
1412f5479b68SNélio Laranjeiro 		if (hrxq->rss_key_len != rss_key_len)
1413f5479b68SNélio Laranjeiro 			continue;
1414f5479b68SNélio Laranjeiro 		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
1415f5479b68SNélio Laranjeiro 			continue;
1416f5479b68SNélio Laranjeiro 		if (hrxq->hash_fields != hash_fields)
1417f5479b68SNélio Laranjeiro 			continue;
1418*af4f09f2SNélio Laranjeiro 		ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
1419f5479b68SNélio Laranjeiro 		if (!ind_tbl)
1420f5479b68SNélio Laranjeiro 			continue;
1421f5479b68SNélio Laranjeiro 		if (ind_tbl != hrxq->ind_table) {
1422*af4f09f2SNélio Laranjeiro 			mlx5_ind_table_ibv_release(dev, ind_tbl);
1423f5479b68SNélio Laranjeiro 			continue;
1424f5479b68SNélio Laranjeiro 		}
1425f5479b68SNélio Laranjeiro 		rte_atomic32_inc(&hrxq->refcnt);
1426*af4f09f2SNélio Laranjeiro 		DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev,
1427f5479b68SNélio Laranjeiro 		      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
1428f5479b68SNélio Laranjeiro 		return hrxq;
1429f5479b68SNélio Laranjeiro 	}
1430f5479b68SNélio Laranjeiro 	return NULL;
1431f5479b68SNélio Laranjeiro }
1432f5479b68SNélio Laranjeiro 
1433f5479b68SNélio Laranjeiro /**
1434f5479b68SNélio Laranjeiro  * Release the hash Rx queue.
1435f5479b68SNélio Laranjeiro  *
1436*af4f09f2SNélio Laranjeiro  * @param dev
1437*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
1438f5479b68SNélio Laranjeiro  * @param hrxq
1439f5479b68SNélio Laranjeiro  *   Pointer to Hash Rx queue to release.
1440f5479b68SNélio Laranjeiro  *
1441f5479b68SNélio Laranjeiro  * @return
1442f5479b68SNélio Laranjeiro  *   0 on success, errno value on failure.
1443f5479b68SNélio Laranjeiro  */
1444f5479b68SNélio Laranjeiro int
1445*af4f09f2SNélio Laranjeiro mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
1446f5479b68SNélio Laranjeiro {
1447*af4f09f2SNélio Laranjeiro 	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev,
1448f5479b68SNélio Laranjeiro 	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
1449f5479b68SNélio Laranjeiro 	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
14500e83b8e5SNelio Laranjeiro 		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
1451*af4f09f2SNélio Laranjeiro 		mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
1452f5479b68SNélio Laranjeiro 		LIST_REMOVE(hrxq, next);
1453f5479b68SNélio Laranjeiro 		rte_free(hrxq);
1454f5479b68SNélio Laranjeiro 		return 0;
1455f5479b68SNélio Laranjeiro 	}
1456*af4f09f2SNélio Laranjeiro 	claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
1457f5479b68SNélio Laranjeiro 	return EBUSY;
1458f5479b68SNélio Laranjeiro }
1459f5479b68SNélio Laranjeiro 
1460f5479b68SNélio Laranjeiro /**
1461f5479b68SNélio Laranjeiro  * Verify the Verbs Hash Rx queue list is empty.
1462f5479b68SNélio Laranjeiro  *
1463*af4f09f2SNélio Laranjeiro  * @param dev
1464*af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
1465f5479b68SNélio Laranjeiro  *
1466fb732b0aSNélio Laranjeiro  * @return
1467fb732b0aSNélio Laranjeiro  *   The number of objects not released.
1468f5479b68SNélio Laranjeiro  */
1469f5479b68SNélio Laranjeiro int
1470*af4f09f2SNélio Laranjeiro mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
1471f5479b68SNélio Laranjeiro {
1472*af4f09f2SNélio Laranjeiro 	struct priv *priv = dev->data->dev_private;
1473f5479b68SNélio Laranjeiro 	struct mlx5_hrxq *hrxq;
1474f5479b68SNélio Laranjeiro 	int ret = 0;
1475f5479b68SNélio Laranjeiro 
1476f5479b68SNélio Laranjeiro 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
1477f5479b68SNélio Laranjeiro 		DEBUG("%p: Verbs Hash Rx queue %p still referenced",
1478*af4f09f2SNélio Laranjeiro 		      (void *)dev, (void *)hrxq);
1479f5479b68SNélio Laranjeiro 		++ret;
1480f5479b68SNélio Laranjeiro 	}
1481f5479b68SNélio Laranjeiro 	return ret;
1482f5479b68SNélio Laranjeiro }
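/*
 * Teardown sketch (illustrative, hypothetical caller): releasing a hash
 * Rx queue unwinds the chain built above, first the hrxq, then its
 * indirection table, then the Rx queues it spans, each level dropping
 * one reference.  On device close the verify helpers report leaks:
 *
 *	mlx5_hrxq_release(dev, hrxq);
 *	if (mlx5_hrxq_ibv_verify(dev))
 *		WARN("%p: some Verbs Hash Rx queues still remain",
 *		     (void *)dev);
 *	if (mlx5_ind_table_ibv_verify(dev))
 *		WARN("%p: some indirection tables still remain",
 *		     (void *)dev);
 */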