/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_vect.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#elif defined RTE_ARCH_ARM64
#include "mlx5_rxtx_vec_neon.h"
#elif defined RTE_ARCH_PPC_64
#include "mlx5_rxtx_vec_altivec.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif

/**
 * Skip error packets.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	uint16_t n = 0;
	uint16_t skip_cnt;
	unsigned int i;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t err_bytes = 0;
#endif

	for (i = 0; i < pkts_n; ++i) {
		struct rte_mbuf *pkt = pkts[i];

		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
#ifdef MLX5_PMD_SOFT_COUNTERS
			err_bytes += PKT_LEN(pkt);
#endif
			rte_pktmbuf_free_seg(pkt);
		} else {
			pkts[n++] = pkt;
		}
	}
	rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Correct counters of errored completions. */
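	/*
	 * The errored mbufs were already counted as received by the
	 * vectorized burst routine before the error was detected, so roll
	 * the soft packet and byte counters back here.
	 */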
	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
	mlx5_rx_err_handle(rxq, 1, pkts_n, &skip_cnt);
	return n;
}

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t n = q_n - (rxq->rq_ci - rxq->rq_pi);
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq =
		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
	unsigned int i;

	if (n >= rxq->rq_repl_thresh) {
		MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
		MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) >
			    MLX5_VPMD_DESCS_PER_LOOP);
		/* Not to cross queue end. */
		n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
		if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
			rxq->stats.rx_nombuf += n;
			return;
		}
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) {
			for (i = 0; i < n; ++i) {
				/*
				 * In order to support the mbufs with external attached
				 * data buffer we should use the buf_addr pointer
				 * instead of rte_mbuf_buf_addr(). It touches the mbuf
				 * itself and may impact the performance.
				 */
				void *buf_addr = elts[i]->buf_addr;

				wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
							      RTE_PKTMBUF_HEADROOM);
				wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
			}
		} else {
			for (i = 0; i < n; ++i) {
				void *buf_addr = elts[i]->buf_addr;

				wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
							      RTE_PKTMBUF_HEADROOM);
			}
		}
		rxq->rq_ci += n;
		/* Prevent overflowing into consumed mbufs. */
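		/*
		 * The vectorized burst copies mbuf pointers in groups of
		 * MLX5_VPMD_DESCS_PER_LOOP and may read a few ring slots past
		 * the last replenished descriptor; pointing those slots at the
		 * static fake_mbuf keeps such reads away from mbufs already
		 * passed to the application.
		 */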
		elts_idx = rxq->rq_ci & q_mask;
		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
			(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
		rte_io_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	}
}

/**
 * Replenish buffers for MPRQ RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static inline void
mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
{
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	const uint32_t elts_n = wqe_n * strd_n;
	const uint32_t wqe_mask = elts_n - 1;
	uint32_t n = elts_n - (rxq->elts_ci - rxq->rq_pi);
	uint32_t elts_idx = rxq->elts_ci & wqe_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	unsigned int i;

	if (n >= rxq->rq_repl_thresh &&
	    rxq->elts_ci - rxq->rq_pi <=
	    rxq->rq_repl_thresh + MLX5_VPMD_RX_MAX_BURST) {
		MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(elts_n));
		MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(elts_n) >
			    MLX5_VPMD_DESCS_PER_LOOP);
		/* Not to cross queue end. */
		n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, elts_n - elts_idx);
		/* Limit replenish number to threshold value. */
		n = RTE_MIN(n, rxq->rq_repl_thresh);
		if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
			rxq->stats.rx_nombuf += n;
			return;
		}
		rxq->elts_ci += n;
		/* Prevent overflowing into consumed mbufs. */
		elts_idx = rxq->elts_ci & wqe_mask;
		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
			(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
	}
}

/**
 * Copy or attach MPRQ buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param pkts_n
 *   Number of packets to be stored.
 *
 * @return
 *   Number of packets successfully copied/attached (<= pkts_n).
 */
static inline uint16_t
rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
		     struct rte_mbuf **pkts, uint16_t pkts_n)
{
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint16_t wqe_mask = wqe_n - 1;
	const uint16_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	const uint32_t elts_n = wqe_n * strd_n;
	const uint32_t elts_mask = elts_n - 1;
	uint32_t elts_idx = rxq->rq_pi & elts_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	uint32_t rq_ci = rxq->rq_ci;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wqe_mask];
	uint16_t copied = 0;
	uint16_t i = 0;

	for (i = 0; i < pkts_n; ++i) {
		uint16_t strd_cnt;
		enum mlx5_rqx_code rxq_code;

		if (rxq->consumed_strd == strd_n) {
			/* Replace WQE if the buffer is still in use. */
			mprq_buf_replace(rxq, rq_ci & wqe_mask);
			/* Advance to the next WQE. */
			rxq->consumed_strd = 0;
			rq_ci++;
			buf = (*rxq->mprq_bufs)[rq_ci & wqe_mask];
		}

		if (!elts[i]->pkt_len) {
			rxq->consumed_strd = strd_n;
			rte_pktmbuf_free_seg(elts[i]);
#ifdef MLX5_PMD_SOFT_COUNTERS
			rxq->stats.ipackets -= 1;
#endif
			continue;
		}
		strd_cnt = (elts[i]->pkt_len / strd_sz) +
			   ((elts[i]->pkt_len % strd_sz) ? 1 : 0);
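		/*
		 * mprq_buf_to_pkt() either copies the stride data into the
		 * mbuf or attaches the MPRQ buffer as an external buffer,
		 * normally depending on the packet length versus the
		 * configured memcpy threshold.
		 */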
		rxq_code = mprq_buf_to_pkt(rxq, elts[i], elts[i]->pkt_len,
					   buf, rxq->consumed_strd, strd_cnt);
		rxq->consumed_strd += strd_cnt;
		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
			rte_pktmbuf_free_seg(elts[i]);
#ifdef MLX5_PMD_SOFT_COUNTERS
			rxq->stats.ipackets -= 1;
			rxq->stats.ibytes -= elts[i]->pkt_len;
#endif
			if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
				++rxq->stats.rx_nombuf;
				break;
			}
			if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
				++rxq->stats.idropped;
				continue;
			}
		}
		pkts[copied++] = elts[i];
	}
	rxq->rq_pi += i;
	rxq->cq_ci += i;
	if (rq_ci != rxq->rq_ci) {
		rxq->rq_ci = rq_ci;
		rte_io_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	}
	return copied;
}

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but the
 * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set non-zero value if pkts array has at least one error
 *   packet to handle.
 * @param[out] no_cq
 *   Pointer to a boolean. Set true if no new CQE seen.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
	    uint16_t pkts_n, uint64_t *err, bool *no_cq)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	const uint16_t e_n = 1 << rxq->elts_n;
	const uint16_t e_mask = e_n - 1;
	volatile struct mlx5_cqe *cq, *next;
	struct rte_mbuf **elts;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	int ret;

	MLX5_ASSERT(rxq->sges_n == 0);
	MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	mlx5_rx_replenish_bulk_mbuf(rxq);
	/* See if there're unreturned mbufs from compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(&(*rxq->elts)[rxq->rq_pi & e_mask],
				pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & e_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	/* Not to move past the allocated mbufs. */
	pkts_n = RTE_MIN(pkts_n, RTE_ALIGN_FLOOR(rxq->rq_ci - rxq->rq_pi,
						 MLX5_VPMD_DESCS_PER_LOOP));
	if (!pkts_n) {
		*no_cq = !rcvd_pkt;
		return rcvd_pkt;
	}
	/* At this point, there shouldn't be any remaining packets. */
	MLX5_ASSERT(rxq->decompressed == 0);
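	/*
	 * comp_idx marks where a compressed CQE was met in the processed
	 * batch; it stays at MLX5_VPMD_DESCS_PER_LOOP when none was found.
	 */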
	/* Go directly to unzipping in case the first CQE is compressed. */
	if (rxq->cqe_comp_layout) {
		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_SW_OWN &&
		    (MLX5_CQE_FORMAT(cq->op_own) == MLX5_COMPRESSED)) {
			comp_idx = 0;
			goto decompress;
		}
	}
	/* Process all the CQEs */
	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
		*no_cq = true;
		return rcvd_pkt;
	}
	/* Update the consumer indexes for non-compressed CQEs. */
	MLX5_ASSERT(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
	/* Copy title packet for future compressed sessions. */
	if (rxq->cqe_comp_layout) {
		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_SW_OWN &&
		    (MLX5_CQE_FORMAT(cq->op_own) != MLX5_COMPRESSED)) {
			next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
			ret = check_cqe_iteration(next, rxq->cqe_n, rxq->cq_ci);
			if (MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED ||
			    ret != MLX5_CQE_STATUS_SW_OWN)
				rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
					   sizeof(struct rte_mbuf));
		}
	}
decompress:
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
							&elts[nocmp_n], true);
		rxq->cq_ci += rxq->decompressed;
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(&(*rxq->elts)[rxq->rq_pi & e_mask],
					&pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
	*no_cq = !rcvd_pkt;
	return rcvd_pkt;
}

/**
 * DPDK callback for vectorized RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx = 0;
	uint16_t tn = 0;
	uint64_t err = 0;
	bool no_cq = false;

	do {
		err = 0;
		nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn,
				    &err, &no_cq);
		if (unlikely(err | rxq->err_state))
			nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
		tn += nb_rx;
		if (unlikely(no_cq))
			break;
		rte_io_wmb();
		*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	} while (tn != pkts_n);
	return tn;
}

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but the
 * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set non-zero value if pkts array has at least one error
 *   packet to handle.
 * @param[out] no_cq
 *   Pointer to a boolean. Set true if no new CQE seen.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
		 uint16_t pkts_n, uint64_t *err, bool *no_cq)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	const uint32_t elts_n = wqe_n * strd_n;
	const uint32_t elts_mask = elts_n - 1;
	volatile struct mlx5_cqe *cq, *next;
	struct rte_mbuf **elts;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	uint16_t cp_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	int ret;

	MLX5_ASSERT(rxq->sges_n == 0);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	mlx5_rx_mprq_replenish_bulk_mbuf(rxq);
	/* Not to move past the allocated mbufs. */
	pkts_n = RTE_MIN(pkts_n, rxq->elts_ci - rxq->rq_pi);
	/* See if there're unreturned mbufs from compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		cp_pkt = rxq_copy_mprq_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->decompressed -= rcvd_pkt;
		pkts += cp_pkt;
	}
	elts_idx = rxq->rq_pi & elts_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - cp_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, elts_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n) {
		*no_cq = !cp_pkt;
		return cp_pkt;
	}
	/* At this point, there shouldn't be any remaining packets. */
	MLX5_ASSERT(rxq->decompressed == 0);
	/* Go directly to unzipping in case the first CQE is compressed. */
	if (rxq->cqe_comp_layout) {
		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_SW_OWN &&
		    (MLX5_CQE_FORMAT(cq->op_own) == MLX5_COMPRESSED)) {
			comp_idx = 0;
			goto decompress;
		}
	}
	/* Process all the CQEs */
	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
		*no_cq = true;
		return cp_pkt;
	}
	/* Update the consumer indexes for non-compressed CQEs. */
	MLX5_ASSERT(nocmp_n <= pkts_n);
	cp_pkt = rxq_copy_mprq_mbuf_v(rxq, pkts, nocmp_n);
	rcvd_pkt += cp_pkt;
	/* Copy title packet for future compressed sessions. */
	if (rxq->cqe_comp_layout) {
		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_SW_OWN &&
		    (MLX5_CQE_FORMAT(cq->op_own) != MLX5_COMPRESSED)) {
			next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
			ret = check_cqe_iteration(next, rxq->cqe_n, rxq->cq_ci);
			if (MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED ||
			    ret != MLX5_CQE_STATUS_SW_OWN)
				rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
					   sizeof(struct rte_mbuf));
		}
	}
decompress:
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
							&elts[nocmp_n], false);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			cp_pkt = rxq_copy_mprq_mbuf_v(rxq, &pkts[cp_pkt], n);
			rcvd_pkt += cp_pkt;
			rxq->decompressed -= n;
		}
	}
	*no_cq = !rcvd_pkt;
	return rcvd_pkt;
}

/**
 * DPDK callback for vectorized MPRQ RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx = 0;
	uint16_t tn = 0;
	uint64_t err = 0;
	bool no_cq = false;

	do {
		err = 0;
		nb_rx = rxq_burst_mprq_v(rxq, pkts + tn, pkts_n - tn,
					 &err, &no_cq);
		if (unlikely(err | rxq->err_state))
			nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
		tn += nb_rx;
		if (unlikely(no_cq))
			break;
		rte_io_wmb();
		*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	} while (tn != pkts_n);
	return tn;
}

/**
 * Check whether an RX queue can support vectorized RX.
 *
 * @param rxq
 *   Pointer to RX queue.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __rte_cold
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

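	/*
	 * Scattered RX (sges_n != 0) and LRO are left to the regular
	 * (non-vectorized) datapath, so either one disables the vectorized
	 * burst for this queue.
	 */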
	if (!RXQ_PORT(ctrl)->config.rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	if (rxq->lro)
		return -ENOTSUP;
	return 1;
}

/**
 * Check whether a device can support vectorized RX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __rte_cold
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
		return -ENOTSUP;
	if (!priv->config.rx_vec_en)
		return -ENOTSUP;
	/* All the configured queues should support. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq_data = mlx5_rxq_data_get(dev, i);

		if (!rxq_data)
			continue;
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			break;
	}
	if (i != priv->rxqs_n)
		return -ENOTSUP;
	return 1;
}