/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#elif defined RTE_ARCH_ARM64
#include "mlx5_rxtx_vec_neon.h"
#elif defined RTE_ARCH_PPC_64
#include "mlx5_rxtx_vec_altivec.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif

/**
 * Skip error packets.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	uint16_t n = 0;
	unsigned int i;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t err_bytes = 0;
#endif

	for (i = 0; i < pkts_n; ++i) {
		struct rte_mbuf *pkt = pkts[i];

		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
#ifdef MLX5_PMD_SOFT_COUNTERS
			err_bytes += PKT_LEN(pkt);
#endif
			rte_pktmbuf_free_seg(pkt);
		} else {
			pkts[n++] = pkt;
		}
	}
	rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Correct counters of errored completions. */
	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
	mlx5_rx_err_handle(rxq, 1);
	return n;
}

/**
 * DPDK callback for vectorized RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx = 0;
	uint16_t tn = 0;
	uint64_t err = 0;
	bool no_cq = false;

	do {
		nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn,
				    &err, &no_cq);
		if (unlikely(err | rxq->err_state))
			nb_rx = rxq_handle_pending_error(rxq, pkts + tn,
							 nb_rx);
		tn += nb_rx;
		if (unlikely(no_cq))
			break;
	} while (tn != pkts_n);
	return tn;
}

/**
 * Check whether an RX queue can support vectorized RX.
 *
 * @param rxq
 *   Pointer to RX queue.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __rte_cold
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
		return -ENOTSUP;
	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	if (rxq->lro)
		return -ENOTSUP;
	return 1;
}

/**
 * Check whether a device can support vectorized RX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __rte_cold
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	if (!priv->config.rx_vec_en)
		return -ENOTSUP;
	if (mlx5_mprq_enabled(dev))
		return -ENOTSUP;
	/* All the configured queues should support vectorized RX. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

		if (!rxq)
			continue;
		if (mlx5_rxq_check_vec_support(rxq) < 0)
			break;
	}
	if (i != priv->rxqs_n)
		return -ENOTSUP;
	return 1;
}
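
/*
 * Illustration only, not part of the upstream driver: a minimal sketch of
 * how the checks above are typically consumed.  The PMD's data-path
 * selector is expected to call mlx5_check_vec_rx_support() once at
 * configuration time and install mlx5_rx_burst_vec() as the RX burst
 * callback only when every configured queue passes the per-queue check,
 * falling back to the scalar mlx5_rx_burst() otherwise.  The helper name
 * below is hypothetical and the function is unused in this file.
 */
static __rte_unused eth_rx_burst_t
example_select_rx_function(struct rte_eth_dev *dev)
{
	/* Prefer the vectorized burst function when all RX queues allow it. */
	if (mlx5_check_vec_rx_support(dev) > 0)
		return mlx5_rx_burst_vec;
	/* Otherwise keep the scalar burst function. */
	return mlx5_rx_burst;
}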