/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_vect.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

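/*
 * The architecture-specific header pulled in below provides the vectorized
 * rxq_burst_v() routine used by mlx5_rx_burst_vec().
 */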
#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#elif defined RTE_ARCH_ARM64
#include "mlx5_rxtx_vec_neon.h"
#elif defined RTE_ARCH_PPC_64
#include "mlx5_rxtx_vec_altivec.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif

/**
 * Skip error packets.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	uint16_t n = 0;
	unsigned int i;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t err_bytes = 0;
#endif

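	/*
	 * Packets from errored CQEs are flagged with
	 * packet_type == RTE_PTYPE_ALL_MASK; drop them (and every packet
	 * when the queue is in error state) while compacting the good
	 * packets to the front of the array.
	 */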
	for (i = 0; i < pkts_n; ++i) {
		struct rte_mbuf *pkt = pkts[i];

		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
#ifdef MLX5_PMD_SOFT_COUNTERS
			err_bytes += PKT_LEN(pkt);
#endif
			rte_pktmbuf_free_seg(pkt);
		} else {
			pkts[n++] = pkt;
		}
	}
	rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Correct counters of errored completions. */
	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
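	/* Let the PMD process the errored CQEs and recover the queue. */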
	mlx5_rx_err_handle(rxq, 1);
	return n;
}

/**
 * DPDK callback for vectorized RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx = 0;
	uint16_t tn = 0;
	uint64_t err = 0;
	bool no_cq = false;

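	/*
	 * Invoke the vectorized burst repeatedly until the requested number
	 * of packets is gathered or the completion queue is exhausted
	 * (no_cq); errored completions are filtered out on the way.
	 */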
	do {
		nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn, &err, &no_cq);
		if (unlikely(err | rxq->err_state))
			nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
		tn += nb_rx;
		if (unlikely(no_cq))
			break;
	} while (tn != pkts_n);
	return tn;
}
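
/*
 * Illustrative sketch, not part of the driver: when the support checks
 * below succeed, the PMD is expected to install mlx5_rx_burst_vec() as the
 * device RX burst callback, so an application reaches it through the
 * regular ethdev API, e.g.:
 *
 *	struct rte_mbuf *bufs[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 *
 * The actual burst-function selection is done elsewhere in the PMD.
 */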

/**
 * Check whether an RX queue can support vectorized RX.
 *
 * @param rxq
 *   Pointer to RX queue.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __rte_cold
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

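	/*
	 * Vectorized RX must be enabled in the device configuration
	 * (rx_vec_en) and cannot be combined with MPRQ, scattered RX
	 * (sges_n != 0) or LRO.
	 */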
	if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
		return -ENOTSUP;
	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	if (rxq->lro)
		return -ENOTSUP;
	return 1;
}

/**
 * Check whether a device can support vectorized RX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __rte_cold
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

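	/*
	 * The runtime-selected SIMD width must be at least 128 bits for any
	 * of the vectorized RX paths (SSE, NEON, Altivec) to be usable.
	 */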
	if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
		return -ENOTSUP;
	if (!priv->config.rx_vec_en)
		return -ENOTSUP;
	if (mlx5_mprq_enabled(dev))
		return -ENOTSUP;
	/* All the configured queues must support vectorized RX. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];

		if (!rxq)
			continue;
		if (mlx5_rxq_check_vec_support(rxq) < 0)
			break;
	}
	if (i != priv->rxqs_n)
		return -ENOTSUP;
	return 1;
}