/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_vect.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#elif defined RTE_ARCH_ARM64
#include "mlx5_rxtx_vec_neon.h"
#elif defined RTE_ARCH_PPC_64
#include "mlx5_rxtx_vec_altivec.h"
#else
#error "This should not be compiled if SIMD instructions are not supported."
#endif

/**
 * Skip error packets.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
static uint16_t
rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
			 uint16_t pkts_n)
{
	uint16_t n = 0;
	unsigned int i;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t err_bytes = 0;
#endif

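	/*
	 * Errored packets were tagged with packet_type == RTE_PTYPE_ALL_MASK
	 * by the vectorized burst routine; free those and compact the good
	 * mbufs to the front of the array.
	 */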
	for (i = 0; i < pkts_n; ++i) {
		struct rte_mbuf *pkt = pkts[i];

		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
#ifdef MLX5_PMD_SOFT_COUNTERS
			err_bytes += PKT_LEN(pkt);
#endif
			rte_pktmbuf_free_seg(pkt);
		} else {
			pkts[n++] = pkt;
		}
	}
	rxq->stats.idropped += (pkts_n - n);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Correct counters of errored completions. */
	rxq->stats.ipackets -= (pkts_n - n);
	rxq->stats.ibytes -= err_bytes;
#endif
	mlx5_rx_err_handle(rxq, 1);
	return n;
}

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t n = q_n - (rxq->rq_ci - rxq->rq_pi);
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq =
		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
	unsigned int i;

	if (n >= rxq->rq_repl_thresh) {
		MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
		MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) >
			    MLX5_VPMD_DESCS_PER_LOOP);
		/* Not to cross queue end. */
		n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
		if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
			rxq->stats.rx_nombuf += n;
			return;
		}
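		/*
		 * When more than one memory region is cached for this queue,
		 * each mbuf may need its own lkey lookup. With a single MR
		 * the lkey already programmed in the WQEs is presumed to
		 * stay valid, so only the buffer addresses are refreshed.
		 */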
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) {
			for (i = 0; i < n; ++i) {
				/*
				 * To support mbufs with externally attached
				 * data buffers, use the buf_addr pointer
				 * instead of rte_mbuf_buf_addr(). This
				 * touches the mbuf itself and may impact
				 * performance.
				 */
				void *buf_addr = elts[i]->buf_addr;

				wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
							      RTE_PKTMBUF_HEADROOM);
				wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
			}
		} else {
			for (i = 0; i < n; ++i) {
				void *buf_addr = elts[i]->buf_addr;

				wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
							      RTE_PKTMBUF_HEADROOM);
			}
		}
		rxq->rq_ci += n;
		/* Prevent overflowing into consumed mbufs. */
		elts_idx = rxq->rq_ci & q_mask;
		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
			(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
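		/*
		 * Make sure HW sees the new WQEs before the doorbell record
		 * is updated.
		 */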
		rte_io_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	}
}

/**
 * Replenish buffers for MPRQ RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 */
static inline void
mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
{
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	const uint32_t elts_n = wqe_n * strd_n;
	const uint32_t wqe_mask = elts_n - 1;
	uint32_t n = elts_n - (rxq->elts_ci - rxq->rq_pi);
	uint32_t elts_idx = rxq->elts_ci & wqe_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	unsigned int i;

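	/*
	 * Replenish only when enough slots have been freed and the SW ring
	 * has not run too far ahead of the consumer; the upper bound keeps
	 * the number of allocated-but-unconsumed mbufs in check.
	 */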
	if (n >= rxq->rq_repl_thresh &&
	    rxq->elts_ci - rxq->rq_pi <=
	    rxq->rq_repl_thresh + MLX5_VPMD_RX_MAX_BURST) {
		MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(elts_n));
		MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(elts_n) >
			     MLX5_VPMD_DESCS_PER_LOOP);
		/* Not to cross queue end. */
		n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, elts_n - elts_idx);
		/* Limit replenish number to threshold value. */
		n = RTE_MIN(n, rxq->rq_repl_thresh);
		if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
			rxq->stats.rx_nombuf += n;
			return;
		}
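		/*
		 * Only the SW ring is replenished here; the MPRQ HW buffers
		 * are replaced in the datapath by mprq_buf_replace(), so no
		 * doorbell is rung at this point.
		 */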
		rxq->elts_ci += n;
		/* Prevent overflowing into consumed mbufs. */
		elts_idx = rxq->elts_ci & wqe_mask;
		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
			(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
	}
}

/**
 * Copy or attach MPRQ buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param pkts_n
 *   Number of packets to be stored.
 *
 * @return
 *   Number of packets successfully copied/attached (<= pkts_n).
 */
static inline uint16_t
rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
		     struct rte_mbuf **pkts, uint16_t pkts_n)
{
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint16_t wqe_mask = wqe_n - 1;
	const uint16_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	const uint32_t elts_n = wqe_n * strd_n;
	const uint32_t elts_mask = elts_n - 1;
	uint32_t elts_idx = rxq->rq_pi & elts_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	uint32_t rq_ci = rxq->rq_ci;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wqe_mask];
	uint16_t copied = 0;
	uint16_t i = 0;

	for (i = 0; i < pkts_n; ++i) {
		uint16_t strd_cnt;
		enum mlx5_rqx_code rxq_code;

		if (rxq->consumed_strd == strd_n) {
			/* Replace WQE if the buffer is still in use. */
			mprq_buf_replace(rxq, rq_ci & wqe_mask);
			/* Advance to the next WQE. */
			rxq->consumed_strd = 0;
			rq_ci++;
			buf = (*rxq->mprq_bufs)[rq_ci & wqe_mask];
		}

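		/*
		 * A zero packet length marks a filler descriptor: HW consumed
		 * the remaining strides of the current buffer without
		 * delivering a packet.
		 */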
		if (!elts[i]->pkt_len) {
			rxq->consumed_strd = strd_n;
			rte_pktmbuf_free_seg(elts[i]);
#ifdef MLX5_PMD_SOFT_COUNTERS
			rxq->stats.ipackets -= 1;
#endif
			continue;
		}
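		/* Number of strides occupied by the packet, rounded up. */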
		strd_cnt = (elts[i]->pkt_len / strd_sz) +
			   ((elts[i]->pkt_len % strd_sz) ? 1 : 0);
		rxq_code = mprq_buf_to_pkt(rxq, elts[i], elts[i]->pkt_len,
					   buf, rxq->consumed_strd, strd_cnt);
		rxq->consumed_strd += strd_cnt;
		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
			rte_pktmbuf_free_seg(elts[i]);
#ifdef MLX5_PMD_SOFT_COUNTERS
			rxq->stats.ipackets -= 1;
			rxq->stats.ibytes -= elts[i]->pkt_len;
#endif
			if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
				++rxq->stats.rx_nombuf;
				break;
			}
			if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
				++rxq->stats.idropped;
				continue;
			}
		}
		pkts[copied++] = elts[i];
	}
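	/*
	 * Ack the consumed CQEs and, if a new WQE was opened, ring the RQ
	 * doorbell as well.
	 */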
	rxq->rq_pi += i;
	rxq->cq_ci += i;
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	if (rq_ci != rxq->rq_ci) {
		rxq->rq_ci = rq_ci;
		rte_io_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	}
	return copied;
}

/**
 * Receive a burst of packets. An errored completion also consumes an mbuf,
 * but its packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be
 * freed before returning to the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set to a non-zero value if the pkts array contains
 *   at least one error packet to handle.
 * @param[out] no_cq
 *   Pointer to a boolean. Set to true if no new CQE is seen.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
	    uint16_t pkts_n, uint64_t *err, bool *no_cq)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	const uint16_t e_n = 1 << rxq->elts_n;
	const uint16_t e_mask = e_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;

	MLX5_ASSERT(rxq->sges_n == 0);
	MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
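	/* Prefetch the CQEs for the first pass of the vectorized loop. */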
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	mlx5_rx_replenish_bulk_mbuf(rxq);
	/* Check for unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(&(*rxq->elts)[rxq->rq_pi & e_mask],
				pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & e_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n) {
		*no_cq = !rcvd_pkt;
		return rcvd_pkt;
	}
	/* At this point, there shouldn't be any remaining packets. */
	MLX5_ASSERT(rxq->decompressed == 0);
	/* Process all the CQEs */
	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
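	/*
	 * nocmp_n is the number of regular CQEs handled; comp_idx points at a
	 * compressed CQE if one stopped the scan early.
	 */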
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
		*no_cq = true;
		return rcvd_pkt;
	}
	/* Update the consumer indexes for non-compressed CQEs. */
	MLX5_ASSERT(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
							&elts[nocmp_n]);
		rxq->cq_ci += rxq->decompressed;
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(&(*rxq->elts)[rxq->rq_pi & e_mask],
					&pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	*no_cq = !rcvd_pkt;
	return rcvd_pkt;
}

/**
 * DPDK callback for vectorized RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx = 0;
	uint16_t tn = 0;
	uint64_t err = 0;
	bool no_cq = false;

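	/*
	 * Keep invoking the burst routine until the request is satisfied or
	 * the CQ drains; errored completions are compacted out of the burst
	 * before continuing.
	 */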
	do {
		nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn,
				    &err, &no_cq);
		if (unlikely(err | rxq->err_state))
			nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
		tn += nb_rx;
		if (unlikely(no_cq))
			break;
	} while (tn != pkts_n);
	return tn;
}

/**
 * Receive a burst of packets. An errored completion also consumes an mbuf,
 * but its packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be
 * freed before returning to the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set to a non-zero value if the pkts array contains
 *   at least one error packet to handle.
 * @param[out] no_cq
 *   Pointer to a boolean. Set to true if no new CQE is seen.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
		 uint16_t pkts_n, uint64_t *err, bool *no_cq)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	const uint32_t elts_n = wqe_n * strd_n;
	const uint32_t elts_mask = elts_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	uint16_t cp_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;

	MLX5_ASSERT(rxq->sges_n == 0);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	mlx5_rx_mprq_replenish_bulk_mbuf(rxq);
	/* Not to move past the allocated mbufs. */
	pkts_n = RTE_MIN(pkts_n, rxq->elts_ci - rxq->rq_pi);
	/* Check for unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		cp_pkt = rxq_copy_mprq_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->decompressed -= rcvd_pkt;
		pkts += cp_pkt;
	}
	elts_idx = rxq->rq_pi & elts_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - cp_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, elts_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n) {
		*no_cq = !cp_pkt;
		return cp_pkt;
	}
	/* At this point, there shouldn't be any remaining packets. */
	MLX5_ASSERT(rxq->decompressed == 0);
	/* Process all the CQEs */
	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
		*no_cq = true;
		return cp_pkt;
	}
	/* Update the consumer indexes for non-compressed CQEs. */
	MLX5_ASSERT(nocmp_n <= pkts_n);
	cp_pkt = rxq_copy_mprq_mbuf_v(rxq, pkts, nocmp_n);
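	/*
	 * rxq_copy_mprq_mbuf_v() above advances rq_pi/cq_ci and rings the
	 * doorbells itself.
	 */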
	rcvd_pkt += cp_pkt;
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
							&elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			cp_pkt = rxq_copy_mprq_mbuf_v(rxq, &pkts[cp_pkt], n);
			rcvd_pkt += cp_pkt;
			rxq->decompressed -= n;
		}
	}
	*no_cq = !rcvd_pkt;
	return rcvd_pkt;
}

/**
 * DPDK callback for vectorized MPRQ RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx = 0;
	uint16_t tn = 0;
	uint64_t err = 0;
	bool no_cq = false;

	do {
		nb_rx = rxq_burst_mprq_v(rxq, pkts + tn, pkts_n - tn,
					 &err, &no_cq);
		if (unlikely(err | rxq->err_state))
			nb_rx = rxq_handle_pending_error(rxq, pkts + tn, nb_rx);
		tn += nb_rx;
		if (unlikely(no_cq))
			break;
	} while (tn != pkts_n);
	return tn;
}

/**
 * Check whether an RX queue can support vectorized RX.
 *
 * @param rxq
 *   Pointer to RX queue.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __rte_cold
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
	struct mlx5_rxq_ctrl *ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	if (!RXQ_PORT(ctrl)->config.rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	if (rxq->lro)
		return -ENOTSUP;
	return 1;
}

/**
 * Check whether a device can support vectorized RX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   1 if supported, negative errno value if not.
 */
int __rte_cold
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
		return -ENOTSUP;
	if (!priv->config.rx_vec_en)
		return -ENOTSUP;
	/* All configured queues must support vectorized RX. */
	for (i = 0; i < priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq_data = mlx5_rxq_data_get(dev, i);

		if (!rxq_data)
			continue;
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			break;
	}
	if (i != priv->rxqs_n)
		return -ENOTSUP;
	return 1;
}