/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_H_

#include <assert.h> /* static_assert() and assert() used below. */
#include <rte_common.h>
#include <rte_mbuf.h>

#include "mlx5_autoconf.h"
#include "mlx5_prm.h"

/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
	 DEV_TX_OFFLOAD_UDP_CKSUM | \
	 DEV_TX_OFFLOAD_TCP_CKSUM | \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)

/* HW offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_OFFLOAD_CAP \
	(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
	 DEV_TX_OFFLOAD_MATCH_METADATA | \
	 DEV_TX_OFFLOAD_MULTI_SEGS)
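
/*
 * These masks gate the selection of the vectorized Tx burst: if the
 * application requests a Tx offload outside of MLX5_VEC_TX_OFFLOAD_CAP,
 * the vectorized path is rejected and the PMD falls back to scalar Tx
 * (see mlx5_check_vec_tx_support() in mlx5_rxtx_vec.c).
 */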

/*
 * Compile time sanity check for vectorized functions.
 */

#define S_ASSERT_RTE_MBUF(s) \
	static_assert(s, "A field of struct rte_mbuf is changed")
#define S_ASSERT_MLX5_CQE(s) \
	static_assert(s, "A field of struct mlx5_cqe is changed")
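
/*
 * The vectorized Rx routines fill rx_descriptor_fields1 and rearm_data
 * with SIMD stores whose shuffle masks hard-code field offsets, so a
 * layout change in struct rte_mbuf or struct mlx5_cqe must break the
 * build rather than silently corrupt mbufs; the checks below pin the
 * offsets each function relies on.
 */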

/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
		  offsetof(struct rte_mbuf, rearm_data) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
		  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
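/*
 * The alignment check above matters because, on x86 for instance, the
 * rearm template and ol_flags are written together with one aligned
 * 16-byte store covering rearm_data and the 8 bytes that follow it.
 */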

/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
#endif
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
		  offsetof(struct mlx5_cqe, pkt_info) + 12);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
		  sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
		  sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
		  offsetof(struct mlx5_cqe, byte_cnt));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
		  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
		  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
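/*
 * The relative-offset checks above keep CQE fields that rxq_burst_v()
 * fetches together (hdr_type_etc/vlan_info, byte_cnt, sop_drop_qpn/
 * op_own) adjacent; the pkt_info check accounts for the 64 bytes of
 * leading padding the CQE carries on 128-byte cache line targets.
 */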

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param n
 *   Number of buffers to be replenished.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq =
		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
	unsigned int i;

	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
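	/*
	 * The replenish threshold is strictly greater than
	 * MLX5_VPMD_DESCS_PER_LOOP (asserted above), so the subtraction
	 * below cannot underflow: those descriptors are kept back for the
	 * fake mbufs written at the queue tail at the end of this function.
	 */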
	/* Do not cross the queue end. */
	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
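	/*
	 * rte_mempool_get_bulk() is all-or-nothing: on failure no mbuf has
	 * been taken, so only the software counter is updated.
	 */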
	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
		rxq->stats.rx_nombuf += n;
		return;
	}
	for (i = 0; i < n; ++i) {
		void *buf_addr;

		/*
		 * Load the virtual address for the Rx WQE. Non-x86
		 * processors (mostly RISC, such as ARM and POWER) are more
		 * vulnerable to load stalls. For x86, reducing the number
		 * of instructions seems to matter most.
		 */
#ifdef RTE_ARCH_X86_64
		buf_addr = elts[i]->buf_addr;
		assert(buf_addr == rte_mbuf_buf_addr(elts[i], rxq->mp));
#else
		buf_addr = rte_mbuf_buf_addr(elts[i], rxq->mp);
		assert(buf_addr == elts[i]->buf_addr);
#endif
		wq[i].addr = rte_cpu_to_be_64((uintptr_t)buf_addr +
					      RTE_PKTMBUF_HEADROOM);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
	}
	rxq->rq_ci += n;
	/* Prevent overflowing into consumed mbufs. */
	elts_idx = rxq->rq_ci & q_mask;
	for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
		(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
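	/*
	 * Make sure the buffer addresses written to the WQEs above are
	 * visible to the device before the doorbell record update that
	 * hands them over.
	 */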
	rte_cio_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}
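
/*
 * Call-site sketch (illustration only; repl_n and rq_repl_thresh are
 * assumptions based on the rxq_burst_v() implementations of this era,
 * not definitions from this header):
 *
 *	uint16_t repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
 *
 *	if (repl_n >= rxq->rq_repl_thresh)
 *		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 */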

#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */