/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_H_

#include <assert.h> /* assert() and C11 static_assert(). */

#include <rte_common.h>
#include <rte_mbuf.h>

#include "mlx5_autoconf.h"
#include "mlx5_prm.h"

/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
	 DEV_TX_OFFLOAD_UDP_CKSUM | \
	 DEV_TX_OFFLOAD_TCP_CKSUM | \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)

/* HW offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_OFFLOAD_CAP \
	(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
	 DEV_TX_OFFLOAD_MULTI_SEGS)

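/*
 * Usage sketch (an assumption, mirroring how the vectorized Tx support
 * probe typically treats these masks): the vector Tx path is eligible
 * only when a queue requests no offload outside MLX5_VEC_TX_OFFLOAD_CAP.
 *
 *	if (txq_offloads & ~MLX5_VEC_TX_OFFLOAD_CAP)
 *		return -ENOTSUP;
 */
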
/*
 * Compile-time sanity checks for the vectorized functions. The SIMD Rx
 * routines fill rx_descriptor_fields1 and rearm_data with single wide
 * stores and parse CQEs with vector shuffles, so they depend on the
 * exact byte offsets of struct rte_mbuf and struct mlx5_cqe fields
 * asserted below.
 */

#define S_ASSERT_RTE_MBUF(s) \
	static_assert(s, "A field of struct rte_mbuf is changed")
#define S_ASSERT_MLX5_CQE(s) \
	static_assert(s, "A field of struct mlx5_cqe is changed")

/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
		  offsetof(struct rte_mbuf, rearm_data) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
		  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));

/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
#endif
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
		  offsetof(struct mlx5_cqe, pkt_info) + 12);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
		  sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
		  sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
		  offsetof(struct mlx5_cqe, byte_cnt));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
		  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
		  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param n
 *   Number of buffers to be replenished.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
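	/* elts_n is the log2 ring size; derive the size and index mask. */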
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq =
		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
	unsigned int i;

	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross the queue end. */
	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
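	/* Bulk-allocate all replacement mbufs in one mempool operation. */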
	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
		rxq->stats.rx_nombuf += n;
		return;
	}
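	/* Post each buffer's data address (buf_addr + headroom) to its WQE. */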
	for (i = 0; i < n; ++i) {
		wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
					      RTE_PKTMBUF_HEADROOM);
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
	}
	rxq->rq_ci += n;
	/* Prevent overflowing into consumed mbufs. */
	elts_idx = rxq->rq_ci & q_mask;
	for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
		(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
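	/*
	 * Make the WQE updates visible to the device before publishing the
	 * new producer index through the doorbell record.
	 */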
	rte_cio_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}
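
/*
 * Usage sketch (an assumption, modeled on the vectorized Rx burst loop;
 * repl_n here is hypothetical): replenish only once enough descriptors
 * have been consumed, so that the threshold assertion above holds.
 *
 *	uint16_t repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
 *
 *	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
 *		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 */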

#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */