/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_H_

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_mbuf.h>

#include "mlx5_autoconf.h"
#include "mlx5_prm.h"

/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
	(DEV_TX_OFFLOAD_IPV4_CKSUM | \
	 DEV_TX_OFFLOAD_UDP_CKSUM | \
	 DEV_TX_OFFLOAD_TCP_CKSUM | \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)

/* HW offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_OFFLOAD_CAP \
	(MLX5_VEC_TX_CKSUM_OFFLOAD_CAP | \
	 DEV_TX_OFFLOAD_MULTI_SEGS)

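/*
 * Usage note (illustrative only, not driver code): a Tx setup path can
 * compare the offloads requested by the application against
 * MLX5_VEC_TX_OFFLOAD_CAP to decide whether the vectorized Tx burst
 * routine may be selected. The helper name below is hypothetical.
 *
 * @code
 * static inline int
 * tx_offloads_fit_vec_path(uint64_t requested)
 * {
 *	// Fits only if no requested bit falls outside the capability mask.
 *	return (requested & ~MLX5_VEC_TX_OFFLOAD_CAP) == 0;
 * }
 * @endcode
 *
 * A requested offload bit outside the mask would typically mean falling
 * back to the scalar Tx burst routine.
 */
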
/*
 * Compile-time sanity checks for the vectorized Rx functions.
 */

#define S_ASSERT_RTE_MBUF(s) \
	static_assert(s, "A field of struct rte_mbuf is changed")
#define S_ASSERT_MLX5_CQE(s) \
	static_assert(s, "A field of struct mlx5_cqe is changed")

/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
		  offsetof(struct rte_mbuf, rearm_data) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
		  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));

/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
		  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
#endif
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
		  offsetof(struct mlx5_cqe, pkt_info) + 12);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
		  sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
		  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
		  sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
		  offsetof(struct mlx5_cqe, byte_cnt));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
		  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
		  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);

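/*
 * Scalar illustration (hypothetical, not driver code) of what the checks
 * above guarantee: packet_type, pkt_len, data_len, vlan_tci and the RSS
 * hash occupy one contiguous, fixed-layout 16-byte region starting at
 * rx_descriptor_fields1, so the vectorized Rx path can assemble them in
 * a single register and write them with one 16-byte store. The mirror
 * struct and helper below are illustrative names only.
 *
 * @code
 * struct rx_desc_fields1 {
 *	uint32_t packet_type; // offset 0
 *	uint32_t pkt_len;     // offset 4, asserted above
 *	uint16_t data_len;    // offset 8, asserted above
 *	uint16_t vlan_tci;    // offset 10
 *	uint32_t rss_hash;    // offset 12, asserted above (hash)
 * };
 *
 * static inline void
 * fill_desc_fields1(struct rte_mbuf *pkt, const struct rx_desc_fields1 *f)
 * {
 *	// Scalar stand-in for the single 16-byte vector store.
 *	memcpy(&pkt->rx_descriptor_fields1, f, sizeof(*f));
 * }
 * @endcode
 */
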
/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param n
 *   Number of buffers to be replenished.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
	const uint16_t q_n = 1 << rxq->elts_n;
	const uint16_t q_mask = q_n - 1;
	uint16_t elts_idx = rxq->rq_ci & q_mask;
	struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
	volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
	unsigned int i;

	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
	/* Do not cross the queue end. */
	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
		rxq->stats.rx_nombuf += n;
		return;
	}
	for (i = 0; i < n; ++i)
		wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
					      RTE_PKTMBUF_HEADROOM);
	rxq->rq_ci += n;
	/* Pad with the fake mbuf to prevent overflowing into consumed mbufs. */
	elts_idx = rxq->rq_ci & q_mask;
	for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
		(*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}

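/*
 * Caller sketch (hypothetical; the real rxq_burst_v() implementations
 * live in the per-architecture mlx5_rxtx_vec_*.c files): replenishment
 * is triggered only once enough descriptors have been consumed, which
 * keeps the bulk allocation above the MLX5_VPMD_RXQ_RPLNSH_THRESH floor
 * asserted in mlx5_rx_replenish_bulk_mbuf().
 *
 * @code
 * const uint16_t q_n = 1 << rxq->elts_n;
 * uint16_t repl_n = q_n + rxq->rq_pi - rxq->rq_ci; // free descriptor slots
 *
 * if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
 *	mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 * @endcode
 */
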
#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */