/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#ifndef _BNXT_RXR_H_
#define _BNXT_RXR_H_
#include "hsi_struct_def_dpdk.h"

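/*
 * Extract the TPA aggregation ID from a TPA start completion. Pre-Thor
 * chips carry the ID in a masked subfield of agg_id; Thor (P5) and
 * newer chips use the full little-endian 16-bit field.
 */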
#define BNXT_TPA_START_AGG_ID_PRE_TH(cmp) \
	((rte_le_to_cpu_16((cmp)->agg_id) & RX_TPA_START_CMPL_AGG_ID_MASK) >> \
	 RX_TPA_START_CMPL_AGG_ID_SFT)

#define BNXT_TPA_START_AGG_ID_TH(cmp) \
	rte_le_to_cpu_16((cmp)->agg_id)

static inline uint16_t bnxt_tpa_start_agg_id(struct bnxt *bp,
					     struct rx_tpa_start_cmpl *cmp)
{
	if (BNXT_CHIP_P5_P7(bp))
		return BNXT_TPA_START_AGG_ID_TH(cmp);
	else
		return BNXT_TPA_START_AGG_ID_PRE_TH(cmp);
}

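/*
 * TPA end completion accessors: aggregation buffer count and
 * aggregation ID, with _TH variants for the Thor completion layout.
 */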
#define BNXT_TPA_END_AGG_BUFS(cmp) \
	(((cmp)->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) \
	 >> RX_TPA_END_CMPL_AGG_BUFS_SFT)

#define BNXT_TPA_END_AGG_BUFS_TH(cmp) \
	((cmp)->tpa_agg_bufs)

#define BNXT_TPA_END_AGG_ID(cmp) \
	(((cmp)->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >> \
	 RX_TPA_END_CMPL_AGG_ID_SFT)

#define BNXT_TPA_END_AGG_ID_TH(cmp) \
	rte_le_to_cpu_16((cmp)->agg_id)

#define BNXT_RX_L2_AGG_BUFS(cmp) \
	(((cmp)->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >> \
		RX_PKT_CMPL_AGG_BUFS_SFT)

/* Number of descriptors to process per inner loop in vector mode. */
#define BNXT_RX_DESCS_PER_LOOP_VEC128	4U /* SSE, Neon */
#define BNXT_RX_DESCS_PER_LOOP_VEC256	8U /* AVX2 */

/* Number of extra Rx mbuf ring entries to allocate for vector mode. */
#define BNXT_RX_EXTRA_MBUF_ENTRIES \
	RTE_MAX(BNXT_RX_DESCS_PER_LOOP_VEC128, BNXT_RX_DESCS_PER_LOOP_VEC256)

#define BNXT_OL_FLAGS_TBL_DIM	64
#define BNXT_OL_FLAGS_ERR_TBL_DIM 32

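/*
 * Accessors for the compressed Rx completion (CQE) format: opaque
 * value, aggregation buffer count, and checksum calc/error bits.
 */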
#define BNXT_CRX_CQE_OPAQUE_MASK		\
	RX_PKT_COMPRESS_CMPL_ERRORS_AGG_BUFS_OPAQUE_OPAQUE_MASK
#define BNXT_CRX_CQE_AGG_BUF_MASK		\
	RX_PKT_COMPRESS_CMPL_ERRORS_AGG_BUFS_OPAQUE_AGG_BUFS_MASK
#define BNXT_CRX_CQE_AGG_BUF_SFT		\
	RX_PKT_COMPRESS_CMPL_ERRORS_AGG_BUFS_OPAQUE_AGG_BUFS_SFT
#define BNXT_CRX_CQE_AGG_BUFS(cmp)		\
	(((cmp)->errors_agg_bufs_opaque & BNXT_CRX_CQE_AGG_BUF_MASK) >> \
	 BNXT_CRX_CQE_AGG_BUF_SFT)
#define BNXT_CRX_CQE_CSUM_CALC_MASK		\
	(RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_IP_CS_CALC | \
	 RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_L4_CS_CALC | \
	 RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_T_IP_CS_CALC | \
	 RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_T_L4_CS_CALC)
#define BNXT_CRX_CQE_CSUM_CALC_SFT	8
#define BNXT_PKT_CMPL_T_IP_CS_CALC	0x4

#define BNXT_CRX_TUN_CS_CALC                                  \
	(!!(RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_T_IP_CS_CALC | \
	    RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_T_L4_CS_CALC))

#define BNXT_CRX_CQE_CSUM_ERROR_MASK		\
	(RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_IP_CS_ERROR | \
	 RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_L4_CS_ERROR | \
	 RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_T_IP_CS_ERROR | \
	 RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_T_L4_CS_ERROR)

/* When meta_format != 0 and bit 3 is set, the value in the metadata is a
 * VLAN tag. Use the bit as the VLAN-valid bit.
 */
#define BNXT_RXC_METADATA1_VLAN_VALID		\
	RX_PKT_COMPRESS_CMPL_METADATA1_VALID

static inline void bnxt_set_vlan_crx(struct rx_pkt_compress_cmpl *rxcmp,
				     struct rte_mbuf *mbuf)
{
	uint16_t metadata = rte_le_to_cpu_16(rxcmp->metadata1_cs_error_calc_v1);
	uint16_t vlan_tci = rte_le_to_cpu_16(rxcmp->vlanc_tcid);

	if (metadata & RX_PKT_COMPRESS_CMPL_METADATA1_VALID)
		mbuf->vlan_tci =
			vlan_tci & (RX_PKT_COMPRESS_CMPL_VLANC_TCID_VID_MASK |
				    RX_PKT_COMPRESS_CMPL_VLANC_TCID_DE |
				    RX_PKT_COMPRESS_CMPL_VLANC_TCID_PRI_MASK);
}
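/*
 * Note: unlike bnxt_rx_vlan_v2()/bnxt_rx_vlan_v3() below, this helper
 * only fills in vlan_tci; the caller is expected to set the
 * RTE_MBUF_F_RX_VLAN offload flags separately.
 */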

struct bnxt_tpa_info {
	struct rte_mbuf			*mbuf;
	uint16_t			len;
	uint32_t			agg_count;
	struct rx_tpa_v2_abuf_cmpl	agg_arr[TPA_MAX_NUM_SEGS];

	uint32_t                        rss_hash;
	uint32_t                        vlan;
	uint16_t                        cfa_code;
	uint8_t                         hash_valid:1;
	uint8_t                         vlan_valid:1;
	uint8_t                         cfa_code_valid:1;
	uint8_t                         l4_csum_valid:1;
};

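/*
 * Rx ring state. The "rx" members describe the packet ring; the "ag"
 * members describe the aggregation ring that supplies buffers for
 * scattered (jumbo) frames and TPA aggregation.
 */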
struct bnxt_rx_ring_info {
	uint16_t		rx_raw_prod;
	uint16_t		ag_raw_prod;
	uint16_t		ag_cons; /* Needed with compressed CQE */
	uint16_t                rx_cons; /* Needed for representor */
	uint16_t                rx_next_cons;
	struct bnxt_db_info     rx_db;
	struct bnxt_db_info     ag_db;

	struct rx_prod_pkt_bd	*rx_desc_ring;
	struct rx_prod_pkt_bd	*ag_desc_ring;
	struct rte_mbuf		**rx_buf_ring; /* sw ring */
	struct rte_mbuf		**ag_buf_ring; /* sw ring */

	rte_iova_t		rx_desc_mapping;
	rte_iova_t		ag_desc_mapping;

	struct bnxt_ring	*rx_ring_struct;
	struct bnxt_ring	*ag_ring_struct;

	/*
	 * To deal with out-of-order buffer returns from TPA, use a
	 * free-buffer indicator bitmap.
	 */
	struct rte_bitmap	*ag_bitmap;

	struct bnxt_tpa_info *tpa_info;

	uint32_t ol_flags_table[BNXT_OL_FLAGS_TBL_DIM];
	uint32_t ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM];
};

uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr);

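/* Vector-mode (SSE/Neon and AVX2) Rx burst variants and setup hook. */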
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t bnxt_crx_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			   uint16_t nb_pkts);
int bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq);
#endif

#if defined(RTE_ARCH_X86)
uint16_t bnxt_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
uint16_t bnxt_crx_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts);
#endif
void bnxt_set_mark_in_mbuf(struct bnxt *bp,
			   struct rx_pkt_cmpl_hi *rxcmp1,
			   struct rte_mbuf *mbuf);

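/*
 * Dynamic mbuf field used to carry the Rx CFA code; the helper below
 * returns a pointer to the field within the given mbuf.
 */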
typedef uint32_t bnxt_cfa_code_dynfield_t;
extern int bnxt_cfa_code_dynfield_offset;

static inline bnxt_cfa_code_dynfield_t *
bnxt_cfa_code_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		bnxt_cfa_code_dynfield_offset, bnxt_cfa_code_dynfield_t *);
}

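/*
 * Bit-layout constants for decoding the CFA metadata/code reported in
 * Rx completions (EM/EEM versus TCAM formats).
 */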
#define BNXT_RX_META_CFA_CODE_SHIFT		19
#define BNXT_CFA_CODE_META_SHIFT		16
#define BNXT_RX_META_CFA_CODE_INT_ACT_REC_BIT	0x8000000
#define BNXT_RX_META_CFA_CODE_EEM_BIT		0x4000000
#define BNXT_CFA_META_FMT_MASK			0x70
#define BNXT_CFA_META_FMT_SHFT			4
#define BNXT_CFA_META_FMT_EM_EEM_SHFT		1
#define BNXT_CFA_META_FMT_EEM			3
#define BNXT_CFA_META_EEM_TCAM_SHIFT		31
#define BNXT_CFA_META_EM_TEST(x) ((x) >> BNXT_CFA_META_EEM_TCAM_SHIFT)

/* Definitions for translation of hardware packet type to mbuf ptype. */
#define BNXT_PTYPE_TBL_DIM		128
#define BNXT_PTYPE_TBL_TUN_SFT		0 /* Set if tunneled packet. */
#define BNXT_PTYPE_TBL_TUN_MSK		BIT(BNXT_PTYPE_TBL_TUN_SFT)
#define BNXT_PTYPE_TBL_IP_VER_SFT	1 /* Set if IPv6, clear if IPv4. */
#define BNXT_PTYPE_TBL_IP_VER_MSK	BIT(BNXT_PTYPE_TBL_IP_VER_SFT)
#define BNXT_PTYPE_TBL_VLAN_SFT		2 /* Set if VLAN encapsulated. */
#define BNXT_PTYPE_TBL_VLAN_MSK		BIT(BNXT_PTYPE_TBL_VLAN_SFT)
#define BNXT_PTYPE_TBL_TYPE_SFT		3 /* Hardware packet type field. */
#define BNXT_PTYPE_TBL_TYPE_MSK		0x78 /* Hardware itype field mask. */
#define BNXT_PTYPE_TBL_TYPE_IP		1
#define BNXT_PTYPE_TBL_TYPE_TCP		2
#define BNXT_PTYPE_TBL_TYPE_UDP		3
#define BNXT_PTYPE_TBL_TYPE_ICMP	7
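/*
 * A ptype table index is composed from the completion fields as:
 *   idx = (itype << 3) | (vlan << 2) | (ip_ver << 1) | tunneled
 * where itype is the 4-bit hardware packet type.
 */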

#define RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT	8
#define CMPL_FLAGS2_VLAN_TUN_MSK \
	(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN | RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)

#define CMPL_FLAGS2_VLAN_TUN_MSK_CRX \
	(RX_PKT_COMPRESS_CMPL_METADATA1_VALID | \
	 RX_PKT_COMPRESS_CMPL_CS_ERROR_CALC_T_IP_CS_CALC)

#define BNXT_CMPL_ITYPE_TO_IDX(ft) \
	(((ft) & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> \
	  (RX_PKT_CMPL_FLAGS_ITYPE_SFT - BNXT_PTYPE_TBL_TYPE_SFT))

#define BNXT_CMPL_VLAN_TUN_TO_IDX(f2) \
	(((f2) & CMPL_FLAGS2_VLAN_TUN_MSK) >> \
	 (RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT - BNXT_PTYPE_TBL_VLAN_SFT))

#define BNXT_CMPL_VLAN_TUN_TO_IDX_CRX(md) \
	(((md) & CMPL_FLAGS2_VLAN_TUN_MSK_CRX) >> \
	 (RX_PKT_COMPRESS_CMPL_METADATA1_SFT - BNXT_PTYPE_TBL_VLAN_SFT))

#define BNXT_CMPL_IP_VER_TO_IDX(f2) \
	(((f2) & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> \
	 (RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT - BNXT_PTYPE_TBL_IP_VER_SFT))

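/*
 * Compile-time checks that the completion-to-index conversion macros
 * line up with the ptype table bit layout above.
 */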
static inline void
bnxt_check_ptype_constants(void)
{
	RTE_BUILD_BUG_ON(BNXT_CMPL_ITYPE_TO_IDX(RX_PKT_CMPL_FLAGS_ITYPE_MASK) !=
			 BNXT_PTYPE_TBL_TYPE_MSK);
	RTE_BUILD_BUG_ON(BNXT_CMPL_VLAN_TUN_TO_IDX(CMPL_FLAGS2_VLAN_TUN_MSK) !=
			 (BNXT_PTYPE_TBL_VLAN_MSK | BNXT_PTYPE_TBL_TUN_MSK));
	RTE_BUILD_BUG_ON(BNXT_CMPL_IP_VER_TO_IDX(RX_PKT_CMPL_FLAGS2_IP_TYPE) !=
			 BNXT_PTYPE_TBL_IP_VER_MSK);
}

extern uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM];
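/*
 * Typical lookup (sketch): OR the index macros together and read the
 * table, e.g.
 *   mbuf->packet_type = bnxt_ptype_table[BNXT_CMPL_ITYPE_TO_IDX(ft) |
 *	BNXT_CMPL_VLAN_TUN_TO_IDX(f2) | BNXT_CMPL_IP_VER_TO_IDX(f2)];
 */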

static inline void bnxt_set_vlan(struct rx_pkt_cmpl_hi *rxcmp1,
				 struct rte_mbuf *mbuf)
{
	uint32_t metadata = rte_le_to_cpu_32(rxcmp1->metadata);

	mbuf->vlan_tci = metadata & (RX_PKT_CMPL_METADATA_VID_MASK |
				     RX_PKT_CMPL_METADATA_DE |
				     RX_PKT_CMPL_METADATA_PRI_MASK);
}

/* Stingray2-specific code for RX completion parsing */
#define RX_CMP_VLAN_VALID(rxcmp)        \
	(((struct rx_pkt_v2_cmpl *)rxcmp)->metadata1_payload_offset &	\
	 RX_PKT_V2_CMPL_METADATA1_VALID)

#define RX_CMP_METADATA0_VID(rxcmp1)				\
	((((struct rx_pkt_v2_cmpl_hi *)rxcmp1)->metadata0) &	\
	 (RX_PKT_V2_CMPL_HI_METADATA0_VID_MASK |		\
	  RX_PKT_V2_CMPL_HI_METADATA0_DE  |			\
	  RX_PKT_V2_CMPL_HI_METADATA0_PRI_MASK))

static inline void bnxt_rx_vlan_v2(struct rte_mbuf *mbuf,
				   struct rx_pkt_cmpl *rxcmp,
				   struct rx_pkt_cmpl_hi *rxcmp1)
{
	if (RX_CMP_VLAN_VALID(rxcmp)) {
		mbuf->vlan_tci = RX_CMP_METADATA0_VID(rxcmp1);
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
	}
}

#define RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK	(0x1 << 3)
#define RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK	(0x7 << 10)
#define RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK	(0x1 << 13)
#define RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK	(0x1 << 14)

#define RX_CMP_V2_CS_OK_HDR_CNT(flags)				\
	(((flags) & RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK) >>	\
	 RX_PKT_V2_CMPL_HI_FLAGS2_CS_OK_SFT)

#define RX_CMP_V2_CS_ALL_OK_MODE(flags)				\
	(((flags) & RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK))

#define RX_CMP_FLAGS2_L3_CS_OK_MASK		(0x7 << 10)
#define RX_CMP_FLAGS2_L4_CS_OK_MASK		(0x38 << 10)
#define RX_CMP_FLAGS2_L3_CS_OK_SFT		10
#define RX_CMP_FLAGS2_L4_CS_OK_SFT		13

#define RX_CMP_V2_L4_CS_OK(flags2)			\
	(((flags2) & RX_CMP_FLAGS2_L4_CS_OK_MASK) >>	\
	 RX_CMP_FLAGS2_L4_CS_OK_SFT)

#define RX_CMP_V2_L3_CS_OK(flags2)			\
	(((flags2) & RX_CMP_FLAGS2_L3_CS_OK_MASK) >>	\
	 RX_CMP_FLAGS2_L3_CS_OK_SFT)

#define RX_CMP_V2_L4_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_L4_CS_ERROR)

#define RX_CMP_V2_L3_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_IP_CS_ERROR)

#define RX_CMP_V2_T_IP_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_IP_CS_ERROR)

#define RX_CMP_V2_T_L4_CS_ERR(err)				\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_L4_CS_ERROR)

#define RX_CMP_V2_OT_L4_CS_ERR(err)					\
	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_MASK) ==	\
	 RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_OT_L4_CS_ERROR)

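/*
 * Two checksum reporting modes exist for v2 completions: in "all OK"
 * mode the aggregate IP/L4 bits are used, otherwise the per-header
 * CS_OK counts are consulted. In either mode, a header count greater
 * than one indicates a tunneled packet.
 */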
static inline void bnxt_parse_csum_v2(struct rte_mbuf *mbuf,
				      struct rx_pkt_cmpl_hi *rxcmp1)
{
	struct rx_pkt_v2_cmpl_hi *v2_cmp =
		(struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
	uint16_t error_v2 = rte_le_to_cpu_16(v2_cmp->errors_v2);
	uint32_t flags2 = rte_le_to_cpu_32(v2_cmp->flags2);
	uint32_t hdr_cnt = 0, t_pkt = 0;

	if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
		hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
		if (hdr_cnt > 1)
			t_pkt = 1;

		if (unlikely(RX_CMP_V2_L4_CS_ERR(error_v2)))
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		else if (flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		else
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;

		if (unlikely(RX_CMP_V2_L3_CS_ERR(error_v2)))
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		else if (flags2 & RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK)
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		else
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
	} else {
		hdr_cnt = RX_CMP_V2_L4_CS_OK(flags2);
		if (hdr_cnt > 1)
			t_pkt = 1;

		if (RX_CMP_V2_L4_CS_OK(flags2))
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		else if (RX_CMP_V2_L4_CS_ERR(error_v2))
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;

		if (RX_CMP_V2_L3_CS_OK(flags2))
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		else if (RX_CMP_V2_L3_CS_ERR(error_v2))
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		else
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
	}

	if (t_pkt) {
		if (unlikely(RX_CMP_V2_OT_L4_CS_ERR(error_v2) ||
			     RX_CMP_V2_T_L4_CS_ERR(error_v2)))
			mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;

		if (unlikely(RX_CMP_V2_T_IP_CS_ERR(error_v2)))
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	}
}

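/*
 * Derive the mbuf packet type from a v2 completion: tunneled if more
 * than one checksummed header was reported, IPv6 versus IPv4 from
 * flags2, and the L4 type from the hardware itype field.
 */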
static inline void
bnxt_parse_pkt_type_v2(struct rte_mbuf *mbuf,
		       struct rx_pkt_cmpl *rxcmp,
		       struct rx_pkt_cmpl_hi *rxcmp1)
{
	struct rx_pkt_v2_cmpl *v2_cmp =
		(struct rx_pkt_v2_cmpl *)(rxcmp);
	struct rx_pkt_v2_cmpl_hi *v2_cmp1 =
		(struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
	uint16_t flags_type = v2_cmp->flags_type &
		rte_cpu_to_le_32(RX_PKT_V2_CMPL_FLAGS_ITYPE_MASK);
	uint32_t flags2 = rte_le_to_cpu_32(v2_cmp1->flags2);
	uint32_t l3, pkt_type = 0, vlan = 0;
	uint32_t ip6 = 0, t_pkt = 0;
	uint32_t hdr_cnt, csum_count;

	if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
		hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
		if (hdr_cnt > 1)
			t_pkt = 1;
	} else {
		csum_count = RX_CMP_V2_L4_CS_OK(flags2);
		if (csum_count > 1)
			t_pkt = 1;
	}

	vlan = !!RX_CMP_VLAN_VALID(rxcmp);
	pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;

	ip6 = !!(flags2 & RX_PKT_V2_CMPL_HI_FLAGS2_IP_TYPE);

	if (!t_pkt && !ip6)
		l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (!t_pkt && ip6)
		l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	else if (t_pkt && !ip6)
		l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
	else
		l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

	switch (flags_type) {
	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_ICMP):
		if (!t_pkt)
			pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
		break;
	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_TCP):
		if (!t_pkt)
			pkt_type |= l3 | RTE_PTYPE_L4_TCP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
		break;
	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_UDP):
		if (!t_pkt)
			pkt_type |= l3 | RTE_PTYPE_L4_UDP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
		break;
	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_IP):
		pkt_type |= l3;
		break;
	}

	mbuf->packet_type = pkt_type;
}

/* Thor2-specific code for RX completion parsing */
#define RX_PKT_V3_CMPL_FLAGS2_IP_TYPE_SFT	8
#define RX_PKT_V3_CMPL_METADATA1_VALID_SFT	15

#define BNXT_CMPL_V3_ITYPE_TO_IDX(ft) \
	(((ft) & RX_PKT_V3_CMPL_FLAGS_ITYPE_MASK) >> \
	 (RX_PKT_V3_CMPL_FLAGS_ITYPE_SFT - BNXT_PTYPE_TBL_TYPE_SFT))

#define BNXT_CMPL_V3_VLAN_TO_IDX(meta) \
	(((meta) & (1 << RX_PKT_V3_CMPL_METADATA1_VALID_SFT)) >> \
	 (RX_PKT_V3_CMPL_METADATA1_VALID_SFT - BNXT_PTYPE_TBL_VLAN_SFT))

#define BNXT_CMPL_V3_IP_VER_TO_IDX(f2) \
	(((f2) & RX_PKT_V3_CMPL_HI_FLAGS2_IP_TYPE) >> \
	 (RX_PKT_V3_CMPL_FLAGS2_IP_TYPE_SFT - BNXT_PTYPE_TBL_IP_VER_SFT))

#define RX_CMP_V3_VLAN_VALID(rxcmp)        \
	(((struct rx_pkt_v3_cmpl *)rxcmp)->metadata1_payload_offset &	\
	 RX_PKT_V3_CMPL_METADATA1_VALID)

#define RX_CMP_V3_METADATA0_VID(rxcmp1)				\
	((((struct rx_pkt_v3_cmpl_hi *)rxcmp1)->metadata0) &	\
	 (RX_PKT_V3_CMPL_HI_METADATA0_VID_MASK |		\
	  RX_PKT_V3_CMPL_HI_METADATA0_DE  |			\
	  RX_PKT_V3_CMPL_HI_METADATA0_PRI_MASK))

static inline void bnxt_rx_vlan_v3(struct rte_mbuf *mbuf,
	struct rx_pkt_cmpl *rxcmp,
	struct rx_pkt_cmpl_hi *rxcmp1)
{
	if (RX_CMP_V3_VLAN_VALID(rxcmp)) {
		mbuf->vlan_tci = RX_CMP_V3_METADATA0_VID(rxcmp1);
		mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
	}
}

#define RX_CMP_V3_L4_CS_ERR(err)		\
	(((err) & RX_PKT_CMPL_ERRORS_MASK)	\
	 & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR))
#define RX_CMP_V3_L3_CS_ERR(err)		\
	(((err) & RX_PKT_CMPL_ERRORS_MASK)	\
	 & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
#define RX_CMP_V3_T_IP_CS_ERR(err)		\
	(((err) & RX_PKT_CMPL_ERRORS_MASK)	\
	 & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR))
#define RX_CMP_V3_T_L4_CS_ERR(err)		\
	(((err) & RX_PKT_CMPL_ERRORS_MASK)	\
	 & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))
#define RX_PKT_CMPL_CALC			\
	(RX_PKT_CMPL_FLAGS2_IP_CS_CALC |	\
	 RX_PKT_CMPL_FLAGS2_L4_CS_CALC |	\
	 RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC |	\
	 RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)

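/*
 * Translate v3 completion checksum status into mbuf ol_flags; the bits
 * in RX_PKT_CMPL_CALC indicate which checksums the hardware computed.
 */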
static inline uint64_t
bnxt_parse_csum_fields_v3(uint32_t flags2, uint32_t error_v2)
{
	uint64_t ol_flags = 0;

	if (flags2 & RX_PKT_CMPL_CALC) {
		if (unlikely(RX_CMP_V3_L4_CS_ERR(error_v2)))
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		else
			ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		if (unlikely(RX_CMP_V3_L3_CS_ERR(error_v2)))
			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		if (unlikely(RX_CMP_V3_T_L4_CS_ERR(error_v2)))
			ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
		else
			ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
		if (unlikely(RX_CMP_V3_T_IP_CS_ERR(error_v2)))
			ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
		if (!(ol_flags & (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD)))
			ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	} else {
		/* Unknown is defined as 0 for all packet types, hence use it for all. */
		ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
	}
	return ol_flags;
}

static inline void
bnxt_parse_csum_v3(struct rte_mbuf *mbuf, struct rx_pkt_cmpl_hi *rxcmp1)
{
	struct rx_pkt_v3_cmpl_hi *v3_cmp =
		(struct rx_pkt_v3_cmpl_hi *)(rxcmp1);
	uint16_t error_v2 = rte_le_to_cpu_16(v3_cmp->errors_v2);
	uint32_t flags2 = rte_le_to_cpu_32(v3_cmp->flags2);

	mbuf->ol_flags = bnxt_parse_csum_fields_v3(flags2, error_v2);
}
#endif /* _BNXT_RXR_H_ */
540 #endif /*  _BNXT_RXR_H_ */
541