/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "bnxt_ulp.h"
#include "bnxt_ulp_utils.h"
#include "bnxt_tf_common.h"
#include "bnxt_tf_pmd_shim.h"
#include "ulp_rte_parser.h"
#include "ulp_matcher.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_tun.h"
#include "ulp_template_db_tbl.h"

/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits */
#define ULP_UDP_PORT_VXLAN		4789
#define ULP_UDP_PORT_VXLAN_MASK		0xFFFF
#define ULP_UDP_PORT_VXLAN_GPE		4790
#define ULP_UDP_PORT_VXLAN_GPE_MASK	0xFFFF
#define ULP_UDP_PORT_GENEVE		6081
#define ULP_UDP_PORT_GENEVE_MASK	0xFFFF

/**
 * Geneve header first 16 bits:
 * Version (2b), length of the options fields (6b), OAM packet (1b),
 * critical options present (1b), reserved 0 (6b).
 */
#define ULP_GENEVE_OPT_MAX_SIZE 6 /* HW only supports 6 words */
#define ULP_GENEVE_OPTLEN_MASK 0x3F
#define ULP_GENEVE_OPTLEN_SHIFT 8
#define ULP_GENEVE_OPTLEN_VAL(a) \
	    (((a) >> (ULP_GENEVE_OPTLEN_SHIFT)) & (ULP_GENEVE_OPTLEN_MASK))
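
/*
 * Illustrative example (not part of the driver logic): if the first
 * 16-bit word of a Geneve header is 0x0340, the version is 0 and
 * ULP_GENEVE_OPTLEN_VAL(0x0340) == 3, i.e. three 4-byte words of
 * options, which is within ULP_GENEVE_OPT_MAX_SIZE.
 */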

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
	if (!*item)
		return 0;
	if (increment)
		(*item)++;
	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
		(*item)++;
	if (*item)
		return 1;
	return 0;
}
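
/*
 * Usage sketch (illustrative; the pattern contents are assumptions):
 * given a pattern array of ETH, VOID, IPV4, END with item pointing at
 * ETH, ulp_rte_item_skip_void(&item, 1) steps past ETH and the VOID
 * item, leaves item pointing at IPV4 and returns 1.
 */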

/* Utility function to copy field spec items */
static inline struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
			const void *buffer,
			uint32_t size)
{
	field->size = size;
	memcpy(field->spec, buffer, field->size);
	field++;
	return field;
}

/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
				   uint32_t idx,
				   enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
		/* Not exact match */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}

#define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
/* Utility function to copy field spec and mask items */
static inline void
ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
		      uint32_t *idx,
		      uint32_t size,
		      const void *spec_buff,
		      const void *mask_buff,
		      enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	/* update the field size */
	field->size = size;

	/*
	 * Copy the mask only if the mask-ignore action is not set and
	 * both spec and mask are present and the spec is non-zero.
	 */
	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff &&
	    spec_buff && ulp_bitmap_notzero(spec_buff, size)) {
		memcpy(field->mask, mask_buff, size);
		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
	}

	/* Copy the spec only if the mask is present and non-zero */
	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
		memcpy(field->spec, spec_buff, size);

	/* Increment the index */
	*idx = *idx + 1;
}
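
/*
 * Illustrative example (values assumed, not taken from the driver):
 * for an IPv4 destination address with spec 10.1.1.1, mask
 * 255.255.255.0 and ULP_PRSR_ACT_DEFAULT, ulp_rte_prsr_fld_mask()
 * copies both buffers into hdr_field[*idx] and, because the mask is
 * not all ones, ulp_rte_parser_field_bitmap_update() also flags the
 * flow as a wild-card match via BNXT_ULP_CF_IDX_WC_MATCH.
 */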

/* Utility function to validate the field size and reserve field slots */
static inline int32_t
ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
			       uint32_t *idx,
			       uint32_t size)
{
	if (unlikely(params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX)) {
		BNXT_DRV_DBG(ERR, "OOB for field processing %u\n", *idx);
		return -EINVAL;
	}
	*idx = params->field_idx;
	params->field_idx += size;
	return 0;
}
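
/*
 * Illustrative example (starting state assumed): if params->field_idx
 * is 5 and BNXT_ULP_PROTO_HDR_MAX is large enough, a call with
 * size == 3 sets *idx to 5 and advances params->field_idx to 8,
 * reserving three consecutive hdr_field slots for the caller.
 */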

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->type >= (typeof(item->type))
		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
			if (item->type >=
			    (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
				goto hdr_parser_error;
			/* get the header information */
			hdr_info = &ulp_vendor_hdr_info[item->type -
				BNXT_RTE_FLOW_ITEM_TYPE_END];
		} else {
			if (item->type > RTE_FLOW_ITEM_TYPE_ECPRI)
				goto hdr_parser_error;
			hdr_info = &ulp_hdr_info[item->type];
		}
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			goto hdr_parser_error;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);

hdr_parser_error:
	BNXT_DRV_DBG(ERR, "Truflow parser does not support type %d\n",
		    item->type);
	return BNXT_TF_RC_PARSE_ERR;
}
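
/*
 * Usage sketch (illustrative only; the pattern values are assumptions):
 * a caller with an initialized ulp_rte_parser_params would invoke the
 * header parser on an END-terminated pattern such as
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 *	if (bnxt_ulp_rte_parser_hdr_parse(pattern, params) !=
 *	    BNXT_TF_RC_SUCCESS)
 *		return -EINVAL; // handle the parse failure
 *
 * Each supported item dispatches to its registered proto_hdr_func,
 * e.g. ulp_rte_eth_hdr_handler() and ulp_rte_ipv4_hdr_handler().
 */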

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the actions in the action list */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		if (action_item->type >=
		    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
			if (action_item->type >=
			    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
				goto act_parser_error;
			/* get the action information from the bnxt vendor act info table */
			hdr_info = &ulp_vendor_act_info[action_item->type -
				BNXT_RTE_FLOW_ACTION_TYPE_END];
		} else {
			if (action_item->type > RTE_FLOW_ACTION_TYPE_INDIRECT)
				goto act_parser_error;
			/* get the action information from the act info table */
			hdr_info = &ulp_act_info[action_item->type];
		}
		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			goto act_parser_error;
		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;

act_parser_error:
	BNXT_DRV_DBG(ERR, "Truflow parser does not support act %u\n",
		    action_item->type);
	return BNXT_TF_RC_ERROR;
}
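
/*
 * Usage sketch (illustrative; the action list is an assumption): the
 * action parser walks an END-terminated array the same way the header
 * parser walks the pattern, e.g.
 *
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	rc = bnxt_ulp_rte_parser_act_parse(actions, params);
 *
 * If no port action was given, the implicit action port processing at
 * the end of the walk derives one from the incoming interface.
 */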

/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif, svif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_DRV_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_DRV_FUNC_PARIF, &parif)) {
			BNXT_DRV_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		/* Note:
		 * We save the drv_func_parif into the CF_IDX of phy_port_parif,
		 * since that index is currently referenced by ingress templates
		 * for datapath flows. If in the future we change the parser to
		 * save it in the CF_IDX of drv_func_parif, we also need to
		 * update the template.
		 * WARNING: Two VFs on the same parent PF will not work, as the
		 * parif is based on the fw fid of the parent PF.
		 */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
		/* Set port SVIF */
		if (ulp_port_db_svif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_SVIF, &svif)) {
			BNXT_DRV_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_SVIF,
				    svif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_DRV_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_DRV_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
					    1);
		}
	}
}
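
/*
 * Summary of the interface updates above (derived from the code, for
 * reference):
 *
 *	direction  match port  PARIF source        written CF index
 *	ingress    any         BNXT_ULP_DRV_FUNC   PHY_PORT_PARIF (+ SVIF)
 *	egress     VF rep      BNXT_ULP_VF_FUNC    VF_FUNC_PARIF
 *	egress     other       BNXT_ULP_DRV_FUNC   DRV_FUNC_PARIF
 */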

static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_ING);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_ING);
	}

	/* Evaluate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	     match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_MULTIPLE_PORT)) {
			ULP_BITMAP_SET(params->act_bitmap.bits,
				       BNXT_ULP_ACT_BIT_VF_TO_VF);
		} else {
			if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MP_A_IS_VFREP) &&
			    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MP_B_IS_VFREP))
				ULP_BITMAP_SET(params->act_bitmap.bits,
					       BNXT_ULP_ACT_BIT_VF_TO_VF);
			else
				ULP_BITMAP_RESET(params->act_bitmap.bits,
						 BNXT_ULP_ACT_BIT_VF_TO_VF);
		}
	}

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * If the VXLAN header is matched and the VXLAN decap
		 * action is not set, then decrement the tunnel TTL.
		 * GRE and NVGRE should be added similarly in the future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Set the L2 context usage; this may be changed later */
	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_L2_CNTXT_ID);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}

/*
 * Function to handle the post processing of the parsing details
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
}

/*
 * Function to compute the flow direction based on the match port details
 */
static void
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type;

	/* Get the match port type */
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);

	/* If it is an ingress flow and the match port is a VF rep,
	 * then the direction is egress.
	 */
	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
				    BNXT_ULP_DIR_EGRESS);
	} else {
		/* Assign the input direction */
		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
		else if (params->dir_attr & BNXT_ULP_FLOW_ATTR_EGRESS)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
		else if (match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_EGRESS);
		else
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
					    BNXT_ULP_DIR_INGRESS);
	}
}
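
/*
 * Decision table for the direction computation above (derived from the
 * code, for reference):
 *
 *	dir_attr   match port   computed direction
 *	ingress    VF rep       egress
 *	ingress    other        ingress
 *	egress     any          egress
 *	none       VF rep       egress
 *	none       other        ingress
 */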

/* Function to handle the parsing of RTE Flow item PF Header. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask,
			enum bnxt_ulp_direction_type item_dir)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_DRV_DBG(ERR,
			    "SVIF already set, multiple sources not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_DRV_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction */
	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
		    item_dir != BNXT_ULP_DIR_EGRESS)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type, &svif);
	svif = rte_cpu_to_be_16(svif);
	mask = rte_cpu_to_be_16(mask);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
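
/*
 * SVIF type selection above, summarized (derived from the code):
 *
 *	dir == ingress && port != VF rep          -> BNXT_ULP_PHY_PORT_SVIF
 *	port == VF rep && item_dir != egress      -> BNXT_ULP_VF_FUNC_SVIF
 *	otherwise                                 -> BNXT_ULP_DRV_FUNC_SVIF
 */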

/* Function to handle the parsing of the RTE port id */
int32_t
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
{
	uint16_t port_id = 0;
	uint16_t svif_mask = 0xFFFF;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_ERROR;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL)
		return BNXT_TF_RC_SUCCESS;

	/* SVIF not set. So get the port id */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);

	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_DRV_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}

	/* Update the SVIF details */
	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
				     BNXT_ULP_DIR_INVALID);
	return rc;
}

/* Function to handle the implicit action port id */
int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
{
	struct rte_flow_action action_item = {0};
	struct rte_flow_action_port_id port_id = {0};

	/* Read the action port set bit */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
		/* Already set, so just exit */
		return BNXT_TF_RC_SUCCESS;
	}
	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	action_item.conf = &port_id;

	/* Update the action port based on incoming port */
	ulp_rte_port_act_handler(&action_item, params);

	/* Reset the action port set bit */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
	return BNXT_TF_RC_SUCCESS;
}

/* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type item_dir;
	uint16_t ethdev_id;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	if (!item->spec) {
		BNXT_DRV_DBG(ERR, "ParseErr:Port spec is not valid\n");
		return rc;
	}
	if (!item->mask) {
		BNXT_DRV_DBG(ERR, "ParseErr:Port mask is not valid\n");
		return rc;
	}

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
		const struct rte_flow_item_port_id *port_spec = item->spec;
		const struct rte_flow_item_port_id *port_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INVALID;
		ethdev_id = port_spec->id;
		mask = port_mask->id;

		if (!port_mask->id) {
			ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF_IGNORE);
		}
		break;
	}
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_EGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	default:
		BNXT_DRV_DBG(ERR, "ParseErr:Unexpected item\n");
		return rc;
	}

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      ethdev_id,
					      &ifindex)) {
		BNXT_DRV_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag,
			     uint32_t has_vlan, uint32_t has_vlan_mask)
{
#define ULP_RTE_ETHER_TYPE_ROE	0xfc3d

	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		has_vlan_mask = 1;
		has_vlan = 1;
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_ECPRI)) {
		/* Update the hdr_bitmap with eCPRI */
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				BNXT_ULP_HDR_BIT_O_ECPRI);
	} else if (type == tfp_cpu_to_be_16(ULP_RTE_ETHER_TYPE_ROE)) {
		/* Update the hdr_bitmap with RoE */
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				BNXT_ULP_HDR_BIT_O_ROE);
	}

	if (has_vlan_mask) {
		if (in_flag) {
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_HAS_VTAG,
					    has_vlan);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_VLAN_NO_IGNORE,
					    1);
		} else {
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_HAS_VTAG,
					    has_vlan);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_VLAN_NO_IGNORE,
					    1);
		}
	}
}

/* Internal function to identify broadcast or multicast packets */
static int32_t
ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
{
	if (rte_is_multicast_ether_addr(eth_addr) ||
	    rte_is_broadcast_ether_addr(eth_addr)) {
		BNXT_DRV_DBG(DEBUG,
			    "No support for bcast or mcast addr offload\n");
		return 1;
	}
	return 0;
}

/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t has_vlan = 0, has_vlan_mask = 0;

	/* Perform validations */
	if (eth_spec) {
		/* Avoid multicast and broadcast addr */
		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.dst_addr))
			return BNXT_TF_RC_PARSE_ERR;

		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.src_addr))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->hdr.ether_type;
		has_vlan = eth_spec->has_vlan;
	}

	/* If mask is not specified then use the default mask */
	if (eth_spec && !eth_mask)
		eth_mask = &rte_flow_item_eth_mask;

	if (eth_mask) {
		eth_type &= eth_mask->type;
		has_vlan_mask = eth_mask->has_vlan;
	}

	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_ETH_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	dmac_idx = idx;
	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.dst_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.dst_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.dst_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.src_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.src_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.src_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.ether_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.ether_type),
			      ulp_deference_struct(eth_mask, hdr.ether_type),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
				    dmac_idx);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag,
				     has_vlan, has_vlan_mask);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap	*hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		vlan_tag = ntohs(vlan_spec->hdr.vlan_tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->hdr.eth_proto;
	}

	/* assign the default vlan mask if the spec is valid and the mask is not */
	if (vlan_spec && !vlan_mask)
		vlan_mask = &rte_flow_item_vlan_mask;

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= 0xfff;
		/*
		 * The storage for the priority and the vlan tag is 2 bytes
		 * each. If the 3-bit priority mask is all ones, set the
		 * remaining 13 bits to ones as well so that the field is
		 * treated as an exact match (e.g. a TCI mask of 0xE000
		 * carries an all-ones priority mask, which is widened to
		 * 0xFFFF below); the 12-bit vlan tag mask is widened the
		 * same way.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}

	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_S_VLAN_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.vlan_tci);
	/*
	 * The priority field is ignored since OVS sets it as a wild-card
	 * match and it is not supported. This is a workaround and shall
	 * be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.eth_proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, hdr.eth_proto),
			      ulp_deference_struct(vlan_mask, hdr.eth_proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* Update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else {
		BNXT_DRV_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag, 1, 1);
	return BNXT_TF_RC_SUCCESS;
}
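
/*
 * VLAN position handling above, summarized (derived from the code):
 *
 *	outer eth  inner eth  existing vtags  resulting header bit
 *	yes        no         0               BNXT_ULP_HDR_BIT_OO_VLAN
 *	yes        no         1 (outer)       BNXT_ULP_HDR_BIT_OI_VLAN
 *	yes        yes        0 (inner)       BNXT_ULP_HDR_BIT_IO_VLAN
 *	yes        yes        1 (inner)       BNXT_ULP_HDR_BIT_II_VLAN
 */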

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_GRE) {
		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
	} else if (proto == IPPROTO_ICMP) {
		if (ULP_BITMAP_ISSET(param->cf_bitmap,
				     BNXT_ULP_CF_BIT_IS_TUNNEL))
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_I_ICMP);
		else
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_O_ICMP);
	}

	if (in_flag) {
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    proto);
	} else {
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    proto);
	}
}

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint8_t ttl = 0;
	uint8_t proto_mask = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* Validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_DRV_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_IPV4_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/* If mask is not specified then use the default mask */
	if (ipv4_spec && !ipv4_mask)
		ipv4_mask = &rte_flow_item_ipv4_mask;

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);

	/*
	 * The tos field is ignored since OVS sets it as a wild-card
	 * match and it is not supported. An application can enable
	 * tos support.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.type_of_service),
			      ulp_deference_struct(ipv4_mask,
						   hdr.type_of_service),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.total_length),
			      ulp_deference_struct(ipv4_mask, hdr.total_length),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.fragment_offset),
			      ulp_deference_struct(ipv4_mask,
						   hdr.fragment_offset),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
			      ULP_PRSR_ACT_DEFAULT);
	if (ipv4_spec)
		ttl = ipv4_spec->hdr.time_to_live;
	if (!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_TTL, ttl);

	/* Ignore proto for matching templates */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.next_proto_id),
			      ulp_deference_struct(ipv4_mask,
						   hdr.next_proto_id),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	if (ipv4_spec)
		proto = ipv4_spec->hdr.next_proto_id;

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
	if (ipv4_mask) {
		proto &= ipv4_mask->hdr.next_proto_id;
		proto_mask = ipv4_mask->hdr.next_proto_id;
	}

	/* Update the field protocol hdr bitmap */
	if (proto_mask)
		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6	*ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6	*ipv6_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size, vtc_flow;
	uint32_t ver_spec = 0, ver_mask = 0;
	uint32_t tc_spec = 0, tc_mask = 0;
	uint32_t lab_spec = 0, lab_mask = 0;
	uint8_t proto = 0;
	uint8_t proto_mask = 0;
	uint8_t ttl = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* Validate that there is no third L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_DRV_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_IPV6_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/* If mask is not specified then use the default mask */
	if (ipv6_spec && !ipv6_mask)
		ipv6_mask = &rte_flow_item_ipv6_mask;

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		vtc_flow = ntohl(ipv6_spec->hdr.vtc_flow);
		ver_spec = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
		tc_spec = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
		lab_spec = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));
		proto = ipv6_spec->hdr.proto;
	}

	if (ipv6_mask) {
		vtc_flow = ntohl(ipv6_mask->hdr.vtc_flow);
		ver_mask = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
		tc_mask = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
		lab_mask = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));

		/* Some of the PMD applications may set the protocol field
		 * in the IPv6 spec but don't set the mask. So, consider
		 * the mask in the proto value calculation.
		 */
		proto &= ipv6_mask->hdr.proto;
		proto_mask = ipv6_mask->hdr.proto;
	}

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
	ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
			      ULP_PRSR_ACT_DEFAULT);
	/*
	 * The TC and flow label fields are ignored since OVS sets them
	 * for match and they are not supported. This is a workaround and
	 * shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);
	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for template matching */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.proto),
			      ulp_deference_struct(ipv6_mask, hdr.proto),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
			      ULP_PRSR_ACT_DEFAULT);
	if (ipv6_spec)
		ttl = ipv6_spec->hdr.hop_limits;
	if (!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_TTL, ttl);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Update the field protocol hdr bitmap */
	if (proto_mask)
		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the update of proto header based on field values */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
			     uint16_t src_port, uint16_t src_mask,
			     uint16_t dst_port, uint16_t dst_mask,
			     enum bnxt_ulp_hdr_bit hdr_bit)
{
	uint16_t stat_port = 0;

	switch (hdr_bit) {
	case BNXT_ULP_HDR_BIT_I_UDP:
	case BNXT_ULP_HDR_BIT_I_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	case BNXT_ULP_HDR_BIT_O_UDP:
	case BNXT_ULP_HDR_BIT_O_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	default:
		break;
	}

	/* If it is not an outer udp port then there is no need to set tunnel bits */
	if (hdr_bit != BNXT_ULP_HDR_BIT_O_UDP)
		return;

	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT,
			    tfp_be_to_cpu_16(dst_port));

	/* vxlan static customized port */
	if (ULP_APP_STATIC_VXLAN_PORT_EN(params->ulp_ctx)) {
		stat_port = bnxt_ulp_cntxt_vxlan_ip_port_get(params->ulp_ctx);
		if (!stat_port)
			stat_port =
			bnxt_ulp_cntxt_vxlan_port_get(params->ulp_ctx);

		/* If udp and equal to the static vxlan port then set tunnel bits */
		if (stat_port && dst_port == tfp_cpu_to_be_16(stat_port)) {
			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_T_VXLAN);
			ULP_BITMAP_SET(params->cf_bitmap,
				       BNXT_ULP_CF_BIT_IS_TUNNEL);
		}
	} else {
		/* If dynamic vxlan is enabled then skip dport checks */
		if (ULP_APP_DYNAMIC_VXLAN_PORT_EN(params->ulp_ctx))
			return;

		/* Vxlan and GPE port check */
		if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN_GPE)) {
			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
			ULP_BITMAP_SET(params->cf_bitmap,
				       BNXT_ULP_CF_BIT_IS_TUNNEL);
		} else if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_T_VXLAN);
			ULP_BITMAP_SET(params->cf_bitmap,
				       BNXT_ULP_CF_BIT_IS_TUNNEL);
		}
	}
}
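
/*
 * Illustrative example (values assumed): for an outer UDP item whose
 * big-endian dst_port equals 4789 (ULP_UDP_PORT_VXLAN) and with neither
 * the static nor the dynamic VXLAN port option enabled, the code above
 * sets BNXT_ULP_HDR_BIT_T_VXLAN and marks the flow as a tunnel via
 * BNXT_ULP_CF_BIT_IS_TUNNEL; dst_port 4790 selects VXLAN-GPE instead.
 */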

/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;

	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_DRV_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (udp_spec) {
		sport = udp_spec->hdr.src_port;
		dport = udp_spec->hdr.dst_port;
	}
	if (udp_spec && !udp_mask)
		udp_mask = &rte_flow_item_udp_mask;

	if (udp_mask) {
		sport_mask = udp_mask->hdr.src_port;
		dport_mask = udp_mask->hdr.dst_port;
	}

	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
						    BNXT_ULP_PROTO_HDR_UDP_NUM))) {
		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.src_port),
			      ulp_deference_struct(udp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dst_port),
			      ulp_deference_struct(udp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_len),
			      ulp_deference_struct(udp_mask, hdr.dgram_len),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1470 
1471 /* Function to handle the parsing of RTE Flow item TCP Header. */
1472 int32_t
1473 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1474 			struct ulp_rte_parser_params *params)
1475 {
1476 	const struct rte_flow_item_tcp *tcp_spec = item->spec;
1477 	const struct rte_flow_item_tcp *tcp_mask = item->mask;
1478 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1479 	uint32_t idx = 0;
1480 	uint16_t dport = 0, sport = 0;
1481 	uint16_t dport_mask = 0, sport_mask = 0;
1482 	uint32_t size;
1483 	uint32_t cnt;
1484 	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;
1485 
1486 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1487 	if (cnt == 2) {
1488 		BNXT_DRV_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1489 		return BNXT_TF_RC_ERROR;
1490 	}
1491 
1492 	if (tcp_spec) {
1493 		sport = tcp_spec->hdr.src_port;
1494 		dport = tcp_spec->hdr.dst_port;
1495 	}
1496 
1497 	if (tcp_spec && !tcp_mask)
1498 		tcp_mask = &rte_flow_item_tcp_mask;
1499 
1500 	if (tcp_mask) {
1501 		sport_mask = tcp_mask->hdr.src_port;
1502 		dport_mask = tcp_mask->hdr.dst_port;
1503 	}
1504 
1505 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1506 						    BNXT_ULP_PROTO_HDR_TCP_NUM))) {
1507 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1508 		return BNXT_TF_RC_ERROR;
1509 	}
1510 
1511 	/*
1512 	 * Copy the rte_flow_item for tcp into hdr_field using tcp
1513 	 * header fields
1514 	 */
1515 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1516 	ulp_rte_prsr_fld_mask(params, &idx, size,
1517 			      ulp_deference_struct(tcp_spec, hdr.src_port),
1518 			      ulp_deference_struct(tcp_mask, hdr.src_port),
1519 			      ULP_PRSR_ACT_DEFAULT);
1520 
1521 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1522 	ulp_rte_prsr_fld_mask(params, &idx, size,
1523 			      ulp_deference_struct(tcp_spec, hdr.dst_port),
1524 			      ulp_deference_struct(tcp_mask, hdr.dst_port),
1525 			      ULP_PRSR_ACT_DEFAULT);
1526 
1527 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1528 	ulp_rte_prsr_fld_mask(params, &idx, size,
1529 			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
1530 			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
1531 			      ULP_PRSR_ACT_DEFAULT);
1532 
1533 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1534 	ulp_rte_prsr_fld_mask(params, &idx, size,
1535 			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
1536 			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
1537 			      ULP_PRSR_ACT_DEFAULT);
1538 
1539 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1540 	ulp_rte_prsr_fld_mask(params, &idx, size,
1541 			      ulp_deference_struct(tcp_spec, hdr.data_off),
1542 			      ulp_deference_struct(tcp_mask, hdr.data_off),
1543 			      ULP_PRSR_ACT_DEFAULT);
1544 
1545 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1546 	ulp_rte_prsr_fld_mask(params, &idx, size,
1547 			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1548 			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1549 			      ULP_PRSR_ACT_DEFAULT);
1550 
1551 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1552 	ulp_rte_prsr_fld_mask(params, &idx, size,
1553 			      ulp_deference_struct(tcp_spec, hdr.rx_win),
1554 			      ulp_deference_struct(tcp_mask, hdr.rx_win),
1555 			      ULP_PRSR_ACT_DEFAULT);
1556 
1557 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1558 	ulp_rte_prsr_fld_mask(params, &idx, size,
1559 			      ulp_deference_struct(tcp_spec, hdr.cksum),
1560 			      ulp_deference_struct(tcp_mask, hdr.cksum),
1561 			      ULP_PRSR_ACT_DEFAULT);
1562 
1563 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1564 	ulp_rte_prsr_fld_mask(params, &idx, size,
1565 			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1566 			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1567 			      ULP_PRSR_ACT_DEFAULT);
1568 
1569 	/* Set the tcp header bitmap and computed l4 header bitmaps */
1570 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1571 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
1572 	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
1573 		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;
1574 
1575 	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1576 				     dport_mask, out_l4);
1577 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1578 	return BNXT_TF_RC_SUCCESS;
1579 }
1580 
1581 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1582 int32_t
1583 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1584 			  struct ulp_rte_parser_params *params)
1585 {
1586 	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1587 	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1588 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1589 	struct bnxt_ulp_context *ulp_ctx = params->ulp_ctx;
1590 	uint32_t idx = 0;
1591 	uint16_t dport, stat_port;
1592 	uint32_t size;
1593 
1594 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1595 						    BNXT_ULP_PROTO_HDR_VXLAN_NUM))) {
1596 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1597 		return BNXT_TF_RC_ERROR;
1598 	}
1599 
1600 	if (vxlan_spec && !vxlan_mask)
1601 		vxlan_mask = &rte_flow_item_vxlan_mask;
1602 
1603 	/* If the outer headers carry no partial masks, flag exact match only */
1604 	if (!ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_WC_MATCH))
1605 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_OUTER_EM_ONLY, 1);
1606 
1607 	/*
1608 	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1609 	 * header fields
1610 	 */
1611 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.flags);
1612 	ulp_rte_prsr_fld_mask(params, &idx, size,
1613 			      ulp_deference_struct(vxlan_spec, hdr.flags),
1614 			      ulp_deference_struct(vxlan_mask, hdr.flags),
1615 			      ULP_PRSR_ACT_DEFAULT);
1616 
1617 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd0);
1618 	ulp_rte_prsr_fld_mask(params, &idx, size,
1619 			      ulp_deference_struct(vxlan_spec, hdr.rsvd0),
1620 			      ulp_deference_struct(vxlan_mask, hdr.rsvd0),
1621 			      ULP_PRSR_ACT_DEFAULT);
1622 
1623 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.vni);
1624 	ulp_rte_prsr_fld_mask(params, &idx, size,
1625 			      ulp_deference_struct(vxlan_spec, hdr.vni),
1626 			      ulp_deference_struct(vxlan_mask, hdr.vni),
1627 			      ULP_PRSR_ACT_DEFAULT);
1628 
1629 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd1);
1630 	ulp_rte_prsr_fld_mask(params, &idx, size,
1631 			      ulp_deference_struct(vxlan_spec, hdr.rsvd1),
1632 			      ulp_deference_struct(vxlan_mask, hdr.rsvd1),
1633 			      ULP_PRSR_ACT_DEFAULT);
1634 
1635 	/* Update the hdr_bitmap with vxlan */
1636 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1637 	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1638 
1639 	/* If the L4 parser speculatively set the VXLAN-GPE bit, reset it */
1640 	ULP_BITMAP_RESET(params->hdr_fp_bit.bits, BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
1641 
1642 	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
1643 	if (!dport) {
1644 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1645 				    ULP_UDP_PORT_VXLAN);
1646 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1647 				    ULP_UDP_PORT_VXLAN_MASK);
1648 	}
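	/*
	 * Example: "eth / ipv4 / udp / vxlan vni is 10" carries no UDP
	 * dst port spec, so the IANA VXLAN port 4789 and a full 16-bit
	 * mask are assumed here.
	 */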
1649 
1650 	/* statically configured vxlan port */
1651 	if (ULP_APP_STATIC_VXLAN_PORT_EN(ulp_ctx)) {
1652 		stat_port = bnxt_ulp_cntxt_vxlan_ip_port_get(ulp_ctx);
1653 		if (!stat_port)
1654 			stat_port = bnxt_ulp_cntxt_vxlan_port_get(ulp_ctx);
1655 
1656 		/* validate that static ports match if not reject */
1657 		if (dport != 0 && dport != tfp_cpu_to_be_16(stat_port)) {
1658 			BNXT_DRV_DBG(ERR, "ParseErr:vxlan port is not valid\n");
1659 			return BNXT_TF_RC_PARSE_ERR;
1660 		} else if (dport == 0) {
1661 			ULP_COMP_FLD_IDX_WR(params,
1662 					    BNXT_ULP_CF_IDX_TUNNEL_PORT,
1663 					    tfp_cpu_to_be_16(stat_port));
1664 		}
1665 	} else {
1666 		/* dynamic vxlan support */
1667 		if (ULP_APP_DYNAMIC_VXLAN_PORT_EN(params->ulp_ctx)) {
1668 			if (dport == 0) {
1669 				BNXT_DRV_DBG(ERR,
1670 					     "ParseErr:vxlan port is null\n");
1671 				return BNXT_TF_RC_PARSE_ERR;
1672 			}
1673 			/* set the dynamic vxlan port check */
1674 			ULP_BITMAP_SET(params->cf_bitmap,
1675 				       BNXT_ULP_CF_BIT_DYNAMIC_VXLAN_PORT);
1676 			ULP_COMP_FLD_IDX_WR(params,
1677 					    BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1678 		} else if (dport != 0 && dport != ULP_UDP_PORT_VXLAN) {
1679 			/* set the dynamic vxlan port check */
1680 			ULP_BITMAP_SET(params->cf_bitmap,
1681 				       BNXT_ULP_CF_BIT_DYNAMIC_VXLAN_PORT);
1682 			ULP_COMP_FLD_IDX_WR(params,
1683 					    BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1684 		} else {
1685 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT,
1686 					    ULP_UDP_PORT_VXLAN);
1687 		}
1688 	}
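	/*
	 * Net effect when no static port is configured: with dynamic VXLAN
	 * ports enabled, a flow must pin a UDP dst port, which becomes the
	 * tunnel port; with it disabled, a non-4789 dst port is still
	 * tracked as a dynamic tunnel port, and everything else defaults
	 * to 4789.
	 */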
1689 	return BNXT_TF_RC_SUCCESS;
1690 }
1691 
1692 /* Function to handle the parsing of RTE Flow item Vxlan GPE Header. */
1693 int32_t
1694 ulp_rte_vxlan_gpe_hdr_handler(const struct rte_flow_item *item,
1695 			      struct ulp_rte_parser_params *params)
1696 {
1697 	const struct rte_flow_item_vxlan_gpe *vxlan_gpe_spec = item->spec;
1698 	const struct rte_flow_item_vxlan_gpe *vxlan_gpe_mask = item->mask;
1699 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1700 	uint32_t idx = 0;
1701 	uint16_t dport;
1702 	uint32_t size;
1703 
1704 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1705 						    BNXT_ULP_PROTO_HDR_VXLAN_GPE_NUM))) {
1706 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1707 		return BNXT_TF_RC_ERROR;
1708 	}
1709 
1710 	if (vxlan_gpe_spec && !vxlan_gpe_mask)
1711 		vxlan_gpe_mask = &rte_flow_item_vxlan_gpe_mask;
1712 	/*
1713 	 * Copy the rte_flow_item for vxlan gpe into hdr_field using
1714 	 * vxlan gpe header fields
1715 	 */
1716 	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->flags);
1717 	ulp_rte_prsr_fld_mask(params, &idx, size,
1718 			      ulp_deference_struct(vxlan_gpe_spec, flags),
1719 			      ulp_deference_struct(vxlan_gpe_mask, flags),
1720 			      ULP_PRSR_ACT_DEFAULT);
1721 
1722 	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->rsvd0);
1723 	ulp_rte_prsr_fld_mask(params, &idx, size,
1724 			      ulp_deference_struct(vxlan_gpe_spec, rsvd0),
1725 			      ulp_deference_struct(vxlan_gpe_mask, rsvd0),
1726 			      ULP_PRSR_ACT_DEFAULT);
1727 
1728 	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->protocol);
1729 	ulp_rte_prsr_fld_mask(params, &idx, size,
1730 			      ulp_deference_struct(vxlan_gpe_spec, protocol),
1731 			      ulp_deference_struct(vxlan_gpe_mask, protocol),
1732 			      ULP_PRSR_ACT_DEFAULT);
1733 
1734 	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->vni);
1735 	ulp_rte_prsr_fld_mask(params, &idx, size,
1736 			      ulp_deference_struct(vxlan_gpe_spec, vni),
1737 			      ulp_deference_struct(vxlan_gpe_mask, vni),
1738 			      ULP_PRSR_ACT_DEFAULT);
1739 
1740 	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->rsvd1);
1741 	ulp_rte_prsr_fld_mask(params, &idx, size,
1742 			      ulp_deference_struct(vxlan_gpe_spec, rsvd1),
1743 			      ulp_deference_struct(vxlan_gpe_mask, rsvd1),
1744 			      ULP_PRSR_ACT_DEFAULT);
1745 
1746 	/* Update the hdr_bitmap with vxlan gpe */
1747 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
1748 	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1749 
1750 	/* If the L4 parser speculatively set the VXLAN bit, reset it */
1751 	ULP_BITMAP_RESET(params->hdr_fp_bit.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1752 
1753 	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
1754 	if (!dport) {
1755 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1756 				    ULP_UDP_PORT_VXLAN_GPE);
1757 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1758 				    ULP_UDP_PORT_VXLAN_GPE_MASK);
1759 	}
1760 	/* TBD: currently dynamic or static gpe port config is not supported */
1761 	/* Update the tunnel port */
1762 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1763 
1764 	/* Verify the vxlan gpe port */
1765 	if (dport != 0 && dport != ULP_UDP_PORT_VXLAN_GPE) {
1766 		BNXT_DRV_DBG(ERR, "ParseErr:vxlan gpe port is not valid\n");
1767 		return BNXT_TF_RC_PARSE_ERR;
1768 	}
1769 	return BNXT_TF_RC_SUCCESS;
1770 }
1771 
1772 /* Function to handle the parsing of RTE Flow item GENEVE Header. */
1773 int32_t
1774 ulp_rte_geneve_hdr_handler(const struct rte_flow_item *item,
1775 			      struct ulp_rte_parser_params *params)
1776 {
1777 	const struct rte_flow_item_geneve *geneve_spec = item->spec;
1778 	const struct rte_flow_item_geneve *geneve_mask = item->mask;
1779 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1780 	uint32_t idx = 0;
1781 	uint16_t dport;
1782 	uint32_t size;
1783 
1784 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1785 						    BNXT_ULP_PROTO_HDR_GENEVE_NUM))) {
1786 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1787 		return BNXT_TF_RC_ERROR;
1788 	}
1789 
1790 	if (geneve_spec && !geneve_mask)
1791 		geneve_mask = &rte_flow_item_geneve_mask;
1792 
1793 	/*
1794 	 * Copy the rte_flow_item for geneve into hdr_field using geneve
1795 	 * header fields
1796 	 */
1797 	size = sizeof(((struct rte_flow_item_geneve *)NULL)->ver_opt_len_o_c_rsvd0);
1798 	ulp_rte_prsr_fld_mask(params, &idx, size,
1799 			      ulp_deference_struct(geneve_spec, ver_opt_len_o_c_rsvd0),
1800 			      ulp_deference_struct(geneve_mask, ver_opt_len_o_c_rsvd0),
1801 			      ULP_PRSR_ACT_DEFAULT);
1802 
1803 	size = sizeof(((struct rte_flow_item_geneve *)NULL)->protocol);
1804 	ulp_rte_prsr_fld_mask(params, &idx, size,
1805 			      ulp_deference_struct(geneve_spec, protocol),
1806 			      ulp_deference_struct(geneve_mask, protocol),
1807 			      ULP_PRSR_ACT_DEFAULT);
1808 
1809 	size = sizeof(((struct rte_flow_item_geneve *)NULL)->vni);
1810 	ulp_rte_prsr_fld_mask(params, &idx, size,
1811 			      ulp_deference_struct(geneve_spec, vni),
1812 			      ulp_deference_struct(geneve_mask, vni),
1813 			      ULP_PRSR_ACT_DEFAULT);
1814 
1815 	size = sizeof(((struct rte_flow_item_geneve *)NULL)->rsvd1);
1816 	ulp_rte_prsr_fld_mask(params, &idx, size,
1817 			      ulp_deference_struct(geneve_spec, rsvd1),
1818 			      ulp_deference_struct(geneve_mask, rsvd1),
1819 			      ULP_PRSR_ACT_DEFAULT);
1820 
1821 	/* Update the hdr_bitmap with geneve */
1822 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GENEVE);
1823 	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1824 
1825 	/* update the tunnel port */
1826 	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
1827 	if (ULP_APP_DYNAMIC_GENEVE_PORT_EN(params->ulp_ctx)) {
1828 		if (dport == 0) {
1829 			BNXT_DRV_DBG(ERR, "ParseErr:geneve port is null\n");
1830 			return BNXT_TF_RC_PARSE_ERR;
1831 		}
1832 		/* set the dynamic geneve port check */
1833 		ULP_BITMAP_SET(params->cf_bitmap,
1834 			       BNXT_ULP_CF_BIT_DYNAMIC_GENEVE_PORT);
1835 		ULP_COMP_FLD_IDX_WR(params,
1836 				    BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1837 	} else {
1838 		if (dport == 0) {
1839 			ULP_COMP_FLD_IDX_WR(params,
1840 					    BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1841 					    ULP_UDP_PORT_GENEVE);
1842 			ULP_COMP_FLD_IDX_WR(params,
1843 					    BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1844 					    ULP_UDP_PORT_GENEVE_MASK);
1845 		} else if (dport != ULP_UDP_PORT_GENEVE) {
1846 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT,
1847 					    dport);
1848 			ULP_BITMAP_SET(params->cf_bitmap,
1849 				       BNXT_ULP_CF_BIT_DYNAMIC_GENEVE_PORT);
1850 		}
1851 	}
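	/*
	 * In the non-dynamic path above: a missing UDP dst port defaults to
	 * the IANA GENEVE port 6081, while any non-default port is recorded
	 * as a dynamic tunnel port.
	 */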
1852 	return BNXT_TF_RC_SUCCESS;
1853 }
1854 
1855 /* Function to handle the parsing of RTE Flow item GRE Header. */
1856 int32_t
1857 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1858 			struct ulp_rte_parser_params *params)
1859 {
1860 	const struct rte_flow_item_gre *gre_spec = item->spec;
1861 	const struct rte_flow_item_gre *gre_mask = item->mask;
1862 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1863 	uint32_t idx = 0;
1864 	uint32_t size;
1865 
1866 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1867 						    BNXT_ULP_PROTO_HDR_GRE_NUM))) {
1868 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1869 		return BNXT_TF_RC_ERROR;
1870 	}
1871 
1872 	if (gre_spec && !gre_mask)
1873 		gre_mask = &rte_flow_item_gre_mask;
1874 
1875 	size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1876 	ulp_rte_prsr_fld_mask(params, &idx, size,
1877 			      ulp_deference_struct(gre_spec, c_rsvd0_ver),
1878 			      ulp_deference_struct(gre_mask, c_rsvd0_ver),
1879 			      ULP_PRSR_ACT_DEFAULT);
1880 
1881 	size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1882 	ulp_rte_prsr_fld_mask(params, &idx, size,
1883 			      ulp_deference_struct(gre_spec, protocol),
1884 			      ulp_deference_struct(gre_mask, protocol),
1885 			      ULP_PRSR_ACT_DEFAULT);
1886 
1887 	/* Update the hdr_bitmap with GRE */
1888 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1889 	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1890 	return BNXT_TF_RC_SUCCESS;
1891 }
1892 
1893 /* Function to handle the parsing of RTE Flow item ANY. */
1894 int32_t
1895 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1896 			 struct ulp_rte_parser_params *params __rte_unused)
1897 {
1898 	return BNXT_TF_RC_SUCCESS;
1899 }
1900 
1901 /* Function to handle the parsing of RTE Flow item ICMP Header. */
1902 int32_t
1903 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1904 			 struct ulp_rte_parser_params *params)
1905 {
1906 	const struct rte_flow_item_icmp *icmp_spec = item->spec;
1907 	const struct rte_flow_item_icmp *icmp_mask = item->mask;
1908 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1909 	uint32_t idx = 0;
1910 	uint32_t size;
1911 
1912 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1913 						    BNXT_ULP_PROTO_HDR_ICMP_NUM))) {
1914 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1915 		return BNXT_TF_RC_ERROR;
1916 	}
1917 
1918 	if (icmp_spec && !icmp_mask)
1919 		icmp_mask = &rte_flow_item_icmp_mask;
1920 
1921 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1922 	ulp_rte_prsr_fld_mask(params, &idx, size,
1923 			      ulp_deference_struct(icmp_spec, hdr.icmp_type),
1924 			      ulp_deference_struct(icmp_mask, hdr.icmp_type),
1925 			      ULP_PRSR_ACT_DEFAULT);
1926 
1927 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1928 	ulp_rte_prsr_fld_mask(params, &idx, size,
1929 			      ulp_deference_struct(icmp_spec, hdr.icmp_code),
1930 			      ulp_deference_struct(icmp_mask, hdr.icmp_code),
1931 			      ULP_PRSR_ACT_DEFAULT);
1932 
1933 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1934 	ulp_rte_prsr_fld_mask(params, &idx, size,
1935 			      ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1936 			      ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1937 			      ULP_PRSR_ACT_DEFAULT);
1938 
1939 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1940 	ulp_rte_prsr_fld_mask(params, &idx, size,
1941 			      ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1942 			      ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1943 			      ULP_PRSR_ACT_DEFAULT);
1944 
1945 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1946 	ulp_rte_prsr_fld_mask(params, &idx, size,
1947 			      ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1948 			      ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1949 			      ULP_PRSR_ACT_DEFAULT);
1950 
1951 	/* Update the hdr_bitmap with ICMP */
1952 	if (ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
1953 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1954 	else
1955 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1956 	return BNXT_TF_RC_SUCCESS;
1957 }
1958 
1959 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
1960 int32_t
1961 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1962 			  struct ulp_rte_parser_params *params)
1963 {
1964 	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1965 	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1966 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1967 	uint32_t idx = 0;
1968 	uint32_t size;
1969 
1970 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1971 						    BNXT_ULP_PROTO_HDR_ICMP_NUM))) {
1972 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1973 		return BNXT_TF_RC_ERROR;
1974 	}
1975 
1976 	if (icmp_spec && !icmp_mask)
1977 		icmp_mask = &rte_flow_item_icmp6_mask;
1978 
1979 	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1980 	ulp_rte_prsr_fld_mask(params, &idx, size,
1981 			      ulp_deference_struct(icmp_spec, type),
1982 			      ulp_deference_struct(icmp_mask, type),
1983 			      ULP_PRSR_ACT_DEFAULT);
1984 
1985 	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1986 	ulp_rte_prsr_fld_mask(params, &idx, size,
1987 			      ulp_deference_struct(icmp_spec, code),
1988 			      ulp_deference_struct(icmp_mask, code),
1989 			      ULP_PRSR_ACT_DEFAULT);
1990 
1991 	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1992 	ulp_rte_prsr_fld_mask(params, &idx, size,
1993 			      ulp_deference_struct(icmp_spec, checksum),
1994 			      ulp_deference_struct(icmp_mask, checksum),
1995 			      ULP_PRSR_ACT_DEFAULT);
1996 
1997 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1998 		BNXT_DRV_DBG(ERR, "Error: incorrect icmp version\n");
1999 		return BNXT_TF_RC_ERROR;
2000 	}
2001 
2002 	/* Update the hdr_bitmap with ICMP */
2003 	if (ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
2004 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
2005 	else
2006 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
2007 	return BNXT_TF_RC_SUCCESS;
2008 }
2009 
2010 /* Function to handle the parsing of RTE Flow item ECPRI Header. */
2011 int32_t
2012 ulp_rte_ecpri_hdr_handler(const struct rte_flow_item *item,
2013 			  struct ulp_rte_parser_params *params)
2014 {
2015 	const struct rte_flow_item_ecpri *ecpri_spec = item->spec;
2016 	const struct rte_flow_item_ecpri *ecpri_mask = item->mask;
2017 	struct rte_flow_item_ecpri l_ecpri_spec, l_ecpri_mask;
2018 	struct rte_flow_item_ecpri *p_ecpri_spec = &l_ecpri_spec;
2019 	struct rte_flow_item_ecpri *p_ecpri_mask = &l_ecpri_mask;
2020 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
2021 	uint32_t idx = 0, cnt;
2022 	uint32_t size;
2023 
2024 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
2025 						    BNXT_ULP_PROTO_HDR_ECPRI_NUM))) {
2026 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
2027 		return BNXT_TF_RC_ERROR;
2028 	}
2029 
2030 	if (ecpri_spec && !ecpri_mask)
2031 		ecpri_mask = &rte_flow_item_ecpri_mask;
2032 
2033 	/* eCPRI nested inside L4 (UDP) is not supported for now; reject it */
2034 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
2035 	if (cnt >= 1) {
2036 		BNXT_DRV_DBG(ERR, "Parse Err: L4 header stack >= 2 not supported\n");
2037 		return BNXT_TF_RC_ERROR;
2038 	}
2039 
2040 	if (!ecpri_spec || !ecpri_mask)
2041 		goto parser_set_ecpri_hdr_bit;
2042 
2043 	memcpy(p_ecpri_spec, ecpri_spec, sizeof(*ecpri_spec));
2044 	memcpy(p_ecpri_mask, ecpri_mask, sizeof(*ecpri_mask));
2045 
2046 	p_ecpri_spec->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_spec->hdr.common.u32);
2047 	p_ecpri_mask->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_mask->hdr.common.u32);
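	/*
	 * The common-header dword is converted to CPU byte order so the
	 * revision/c/size bitfields below can be edited portably; it is
	 * converted back to big-endian before being copied into hdr_field.
	 */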
2048 
2049 	/*
2050 	 * Init eCPRI spec+mask to correct defaults, also clear masks of fields
2051 	 * we ignore in the TCAM.
2052 	 */
2053 
2054 	l_ecpri_spec.hdr.common.size = 0;
2055 	l_ecpri_spec.hdr.common.c = 0;
2056 	l_ecpri_spec.hdr.common.res = 0;
2057 	l_ecpri_spec.hdr.common.revision = 1;
2058 	l_ecpri_mask.hdr.common.size = 0;
2059 	l_ecpri_mask.hdr.common.c = 1;
2060 	l_ecpri_mask.hdr.common.res = 0;
2061 	l_ecpri_mask.hdr.common.revision = 0xf;
2062 
2063 	switch (p_ecpri_spec->hdr.common.type) {
2064 	case RTE_ECPRI_MSG_TYPE_IQ_DATA:
2065 		l_ecpri_mask.hdr.type0.seq_id = 0;
2066 		break;
2067 
2068 	case RTE_ECPRI_MSG_TYPE_BIT_SEQ:
2069 		l_ecpri_mask.hdr.type1.seq_id = 0;
2070 		break;
2071 
2072 	case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
2073 		l_ecpri_mask.hdr.type2.seq_id = 0;
2074 		break;
2075 
2076 	case RTE_ECPRI_MSG_TYPE_GEN_DATA:
2077 		l_ecpri_mask.hdr.type3.seq_id = 0;
2078 		break;
2079 
2080 	case RTE_ECPRI_MSG_TYPE_RM_ACC:
2081 		l_ecpri_mask.hdr.type4.rr = 0;
2082 		l_ecpri_mask.hdr.type4.rw = 0;
2083 		l_ecpri_mask.hdr.type4.rma_id = 0;
2084 		break;
2085 
2086 	case RTE_ECPRI_MSG_TYPE_DLY_MSR:
2087 		l_ecpri_spec.hdr.type5.act_type = 0;
2088 		break;
2089 
2090 	case RTE_ECPRI_MSG_TYPE_RMT_RST:
2091 		l_ecpri_spec.hdr.type6.rst_op = 0;
2092 		break;
2093 
2094 	case RTE_ECPRI_MSG_TYPE_EVT_IND:
2095 		l_ecpri_spec.hdr.type7.evt_type = 0;
2096 		l_ecpri_spec.hdr.type7.seq = 0;
2097 		l_ecpri_spec.hdr.type7.number = 0;
2098 		break;
2099 
2100 	default:
2101 		break;
2102 	}
2103 
2104 	p_ecpri_spec->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_spec->hdr.common.u32);
2105 	p_ecpri_mask->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_mask->hdr.common.u32);
2106 
2107 	/* Type */
2108 	size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.common.u32);
2109 	ulp_rte_prsr_fld_mask(params, &idx, size,
2110 			      ulp_deference_struct(p_ecpri_spec, hdr.common.u32),
2111 			      ulp_deference_struct(p_ecpri_mask, hdr.common.u32),
2112 			      ULP_PRSR_ACT_DEFAULT);
2113 
2114 	/* PC/RTC/MSR_ID */
2115 	size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.dummy[0]);
2116 	ulp_rte_prsr_fld_mask(params, &idx, size,
2117 			      ulp_deference_struct(p_ecpri_spec, hdr.dummy),
2118 			      ulp_deference_struct(p_ecpri_mask, hdr.dummy),
2119 			      ULP_PRSR_ACT_DEFAULT);
2120 
2121 parser_set_ecpri_hdr_bit:
2122 	/* Update the hdr_bitmap with eCPRI */
2123 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ECPRI);
2124 	return BNXT_TF_RC_SUCCESS;
2125 }
2126 
2127 /* Function to handle the parsing of RTE Flow item void Header. */
2128 int32_t
2129 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
2130 			 struct ulp_rte_parser_params *params __rte_unused)
2131 {
2132 	return BNXT_TF_RC_SUCCESS;
2133 }
2134 
2135 /* Function to handle the parsing of RTE Flow action void Header. */
2136 int32_t
2137 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
2138 			 struct ulp_rte_parser_params *params __rte_unused)
2139 {
2140 	return BNXT_TF_RC_SUCCESS;
2141 }
2142 
2143 /* Function to handle the parsing of RTE Flow action Mark Header. */
2144 int32_t
2145 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
2146 			 struct ulp_rte_parser_params *param)
2147 {
2148 	const struct rte_flow_action_mark *mark;
2149 	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
2150 	uint32_t mark_id;
2151 
2152 	mark = action_item->conf;
2153 	if (mark) {
2154 		mark_id = tfp_cpu_to_be_32(mark->id);
2155 		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
2156 		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
2157 
2158 		/* Update the act_bitmap with mark */
2159 		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
2160 		return BNXT_TF_RC_SUCCESS;
2161 	}
2162 	BNXT_DRV_DBG(ERR, "Parse Error: Mark arg is invalid\n");
2163 	return BNXT_TF_RC_ERROR;
2164 }
2165 
2166 /* Function to handle the parsing of RTE Flow action RSS Header. */
2167 int32_t
2168 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
2169 			struct ulp_rte_parser_params *param)
2170 {
2171 	const struct rte_flow_action_rss *rss;
2172 	struct ulp_rte_act_prop *ap = &param->act_prop;
2173 	uint64_t queue_list[BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE / sizeof(uint64_t)];
2174 	uint32_t idx = 0, id;
2175 
2176 	if (action_item == NULL || action_item->conf == NULL) {
2177 		BNXT_DRV_DBG(ERR, "Parse Err: invalid rss configuration\n");
2178 		return BNXT_TF_RC_ERROR;
2179 	}
2180 
2181 	rss = action_item->conf;
2182 	/* Copy the rss into the specific action properties */
2183 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_FUNC], &rss->func,
2184 	       BNXT_ULP_ACT_PROP_SZ_RSS_FUNC);
2185 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
2186 	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
2187 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
2188 	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
2189 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
2190 	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
2191 
2192 	if (rss->key_len != 0 && rss->key_len != BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
2193 		BNXT_DRV_DBG(ERR, "Parse Err: RSS key length must be 40 bytes\n");
2194 		return BNXT_TF_RC_ERROR;
2195 	}
2196 
2197 	/* User may specify only the key length; in that case rss->key is NULL.
2198 	 * So, reject the flow if key_len is non-zero but rss->key is NULL.
2199 	 * Also, copy the RSS hash key only when rss->key is valid.
2200 	 */
2201 	if (rss->key_len != 0 && rss->key == NULL) {
2202 		BNXT_DRV_DBG(ERR,
2203 			    "Parse Err: A valid RSS key must be provided with a valid key len.\n");
2204 		return BNXT_TF_RC_ERROR;
2205 	}
2206 	if (rss->key)
2207 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key, rss->key_len);
2208 
2209 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM],
2210 	       &rss->queue_num, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE_NUM);
2211 
2212 	if (rss->queue_num >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) {
2213 		BNXT_DRV_DBG(ERR, "Parse Err: RSS queue num too big\n");
2214 		return BNXT_TF_RC_ERROR;
2215 	}
2216 
2217 	/* Convert the queue list into a bitmap format */
2218 	memset(queue_list, 0, sizeof(queue_list));
2219 	for (idx = 0; idx < rss->queue_num; idx++) {
2220 		id = rss->queue[idx];
2221 		if (id >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) {
2222 			BNXT_DRV_DBG(ERR, "Parse Err: RSS queue id too big\n");
2223 			return BNXT_TF_RC_ERROR;
2224 		}
2225 		if ((queue_list[id / ULP_INDEX_BITMAP_SIZE] >>
2226 		    ((ULP_INDEX_BITMAP_SIZE - 1) -
2227 		     (id % ULP_INDEX_BITMAP_SIZE)) & 1)) {
2228 			BNXT_DRV_DBG(ERR, "Parse Err: duplicate queue ids\n");
2229 			return BNXT_TF_RC_ERROR;
2230 		}
2231 		queue_list[id / ULP_INDEX_BITMAP_SIZE] |= (1UL <<
2232 		((ULP_INDEX_BITMAP_SIZE - 1) - (id % ULP_INDEX_BITMAP_SIZE)));
2233 	}
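	/*
	 * Worked example, assuming 64-bit bitmap words: queue id 0 sets the
	 * most significant bit of queue_list[0] (1UL << 63) and queue id 65
	 * sets bit 62 of queue_list[1]; ids fill each word MSB first.
	 */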
2234 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE],
2235 	       (uint8_t *)queue_list, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE);
2236 
2237 	/* set the RSS action header bit */
2238 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
2239 
2240 	return BNXT_TF_RC_SUCCESS;
2241 }
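
/*
 * Illustrative application-side RSS action accepted by the handler above;
 * a sketch only, not part of the driver (rss_key is a hypothetical
 * 40-byte buffer):
 *
 *	uint16_t queues[4] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *		.types = RTE_ETH_RSS_IP,
 *		.key_len = 40,
 *		.key = rss_key,
 *		.queue_num = 4,
 *		.queue = queues,
 *	};
 *	struct rte_flow_action acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */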
2242 
2243 /* Function to handle the parsing of RTE Flow item eth Header. */
2244 static void
2245 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
2246 			    const struct rte_flow_item_eth *eth_spec)
2247 {
2248 	struct ulp_rte_hdr_field *field;
2249 	uint32_t size;
2250 
2251 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
2252 	size = sizeof(eth_spec->hdr.dst_addr.addr_bytes);
2253 	field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.dst_addr.addr_bytes, size);
2254 
2255 	size = sizeof(eth_spec->hdr.src_addr.addr_bytes);
2256 	field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.src_addr.addr_bytes, size);
2257 
2258 	size = sizeof(eth_spec->hdr.ether_type);
2259 	field = ulp_rte_parser_fld_copy(field, &eth_spec->hdr.ether_type, size);
2260 
2261 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
2262 }
2263 
2264 /* Function to handle the parsing of RTE Flow item vlan Header. */
2265 static void
2266 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
2267 			     const struct rte_flow_item_vlan *vlan_spec,
2268 			     uint32_t inner)
2269 {
2270 	struct ulp_rte_hdr_field *field;
2271 	uint32_t size;
2272 
2273 	if (!inner) {
2274 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
2275 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
2276 			       BNXT_ULP_HDR_BIT_OO_VLAN);
2277 	} else {
2278 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
2279 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
2280 			       BNXT_ULP_HDR_BIT_OI_VLAN);
2281 	}
2282 
2283 	size = sizeof(vlan_spec->hdr.vlan_tci);
2284 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.vlan_tci, size);
2285 
2286 	size = sizeof(vlan_spec->hdr.eth_proto);
2287 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.eth_proto, size);
2288 }
2289 
2290 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
2291 static void
2292 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
2293 			     const struct rte_flow_item_ipv4 *ip)
2294 {
2295 	struct ulp_rte_hdr_field *field;
2296 	uint32_t size;
2297 	uint8_t val8;
2298 
2299 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
2300 	size = sizeof(ip->hdr.version_ihl);
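	/* Default to RTE_IPV4_VHL_DEF (0x45: IPv4, 5-word header) if unset */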
2301 	if (!ip->hdr.version_ihl)
2302 		val8 = RTE_IPV4_VHL_DEF;
2303 	else
2304 		val8 = ip->hdr.version_ihl;
2305 	field = ulp_rte_parser_fld_copy(field, &val8, size);
2306 
2307 	size = sizeof(ip->hdr.type_of_service);
2308 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
2309 
2310 	size = sizeof(ip->hdr.packet_id);
2311 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
2312 
2313 	size = sizeof(ip->hdr.fragment_offset);
2314 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
2315 
2316 	size = sizeof(ip->hdr.time_to_live);
2317 	if (!ip->hdr.time_to_live)
2318 		val8 = BNXT_ULP_DEFAULT_TTL;
2319 	else
2320 		val8 = ip->hdr.time_to_live;
2321 	field = ulp_rte_parser_fld_copy(field, &val8, size);
2322 
2323 	size = sizeof(ip->hdr.next_proto_id);
2324 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
2325 
2326 	size = sizeof(ip->hdr.src_addr);
2327 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
2328 
2329 	size = sizeof(ip->hdr.dst_addr);
2330 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
2331 
2332 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
2333 }
2334 
2335 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
2336 static void
2337 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
2338 			     const struct rte_flow_item_ipv6 *ip)
2339 {
2340 	struct ulp_rte_hdr_field *field;
2341 	uint32_t size;
2342 	uint32_t val32;
2343 	uint8_t val8;
2344 
2345 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
2346 	size = sizeof(ip->hdr.vtc_flow);
2347 	if (!ip->hdr.vtc_flow)
2348 		val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
2349 	else
2350 		val32 = ip->hdr.vtc_flow;
2351 	field = ulp_rte_parser_fld_copy(field, &val32, size);
2352 
2353 	size = sizeof(ip->hdr.proto);
2354 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
2355 
2356 	size = sizeof(ip->hdr.hop_limits);
2357 	if (!ip->hdr.hop_limits)
2358 		val8 = BNXT_ULP_DEFAULT_TTL;
2359 	else
2360 		val8 = ip->hdr.hop_limits;
2361 	field = ulp_rte_parser_fld_copy(field, &val8, size);
2362 
2363 	size = sizeof(ip->hdr.src_addr);
2364 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
2365 
2366 	size = sizeof(ip->hdr.dst_addr);
2367 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
2368 
2369 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
2370 }
2371 
2372 /* Function to handle the parsing of RTE Flow item UDP Header. */
2373 static void
2374 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
2375 			    const struct rte_flow_item_udp *udp_spec)
2376 {
2377 	struct ulp_rte_hdr_field *field;
2378 	uint32_t size;
2379 	uint8_t type = IPPROTO_UDP;
2380 
2381 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
2382 	size = sizeof(udp_spec->hdr.src_port);
2383 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
2384 
2385 	size = sizeof(udp_spec->hdr.dst_port);
2386 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
2387 
2388 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
2389 
2390 	/* Update the ip header protocol */
2391 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
2392 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
2393 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
2394 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
2395 }
2396 
2397 /* Function to handle the parsing of RTE Flow item vxlan Header. */
2398 static void
2399 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
2400 			      struct rte_flow_item_vxlan *vxlan_spec)
2401 {
2402 	struct ulp_rte_hdr_field *field;
2403 	uint32_t size;
2404 
2405 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
2406 	size = sizeof(vxlan_spec->hdr.flags);
2407 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.flags, size);
2408 
2409 	size = sizeof(vxlan_spec->hdr.rsvd0);
2410 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd0, size);
2411 
2412 	size = sizeof(vxlan_spec->hdr.vni);
2413 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.vni, size);
2414 
2415 	size = sizeof(vxlan_spec->hdr.rsvd1);
2416 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd1, size);
2417 
2418 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
2419 }
2420 
2421 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
2422 int32_t
2423 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
2424 				struct ulp_rte_parser_params *params)
2425 {
2426 	const struct rte_flow_action_vxlan_encap *vxlan_encap;
2427 	const struct rte_flow_item *item;
2428 	const struct rte_flow_item_ipv4 *ipv4_spec;
2429 	const struct rte_flow_item_ipv6 *ipv6_spec;
2430 	struct rte_flow_item_vxlan vxlan_spec;
2431 	uint32_t vlan_num = 0, vlan_size = 0;
2432 	uint32_t ip_size = 0, ip_type = 0;
2433 	uint32_t vxlan_size = 0;
2434 	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
2435 	struct ulp_rte_act_prop *ap = &params->act_prop;
2436 
2437 	vxlan_encap = action_item->conf;
2438 	if (!vxlan_encap) {
2439 		BNXT_DRV_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
2440 		return BNXT_TF_RC_ERROR;
2441 	}
2442 
2443 	item = vxlan_encap->definition;
2444 	if (!item) {
2445 		BNXT_DRV_DBG(ERR, "Parse Error: definition arg is invalid\n");
2446 		return BNXT_TF_RC_ERROR;
2447 	}
2448 
2449 	if (!ulp_rte_item_skip_void(&item, 0))
2450 		return BNXT_TF_RC_ERROR;
2451 
2452 	/* must have ethernet header */
2453 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2454 		BNXT_DRV_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
2455 		return BNXT_TF_RC_ERROR;
2456 	}
2457 
2458 	/* Parse the ethernet header */
2459 	if (item->spec)
2460 		ulp_rte_enc_eth_hdr_handler(params, item->spec);
2461 
2462 	/* Goto the next item */
2463 	if (!ulp_rte_item_skip_void(&item, 1))
2464 		return BNXT_TF_RC_ERROR;
2465 
2466 	/* May have vlan header */
2467 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2468 		vlan_num++;
2469 		if (item->spec)
2470 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
2471 
2472 		if (!ulp_rte_item_skip_void(&item, 1))
2473 			return BNXT_TF_RC_ERROR;
2474 	}
2475 
2476 	/* may have two vlan headers */
2477 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2478 		vlan_num++;
2479 		if (item->spec)
2480 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
2481 
2482 		if (!ulp_rte_item_skip_void(&item, 1))
2483 			return BNXT_TF_RC_ERROR;
2484 	}
2485 
2486 	/* Update the vlan count and total size if any vlan headers were found */
2487 	if (vlan_num) {
2488 		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2489 		vlan_num = tfp_cpu_to_be_32(vlan_num);
2490 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2491 		       &vlan_num,
2492 		       sizeof(uint32_t));
2493 		vlan_size = tfp_cpu_to_be_32(vlan_size);
2494 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2495 		       &vlan_size,
2496 		       sizeof(uint32_t));
2497 	}
2498 
2499 	/* L3 must be IPv4 or IPv6 */
2500 	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2501 		ipv4_spec = item->spec;
2502 		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2503 
2504 		/* Update the ip size details */
2505 		ip_size = tfp_cpu_to_be_32(ip_size);
2506 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2507 		       &ip_size, sizeof(uint32_t));
2508 
2509 		/* update the ip type */
2510 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2511 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2512 		       &ip_type, sizeof(uint32_t));
2513 
2514 		/* update the computed field to notify it is ipv4 header */
2515 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2516 				    1);
2517 		if (ipv4_spec)
2518 			ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2519 
2520 		if (!ulp_rte_item_skip_void(&item, 1))
2521 			return BNXT_TF_RC_ERROR;
2522 	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2523 		ipv6_spec = item->spec;
2524 		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2525 
2526 		/* Update the ip size details */
2527 		ip_size = tfp_cpu_to_be_32(ip_size);
2528 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2529 		       &ip_size, sizeof(uint32_t));
2530 
2531 		/* update the ip type */
2532 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2533 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2534 		       &ip_type, sizeof(uint32_t));
2535 
2536 		/* update the computed field to notify it is ipv6 header */
2537 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2538 				    1);
2539 		if (ipv6_spec)
2540 			ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2541 
2542 		if (!ulp_rte_item_skip_void(&item, 1))
2543 			return BNXT_TF_RC_ERROR;
2544 	} else {
2545 		BNXT_DRV_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2546 		return BNXT_TF_RC_ERROR;
2547 	}
2548 
2549 	/* L4 is UDP */
2550 	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2551 		BNXT_DRV_DBG(ERR, "vxlan encap does not have udp\n");
2552 		return BNXT_TF_RC_ERROR;
2553 	}
2554 	if (item->spec)
2555 		ulp_rte_enc_udp_hdr_handler(params, item->spec);
2556 
2557 	if (!ulp_rte_item_skip_void(&item, 1))
2558 		return BNXT_TF_RC_ERROR;
2559 
2560 	/* Finally VXLAN */
2561 	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2562 		BNXT_DRV_DBG(ERR, "vxlan encap does not have vni\n");
2563 		return BNXT_TF_RC_ERROR;
2564 	}
2565 	vxlan_size = sizeof(struct rte_flow_item_vxlan);
2566 	/* copy the vxlan details */
2567 	memcpy(&vxlan_spec, item->spec, vxlan_size);
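	/* Force the I (valid VNI) flag, bit 3 of the flags byte (RFC 7348) */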
2568 	vxlan_spec.hdr.flags = 0x08;
2569 	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2570 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2571 	       &vxlan_size, sizeof(uint32_t));
2572 
2573 	ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2574 
2575 	/* update the act_bitmap with vxlan encap */
2576 	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2577 	return BNXT_TF_RC_SUCCESS;
2578 }
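
/*
 * Sketch of an application-side encap definition accepted by the handler
 * above (illustrative only; the eth/ip/udp/vxlan specs are hypothetical):
 *
 *	struct rte_flow_item defs[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ip },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vxlan_encap conf = { .definition = defs };
 */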
2579 
2580 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
2581 int32_t
2582 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2583 				__rte_unused,
2584 				struct ulp_rte_parser_params *params)
2585 {
2586 	/* update the act_bitmap with vxlan decap */
2587 	ULP_BITMAP_SET(params->act_bitmap.bits,
2588 		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2589 	/* Update computational field with tunnel decap info */
2590 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2591 	return BNXT_TF_RC_SUCCESS;
2592 }
2593 
2594 /* Function to handle the parsing of RTE Flow action drop Header. */
2595 int32_t
2596 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2597 			 struct ulp_rte_parser_params *params)
2598 {
2599 	/* Update the act_bitmap with drop */
2600 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2601 	return BNXT_TF_RC_SUCCESS;
2602 }
2603 
2604 /* Function to handle the parsing of RTE Flow action count. */
2605 int32_t
2606 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2607 			  struct ulp_rte_parser_params *params)
2608 {
2609 	const struct rte_flow_action_count *act_count;
2610 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
2611 
2612 	act_count = action_item->conf;
2613 	if (act_count) {
2614 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2615 		       &act_count->id,
2616 		       BNXT_ULP_ACT_PROP_SZ_COUNT);
2617 	}
2618 
2619 	/* Update the act_bitmap with count */
2620 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2621 	return BNXT_TF_RC_SUCCESS;
2622 }
2623 
2624 static bool ulp_rte_parser_is_portb_vfrep(struct ulp_rte_parser_params *param)
2625 {
2626 	return ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP);
2627 }
2628 
2629 /*
2630  * Swaps info related to multi-port:
2631  * common:
2632  *    BNXT_ULP_CF_IDX_MP_B_IS_VFREP, BNXT_ULP_CF_IDX_MP_A_IS_VFREP
2633  *    BNXT_ULP_CF_IDX_MP_PORT_A, BNXT_ULP_CF_IDX_MP_PORT_B
2634  *
2635  * ingress:
2636  *    BNXT_ULP_CF_IDX_MP_VNIC_B, BNXT_ULP_CF_IDX_MP_VNIC_A
2637  *
2638  * egress:
2639  *    BNXT_ULP_CF_IDX_MP_MDATA_B, BNXT_ULP_CF_IDX_MP_MDATA_A
2640  *    BNXT_ULP_CF_IDX_MP_VPORT_B, BNXT_ULP_CF_IDX_MP_VPORT_A
2641  *
2642  * Note: This is done as OVS could give us a non-VFREP port in port B, and we
2643  * cannot use that to mirror, so we swap out the ports so that a VFREP is now
2644  * in port B instead.
2645  */
2646 static int32_t
2647 ulp_rte_parser_normalize_port_info(struct ulp_rte_parser_params *param)
2648 {
2649 	uint16_t mp_port_a, mp_port_b, mp_mdata_a, mp_mdata_b,
2650 		 mp_vport_a, mp_vport_b, mp_vnic_a, mp_vnic_b,
2651 		 mp_is_vfrep_a, mp_is_vfrep_b;
2652 
2653 	mp_is_vfrep_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_A_IS_VFREP);
2654 	mp_is_vfrep_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP);
2655 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP, mp_is_vfrep_a);
2656 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_A_IS_VFREP, mp_is_vfrep_b);
2657 
2658 	mp_port_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_PORT_A);
2659 	mp_port_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_PORT_B);
2660 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_B, mp_port_a);
2661 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_A, mp_port_b);
2662 
2663 	mp_vport_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VPORT_A);
2664 	mp_vport_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VPORT_B);
2665 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VPORT_B, mp_vport_a);
2666 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VPORT_A, mp_vport_b);
2667 
2668 	mp_vnic_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VNIC_A);
2669 	mp_vnic_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VNIC_B);
2670 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VNIC_B, mp_vnic_a);
2671 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VNIC_A, mp_vnic_b);
2672 
2673 	mp_mdata_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_MDATA_A);
2674 	mp_mdata_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_MDATA_B);
2675 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_MDATA_B, mp_mdata_a);
2676 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_MDATA_A, mp_mdata_b);
2677 
2678 	return BNXT_TF_RC_SUCCESS;
2679 }
2680 
2682 /* Function to handle the parsing of action ports. */
2683 static int32_t
2684 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2685 			    uint32_t ifindex, bool multi_port,
2686 			    enum bnxt_ulp_direction_type act_dir)
2687 {
2688 	enum bnxt_ulp_direction_type dir;
2689 	uint16_t pid_s;
2690 	uint8_t *p_mdata;
2691 	uint32_t pid, port_index;
2692 	struct ulp_rte_act_prop *act = &param->act_prop;
2693 	enum bnxt_ulp_intf_type port_type;
2694 	uint32_t vnic_type;
2695 
2696 	/* Get the direction. If the action itself implies a direction,
2697 	 * use it; otherwise fall back to the flow's direction. */
2698 	dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
2699 		ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
2700 		act_dir;
2701 
2702 	port_type = ULP_COMP_FLD_IDX_RD(param,
2703 					BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2704 
2705 	/* Update flag if Port A/B type is VF-REP */
2706 	ULP_COMP_FLD_IDX_WR(param, multi_port ?
2707 					BNXT_ULP_CF_IDX_MP_B_IS_VFREP :
2708 					BNXT_ULP_CF_IDX_MP_A_IS_VFREP,
2709 			    (port_type == BNXT_ULP_INTF_TYPE_VF_REP) ? 1 : 0);
2710 
2711 	/* An egress flow where the action port is not another VF endpoint
2712 	 * requires a VPORT.
2713 	 */
2714 	if (dir == BNXT_ULP_DIR_EGRESS) {
2715 		/* For egress direction, fill vport */
2716 		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2717 			return BNXT_TF_RC_ERROR;
2718 
2719 		pid = pid_s;
2720 		pid = rte_cpu_to_be_32(pid);
2721 		if (!multi_port)
2722 			memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2723 			       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2724 
2725 		/* Fill metadata */
2726 		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
2727 			port_index  = ULP_COMP_FLD_IDX_RD(param, multi_port ?
2728 								 BNXT_ULP_CF_IDX_MP_PORT_B :
2729 								 BNXT_ULP_CF_IDX_MP_PORT_A);
2730 			if (ulp_port_db_port_meta_data_get(param->ulp_ctx,
2731 							   port_index, &p_mdata))
2732 				return BNXT_TF_RC_ERROR;
2733 			/*
2734 			 * Update appropriate port (A/B) metadata based on multi-port
2735 			 * indication
2736 			 */
2737 			ULP_COMP_FLD_IDX_WR(param,
2738 					    multi_port ?
2739 						BNXT_ULP_CF_IDX_MP_MDATA_B :
2740 						BNXT_ULP_CF_IDX_MP_MDATA_A,
2741 					    rte_cpu_to_be_16(*((uint16_t *)p_mdata)));
2742 		}
2743 		/*
2744 		 * Update appropriate port (A/B) VPORT based on multi-port
2745 		 * indication.
2746 		 */
2747 		ULP_COMP_FLD_IDX_WR(param,
2748 				    multi_port ?
2749 					BNXT_ULP_CF_IDX_MP_VPORT_B :
2750 					BNXT_ULP_CF_IDX_MP_VPORT_A,
2751 				    pid_s);
2752 
2753 		/* Setup the VF_TO_VF VNIC information */
2754 		if (!multi_port && port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
2755 			if (ulp_port_db_default_vnic_get(param->ulp_ctx,
2756 							 ifindex,
2757 							 BNXT_ULP_VF_FUNC_VNIC,
2758 							 &pid_s))
2759 				return BNXT_TF_RC_ERROR;
2760 			pid = pid_s;
2761 
2762 			/* Allows use of func_opcode with VNIC */
2763 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_VNIC, pid);
2764 		}
2765 	} else {
2766 		/* For ingress direction, fill vnic */
2767 		/*
2768 		 * Action               Destination
2769 		 * ------------------------------------
2770 		 * PORT_REPRESENTOR     Driver Function
2771 		 * ------------------------------------
2772 		 * REPRESENTED_PORT     VF
2773 		 * ------------------------------------
2774 		 * PORT_ID              VF
2775 		 */
2776 		if (act_dir != BNXT_ULP_DIR_INGRESS &&
2777 		    port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2778 			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2779 		else
2780 			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2781 
2782 		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2783 						 vnic_type, &pid_s))
2784 			return BNXT_TF_RC_ERROR;
2785 
2786 		pid = pid_s;
2787 
2788 		/* Allows use of func_opcode with VNIC */
2789 		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_VNIC, pid);
2790 
2791 		pid = rte_cpu_to_be_32(pid);
2792 		if (!multi_port)
2793 			memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2794 			       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2795 		/*
2796 		 * Update appropriate port (A/B) VNIC based on multi-port
2797 		 * indication.
2798 		 */
2799 		ULP_COMP_FLD_IDX_WR(param,
2800 				    multi_port ?
2801 					BNXT_ULP_CF_IDX_MP_VNIC_B :
2802 					BNXT_ULP_CF_IDX_MP_VNIC_A,
2803 				    pid_s);
2804 	}
2805 
2806 	if (multi_port && !ulp_rte_parser_is_portb_vfrep(param))
2807 		ulp_rte_parser_normalize_port_info(param);
2808 
2809 	/* Update the action port set bit */
2810 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2811 	return BNXT_TF_RC_SUCCESS;
2812 }
2813 
2814 /* Function to handle the parsing of RTE Flow action PF. */
2815 int32_t
2816 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2817 		       struct ulp_rte_parser_params *params)
2818 {
2819 	uint32_t port_id;
2820 	uint32_t ifindex;
2821 	enum bnxt_ulp_intf_type intf_type;
2822 
2823 	/* Get the port id of the current device */
2824 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2825 
2826 	/* Get the port db ifindex */
2827 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2828 					      &ifindex)) {
2829 		BNXT_DRV_DBG(ERR, "Invalid port id\n");
2830 		return BNXT_TF_RC_ERROR;
2831 	}
2832 
2833 	/* Check the port is PF port */
2834 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2835 	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2836 		BNXT_DRV_DBG(ERR, "Port is not a PF port\n");
2837 		return BNXT_TF_RC_ERROR;
2838 	}
2839 	/* Update the action properties */
2840 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2841 	return ulp_rte_parser_act_port_set(params, ifindex, false,
2842 					   BNXT_ULP_DIR_INVALID);
2843 }
2844 
2845 /* Function to handle the parsing of RTE Flow action VF. */
2846 int32_t
2847 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2848 		       struct ulp_rte_parser_params *params)
2849 {
2850 	const struct rte_flow_action_vf *vf_action;
2851 	enum bnxt_ulp_intf_type intf_type;
2852 	uint32_t ifindex;
2853 	struct bnxt *bp;
2854 
2855 	vf_action = action_item->conf;
2856 	if (!vf_action) {
2857 		BNXT_DRV_DBG(ERR, "ParseErr: Invalid Argument\n");
2858 		return BNXT_TF_RC_PARSE_ERR;
2859 	}
2860 
2861 	if (vf_action->original) {
2862 		BNXT_DRV_DBG(ERR, "ParseErr:VF Original not supported\n");
2863 		return BNXT_TF_RC_PARSE_ERR;
2864 	}
2865 
2866 	bp = bnxt_pmd_get_bp(params->port_id);
2867 	if (bp == NULL) {
2868 		BNXT_DRV_DBG(ERR, "Invalid bp\n");
2869 		return BNXT_TF_RC_ERROR;
2870 	}
2871 
2872 	/* vf_action->id is a logical number which in this case is an
2873 	 * offset from the first VF. So, to get the absolute VF id, the
2874 	 * offset must be added to the absolute first vf id of that port.
2875 	 */
2876 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2877 						 bp->first_vf_id +
2878 						 vf_action->id,
2879 						 &ifindex)) {
2880 		BNXT_DRV_DBG(ERR, "VF is not valid interface\n");
2881 		return BNXT_TF_RC_ERROR;
2882 	}
2883 	/* Check the port is VF port */
2884 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2885 	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2886 	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2887 		BNXT_DRV_DBG(ERR, "Port is not a VF port\n");
2888 		return BNXT_TF_RC_ERROR;
2889 	}
2890 
2891 	/* Update the action properties */
2892 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2893 	return ulp_rte_parser_act_port_set(params, ifindex, false,
2894 					   BNXT_ULP_DIR_INVALID);
2895 }
2896 
2897 /* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
2898 int32_t
2899 ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
2900 			 struct ulp_rte_parser_params *param)
2901 {
2902 	uint32_t ethdev_id;
2903 	uint32_t ifindex;
2905 	uint32_t num_ports;
2906 	enum bnxt_ulp_intf_type intf_type;
2907 	enum bnxt_ulp_direction_type act_dir;
2908 
2909 	if (!act_item->conf) {
2910 		BNXT_DRV_DBG(ERR, "ParseErr: Invalid Argument\n");
2912 		return BNXT_TF_RC_PARSE_ERR;
2913 	}
2914 	switch (act_item->type) {
2915 	case RTE_FLOW_ACTION_TYPE_PORT_ID: {
2916 		const struct rte_flow_action_port_id *port_id = act_item->conf;
2917 
2918 		if (port_id->original) {
2919 			BNXT_DRV_DBG(ERR,
2920 				    "ParseErr:Portid Original not supported\n");
2921 			return BNXT_TF_RC_PARSE_ERR;
2922 		}
2923 		ethdev_id = port_id->id;
2924 		act_dir = BNXT_ULP_DIR_INVALID;
2925 		break;
2926 	}
2927 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
2928 		const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2929 
2930 		ethdev_id = ethdev->port_id;
2931 		act_dir = BNXT_ULP_DIR_INGRESS;
2932 		break;
2933 	}
2934 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
2935 		const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2936 
2937 		ethdev_id = ethdev->port_id;
2938 		act_dir = BNXT_ULP_DIR_EGRESS;
2939 		break;
2940 	}
2941 	default:
2942 		BNXT_DRV_DBG(ERR, "Unknown port action\n");
2943 		return BNXT_TF_RC_ERROR;
2944 	}
2945 
2946 	num_ports  = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_NPORTS);
2947 
2948 	if (num_ports) {
2949 		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_B,
2950 				    ethdev_id);
2951 		ULP_BITMAP_SET(param->act_bitmap.bits,
2952 			       BNXT_ULP_ACT_BIT_MULTIPLE_PORT);
2953 	} else {
2954 		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_A,
2955 				    ethdev_id);
2956 	}
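	/*
	 * The first port action of a flow is thus recorded as port A; a
	 * second one (num_ports already non-zero) becomes port B and marks
	 * the flow as a multi-port (mirror) flow.
	 */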
2957 
2958 	/* Get the port db ifindex */
2959 	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
2960 					      &ifindex)) {
2961 		BNXT_DRV_DBG(ERR, "Invalid port id\n");
2962 		return BNXT_TF_RC_ERROR;
2963 	}
2964 
2965 	/* Get the intf type */
2966 	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2967 	if (!intf_type) {
2968 		BNXT_DRV_DBG(ERR, "Invalid port type\n");
2969 		return BNXT_TF_RC_ERROR;
2970 	}
2971 
2972 	/* Set the action port */
2973 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2974 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID,
2975 			    ethdev_id);
2976 
2977 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_NPORTS, ++num_ports);
2978 	return ulp_rte_parser_act_port_set(param, ifindex,
2979 					   ULP_BITMAP_ISSET(param->act_bitmap.bits,
2980 							    BNXT_ULP_ACT_BIT_MULTIPLE_PORT),
2981 					   act_dir);
2982 }
2983 
2984 /* Function to handle the parsing of RTE Flow action pop vlan. */
2985 int32_t
2986 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2987 				struct ulp_rte_parser_params *params)
2988 {
2989 	/* Update the act_bitmap with pop */
2990 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2991 	return BNXT_TF_RC_SUCCESS;
2992 }
2993 
2994 /* Function to handle the parsing of RTE Flow action push vlan. */
2995 int32_t
2996 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2997 				 struct ulp_rte_parser_params *params)
2998 {
2999 	const struct rte_flow_action_of_push_vlan *push_vlan;
3000 	uint16_t ethertype;
3001 	struct ulp_rte_act_prop *act = &params->act_prop;
3002 
3003 	push_vlan = action_item->conf;
3004 	if (push_vlan) {
3005 		ethertype = push_vlan->ethertype;
3006 		if (tfp_be_to_cpu_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
3007 			BNXT_DRV_DBG(ERR,
3008 				    "Parse Err: Ethertype not supported\n");
3009 			return BNXT_TF_RC_PARSE_ERR;
3010 		}
3011 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
3012 		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
3013 		/* Update the act_bitmap with push vlan */
3014 		ULP_BITMAP_SET(params->act_bitmap.bits,
3015 			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
3016 		return BNXT_TF_RC_SUCCESS;
3017 	}
3018 	BNXT_DRV_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
3019 	return BNXT_TF_RC_ERROR;
3020 }
3021 
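/*
 * Illustrative usage (editor's sketch): only the 802.1Q TPID (0x8100) is
 * accepted above; the ethertype travels big-endian per the rte_flow API:
 *
 *	struct rte_flow_action_of_push_vlan pv = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &pv,
 *	};
 */
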
3022 /* Function to handle the parsing of RTE Flow action set vlan id. */
3023 int32_t
3024 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
3025 				    struct ulp_rte_parser_params *params)
3026 {
3027 	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
3028 	uint32_t vid;
3029 	struct ulp_rte_act_prop *act = &params->act_prop;
3030 
3031 	vlan_vid = action_item->conf;
3032 	if (vlan_vid && vlan_vid->vlan_vid) {
3033 		vid = vlan_vid->vlan_vid;
3034 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
3035 		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
3036 		/* Update the act_bitmap with vlan vid */
3037 		ULP_BITMAP_SET(params->act_bitmap.bits,
3038 			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
3039 		return BNXT_TF_RC_SUCCESS;
3040 	}
3041 	BNXT_DRV_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
3042 	return BNXT_TF_RC_ERROR;
3043 }
3044 
3045 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
3046 int32_t
3047 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
3048 				    struct ulp_rte_parser_params *params)
3049 {
3050 	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
3051 	uint8_t pcp;
3052 	struct ulp_rte_act_prop *act = &params->act_prop;
3053 
3054 	vlan_pcp = action_item->conf;
3055 	if (vlan_pcp) {
3056 		pcp = vlan_pcp->vlan_pcp;
3057 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
3058 		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
3059 		/* Update the act_bitmap with vlan pcp */
3060 		ULP_BITMAP_SET(params->act_bitmap.bits,
3061 			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
3062 		return BNXT_TF_RC_SUCCESS;
3063 	}
3064 	BNXT_DRV_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
3065 	return BNXT_TF_RC_ERROR;
3066 }
3067 
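/*
 * Illustrative usage (editor's sketch) for the two VLAN rewrite handlers
 * above. vlan_vid is big-endian per the rte_flow API; note that VID 0 is
 * rejected by the vlan_vid->vlan_vid check:
 *
 *	struct rte_flow_action_of_set_vlan_vid vid = {
 *		.vlan_vid = RTE_BE16(100),
 *	};
 *	struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 */
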
3068 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
3069 int32_t
3070 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
3071 				 struct ulp_rte_parser_params *params)
3072 {
3073 	const struct rte_flow_action_set_ipv4 *set_ipv4;
3074 	struct ulp_rte_act_prop *act = &params->act_prop;
3075 
3076 	set_ipv4 = action_item->conf;
3077 	if (set_ipv4) {
3078 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
3079 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
3080 		/* Update the act_bitmap with set ipv4 src */
3081 		ULP_BITMAP_SET(params->act_bitmap.bits,
3082 			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
3083 		return BNXT_TF_RC_SUCCESS;
3084 	}
3085 	BNXT_DRV_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
3086 	return BNXT_TF_RC_ERROR;
3087 }
3088 
3089 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
3090 int32_t
3091 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
3092 				 struct ulp_rte_parser_params *params)
3093 {
3094 	const struct rte_flow_action_set_ipv4 *set_ipv4;
3095 	struct ulp_rte_act_prop *act = &params->act_prop;
3096 
3097 	set_ipv4 = action_item->conf;
3098 	if (set_ipv4) {
3099 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
3100 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
3101 		/* Update the act_bitmap with set ipv4 dst */
3102 		ULP_BITMAP_SET(params->act_bitmap.bits,
3103 			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
3104 		return BNXT_TF_RC_SUCCESS;
3105 	}
3106 	BNXT_DRV_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
3107 	return BNXT_TF_RC_ERROR;
3108 }
3109 
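/*
 * Illustrative usage (editor's sketch) for the IPv4 SA/DA rewrite handlers
 * above; ipv4_addr is carried big-endian by the rte_flow API:
 *
 *	struct rte_flow_action_set_ipv4 sa = {
 *		.ipv4_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC, .conf = &sa,
 *	};
 */
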
3110 /* Function to handle the parsing of RTE Flow action set ipv6 src.*/
3111 int32_t
3112 ulp_rte_set_ipv6_src_act_handler(const struct rte_flow_action *action_item,
3113 				 struct ulp_rte_parser_params *params)
3114 {
3115 	const struct rte_flow_action_set_ipv6 *set_ipv6;
3116 	struct ulp_rte_act_prop *act = &params->act_prop;
3117 
3118 	set_ipv6 = action_item->conf;
3119 	if (set_ipv6) {
3120 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC],
3121 		       &set_ipv6->ipv6_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV6_SRC);
3122 		/* Update the act_bitmap with set ipv6 src */
3123 		ULP_BITMAP_SET(params->act_bitmap.bits,
3124 			       BNXT_ULP_ACT_BIT_SET_IPV6_SRC);
3125 		return BNXT_TF_RC_SUCCESS;
3126 	}
3127 	BNXT_DRV_DBG(ERR, "Parse Error: set ipv6 src arg is invalid\n");
3128 	return BNXT_TF_RC_ERROR;
3129 }
3130 
3131 /* Function to handle the parsing of RTE Flow action set ipv6 dst.*/
3132 int32_t
3133 ulp_rte_set_ipv6_dst_act_handler(const struct rte_flow_action *action_item,
3134 				 struct ulp_rte_parser_params *params)
3135 {
3136 	const struct rte_flow_action_set_ipv6 *set_ipv6;
3137 	struct ulp_rte_act_prop *act = &params->act_prop;
3138 
3139 	set_ipv6 = action_item->conf;
3140 	if (set_ipv6) {
3141 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST],
3142 		       &set_ipv6->ipv6_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV6_DST);
3143 		/* Update the act_bitmap with set ipv6 dst */
3144 		ULP_BITMAP_SET(params->act_bitmap.bits,
3145 			       BNXT_ULP_ACT_BIT_SET_IPV6_DST);
3146 		return BNXT_TF_RC_SUCCESS;
3147 	}
3148 	BNXT_DRV_DBG(ERR, "Parse Error: set ipv6 dst arg is invalid\n");
3149 	return BNXT_TF_RC_ERROR;
3150 }
3151 
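/*
 * Illustrative usage (editor's sketch) for the IPv6 SA/DA rewrite handlers
 * above; the 16-byte address (network byte order) is copied verbatim into
 * the action properties:
 *
 *	struct rte_flow_action_set_ipv6 da = { 0 };
 *
 *	memcpy(&da.ipv6_addr, addr_bytes, 16);	addr_bytes is hypothetical
 */
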
3152 /* Function to handle the parsing of RTE Flow action set tp src.*/
3153 int32_t
3154 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
3155 			       struct ulp_rte_parser_params *params)
3156 {
3157 	const struct rte_flow_action_set_tp *set_tp;
3158 	struct ulp_rte_act_prop *act = &params->act_prop;
3159 
3160 	set_tp = action_item->conf;
3161 	if (set_tp) {
3162 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
3163 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
3164 		/* Update the act_bitmap with set tp src */
3165 		ULP_BITMAP_SET(params->act_bitmap.bits,
3166 			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
3167 		return BNXT_TF_RC_SUCCESS;
3168 	}
3169 
3170 	BNXT_DRV_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
3171 	return BNXT_TF_RC_ERROR;
3172 }
3173 
3174 /* Function to handle the parsing of RTE Flow action set tp dst.*/
3175 int32_t
3176 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
3177 			       struct ulp_rte_parser_params *params)
3178 {
3179 	const struct rte_flow_action_set_tp *set_tp;
3180 	struct ulp_rte_act_prop *act = &params->act_prop;
3181 
3182 	set_tp = action_item->conf;
3183 	if (set_tp) {
3184 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
3185 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
3186 		/* Update the act_bitmap with set tp dst */
3187 		ULP_BITMAP_SET(params->act_bitmap.bits,
3188 			       BNXT_ULP_ACT_BIT_SET_TP_DST);
3189 		return BNXT_TF_RC_SUCCESS;
3190 	}
3191 
3192 	BNXT_DRV_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
3193 	return BNXT_TF_RC_ERROR;
3194 }
3195 
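/*
 * Illustrative usage (editor's sketch) for the L4 port rewrite handlers
 * above; the port is big-endian per the rte_flow API:
 *
 *	struct rte_flow_action_set_tp tp = { .port = RTE_BE16(4789) };
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_TP_DST, .conf = &tp,
 *	};
 */
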
3196 /* Function to handle the parsing of RTE Flow action dec ttl.*/
3197 int32_t
3198 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
3199 			    struct ulp_rte_parser_params *params)
3200 {
3201 	/* Update the act_bitmap with dec ttl */
3202 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
3203 	return BNXT_TF_RC_SUCCESS;
3204 }
3205 
3206 /* Function to handle the parsing of RTE Flow action set ttl.*/
3207 int32_t
3208 ulp_rte_set_ttl_act_handler(const struct rte_flow_action *action_item,
3209 			    struct ulp_rte_parser_params *params)
3210 {
3211 	const struct rte_flow_action_set_ttl *set_ttl;
3212 	struct ulp_rte_act_prop *act = &params->act_prop;
3213 
3214 	set_ttl = action_item->conf;
3215 	if (set_ttl) {
3216 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TTL],
3217 		       &set_ttl->ttl_value, BNXT_ULP_ACT_PROP_SZ_SET_TTL);
3218 		/* Update the act_bitmap with dec ttl */
3219 		/* Note: the NIC HW does not support the set_ttl action, so
3220 		 * dec_ttl is used to emulate it; the flow's matched TTL field
3221 		 * must be exactly one more than the set_ttl value.
3222 		 */
3223 		if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3_TTL) ==
3224 		    (uint32_t)(set_ttl->ttl_value + 1)) {
3225 			ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
3226 			return BNXT_TF_RC_SUCCESS;
3227 		}
3228 		BNXT_DRV_DBG(ERR, "Parse Error: set_ttl value does not match the flow TTL field\n");
3229 		return BNXT_TF_RC_ERROR;
3230 	}
3231 
3232 	BNXT_DRV_DBG(ERR, "Parse Error: set ttl arg is invalid.\n");
3233 	return BNXT_TF_RC_ERROR;
3234 }
3235 
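/*
 * Worked example (editor's note) of the set_ttl emulation above: for a
 * flow whose pattern matches IPv4 TTL 64, only ttl_value 63 is accepted,
 * since the emulated dec_ttl decrements the matched TTL by exactly one:
 *
 *	struct rte_flow_action_set_ttl ttl = { .ttl_value = 63 };
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_TTL, .conf = &ttl,
 *	};
 */
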
3236 /* Function to handle the parsing of RTE Flow action JUMP */
3237 int32_t
3238 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item,
3239 			 struct ulp_rte_parser_params *params)
3240 {
3241 	const struct rte_flow_action_jump *jump_act;
3242 	struct ulp_rte_act_prop *act = &params->act_prop;
3243 	uint32_t group_id;
3244 
3245 	jump_act = action_item->conf;
3246 	if (jump_act) {
3247 		group_id = rte_cpu_to_be_32(jump_act->group);
3248 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_JUMP],
3249 		       &group_id, BNXT_ULP_ACT_PROP_SZ_JUMP);
3250 		ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
3251 	}
3252 	return BNXT_TF_RC_SUCCESS;
3253 }
3254 
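/*
 * Illustrative usage (editor's sketch): the jump target group is stored
 * big-endian in the action properties:
 *
 *	struct rte_flow_action_jump jump = { .group = 3 };
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump,
 *	};
 */
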
3255 int32_t
3256 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
3257 			   struct ulp_rte_parser_params *params)
3258 {
3259 	const struct rte_flow_action_sample *sample;
3260 	int ret;
3261 
3262 	sample = action_item->conf;
3263 
3264 	/* if SAMPLE bit is set it means this sample action is nested within the
3265 	 * actions of another sample action; this is not allowed
3266 	 */
3267 	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
3268 			     BNXT_ULP_ACT_BIT_SAMPLE))
3269 		return BNXT_TF_RC_ERROR;
3270 
3271 	/* a sample action is only allowed as a shared action */
3272 	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
3273 			      BNXT_ULP_ACT_BIT_SHARED))
3274 		return BNXT_TF_RC_ERROR;
3275 
3276 	/* only a ratio of 1, i.e. 100% sampling, is supported */
3277 	if (sample->ratio != 1)
3278 		return BNXT_TF_RC_ERROR;
3279 
3280 	if (!sample->actions)
3281 		return BNXT_TF_RC_ERROR;
3282 
3283 	/* parse the nested actions for a sample action */
3284 	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
3285 	if (ret == BNXT_TF_RC_SUCCESS)
3286 		/* Update the act_bitmap with sample */
3287 		ULP_BITMAP_SET(params->act_bitmap.bits,
3288 			       BNXT_ULP_ACT_BIT_SAMPLE);
3289 
3290 	return ret;
3291 }
3292 
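/*
 * Illustrative usage (editor's sketch): per the checks above, a sample
 * action must arrive as an indirect (shared) action, must not nest another
 * sample, and must use ratio 1 (sample every packet); pid is hypothetical:
 *
 *	struct rte_flow_action nested[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_sample s = { .ratio = 1, .actions = nested };
 */
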
3293 int32_t
3294 ulp_rte_action_hdlr_handler(const struct rte_flow_action *action_item,
3295 			   struct ulp_rte_parser_params *params)
3296 {
3297 	const struct rte_flow_action_handle *handle;
3298 	struct bnxt_ulp_shared_act_info *act_info;
3299 	uint64_t action_bitmask;
3300 	uint32_t shared_action_type;
3301 	struct ulp_rte_act_prop *act = &params->act_prop;
3302 	uint64_t tmp64;
3303 	enum bnxt_ulp_direction_type dir, handle_dir;
3304 	uint32_t act_info_entries = 0;
3305 	int32_t ret;
3306 
3307 	handle = action_item->conf;
3308 
3309 	/* Have to use the computed direction since the params->dir_attr
3310 	 * can be different (transfer, ingress, egress)
3311 	 */
3312 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
3313 
3314 	/* direction of shared action must match direction of flow */
3315 	ret = bnxt_get_action_handle_direction(handle, &handle_dir);
3316 	if (unlikely(ret || dir != handle_dir)) {
3317 		BNXT_DRV_DBG(ERR, "Invalid shared handle or direction\n");
3318 		return BNXT_TF_RC_ERROR;
3319 	}
3320 
3321 	if (unlikely(bnxt_get_action_handle_type(handle, &shared_action_type))) {
3322 		BNXT_DRV_DBG(ERR, "Invalid shared handle\n");
3323 		return BNXT_TF_RC_ERROR;
3324 	}
3325 
3326 	act_info = bnxt_ulp_shared_act_info_get(&act_info_entries);
3327 	if (unlikely(shared_action_type >= act_info_entries || !act_info)) {
3328 		BNXT_DRV_DBG(ERR, "Invalid shared handle\n");
3329 		return BNXT_TF_RC_ERROR;
3330 	}
3331 
3332 	action_bitmask = act_info[shared_action_type].act_bitmask;
3333 
3334 	/* shared actions of the same type cannot be repeated */
3335 	if (unlikely(params->act_bitmap.bits & action_bitmask)) {
3336 		BNXT_DRV_DBG(ERR, "indirect actions cannot be repeated\n");
3337 		return BNXT_TF_RC_ERROR;
3338 	}
3339 
3340 	tmp64 = tfp_cpu_to_be_64((uint64_t)bnxt_get_action_handle_index(handle));
3341 
3342 	memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE],
3343 	       &tmp64, BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE);
3344 
3345 	ULP_BITMAP_SET(params->act_bitmap.bits, action_bitmask);
3346 
3347 	return BNXT_TF_RC_SUCCESS;
3348 }
3349 
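/*
 * Illustrative usage (editor's sketch): the handle parsed above is the
 * opaque object returned by rte_flow_action_handle_create() and is passed
 * back via RTE_FLOW_ACTION_TYPE_INDIRECT; conf/act/err are hypothetical:
 *
 *	struct rte_flow_action_handle *h;
 *
 *	h = rte_flow_action_handle_create(port_id, &conf, &act, &err);
 *	struct rte_flow_action use = {
 *		.type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h,
 *	};
 */
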
3350 /* Function to handle the parsing of the bnxt vendor RTE Flow action vxlan decap. */
3351 int32_t
3352 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
3353 				   struct ulp_rte_parser_params *params)
3354 {
3355 	/* Set the F1 flow header bit */
3356 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
3357 	return ulp_rte_vxlan_decap_act_handler(action_item, params);
3358 }
3359 
3360 /* Function to handle the parsing of the bnxt vendor RTE Flow item vxlan decap. */
3361 int32_t
3362 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
3363 				       struct ulp_rte_parser_params *params)
3364 {
3365 	RTE_SET_USED(item);
3366 	/* Set the F2 flow header bit */
3367 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
3368 	return ulp_rte_vxlan_decap_act_handler(NULL, params);
3369 }
3370 
3371 /* Function to handle the parsing of RTE Flow action queue. */
3372 int32_t
3373 ulp_rte_queue_act_handler(const struct rte_flow_action *action_item,
3374 			  struct ulp_rte_parser_params *param)
3375 {
3376 	const struct rte_flow_action_queue *q_info;
3377 	struct ulp_rte_act_prop *ap = &param->act_prop;
3378 
3379 	if (action_item == NULL || action_item->conf == NULL) {
3380 		BNXT_DRV_DBG(ERR, "Parse Err: invalid queue configuration\n");
3381 		return BNXT_TF_RC_ERROR;
3382 	}
3383 
3384 	q_info = action_item->conf;
3385 	/* Copy the queue into the specific action properties */
3386 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX],
3387 	       &q_info->index, BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX);
3388 
3389 	/* set the queue action header bit */
3390 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE);
3391 
3392 	return BNXT_TF_RC_SUCCESS;
3393 }
3394 
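/*
 * Illustrative usage (editor's sketch) of the queue action parsed above:
 *
 *	struct rte_flow_action_queue q = { .index = 4 };
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q,
 *	};
 */
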
3395 /* Function to handle the parsing of RTE Flow action meter. */
3396 int32_t
3397 ulp_rte_meter_act_handler(const struct rte_flow_action *action_item,
3398 			  struct ulp_rte_parser_params *params)
3399 {
3400 	const struct rte_flow_action_meter *meter;
3401 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
3402 	uint32_t tmp_meter_id;
3403 
3404 	if (unlikely(action_item == NULL || action_item->conf == NULL)) {
3405 		BNXT_DRV_DBG(ERR, "Parse Err: invalid meter configuration\n");
3406 		return BNXT_TF_RC_ERROR;
3407 	}
3408 
3409 	meter = action_item->conf;
3410 	/* validate the mtr_id and update the reference counter */
3411 	/* store the meter id (big-endian) in the action properties */
3412 	memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER],
3413 	       &tmp_meter_id,
3414 	       BNXT_ULP_ACT_PROP_SZ_METER);
3415 
3416 	/* set the meter action header bit */
3417 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_METER);
3418 
3419 	return BNXT_TF_RC_SUCCESS;
3420 }
3421 
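/*
 * Illustrative usage (editor's sketch): mtr_id refers to a meter created
 * earlier through the rte_mtr API (e.g. rte_mtr_create()):
 *
 *	struct rte_flow_action_meter m = { .mtr_id = 1 };
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_METER, .conf = &m,
 *	};
 */
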
3422 /* Function to handle the parsing of RTE Flow action set mac src.*/
3423 int32_t
3424 ulp_rte_set_mac_src_act_handler(const struct rte_flow_action *action_item,
3425 				struct ulp_rte_parser_params *params)
3426 {
3427 	const struct rte_flow_action_set_mac *set_mac;
3428 	struct ulp_rte_act_prop *act = &params->act_prop;
3429 
3430 	set_mac = action_item->conf;
3431 	if (likely(set_mac)) {
3432 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC],
3433 		       set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC);
3434 		/* Update the act_bitmap with set mac src */
3435 		ULP_BITMAP_SET(params->act_bitmap.bits,
3436 			       BNXT_ULP_ACT_BIT_SET_MAC_SRC);
3437 		return BNXT_TF_RC_SUCCESS;
3438 	}
3439 	BNXT_DRV_DBG(ERR, "Parse Error: set mac src arg is invalid\n");
3440 	return BNXT_TF_RC_ERROR;
3441 }
3442 
3443 /* Function to handle the parsing of RTE Flow action set mac dst.*/
3444 int32_t
3445 ulp_rte_set_mac_dst_act_handler(const struct rte_flow_action *action_item,
3446 				struct ulp_rte_parser_params *params)
3447 {
3448 	const struct rte_flow_action_set_mac *set_mac;
3449 	struct ulp_rte_act_prop *act = &params->act_prop;
3450 
3451 	set_mac = action_item->conf;
3452 	if (likely(set_mac)) {
3453 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST],
3454 		       set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST);
3455 		/* Update the act_bitmap with set mac dst */
3456 		ULP_BITMAP_SET(params->act_bitmap.bits,
3457 			       BNXT_ULP_ACT_BIT_SET_MAC_DST);
3458 		return BNXT_TF_RC_SUCCESS;
3459 	}
3460 	BNXT_DRV_DBG(ERR, "Parse Error: set mac dst arg is invalid\n");
3461 	return BNXT_TF_RC_ERROR;
3462 }
3463
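/*
 * Illustrative usage (editor's sketch) for the MAC rewrite handlers above:
 *
 *	struct rte_flow_action_set_mac mac = {
 *		.mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *	struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST, .conf = &mac,
 *	};
 */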