xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c (revision d4b36fc5f0dc59b256441c82e5a9395054026496)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2023 Broadcom
3  * All rights reserved.
4  */
5 
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_ulp_utils.h"
11 #include "bnxt_tf_common.h"
12 #include "bnxt_tf_pmd_shim.h"
13 #include "ulp_rte_parser.h"
14 #include "ulp_matcher.h"
15 #include "ulp_utils.h"
16 #include "tfp.h"
17 #include "ulp_port_db.h"
18 #include "ulp_flow_db.h"
19 #include "ulp_mapper.h"
20 #include "ulp_tun.h"
21 #include "ulp_template_db_tbl.h"
22 
23 /* Local defines for the parsing functions */
24 #define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
25 #define ULP_VLAN_PRIORITY_MASK		0x700
26 #define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits*/
27 #define ULP_UDP_PORT_VXLAN		4789
28 #define ULP_UDP_PORT_VXLAN_MASK		0xFFFF
29 #define ULP_UDP_PORT_VXLAN_GPE		4790
30 #define ULP_UDP_PORT_VXLAN_GPE_MASK	0xFFFF
31 #define ULP_UDP_PORT_GENEVE		6081
32 #define ULP_UDP_PORT_GENEVE_MASK	0xFFFF
33 
34 /**
35  * Geneve header first 16Bit
36  * Version (2b), length of the options fields (6b), OAM packet (1b),
37  * critical options present (1b), reserved 0 (6b).
38  */
39 #define ULP_GENEVE_OPT_MAX_SIZE 6 /* HW only supports 6 words */
40 #define ULP_GENEVE_OPTLEN_MASK 0x3F
41 #define ULP_GENEVE_OPTLEN_SHIFT 8
42 #define ULP_GENEVE_OPTLEN_VAL(a) \
43 	    (((a) >> (ULP_GENEVE_OPTLEN_SHIFT)) & (ULP_GENEVE_OPTLEN_MASK))
44 
45 /* Utility function to skip the void items. */
46 static inline int32_t
47 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
48 {
49 	if (!*item)
50 		return 0;
51 	if (increment)
52 		(*item)++;
53 	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
54 		(*item)++;
55 	if (*item)
56 		return 1;
57 	return 0;
58 }
59 
60 /* Utility function to copy field spec items */
61 static inline struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
63 			const void *buffer,
64 			uint32_t size)
65 {
66 	field->size = size;
67 	memcpy(field->spec, buffer, field->size);
68 	field++;
69 	return field;
70 }
71 
72 /* Utility function to update the field_bitmap */
73 static void
74 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
75 				   uint32_t idx,
76 				   enum bnxt_ulp_prsr_action prsr_act)
77 {
78 	struct ulp_rte_hdr_field *field;
79 
80 	field = &params->hdr_field[idx];
81 	if (ulp_bitmap_notzero(field->mask, field->size)) {
82 		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
83 		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
84 			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
85 		/* Not exact match */
86 		if (!ulp_bitmap_is_ones(field->mask, field->size))
87 			ULP_COMP_FLD_IDX_WR(params,
88 					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
89 	} else {
90 		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
91 	}
92 }
93 
94 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
95 /* Utility function to copy field spec and masks items */
96 static inline void
97 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
98 		      uint32_t *idx,
99 		      uint32_t size,
100 		      const void *spec_buff,
101 		      const void *mask_buff,
102 		      enum bnxt_ulp_prsr_action prsr_act)
103 {
104 	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
105 
106 	/* update the field size */
107 	field->size = size;
108 
109 	/* copy the mask specifications only if mask is not null */
110 	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff &&
111 	    spec_buff && ulp_bitmap_notzero(spec_buff, size)) {
112 		memcpy(field->mask, mask_buff, size);
113 		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
114 	}
115 
116 	/* copy the protocol specifications only if mask is not null*/
117 	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
118 		memcpy(field->spec, spec_buff, size);
119 
120 	/* Increment the index */
121 	*idx = *idx + 1;
122 }
123 
124 /* Utility function to copy field spec and masks items */
125 static inline int32_t
126 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
127 			       uint32_t *idx,
128 			       uint32_t size)
129 {
130 	if (unlikely(params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX)) {
131 		BNXT_DRV_DBG(ERR, "OOB for field processing %u\n", *idx);
132 		return -EINVAL;
133 	}
134 	*idx = params->field_idx;
135 	params->field_idx += size;
136 	return 0;
137 }
138 
139 /*
140  * Function to handle the parsing of RTE Flows and placing
141  * the RTE flow items into the ulp structures.
142  */
143 int32_t
144 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
145 			      struct ulp_rte_parser_params *params)
146 {
147 	const struct rte_flow_item *item = pattern;
148 	struct bnxt_ulp_rte_hdr_info *hdr_info;
149 
150 	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
151 
152 	/* Parse all the items in the pattern */
153 	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
154 		if (item->type >= (typeof(item->type))
155 		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
156 			if (item->type >=
157 			    (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
158 				goto hdr_parser_error;
159 			/* get the header information */
160 			hdr_info = &ulp_vendor_hdr_info[item->type -
161 				BNXT_RTE_FLOW_ITEM_TYPE_END];
162 		} else {
163 			if (item->type > RTE_FLOW_ITEM_TYPE_ECPRI)
164 				goto hdr_parser_error;
165 			hdr_info = &ulp_hdr_info[item->type];
166 		}
167 		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
168 			goto hdr_parser_error;
169 		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
170 			/* call the registered callback handler */
171 			if (hdr_info->proto_hdr_func) {
172 				if (hdr_info->proto_hdr_func(item, params) !=
173 				    BNXT_TF_RC_SUCCESS) {
174 					return BNXT_TF_RC_ERROR;
175 				}
176 			}
177 		}
178 		item++;
179 	}
180 	/* update the implied SVIF */
181 	return ulp_rte_parser_implicit_match_port_process(params);
182 
183 hdr_parser_error:
184 	BNXT_DRV_DBG(ERR, "Truflow parser does not support type %d\n",
185 		    item->type);
186 	return BNXT_TF_RC_PARSE_ERR;
187 }
188 
189 /*
190  * Function to handle the parsing of RTE Flows and placing
191  * the RTE flow actions into the ulp structures.
192  */
193 int32_t
194 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
195 			      struct ulp_rte_parser_params *params)
196 {
197 	const struct rte_flow_action *action_item = actions;
198 	struct bnxt_ulp_rte_act_info *hdr_info;
199 
200 	/* Parse all the items in the pattern */
201 	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
202 		if (action_item->type >=
203 		    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
204 			if (action_item->type >=
205 			    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
206 				goto act_parser_error;
207 			/* get the header information from bnxt actinfo table */
208 			hdr_info = &ulp_vendor_act_info[action_item->type -
209 				BNXT_RTE_FLOW_ACTION_TYPE_END];
210 		} else {
211 			if (action_item->type > RTE_FLOW_ACTION_TYPE_INDIRECT)
212 				goto act_parser_error;
213 			/* get the header information from the act info table */
214 			hdr_info = &ulp_act_info[action_item->type];
215 		}
216 		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
217 			goto act_parser_error;
218 		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
219 			/* call the registered callback handler */
220 			if (hdr_info->proto_act_func) {
221 				if (hdr_info->proto_act_func(action_item,
222 							     params) !=
223 				    BNXT_TF_RC_SUCCESS) {
224 					return BNXT_TF_RC_ERROR;
225 				}
226 			}
227 		}
228 		action_item++;
229 	}
230 	/* update the implied port details */
231 	ulp_rte_parser_implicit_act_port_process(params);
232 	return BNXT_TF_RC_SUCCESS;
233 
234 act_parser_error:
235 	BNXT_DRV_DBG(ERR, "Truflow parser does not support act %u\n",
236 		    action_item->type);
237 	return BNXT_TF_RC_ERROR;
238 }
239 
240 /*
241  * Function to handle the post processing of the computed
242  * fields for the interface.
243  */
244 static void
245 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
246 {
247 	uint32_t ifindex;
248 	uint16_t port_id, parif, svif;
249 	uint32_t mtype;
250 	enum bnxt_ulp_direction_type dir;
251 
252 	/* get the direction details */
253 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
254 
255 	/* read the port id details */
256 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
257 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
258 					      port_id,
259 					      &ifindex)) {
260 		BNXT_DRV_DBG(ERR, "ParseErr:Portid is not valid\n");
261 		return;
262 	}
263 
264 	if (dir == BNXT_ULP_DIR_INGRESS) {
265 		/* Set port PARIF */
266 		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
267 					  BNXT_ULP_DRV_FUNC_PARIF, &parif)) {
268 			BNXT_DRV_DBG(ERR, "ParseErr:ifindex is not valid\n");
269 			return;
270 		}
271 		/* Note:
272 		 * We save the drv_func_parif into CF_IDX of phy_port_parif,
273 		 * since that index is currently referenced by ingress templates
274 		 * for datapath flows. If in the future we change the parser to
275 		 * save it in the CF_IDX of drv_func_parif we also need to update
276 		 * the template.
277 		 * WARNING: Two VFs on same parent PF will not work, as the parif is
278 		 * based on fw fid of the parent PF.
279 		 */
280 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
281 				    parif);
282 		/* Set port SVIF */
283 		if (ulp_port_db_svif_get(params->ulp_ctx, ifindex,
284 					  BNXT_ULP_PHY_PORT_SVIF, &svif)) {
285 			BNXT_DRV_DBG(ERR, "ParseErr:ifindex is not valid\n");
286 			return;
287 		}
288 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_SVIF,
289 				    svif);
290 	} else {
291 		/* Get the match port type */
292 		mtype = ULP_COMP_FLD_IDX_RD(params,
293 					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
294 		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
295 			ULP_COMP_FLD_IDX_WR(params,
296 					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
297 					    1);
298 			/* Set VF func PARIF */
299 			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
300 						  BNXT_ULP_VF_FUNC_PARIF,
301 						  &parif)) {
302 				BNXT_DRV_DBG(ERR,
303 					    "ParseErr:ifindex is not valid\n");
304 				return;
305 			}
306 			ULP_COMP_FLD_IDX_WR(params,
307 					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
308 					    parif);
309 
310 			/* Set VF func SVIF */
311 			if (ulp_port_db_svif_get(params->ulp_ctx, ifindex,
312 						 BNXT_ULP_CF_IDX_VF_FUNC_SVIF, &svif)) {
313 				BNXT_DRV_DBG(ERR, "ParseErr:ifindex is not valid\n");
314 				return;
315 			}
316 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_FUNC_SVIF,
317 					    svif);
318 		} else {
319 			/* Set DRV func PARIF */
320 			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
321 						  BNXT_ULP_DRV_FUNC_PARIF,
322 						  &parif)) {
323 				BNXT_DRV_DBG(ERR,
324 					    "ParseErr:ifindex is not valid\n");
325 				return;
326 			}
327 			ULP_COMP_FLD_IDX_WR(params,
328 					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
329 					    parif);
330 
331 			/* Set DRV SVIF */
332 			if (ulp_port_db_svif_get(params->ulp_ctx, ifindex,
333 						 BNXT_ULP_DRV_FUNC_SVIF, &svif)) {
334 				BNXT_DRV_DBG(ERR, "ParseErr:ifindex is not valid\n");
335 				return;
336 			}
337 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DRV_FUNC_SVIF,
338 					    svif);
339 		}
340 		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
341 			ULP_COMP_FLD_IDX_WR(params,
342 					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
343 					    1);
344 		}
345 	}
346 }
347 
348 static int32_t
349 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
350 {
351 	enum bnxt_ulp_intf_type match_port_type, act_port_type;
352 	enum bnxt_ulp_direction_type dir;
353 	uint32_t act_port_set;
354 
355 	/* Get the computed details */
356 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
357 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
358 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
359 	act_port_type = ULP_COMP_FLD_IDX_RD(params,
360 					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
361 	act_port_set = ULP_COMP_FLD_IDX_RD(params,
362 					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
363 
364 	/* set the flow direction in the proto and action header */
365 	if (dir == BNXT_ULP_DIR_EGRESS) {
366 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
367 			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
368 		ULP_BITMAP_SET(params->act_bitmap.bits,
369 			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
370 	} else {
371 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
372 			       BNXT_ULP_FLOW_DIR_BITMASK_ING);
373 		ULP_BITMAP_SET(params->act_bitmap.bits,
374 			       BNXT_ULP_FLOW_DIR_BITMASK_ING);
375 	}
376 
377 	/* Evaluate the VF to VF flag */
378 	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
379 	     match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
380 		if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
381 				      BNXT_ULP_ACT_BIT_MULTIPLE_PORT)) {
382 			ULP_BITMAP_SET(params->act_bitmap.bits,
383 				       BNXT_ULP_ACT_BIT_VF_TO_VF);
384 		} else {
385 			if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MP_A_IS_VFREP) &&
386 			    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MP_B_IS_VFREP))
387 				ULP_BITMAP_SET(params->act_bitmap.bits,
388 					       BNXT_ULP_ACT_BIT_VF_TO_VF);
389 			else
390 				ULP_BITMAP_RESET(params->act_bitmap.bits,
391 						 BNXT_ULP_ACT_BIT_VF_TO_VF);
392 		}
393 	}
394 
395 	/* Update the decrement ttl computational fields */
396 	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
397 			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
398 		/*
399 		 * Check that vxlan proto is included and vxlan decap
400 		 * action is not set then decrement tunnel ttl.
401 		 * Similarly add GRE and NVGRE in future.
402 		 */
403 		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
404 				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
405 		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
406 				      BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
407 			ULP_COMP_FLD_IDX_WR(params,
408 					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
409 		} else {
410 			ULP_COMP_FLD_IDX_WR(params,
411 					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
412 		}
413 	}
414 
415 	/* Merge the hdr_fp_bit into the proto header bit */
416 	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
417 
418 	/* Update the comp fld fid */
419 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
420 
421 	/* set the L2 context usage shall change it later */
422 	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_L2_CNTXT_ID);
423 
424 	/* Update the computed interface parameters */
425 	bnxt_ulp_comp_fld_intf_update(params);
426 
427 	/* TBD: Handle the flow rejection scenarios */
428 	return 0;
429 }
430 
431 /*
432  * Function to handle the post processing of the parsing details
433  */
434 void
435 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
436 {
437 	ulp_post_process_normal_flow(params);
438 }
439 
440 /*
441  * Function to compute the flow direction based on the match port details
442  */
443 static void
444 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
445 {
446 	enum bnxt_ulp_intf_type match_port_type;
447 
448 	/* Get the match port type */
449 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
450 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
451 
452 	/* If ingress flow and matchport is vf rep then dir is egress*/
453 	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
454 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
455 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
456 				    BNXT_ULP_DIR_EGRESS);
457 	} else {
458 		/* Assign the input direction */
459 		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
460 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
461 					    BNXT_ULP_DIR_INGRESS);
462 		else if (params->dir_attr & BNXT_ULP_FLOW_ATTR_EGRESS)
463 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
464 					    BNXT_ULP_DIR_EGRESS);
465 		else if (match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
466 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
467 					    BNXT_ULP_DIR_EGRESS);
468 		else
469 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
470 					    BNXT_ULP_DIR_INGRESS);
471 	}
472 }
473 
474 /* Function to handle the parsing of RTE Flow item PF Header. */
475 static int32_t
476 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
477 			uint32_t ifindex,
478 			uint16_t mask,
479 			enum bnxt_ulp_direction_type item_dir)
480 {
481 	uint16_t svif;
482 	enum bnxt_ulp_direction_type dir;
483 	struct ulp_rte_hdr_field *hdr_field;
484 	enum bnxt_ulp_svif_type svif_type;
485 	enum bnxt_ulp_intf_type port_type;
486 
487 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
488 	    BNXT_ULP_INVALID_SVIF_VAL) {
489 		BNXT_DRV_DBG(ERR,
490 			    "SVIF already set,multiple source not support'd\n");
491 		return BNXT_TF_RC_ERROR;
492 	}
493 
494 	/* Get port type details */
495 	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
496 	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
497 		BNXT_DRV_DBG(ERR, "Invalid port type\n");
498 		return BNXT_TF_RC_ERROR;
499 	}
500 
501 	/* Update the match port type */
502 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
503 
504 	/* compute the direction */
505 	bnxt_ulp_rte_parser_direction_compute(params);
506 
507 	/* Get the computed direction */
508 	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
509 		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
510 	if (dir == BNXT_ULP_DIR_INGRESS &&
511 	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
512 		svif_type = BNXT_ULP_PHY_PORT_SVIF;
513 	} else {
514 		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
515 		    item_dir != BNXT_ULP_DIR_EGRESS)
516 			svif_type = BNXT_ULP_VF_FUNC_SVIF;
517 		else
518 			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
519 	}
520 	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type, &svif);
521 	svif = rte_cpu_to_be_16(svif);
522 	mask = rte_cpu_to_be_16(mask);
523 	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
524 	memcpy(hdr_field->spec, &svif, sizeof(svif));
525 	memcpy(hdr_field->mask, &mask, sizeof(mask));
526 	hdr_field->size = sizeof(svif);
527 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
528 			    rte_be_to_cpu_16(svif));
529 	return BNXT_TF_RC_SUCCESS;
530 }
531 
532 /* Function to handle the parsing of the RTE port id */
533 int32_t
534 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
535 {
536 	uint16_t port_id = 0;
537 	uint16_t svif_mask = 0xFFFF;
538 	uint32_t ifindex;
539 	int32_t rc = BNXT_TF_RC_ERROR;
540 
541 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
542 	    BNXT_ULP_INVALID_SVIF_VAL)
543 		return BNXT_TF_RC_SUCCESS;
544 
545 	/* SVIF not set. So get the port id */
546 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
547 
548 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
549 					      port_id,
550 					      &ifindex)) {
551 		BNXT_DRV_DBG(ERR, "ParseErr:Portid is not valid\n");
552 		return rc;
553 	}
554 
555 	/* Update the SVIF details */
556 	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
557 				     BNXT_ULP_DIR_INVALID);
558 	return rc;
559 }
560 
561 /* Function to handle the implicit action port id */
562 int32_t
563 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
564 {
565 	struct rte_flow_action action_item = {0};
566 	struct rte_flow_action_port_id port_id = {0};
567 
568 	/* Read the action port set bit */
569 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
570 		/* Already set, so just exit */
571 		return BNXT_TF_RC_SUCCESS;
572 	}
573 	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
574 	action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
575 	action_item.conf = &port_id;
576 
577 	/* Update the action port based on incoming port */
578 	ulp_rte_port_act_handler(&action_item, params);
579 
580 	/* Reset the action port set bit */
581 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
582 	return BNXT_TF_RC_SUCCESS;
583 }
584 
585 /* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
586 int32_t
587 ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
588 			 struct ulp_rte_parser_params *params)
589 {
590 	enum bnxt_ulp_direction_type item_dir;
591 	uint16_t ethdev_id;
592 	uint16_t mask = 0;
593 	uint32_t ifindex;
594 	int32_t rc = BNXT_TF_RC_PARSE_ERR;
595 
596 	if (!item->spec) {
597 		BNXT_DRV_DBG(ERR, "ParseErr:Port spec is not valid\n");
598 		return rc;
599 	}
600 	if (!item->mask) {
601 		BNXT_DRV_DBG(ERR, "ParseErr:Port mask is not valid\n");
602 		return rc;
603 	}
604 
605 	switch (item->type) {
606 	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
607 		const struct rte_flow_item_port_id *port_spec = item->spec;
608 		const struct rte_flow_item_port_id *port_mask = item->mask;
609 
610 		item_dir = BNXT_ULP_DIR_INVALID;
611 		ethdev_id = port_spec->id;
612 		mask = port_mask->id;
613 
614 		if (!port_mask->id) {
615 			ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF_IGNORE);
616 		}
617 		break;
618 	}
619 	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
620 		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
621 		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;
622 
623 		item_dir = BNXT_ULP_DIR_INGRESS;
624 		ethdev_id = ethdev_spec->port_id;
625 		mask = ethdev_mask->port_id;
626 		break;
627 	}
628 	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
629 		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
630 		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;
631 
632 		item_dir = BNXT_ULP_DIR_EGRESS;
633 		ethdev_id = ethdev_spec->port_id;
634 		mask = ethdev_mask->port_id;
635 		break;
636 	}
637 	default:
638 		BNXT_DRV_DBG(ERR, "ParseErr:Unexpected item\n");
639 		return rc;
640 	}
641 
642 	/* perform the conversion from dpdk port to bnxt ifindex */
643 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
644 					      ethdev_id,
645 					      &ifindex)) {
646 		BNXT_DRV_DBG(ERR, "ParseErr:Portid is not valid\n");
647 		return rc;
648 	}
649 	/* Update the SVIF details */
650 	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
651 }
652 
653 /* Function to handle the update of proto header based on field values */
654 static void
655 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
656 			     uint16_t type, uint32_t in_flag,
657 			     uint32_t has_vlan, uint32_t has_vlan_mask)
658 {
659 #define ULP_RTE_ETHER_TYPE_ROE	0xfc3d
660 
661 	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
662 		if (in_flag) {
663 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
664 				       BNXT_ULP_HDR_BIT_I_IPV4);
665 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
666 		} else {
667 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
668 				       BNXT_ULP_HDR_BIT_O_IPV4);
669 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
670 		}
671 	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
672 		if (in_flag) {
673 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
674 				       BNXT_ULP_HDR_BIT_I_IPV6);
675 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
676 		} else {
677 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
678 				       BNXT_ULP_HDR_BIT_O_IPV6);
679 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
680 		}
681 	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
682 		has_vlan_mask = 1;
683 		has_vlan = 1;
684 	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_ECPRI)) {
685 		/* Update the hdr_bitmap with eCPRI */
686 		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
687 				BNXT_ULP_HDR_BIT_O_ECPRI);
688 	} else if (type == tfp_cpu_to_be_16(ULP_RTE_ETHER_TYPE_ROE)) {
689 		/* Update the hdr_bitmap with RoE */
690 		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
691 				BNXT_ULP_HDR_BIT_O_ROE);
692 	}
693 
694 	if (has_vlan_mask) {
695 		if (in_flag) {
696 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_HAS_VTAG,
697 					    has_vlan);
698 			ULP_COMP_FLD_IDX_WR(param,
699 					    BNXT_ULP_CF_IDX_I_VLAN_NO_IGNORE,
700 					    1);
701 		} else {
702 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_HAS_VTAG,
703 					    has_vlan);
704 			ULP_COMP_FLD_IDX_WR(param,
705 					    BNXT_ULP_CF_IDX_O_VLAN_NO_IGNORE,
706 					    1);
707 		}
708 	}
709 }
710 
711 /* Internal Function to identify broadcast or multicast packets */
712 static int32_t
713 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
714 {
715 	if (rte_is_multicast_ether_addr(eth_addr) ||
716 	    rte_is_broadcast_ether_addr(eth_addr)) {
717 		BNXT_DRV_DBG(DEBUG,
718 			    "No support for bcast or mcast addr offload\n");
719 		return 1;
720 	}
721 	return 0;
722 }
723 
724 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
725 int32_t
726 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
727 			struct ulp_rte_parser_params *params)
728 {
729 	const struct rte_flow_item_eth *eth_spec = item->spec;
730 	const struct rte_flow_item_eth *eth_mask = item->mask;
731 	uint32_t idx = 0, dmac_idx = 0;
732 	uint32_t size;
733 	uint16_t eth_type = 0;
734 	uint32_t inner_flag = 0;
735 	uint32_t has_vlan = 0, has_vlan_mask = 0;
736 
737 	/* Perform validations */
738 	if (eth_spec) {
739 		/* Avoid multicast and broadcast addr */
740 		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
741 		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.dst_addr))
742 			return BNXT_TF_RC_PARSE_ERR;
743 
744 		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
745 		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.src_addr))
746 			return BNXT_TF_RC_PARSE_ERR;
747 
748 		eth_type = eth_spec->hdr.ether_type;
749 		has_vlan = eth_spec->has_vlan;
750 	}
751 
752 	/* If mask is not specified then use the default mask */
753 	if (eth_spec && !eth_mask)
754 		eth_mask = &rte_flow_item_eth_mask;
755 
756 	if (eth_mask) {
757 		eth_type &= eth_mask->type;
758 		has_vlan_mask = eth_mask->has_vlan;
759 	}
760 
761 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
762 						    BNXT_ULP_PROTO_HDR_ETH_NUM))) {
763 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
764 		return BNXT_TF_RC_ERROR;
765 	}
766 	/*
767 	 * Copy the rte_flow_item for eth into hdr_field using ethernet
768 	 * header fields
769 	 */
770 	dmac_idx = idx;
771 	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.dst_addr.addr_bytes);
772 	ulp_rte_prsr_fld_mask(params, &idx, size,
773 			      ulp_deference_struct(eth_spec, hdr.dst_addr.addr_bytes),
774 			      ulp_deference_struct(eth_mask, hdr.dst_addr.addr_bytes),
775 			      ULP_PRSR_ACT_DEFAULT);
776 
777 	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.src_addr.addr_bytes);
778 	ulp_rte_prsr_fld_mask(params, &idx, size,
779 			      ulp_deference_struct(eth_spec, hdr.src_addr.addr_bytes),
780 			      ulp_deference_struct(eth_mask, hdr.src_addr.addr_bytes),
781 			      ULP_PRSR_ACT_DEFAULT);
782 
783 	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.ether_type);
784 	ulp_rte_prsr_fld_mask(params, &idx, size,
785 			      ulp_deference_struct(eth_spec, hdr.ether_type),
786 			      ulp_deference_struct(eth_mask, hdr.ether_type),
787 			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
788 			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);
789 
790 	/* Update the protocol hdr bitmap */
791 	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
792 			     BNXT_ULP_HDR_BIT_O_ETH) ||
793 	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
794 			     BNXT_ULP_HDR_BIT_O_IPV4) ||
795 	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
796 			     BNXT_ULP_HDR_BIT_O_IPV6) ||
797 	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
798 			     BNXT_ULP_HDR_BIT_O_UDP) ||
799 	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
800 			     BNXT_ULP_HDR_BIT_O_TCP)) {
801 		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
802 		inner_flag = 1;
803 	} else {
804 		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
805 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
806 				    dmac_idx);
807 	}
808 	/* Update the field protocol hdr bitmap */
809 	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag,
810 				     has_vlan, has_vlan_mask);
811 
812 	return BNXT_TF_RC_SUCCESS;
813 }
814 
815 /* Function to handle the parsing of RTE Flow item Vlan Header. */
816 int32_t
817 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
818 			 struct ulp_rte_parser_params *params)
819 {
820 	const struct rte_flow_item_vlan *vlan_spec = item->spec;
821 	const struct rte_flow_item_vlan *vlan_mask = item->mask;
822 	struct ulp_rte_hdr_bitmap	*hdr_bit;
823 	uint32_t idx = 0;
824 	uint16_t vlan_tag = 0, priority = 0;
825 	uint16_t vlan_tag_mask = 0, priority_mask = 0;
826 	uint32_t outer_vtag_num;
827 	uint32_t inner_vtag_num;
828 	uint16_t eth_type = 0;
829 	uint32_t inner_flag = 0;
830 	uint32_t size;
831 
832 	if (vlan_spec) {
833 		vlan_tag = ntohs(vlan_spec->hdr.vlan_tci);
834 		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
835 		vlan_tag &= ULP_VLAN_TAG_MASK;
836 		vlan_tag = htons(vlan_tag);
837 		eth_type = vlan_spec->hdr.eth_proto;
838 	}
839 
840 	/* assign default vlan mask if spec is valid and mask is not */
841 	if (vlan_spec && !vlan_mask)
842 		vlan_mask = &rte_flow_item_vlan_mask;
843 
844 	if (vlan_mask) {
845 		vlan_tag_mask = ntohs(vlan_mask->tci);
846 		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
847 		vlan_tag_mask &= 0xfff;
848 		/*
849 		 * the storage for priority and vlan tag is 2 bytes
850 		 * The mask of priority which is 3 bits if it is all 1's
851 		 * then make the rest bits 13 bits as 1's
852 		 * so that it is matched as exact match.
853 		 */
854 		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
855 			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
856 		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
857 			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
858 		vlan_tag_mask = htons(vlan_tag_mask);
859 	}
860 
861 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
862 						    BNXT_ULP_PROTO_HDR_S_VLAN_NUM))) {
863 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
864 		return BNXT_TF_RC_ERROR;
865 	}
866 
867 	/*
868 	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
869 	 * header fields
870 	 */
871 	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.vlan_tci);
872 	/*
873 	 * The priority field is ignored since OVS is setting it as
874 	 * wild card match and it is not supported. This is a work
875 	 * around and shall be addressed in the future.
876 	 */
877 	ulp_rte_prsr_fld_mask(params, &idx, size,
878 			      &priority,
879 			      (vlan_mask) ? &priority_mask : NULL,
880 			      ULP_PRSR_ACT_MASK_IGNORE);
881 
882 	ulp_rte_prsr_fld_mask(params, &idx, size,
883 			      &vlan_tag,
884 			      (vlan_mask) ? &vlan_tag_mask : NULL,
885 			      ULP_PRSR_ACT_DEFAULT);
886 
887 	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.eth_proto);
888 	ulp_rte_prsr_fld_mask(params, &idx, size,
889 			      ulp_deference_struct(vlan_spec, hdr.eth_proto),
890 			      ulp_deference_struct(vlan_mask, hdr.eth_proto),
891 			      ULP_PRSR_ACT_MATCH_IGNORE);
892 
893 	/* Get the outer tag and inner tag counts */
894 	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
895 					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
896 	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
897 					     BNXT_ULP_CF_IDX_I_VTAG_NUM);
898 
899 	/* Update the hdr_bitmap of the vlans */
900 	hdr_bit = &params->hdr_bitmap;
901 	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
902 	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
903 	    !outer_vtag_num) {
904 		/* Update the vlan tag num */
905 		outer_vtag_num++;
906 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
907 				    outer_vtag_num);
908 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_HAS_VTAG, 1);
909 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
910 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
911 			       BNXT_ULP_HDR_BIT_OO_VLAN);
912 		if (vlan_mask && vlan_tag_mask)
913 			ULP_COMP_FLD_IDX_WR(params,
914 					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);
915 
916 	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
917 		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
918 		   outer_vtag_num == 1) {
919 		/* update the vlan tag num */
920 		outer_vtag_num++;
921 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
922 				    outer_vtag_num);
923 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
924 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
925 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
926 			       BNXT_ULP_HDR_BIT_OI_VLAN);
927 		if (vlan_mask && vlan_tag_mask)
928 			ULP_COMP_FLD_IDX_WR(params,
929 					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);
930 
931 	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
932 		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
933 		   !inner_vtag_num) {
934 		/* update the vlan tag num */
935 		inner_vtag_num++;
936 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
937 				    inner_vtag_num);
938 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_HAS_VTAG, 1);
939 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
940 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
941 			       BNXT_ULP_HDR_BIT_IO_VLAN);
942 		if (vlan_mask && vlan_tag_mask)
943 			ULP_COMP_FLD_IDX_WR(params,
944 					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
945 		inner_flag = 1;
946 	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
947 		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
948 		   inner_vtag_num == 1) {
949 		/* update the vlan tag num */
950 		inner_vtag_num++;
951 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
952 				    inner_vtag_num);
953 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
954 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
955 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
956 			       BNXT_ULP_HDR_BIT_II_VLAN);
957 		if (vlan_mask && vlan_tag_mask)
958 			ULP_COMP_FLD_IDX_WR(params,
959 					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
960 		inner_flag = 1;
961 	} else {
962 		BNXT_DRV_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
963 		return BNXT_TF_RC_ERROR;
964 	}
965 	/* Update the field protocol hdr bitmap */
966 	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag, 1, 1);
967 	return BNXT_TF_RC_SUCCESS;
968 }
969 
970 /* Function to handle the update of proto header based on field values */
971 static void
972 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
973 			     uint8_t proto, uint32_t in_flag)
974 {
975 	if (proto == IPPROTO_UDP) {
976 		if (in_flag) {
977 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
978 				       BNXT_ULP_HDR_BIT_I_UDP);
979 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
980 		} else {
981 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
982 				       BNXT_ULP_HDR_BIT_O_UDP);
983 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
984 		}
985 	} else if (proto == IPPROTO_TCP) {
986 		if (in_flag) {
987 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
988 				       BNXT_ULP_HDR_BIT_I_TCP);
989 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
990 		} else {
991 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
992 				       BNXT_ULP_HDR_BIT_O_TCP);
993 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
994 		}
995 	} else if (proto == IPPROTO_GRE) {
996 		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
997 	} else if (proto == IPPROTO_ICMP) {
998 		if (ULP_BITMAP_ISSET(param->cf_bitmap,
999 				     BNXT_ULP_CF_BIT_IS_TUNNEL))
1000 			ULP_BITMAP_SET(param->hdr_bitmap.bits,
1001 				       BNXT_ULP_HDR_BIT_I_ICMP);
1002 		else
1003 			ULP_BITMAP_SET(param->hdr_bitmap.bits,
1004 				       BNXT_ULP_HDR_BIT_O_ICMP);
1005 	}
1006 
1007 	if (in_flag) {
1008 		ULP_COMP_FLD_IDX_WR(param,
1009 				    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1010 				    1);
1011 		ULP_COMP_FLD_IDX_WR(param,
1012 				    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1013 				    proto);
1014 	} else {
1015 		ULP_COMP_FLD_IDX_WR(param,
1016 				    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1017 				    1);
1018 		ULP_COMP_FLD_IDX_WR(param,
1019 				    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1020 				    proto);
1021 	}
1022 }
1023 
1024 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
1025 int32_t
1026 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
1027 			 struct ulp_rte_parser_params *params)
1028 {
1029 	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
1030 	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
1031 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1032 	uint32_t idx = 0, dip_idx = 0;
1033 	uint32_t size;
1034 	uint8_t proto = 0;
1035 	uint8_t ttl = 0;
1036 	uint8_t proto_mask = 0;
1037 	uint32_t inner_flag = 0;
1038 	uint32_t cnt;
1039 
1040 	/* validate there are no 3rd L3 header */
1041 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1042 	if (cnt == 2) {
1043 		BNXT_DRV_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1044 		return BNXT_TF_RC_ERROR;
1045 	}
1046 
1047 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1048 						    BNXT_ULP_PROTO_HDR_IPV4_NUM))) {
1049 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1050 		return BNXT_TF_RC_ERROR;
1051 	}
1052 
1053 	/* If mask is not specified then use the default mask */
1054 	if (ipv4_spec && !ipv4_mask)
1055 		ipv4_mask = &rte_flow_item_ipv4_mask;
1056 
1057 	/*
1058 	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1059 	 * header fields
1060 	 */
1061 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
1062 	ulp_rte_prsr_fld_mask(params, &idx, size,
1063 			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
1064 			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
1065 			      ULP_PRSR_ACT_DEFAULT);
1066 
1067 	/*
1068 	 * The tos field is ignored since OVS is setting it as wild card
1069 	 * match and it is not supported. An application can enable tos support.
1070 	 */
1071 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
1072 	ulp_rte_prsr_fld_mask(params, &idx, size,
1073 			      ulp_deference_struct(ipv4_spec,
1074 						   hdr.type_of_service),
1075 			      ulp_deference_struct(ipv4_mask,
1076 						   hdr.type_of_service),
1077 			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
1078 			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);
1079 
1080 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
1081 	ulp_rte_prsr_fld_mask(params, &idx, size,
1082 			      ulp_deference_struct(ipv4_spec, hdr.total_length),
1083 			      ulp_deference_struct(ipv4_mask, hdr.total_length),
1084 			      ULP_PRSR_ACT_DEFAULT);
1085 
1086 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
1087 	ulp_rte_prsr_fld_mask(params, &idx, size,
1088 			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
1089 			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
1090 			      ULP_PRSR_ACT_DEFAULT);
1091 
1092 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
1093 	ulp_rte_prsr_fld_mask(params, &idx, size,
1094 			      ulp_deference_struct(ipv4_spec,
1095 						   hdr.fragment_offset),
1096 			      ulp_deference_struct(ipv4_mask,
1097 						   hdr.fragment_offset),
1098 			      ULP_PRSR_ACT_MASK_IGNORE);
1099 
1100 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1101 	ulp_rte_prsr_fld_mask(params, &idx, size,
1102 			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1103 			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1104 			      ULP_PRSR_ACT_DEFAULT);
1105 	if (ipv4_spec)
1106 		ttl = ipv4_spec->hdr.time_to_live;
1107 	if (!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
1108 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_TTL, ttl);
1109 
1110 	/* Ignore proto for matching templates */
1111 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1112 	ulp_rte_prsr_fld_mask(params, &idx, size,
1113 			      ulp_deference_struct(ipv4_spec,
1114 						   hdr.next_proto_id),
1115 			      ulp_deference_struct(ipv4_mask,
1116 						   hdr.next_proto_id),
1117 			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
1118 			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);
1119 
1120 	if (ipv4_spec)
1121 		proto = ipv4_spec->hdr.next_proto_id;
1122 
1123 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1124 	ulp_rte_prsr_fld_mask(params, &idx, size,
1125 			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1126 			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1127 			      ULP_PRSR_ACT_DEFAULT);
1128 
1129 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1130 	ulp_rte_prsr_fld_mask(params, &idx, size,
1131 			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
1132 			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
1133 			      ULP_PRSR_ACT_DEFAULT);
1134 
1135 	dip_idx = idx;
1136 	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1137 	ulp_rte_prsr_fld_mask(params, &idx, size,
1138 			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1139 			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1140 			      ULP_PRSR_ACT_DEFAULT);
1141 
1142 	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
1143 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1144 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
1145 	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL)) {
1146 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1147 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1148 		inner_flag = 1;
1149 	} else {
1150 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1151 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1152 		/* Update the tunnel offload dest ip offset */
1153 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1154 				    dip_idx);
1155 	}
1156 
1157 	/* Some of the PMD applications may set the protocol field
1158 	 * in the IPv4 spec but don't set the mask. So, consider
1159 	 * the mask in the proto value calculation.
1160 	 */
1161 	if (ipv4_mask) {
1162 		proto &= ipv4_mask->hdr.next_proto_id;
1163 		proto_mask = ipv4_mask->hdr.next_proto_id;
1164 	}
1165 
1166 	/* Update the field protocol hdr bitmap */
1167 	if (proto_mask)
1168 		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1169 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1170 	return BNXT_TF_RC_SUCCESS;
1171 }
1172 
1173 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1174 int32_t
1175 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1176 			 struct ulp_rte_parser_params *params)
1177 {
1178 	const struct rte_flow_item_ipv6	*ipv6_spec = item->spec;
1179 	const struct rte_flow_item_ipv6	*ipv6_mask = item->mask;
1180 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1181 	uint32_t idx = 0, dip_idx = 0;
1182 	uint32_t size, vtc_flow;
1183 	uint32_t ver_spec = 0, ver_mask = 0;
1184 	uint32_t tc_spec = 0, tc_mask = 0;
1185 	uint32_t lab_spec = 0, lab_mask = 0;
1186 	uint8_t proto = 0;
1187 	uint8_t proto_mask = 0;
1188 	uint8_t ttl = 0;
1189 	uint32_t inner_flag = 0;
1190 	uint32_t cnt;
1191 
1192 	/* validate there are no 3rd L3 header */
1193 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1194 	if (cnt == 2) {
1195 		BNXT_DRV_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1196 		return BNXT_TF_RC_ERROR;
1197 	}
1198 
1199 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1200 						    BNXT_ULP_PROTO_HDR_IPV6_NUM))) {
1201 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1202 		return BNXT_TF_RC_ERROR;
1203 	}
1204 
1205 	/* If mask is not specified then use the default mask */
1206 	if (ipv6_spec && !ipv6_mask)
1207 		ipv6_mask = &rte_flow_item_ipv6_mask;
1208 
1209 	/*
1210 	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1211 	 * header fields
1212 	 */
1213 	if (ipv6_spec) {
1214 		vtc_flow = ntohl(ipv6_spec->hdr.vtc_flow);
1215 		ver_spec = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
1216 		tc_spec = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
1217 		lab_spec = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));
1218 		proto = ipv6_spec->hdr.proto;
1219 	}
1220 
1221 	if (ipv6_mask) {
1222 		vtc_flow = ntohl(ipv6_mask->hdr.vtc_flow);
1223 		ver_mask = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
1224 		tc_mask = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
1225 		lab_mask = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));
1226 
1227 		/* Some of the PMD applications may set the protocol field
1228 		 * in the IPv6 spec but don't set the mask. So, consider
1229 		 * the mask in proto value calculation.
1230 		 */
1231 		proto &= ipv6_mask->hdr.proto;
1232 		proto_mask = ipv6_mask->hdr.proto;
1233 	}
1234 
1235 	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1236 	ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1237 			      ULP_PRSR_ACT_DEFAULT);
1238 	/*
1239 	 * The TC and flow label field are ignored since OVS is
1240 	 * setting it for match and it is not supported.
1241 	 * This is a work around and
1242 	 * shall be addressed in the future.
1243 	 */
1244 	ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1245 			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
1246 			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);
1247 	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1248 			      ULP_PRSR_ACT_MASK_IGNORE);
1249 
1250 	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1251 	ulp_rte_prsr_fld_mask(params, &idx, size,
1252 			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
1253 			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
1254 			      ULP_PRSR_ACT_DEFAULT);
1255 
1256 	/* Ignore proto for template matching */
1257 	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1258 	ulp_rte_prsr_fld_mask(params, &idx, size,
1259 			      ulp_deference_struct(ipv6_spec, hdr.proto),
1260 			      ulp_deference_struct(ipv6_mask, hdr.proto),
1261 			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
1262 			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);
1263 
1264 	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1265 	ulp_rte_prsr_fld_mask(params, &idx, size,
1266 			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1267 			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1268 			      ULP_PRSR_ACT_DEFAULT);
1269 	if (ipv6_spec)
1270 		ttl = ipv6_spec->hdr.hop_limits;
1271 	if (!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
1272 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_TTL, ttl);
1273 
1274 	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1275 	ulp_rte_prsr_fld_mask(params, &idx, size,
1276 			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
1277 			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
1278 			      ULP_PRSR_ACT_DEFAULT);
1279 
1280 	dip_idx =  idx;
1281 	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1282 	ulp_rte_prsr_fld_mask(params, &idx, size,
1283 			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1284 			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1285 			      ULP_PRSR_ACT_DEFAULT);
1286 
1287 	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
1288 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1289 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
1290 	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL)) {
1291 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1292 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1293 		inner_flag = 1;
1294 	} else {
1295 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1296 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1297 		/* Update the tunnel offload dest ip offset */
1298 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1299 				    dip_idx);
1300 	}
1301 
1302 	/* Update the field protocol hdr bitmap */
1303 	if (proto_mask)
1304 		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1305 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1306 
1307 	return BNXT_TF_RC_SUCCESS;
1308 }
1309 
1310 /* Function to handle the update of proto header based on field values */
1311 static void
1312 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
1313 			     uint16_t src_port, uint16_t src_mask,
1314 			     uint16_t dst_port, uint16_t dst_mask,
1315 			     enum bnxt_ulp_hdr_bit hdr_bit)
1316 {
1317 	uint16_t stat_port = 0;
1318 	struct bnxt *bp;
1319 
1320 	switch (hdr_bit) {
1321 	case BNXT_ULP_HDR_BIT_I_UDP:
1322 	case BNXT_ULP_HDR_BIT_I_TCP:
1323 		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1324 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1325 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1326 				    (uint64_t)rte_be_to_cpu_16(src_port));
1327 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1328 				    (uint64_t)rte_be_to_cpu_16(dst_port));
1329 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
1330 				    (uint64_t)rte_be_to_cpu_16(src_mask));
1331 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
1332 				    (uint64_t)rte_be_to_cpu_16(dst_mask));
1333 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1334 				    1);
1335 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1336 				    !!(src_port & src_mask));
1337 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1338 				    !!(dst_port & dst_mask));
1339 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1340 				    (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
1341 				    IPPROTO_UDP : IPPROTO_TCP);
1342 		break;
1343 	case BNXT_ULP_HDR_BIT_O_UDP:
1344 	case BNXT_ULP_HDR_BIT_O_TCP:
1345 		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
1346 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1347 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1348 				    (uint64_t)rte_be_to_cpu_16(src_port));
1349 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1350 				    (uint64_t)rte_be_to_cpu_16(dst_port));
1351 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
1352 				    (uint64_t)rte_be_to_cpu_16(src_mask));
1353 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1354 				    (uint64_t)rte_be_to_cpu_16(dst_mask));
1355 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1356 				    1);
1357 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1358 				    !!(src_port & src_mask));
1359 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1360 				    !!(dst_port & dst_mask));
1361 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1362 				    (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
1363 				    IPPROTO_UDP : IPPROTO_TCP);
1364 		break;
1365 	default:
1366 		break;
1367 	}
1368 
1369 	/* If it is not udp port then there is no need to set tunnel bits */
1370 	if (hdr_bit != BNXT_ULP_HDR_BIT_O_UDP)
1371 		return;
1372 
1373 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT,
1374 			    tfp_be_to_cpu_16(dst_port));
1375 
1376 	/* vxlan static customized port */
1377 	if (ULP_APP_STATIC_VXLAN_PORT_EN(params->ulp_ctx)) {
1378 		stat_port = bnxt_ulp_cntxt_vxlan_ip_port_get(params->ulp_ctx);
1379 		if (!stat_port)
1380 			stat_port =
1381 			bnxt_ulp_cntxt_vxlan_port_get(params->ulp_ctx);
1382 
1383 		/* if udp and equal to static vxlan port then set tunnel bits*/
1384 		if (stat_port && dst_port == tfp_cpu_to_be_16(stat_port)) {
1385 			bp = bnxt_pmd_get_bp(params->port_id);
1386 			if (bp == NULL) {
1387 				BNXT_DRV_DBG(ERR, "Invalid bp\n");
1388 				return;
1389 			}
1390 			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
1391 				       BNXT_ULP_HDR_BIT_T_VXLAN);
1392 			ULP_BITMAP_SET(params->cf_bitmap,
1393 				       BNXT_ULP_CF_BIT_IS_TUNNEL);
1394 			if (bp->vxlan_ip_upar_in_use &
1395 			    HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR0) {
1396 				ULP_COMP_FLD_IDX_WR(params,
1397 						    BNXT_ULP_CF_IDX_VXLAN_IP_UPAR_ID,
1398 						    ULP_WP_SYM_TUN_HDR_TYPE_UPAR1);
1399 			}
1400 		}
1401 	} else {
1402 		/* if dynamic Vxlan is enabled then skip dport checks */
1403 		if (ULP_APP_DYNAMIC_VXLAN_PORT_EN(params->ulp_ctx))
1404 			return;
1405 
1406 		/* Vxlan and GPE port check */
1407 		if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN_GPE)) {
1408 			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
1409 				       BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
1410 			ULP_BITMAP_SET(params->cf_bitmap,
1411 				       BNXT_ULP_CF_BIT_IS_TUNNEL);
1412 		} else if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1413 			ULP_BITMAP_SET(params->hdr_fp_bit.bits,
1414 				       BNXT_ULP_HDR_BIT_T_VXLAN);
1415 			ULP_BITMAP_SET(params->cf_bitmap,
1416 				       BNXT_ULP_CF_BIT_IS_TUNNEL);
1417 		}
1418 	}
1419 }
1420 
1421 /* Function to handle the parsing of RTE Flow item UDP Header. */
1422 int32_t
1423 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1424 			struct ulp_rte_parser_params *params)
1425 {
1426 	const struct rte_flow_item_udp *udp_spec = item->spec;
1427 	const struct rte_flow_item_udp *udp_mask = item->mask;
1428 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1429 	uint32_t idx = 0;
1430 	uint32_t size;
1431 	uint16_t dport = 0, sport = 0;
1432 	uint16_t dport_mask = 0, sport_mask = 0;
1433 	uint32_t cnt;
1434 	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;
1435 
1436 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1437 	if (cnt == 2) {
1438 		BNXT_DRV_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1439 		return BNXT_TF_RC_ERROR;
1440 	}
1441 
1442 	if (udp_spec) {
1443 		sport = udp_spec->hdr.src_port;
1444 		dport = udp_spec->hdr.dst_port;
1445 	}
1446 	if (udp_spec && !udp_mask)
1447 		udp_mask = &rte_flow_item_udp_mask;
1448 
1449 	if (udp_mask) {
1450 		sport_mask = udp_mask->hdr.src_port;
1451 		dport_mask = udp_mask->hdr.dst_port;
1452 	}
1453 
1454 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1455 						    BNXT_ULP_PROTO_HDR_UDP_NUM))) {
1456 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1457 		return BNXT_TF_RC_ERROR;
1458 	}
1459 
1460 	/*
1461 	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1462 	 * header fields
1463 	 */
1464 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1465 	ulp_rte_prsr_fld_mask(params, &idx, size,
1466 			      ulp_deference_struct(udp_spec, hdr.src_port),
1467 			      ulp_deference_struct(udp_mask, hdr.src_port),
1468 			      ULP_PRSR_ACT_DEFAULT);
1469 
1470 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1471 	ulp_rte_prsr_fld_mask(params, &idx, size,
1472 			      ulp_deference_struct(udp_spec, hdr.dst_port),
1473 			      ulp_deference_struct(udp_mask, hdr.dst_port),
1474 			      ULP_PRSR_ACT_DEFAULT);
1475 
1476 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1477 	ulp_rte_prsr_fld_mask(params, &idx, size,
1478 			      ulp_deference_struct(udp_spec, hdr.dgram_len),
1479 			      ulp_deference_struct(udp_mask, hdr.dgram_len),
1480 			      ULP_PRSR_ACT_DEFAULT);
1481 
1482 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1483 	ulp_rte_prsr_fld_mask(params, &idx, size,
1484 			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1485 			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1486 			      ULP_PRSR_ACT_DEFAULT);
1487 
1488 	/* Set the udp header bitmap and computed l4 header bitmaps */
1489 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1490 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
1491 	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
1492 		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;
1493 
1494 	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1495 				     dport_mask, out_l4);
1496 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1497 	return BNXT_TF_RC_SUCCESS;
1498 }
1499 
1500 /* Function to handle the parsing of RTE Flow item TCP Header. */
1501 int32_t
1502 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1503 			struct ulp_rte_parser_params *params)
1504 {
1505 	const struct rte_flow_item_tcp *tcp_spec = item->spec;
1506 	const struct rte_flow_item_tcp *tcp_mask = item->mask;
1507 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1508 	uint32_t idx = 0;
1509 	uint16_t dport = 0, sport = 0;
1510 	uint16_t dport_mask = 0, sport_mask = 0;
1511 	uint32_t size;
1512 	uint32_t cnt;
1513 	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;
1514 
1515 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1516 	if (cnt == 2) {
1517 		BNXT_DRV_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1518 		return BNXT_TF_RC_ERROR;
1519 	}
1520 
1521 	if (tcp_spec) {
1522 		sport = tcp_spec->hdr.src_port;
1523 		dport = tcp_spec->hdr.dst_port;
1524 	}
1525 
1526 	if (tcp_spec && !tcp_mask)
1527 		tcp_mask = &rte_flow_item_tcp_mask;
1528 
1529 	if (tcp_mask) {
1530 		sport_mask = tcp_mask->hdr.src_port;
1531 		dport_mask = tcp_mask->hdr.dst_port;
1532 	}
1533 
1534 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1535 						    BNXT_ULP_PROTO_HDR_TCP_NUM))) {
1536 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1537 		return BNXT_TF_RC_ERROR;
1538 	}
1539 
1540 	/*
1541 	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1542 	 * header fields
1543 	 */
1544 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1545 	ulp_rte_prsr_fld_mask(params, &idx, size,
1546 			      ulp_deference_struct(tcp_spec, hdr.src_port),
1547 			      ulp_deference_struct(tcp_mask, hdr.src_port),
1548 			      ULP_PRSR_ACT_DEFAULT);
1549 
1550 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1551 	ulp_rte_prsr_fld_mask(params, &idx, size,
1552 			      ulp_deference_struct(tcp_spec, hdr.dst_port),
1553 			      ulp_deference_struct(tcp_mask, hdr.dst_port),
1554 			      ULP_PRSR_ACT_DEFAULT);
1555 
1556 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1557 	ulp_rte_prsr_fld_mask(params, &idx, size,
1558 			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
1559 			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
1560 			      ULP_PRSR_ACT_DEFAULT);
1561 
1562 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1563 	ulp_rte_prsr_fld_mask(params, &idx, size,
1564 			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
1565 			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
1566 			      ULP_PRSR_ACT_DEFAULT);
1567 
1568 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1569 	ulp_rte_prsr_fld_mask(params, &idx, size,
1570 			      ulp_deference_struct(tcp_spec, hdr.data_off),
1571 			      ulp_deference_struct(tcp_mask, hdr.data_off),
1572 			      ULP_PRSR_ACT_DEFAULT);
1573 
1574 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1575 	ulp_rte_prsr_fld_mask(params, &idx, size,
1576 			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1577 			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1578 			      ULP_PRSR_ACT_DEFAULT);
1579 
1580 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1581 	ulp_rte_prsr_fld_mask(params, &idx, size,
1582 			      ulp_deference_struct(tcp_spec, hdr.rx_win),
1583 			      ulp_deference_struct(tcp_mask, hdr.rx_win),
1584 			      ULP_PRSR_ACT_DEFAULT);
1585 
1586 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1587 	ulp_rte_prsr_fld_mask(params, &idx, size,
1588 			      ulp_deference_struct(tcp_spec, hdr.cksum),
1589 			      ulp_deference_struct(tcp_mask, hdr.cksum),
1590 			      ULP_PRSR_ACT_DEFAULT);
1591 
1592 	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1593 	ulp_rte_prsr_fld_mask(params, &idx, size,
1594 			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1595 			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1596 			      ULP_PRSR_ACT_DEFAULT);
1597 
1598 	/* Set the udp header bitmap and computed l4 header bitmaps */
1599 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1600 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
1601 	    ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
1602 		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;
1603 
1604 	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1605 				     dport_mask, out_l4);
1606 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1607 	return BNXT_TF_RC_SUCCESS;
1608 }
1609 
1610 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1611 int32_t
1612 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1613 			  struct ulp_rte_parser_params *params)
1614 {
1615 	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1616 	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1617 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1618 	struct bnxt_ulp_context *ulp_ctx = params->ulp_ctx;
1619 	uint32_t idx = 0;
1620 	uint16_t dport, stat_port;
1621 	uint32_t size;
1622 
1623 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1624 						    BNXT_ULP_PROTO_HDR_VXLAN_NUM))) {
1625 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1626 		return BNXT_TF_RC_ERROR;
1627 	}
1628 
1629 	if (vxlan_spec && !vxlan_mask)
1630 		vxlan_mask = &rte_flow_item_vxlan_mask;
1631 
1632 	/* Update if the outer headers have any partial masks */
1633 	if (!ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_WC_MATCH))
1634 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_OUTER_EM_ONLY, 1);
1635 
1636 	/*
1637 	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1638 	 * header fields
1639 	 */
1640 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.flags);
1641 	ulp_rte_prsr_fld_mask(params, &idx, size,
1642 			      ulp_deference_struct(vxlan_spec, hdr.flags),
1643 			      ulp_deference_struct(vxlan_mask, hdr.flags),
1644 			      ULP_PRSR_ACT_DEFAULT);
1645 
1646 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd0);
1647 	ulp_rte_prsr_fld_mask(params, &idx, size,
1648 			      ulp_deference_struct(vxlan_spec, hdr.rsvd0),
1649 			      ulp_deference_struct(vxlan_mask, hdr.rsvd0),
1650 			      ULP_PRSR_ACT_DEFAULT);
1651 
1652 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.vni);
1653 	ulp_rte_prsr_fld_mask(params, &idx, size,
1654 			      ulp_deference_struct(vxlan_spec, hdr.vni),
1655 			      ulp_deference_struct(vxlan_mask, hdr.vni),
1656 			      ULP_PRSR_ACT_DEFAULT);
1657 
1658 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd1);
1659 	ulp_rte_prsr_fld_mask(params, &idx, size,
1660 			      ulp_deference_struct(vxlan_spec, hdr.rsvd1),
1661 			      ulp_deference_struct(vxlan_mask, hdr.rsvd1),
1662 			      ULP_PRSR_ACT_DEFAULT);
1663 
1664 	/* Update the hdr_bitmap with vxlan */
1665 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1666 	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1667 
1668 	/* if l4 protocol header updated it then reset it */
1669 	ULP_BITMAP_RESET(params->hdr_fp_bit.bits, BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
1670 
1671 	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
1672 	if (!dport) {
1673 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1674 				    ULP_UDP_PORT_VXLAN);
1675 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1676 				    ULP_UDP_PORT_VXLAN_MASK);
1677 	}
1678 
1679 	/* vxlan static customized port */
1680 	if (ULP_APP_STATIC_VXLAN_PORT_EN(ulp_ctx)) {
1681 		stat_port = bnxt_ulp_cntxt_vxlan_ip_port_get(ulp_ctx);
1682 		if (!stat_port)
1683 			stat_port = bnxt_ulp_cntxt_vxlan_port_get(ulp_ctx);
1684 
1685 		/* validate that static ports match if not reject */
1686 		if (dport != 0 && dport != tfp_cpu_to_be_16(stat_port)) {
1687 			BNXT_DRV_DBG(ERR, "ParseErr:vxlan port is not valid\n");
1688 			return BNXT_TF_RC_PARSE_ERR;
1689 		} else if (dport == 0) {
1690 			ULP_COMP_FLD_IDX_WR(params,
1691 					    BNXT_ULP_CF_IDX_TUNNEL_PORT,
1692 					    tfp_cpu_to_be_16(stat_port));
1693 		}
1694 	} else {
1695 		/* dynamic vxlan support */
1696 		if (ULP_APP_DYNAMIC_VXLAN_PORT_EN(params->ulp_ctx)) {
1697 			if (dport == 0) {
1698 				BNXT_DRV_DBG(ERR,
1699 					     "ParseErr:vxlan port is null\n");
1700 				return BNXT_TF_RC_PARSE_ERR;
1701 			}
1702 			/* set the dynamic vxlan port check */
1703 			ULP_BITMAP_SET(params->cf_bitmap,
1704 				       BNXT_ULP_CF_BIT_DYNAMIC_VXLAN_PORT);
1705 			ULP_COMP_FLD_IDX_WR(params,
1706 					    BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1707 		} else if (dport != 0 && dport != ULP_UDP_PORT_VXLAN) {
1708 			/* set the dynamic vxlan port check */
1709 			ULP_BITMAP_SET(params->cf_bitmap,
1710 				       BNXT_ULP_CF_BIT_DYNAMIC_VXLAN_PORT);
1711 			ULP_COMP_FLD_IDX_WR(params,
1712 					    BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1713 		} else {
1714 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT,
1715 					    ULP_UDP_PORT_VXLAN);
1716 		}
1717 	}
1718 	return BNXT_TF_RC_SUCCESS;
1719 }
1720 
1721 /* Function to handle the parsing of RTE Flow item Vxlan GPE Header. */
1722 int32_t
1723 ulp_rte_vxlan_gpe_hdr_handler(const struct rte_flow_item *item,
1724 			      struct ulp_rte_parser_params *params)
1725 {
1726 	const struct rte_flow_item_vxlan_gpe *vxlan_gpe_spec = item->spec;
1727 	const struct rte_flow_item_vxlan_gpe *vxlan_gpe_mask = item->mask;
1728 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1729 	uint32_t idx = 0;
1730 	uint16_t dport;
1731 	uint32_t size;
1732 
1733 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1734 						    BNXT_ULP_PROTO_HDR_VXLAN_GPE_NUM))) {
1735 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1736 		return BNXT_TF_RC_ERROR;
1737 	}
1738 
1739 	if (vxlan_gpe_spec && !vxlan_gpe_mask)
1740 		vxlan_gpe_mask = &rte_flow_item_vxlan_gpe_mask;
1741 	/*
1742 	 * Copy the rte_flow_item for vxlan gpe into hdr_field using vxlan
1743 	 * header fields
1744 	 */
1745 	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->flags);
1746 	ulp_rte_prsr_fld_mask(params, &idx, size,
1747 			      ulp_deference_struct(vxlan_gpe_spec, flags),
1748 			      ulp_deference_struct(vxlan_gpe_mask, flags),
1749 			      ULP_PRSR_ACT_DEFAULT);
1750 
1751 	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->rsvd0);
1752 	ulp_rte_prsr_fld_mask(params, &idx, size,
1753 			      ulp_deference_struct(vxlan_gpe_spec, rsvd0),
1754 			      ulp_deference_struct(vxlan_gpe_mask, rsvd0),
1755 			      ULP_PRSR_ACT_DEFAULT);
1756 
1757 	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->protocol);
1758 	ulp_rte_prsr_fld_mask(params, &idx, size,
1759 			      ulp_deference_struct(vxlan_gpe_spec, protocol),
1760 			      ulp_deference_struct(vxlan_gpe_mask, protocol),
1761 			      ULP_PRSR_ACT_DEFAULT);
1762 
1763 	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->vni);
1764 	ulp_rte_prsr_fld_mask(params, &idx, size,
1765 			      ulp_deference_struct(vxlan_gpe_spec, vni),
1766 			      ulp_deference_struct(vxlan_gpe_mask, vni),
1767 			      ULP_PRSR_ACT_DEFAULT);
1768 
1769 	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->rsvd1);
1770 	ulp_rte_prsr_fld_mask(params, &idx, size,
1771 			      ulp_deference_struct(vxlan_gpe_spec, rsvd1),
1772 			      ulp_deference_struct(vxlan_gpe_mask, rsvd1),
1773 			      ULP_PRSR_ACT_DEFAULT);
1774 
1775 	/* Update the hdr_bitmap with vxlan gpe*/
1776 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
1777 	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1778 
1779 	/* if l4 protocol header updated it then reset it */
1780 	ULP_BITMAP_RESET(params->hdr_fp_bit.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1781 
1782 	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
1783 	if (!dport) {
1784 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1785 				    ULP_UDP_PORT_VXLAN_GPE);
1786 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1787 				    ULP_UDP_PORT_VXLAN_GPE_MASK);
1788 	}
1789 	/* TBD: currently dynamic or static gpe port config is not supported */
1790 	/* Update the tunnel port */
1791 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1792 
1793 	/* Verify the vxlan gpe port */
1794 	if (dport != 0 && dport != ULP_UDP_PORT_VXLAN_GPE) {
1795 		BNXT_DRV_DBG(ERR, "ParseErr:vxlan gpe port is not valid\n");
1796 		return BNXT_TF_RC_PARSE_ERR;
1797 	}
1798 	return BNXT_TF_RC_SUCCESS;
1799 }
1800 
1801 /* Function to handle the parsing of RTE Flow item GENEVE Header. */
1802 int32_t
1803 ulp_rte_geneve_hdr_handler(const struct rte_flow_item *item,
1804 			      struct ulp_rte_parser_params *params)
1805 {
1806 	const struct rte_flow_item_geneve *geneve_spec = item->spec;
1807 	const struct rte_flow_item_geneve *geneve_mask = item->mask;
1808 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1809 	uint32_t idx = 0;
1810 	uint16_t dport;
1811 	uint32_t size;
1812 
1813 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1814 						    BNXT_ULP_PROTO_HDR_GENEVE_NUM))) {
1815 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1816 		return BNXT_TF_RC_ERROR;
1817 	}
1818 
1819 	if (geneve_spec && !geneve_mask)
1820 		geneve_mask = &rte_flow_item_geneve_mask;
1821 
1822 	/*
1823 	 * Copy the rte_flow_item for geneve into hdr_field using geneve
1824 	 * header fields
1825 	 */
1826 	size = sizeof(((struct rte_flow_item_geneve *)NULL)->ver_opt_len_o_c_rsvd0);
1827 	ulp_rte_prsr_fld_mask(params, &idx, size,
1828 			      ulp_deference_struct(geneve_spec, ver_opt_len_o_c_rsvd0),
1829 			      ulp_deference_struct(geneve_mask, ver_opt_len_o_c_rsvd0),
1830 			      ULP_PRSR_ACT_DEFAULT);
1831 
1832 	size = sizeof(((struct rte_flow_item_geneve *)NULL)->protocol);
1833 	ulp_rte_prsr_fld_mask(params, &idx, size,
1834 			      ulp_deference_struct(geneve_spec, protocol),
1835 			      ulp_deference_struct(geneve_mask, protocol),
1836 			      ULP_PRSR_ACT_DEFAULT);
1837 
1838 	size = sizeof(((struct rte_flow_item_geneve *)NULL)->vni);
1839 	ulp_rte_prsr_fld_mask(params, &idx, size,
1840 			      ulp_deference_struct(geneve_spec, vni),
1841 			      ulp_deference_struct(geneve_mask, vni),
1842 			      ULP_PRSR_ACT_DEFAULT);
1843 
1844 	size = sizeof(((struct rte_flow_item_geneve *)NULL)->rsvd1);
1845 	ulp_rte_prsr_fld_mask(params, &idx, size,
1846 			      ulp_deference_struct(geneve_spec, rsvd1),
1847 			      ulp_deference_struct(geneve_mask, rsvd1),
1848 			      ULP_PRSR_ACT_DEFAULT);
1849 
1850 	/* Update the hdr_bitmap with geneve */
1851 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GENEVE);
1852 	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1853 
1854 	/* update the tunnel port */
1855 	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
1856 	if  (ULP_APP_DYNAMIC_GENEVE_PORT_EN(params->ulp_ctx)) {
1857 		if (dport == 0) {
1858 			BNXT_DRV_DBG(ERR, "ParseErr:geneve port is null\n");
1859 			return BNXT_TF_RC_PARSE_ERR;
1860 		}
1861 		/* set the dynamic geneve port check */
1862 		ULP_BITMAP_SET(params->cf_bitmap,
1863 			       BNXT_ULP_CF_BIT_DYNAMIC_GENEVE_PORT);
1864 		ULP_COMP_FLD_IDX_WR(params,
1865 				    BNXT_ULP_CF_IDX_TUNNEL_PORT, dport);
1866 	} else {
1867 		if (dport == 0) {
1868 			ULP_COMP_FLD_IDX_WR(params,
1869 					    BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1870 					    ULP_UDP_PORT_GENEVE);
1871 			ULP_COMP_FLD_IDX_WR(params,
1872 					    BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1873 					    ULP_UDP_PORT_GENEVE_MASK);
1874 		} else if (dport != 0 && dport != ULP_UDP_PORT_GENEVE) {
1875 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_PORT,
1876 					    dport);
1877 			ULP_BITMAP_SET(params->cf_bitmap,
1878 				       BNXT_ULP_CF_BIT_DYNAMIC_GENEVE_PORT);
1879 		}
1880 	}
1881 	return BNXT_TF_RC_SUCCESS;
1882 }
1883 
1884 /* Function to handle the parsing of RTE Flow item GRE Header. */
1885 int32_t
1886 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1887 			struct ulp_rte_parser_params *params)
1888 {
1889 	const struct rte_flow_item_gre *gre_spec = item->spec;
1890 	const struct rte_flow_item_gre *gre_mask = item->mask;
1891 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1892 	uint32_t idx = 0;
1893 	uint32_t size;
1894 
1895 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1896 						    BNXT_ULP_PROTO_HDR_GRE_NUM))) {
1897 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1898 		return BNXT_TF_RC_ERROR;
1899 	}
1900 
1901 	if (gre_spec && !gre_mask)
1902 		gre_mask = &rte_flow_item_gre_mask;
1903 
1904 	size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1905 	ulp_rte_prsr_fld_mask(params, &idx, size,
1906 			      ulp_deference_struct(gre_spec, c_rsvd0_ver),
1907 			      ulp_deference_struct(gre_mask, c_rsvd0_ver),
1908 			      ULP_PRSR_ACT_DEFAULT);
1909 
1910 	size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1911 	ulp_rte_prsr_fld_mask(params, &idx, size,
1912 			      ulp_deference_struct(gre_spec, protocol),
1913 			      ulp_deference_struct(gre_mask, protocol),
1914 			      ULP_PRSR_ACT_DEFAULT);
1915 
1916 	/* Update the hdr_bitmap with GRE */
1917 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1918 	ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL);
1919 	return BNXT_TF_RC_SUCCESS;
1920 }
1921 
1922 /* Function to handle the parsing of RTE Flow item ANY. */
1923 int32_t
1924 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1925 			 struct ulp_rte_parser_params *params __rte_unused)
1926 {
1927 	return BNXT_TF_RC_SUCCESS;
1928 }
1929 
1930 /* Function to handle the parsing of RTE Flow item ICMP Header. */
1931 int32_t
1932 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1933 			 struct ulp_rte_parser_params *params)
1934 {
1935 	const struct rte_flow_item_icmp *icmp_spec = item->spec;
1936 	const struct rte_flow_item_icmp *icmp_mask = item->mask;
1937 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1938 	uint32_t idx = 0;
1939 	uint32_t size;
1940 
1941 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
1942 						    BNXT_ULP_PROTO_HDR_ICMP_NUM))) {
1943 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
1944 		return BNXT_TF_RC_ERROR;
1945 	}
1946 
1947 	if (icmp_spec && !icmp_mask)
1948 		icmp_mask = &rte_flow_item_icmp_mask;
1949 
1950 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1951 	ulp_rte_prsr_fld_mask(params, &idx, size,
1952 			      ulp_deference_struct(icmp_spec, hdr.icmp_type),
1953 			      ulp_deference_struct(icmp_mask, hdr.icmp_type),
1954 			      ULP_PRSR_ACT_DEFAULT);
1955 
1956 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1957 	ulp_rte_prsr_fld_mask(params, &idx, size,
1958 			      ulp_deference_struct(icmp_spec, hdr.icmp_code),
1959 			      ulp_deference_struct(icmp_mask, hdr.icmp_code),
1960 			      ULP_PRSR_ACT_DEFAULT);
1961 
1962 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1963 	ulp_rte_prsr_fld_mask(params, &idx, size,
1964 			      ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1965 			      ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1966 			      ULP_PRSR_ACT_DEFAULT);
1967 
1968 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1969 	ulp_rte_prsr_fld_mask(params, &idx, size,
1970 			      ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1971 			      ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1972 			      ULP_PRSR_ACT_DEFAULT);
1973 
1974 	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1975 	ulp_rte_prsr_fld_mask(params, &idx, size,
1976 			      ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1977 			      ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1978 			      ULP_PRSR_ACT_DEFAULT);
1979 
1980 	/* Update the hdr_bitmap with ICMP */
1981 	if (ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
1982 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1983 	else
1984 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1985 	return BNXT_TF_RC_SUCCESS;
1986 }
1987 
1988 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
1989 int32_t
1990 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1991 			  struct ulp_rte_parser_params *params)
1992 {
1993 	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1994 	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1995 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1996 	uint32_t idx = 0;
1997 	uint32_t size;
1998 
1999 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
2000 						    BNXT_ULP_PROTO_HDR_ICMP_NUM))) {
2001 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
2002 		return BNXT_TF_RC_ERROR;
2003 	}
2004 
2005 	if (icmp_spec && !icmp_mask)
2006 		icmp_mask = &rte_flow_item_icmp6_mask;
2007 
2008 	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
2009 	ulp_rte_prsr_fld_mask(params, &idx, size,
2010 			      ulp_deference_struct(icmp_spec, type),
2011 			      ulp_deference_struct(icmp_mask, type),
2012 			      ULP_PRSR_ACT_DEFAULT);
2013 
2014 	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
2015 	ulp_rte_prsr_fld_mask(params, &idx, size,
2016 			      ulp_deference_struct(icmp_spec, code),
2017 			      ulp_deference_struct(icmp_mask, code),
2018 			      ULP_PRSR_ACT_DEFAULT);
2019 
2020 	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
2021 	ulp_rte_prsr_fld_mask(params, &idx, size,
2022 			      ulp_deference_struct(icmp_spec, checksum),
2023 			      ulp_deference_struct(icmp_mask, checksum),
2024 			      ULP_PRSR_ACT_DEFAULT);
2025 
2026 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
2027 		BNXT_DRV_DBG(ERR, "Error: incorrect icmp version\n");
2028 		return BNXT_TF_RC_ERROR;
2029 	}
2030 
2031 	/* Update the hdr_bitmap with ICMP */
2032 	if (ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL))
2033 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
2034 	else
2035 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
2036 	return BNXT_TF_RC_SUCCESS;
2037 }
2038 
2039 /* Function to handle the parsing of RTE Flow item ECPRI Header. */
2040 int32_t
2041 ulp_rte_ecpri_hdr_handler(const struct rte_flow_item *item,
2042 			  struct ulp_rte_parser_params *params)
2043 {
2044 	const struct rte_flow_item_ecpri *ecpri_spec = item->spec;
2045 	const struct rte_flow_item_ecpri *ecpri_mask = item->mask;
2046 	struct rte_flow_item_ecpri l_ecpri_spec, l_ecpri_mask;
2047 	struct rte_flow_item_ecpri *p_ecpri_spec = &l_ecpri_spec;
2048 	struct rte_flow_item_ecpri *p_ecpri_mask = &l_ecpri_mask;
2049 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
2050 	uint32_t idx = 0, cnt;
2051 	uint32_t size;
2052 
2053 	if (unlikely(ulp_rte_prsr_fld_size_validate(params, &idx,
2054 						    BNXT_ULP_PROTO_HDR_ECPRI_NUM))) {
2055 		BNXT_DRV_DBG(ERR, "Error parsing protocol header\n");
2056 		return BNXT_TF_RC_ERROR;
2057 	}
2058 
2059 	if (ecpri_spec && !ecpri_mask)
2060 		ecpri_mask = &rte_flow_item_ecpri_mask;
2061 
2062 	/* Figure out if eCPRI is within L4(UDP), unsupported, for now */
2063 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
2064 	if (cnt >= 1) {
2065 		BNXT_DRV_DBG(ERR, "Parse Err: L4 header stack >= 2 not supported\n");
2066 		return BNXT_TF_RC_ERROR;
2067 	}
2068 
2069 	if (!ecpri_spec || !ecpri_mask)
2070 		goto parser_set_ecpri_hdr_bit;
2071 
2072 	memcpy(p_ecpri_spec, ecpri_spec, sizeof(*ecpri_spec));
2073 	memcpy(p_ecpri_mask, ecpri_mask, sizeof(*ecpri_mask));
2074 
2075 	p_ecpri_spec->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_spec->hdr.common.u32);
2076 	p_ecpri_mask->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_mask->hdr.common.u32);
2077 
2078 	/*
2079 	 * Init eCPRI spec+mask to correct defaults, also clear masks of fields
2080 	 * we ignore in the TCAM.
2081 	 */
2082 
2083 	l_ecpri_spec.hdr.common.size = 0;
2084 	l_ecpri_spec.hdr.common.c = 0;
2085 	l_ecpri_spec.hdr.common.res = 0;
2086 	l_ecpri_spec.hdr.common.revision = 1;
2087 	l_ecpri_mask.hdr.common.size = 0;
2088 	l_ecpri_mask.hdr.common.c = 1;
2089 	l_ecpri_mask.hdr.common.res = 0;
2090 	l_ecpri_mask.hdr.common.revision = 0xf;
2091 
2092 	switch (p_ecpri_spec->hdr.common.type) {
2093 	case RTE_ECPRI_MSG_TYPE_IQ_DATA:
2094 		l_ecpri_mask.hdr.type0.seq_id = 0;
2095 		break;
2096 
2097 	case RTE_ECPRI_MSG_TYPE_BIT_SEQ:
2098 		l_ecpri_mask.hdr.type1.seq_id = 0;
2099 		break;
2100 
2101 	case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
2102 		l_ecpri_mask.hdr.type2.seq_id = 0;
2103 		break;
2104 
2105 	case RTE_ECPRI_MSG_TYPE_GEN_DATA:
2106 		l_ecpri_mask.hdr.type3.seq_id = 0;
2107 		break;
2108 
2109 	case RTE_ECPRI_MSG_TYPE_RM_ACC:
2110 		l_ecpri_mask.hdr.type4.rr = 0;
2111 		l_ecpri_mask.hdr.type4.rw = 0;
2112 		l_ecpri_mask.hdr.type4.rma_id = 0;
2113 		break;
2114 
2115 	case RTE_ECPRI_MSG_TYPE_DLY_MSR:
2116 		l_ecpri_spec.hdr.type5.act_type = 0;
2117 		break;
2118 
2119 	case RTE_ECPRI_MSG_TYPE_RMT_RST:
2120 		l_ecpri_spec.hdr.type6.rst_op = 0;
2121 		break;
2122 
2123 	case RTE_ECPRI_MSG_TYPE_EVT_IND:
2124 		l_ecpri_spec.hdr.type7.evt_type = 0;
2125 		l_ecpri_spec.hdr.type7.seq = 0;
2126 		l_ecpri_spec.hdr.type7.number = 0;
2127 		break;
2128 
2129 	default:
2130 		break;
2131 	}
2132 
2133 	p_ecpri_spec->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_spec->hdr.common.u32);
2134 	p_ecpri_mask->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_mask->hdr.common.u32);
2135 
2136 	/* Type */
2137 	size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.common.u32);
2138 	ulp_rte_prsr_fld_mask(params, &idx, size,
2139 			      ulp_deference_struct(p_ecpri_spec, hdr.common.u32),
2140 			      ulp_deference_struct(p_ecpri_mask, hdr.common.u32),
2141 			      ULP_PRSR_ACT_DEFAULT);
2142 
2143 	/* PC/RTC/MSR_ID */
2144 	size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.dummy[0]);
2145 	ulp_rte_prsr_fld_mask(params, &idx, size,
2146 			      ulp_deference_struct(p_ecpri_spec, hdr.dummy),
2147 			      ulp_deference_struct(p_ecpri_mask, hdr.dummy),
2148 			      ULP_PRSR_ACT_DEFAULT);
2149 
2150 parser_set_ecpri_hdr_bit:
2151 	/* Update the hdr_bitmap with eCPRI */
2152 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ECPRI);
2153 	return BNXT_TF_RC_SUCCESS;
2154 }
2155 
2156 /* Function to handle the parsing of RTE Flow item void Header */
2157 int32_t
2158 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
2159 			 struct ulp_rte_parser_params *params __rte_unused)
2160 {
2161 	return BNXT_TF_RC_SUCCESS;
2162 }
2163 
2164 /* Function to handle the parsing of RTE Flow action void Header. */
2165 int32_t
2166 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
2167 			 struct ulp_rte_parser_params *params __rte_unused)
2168 {
2169 	return BNXT_TF_RC_SUCCESS;
2170 }
2171 
2172 /* Function to handle the parsing of RTE Flow action Mark Header. */
2173 int32_t
2174 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
2175 			 struct ulp_rte_parser_params *param)
2176 {
2177 	const struct rte_flow_action_mark *mark;
2178 	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
2179 	uint32_t mark_id;
2180 
2181 	mark = action_item->conf;
2182 	if (mark) {
2183 		mark_id = tfp_cpu_to_be_32(mark->id);
2184 		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
2185 		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
2186 
2187 		/* Update the hdr_bitmap with vxlan */
2188 		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
2189 		return BNXT_TF_RC_SUCCESS;
2190 	}
2191 	BNXT_DRV_DBG(ERR, "Parse Error: Mark arg is invalid\n");
2192 	return BNXT_TF_RC_ERROR;
2193 }
2194 
2195 /* Function to handle the parsing of RTE Flow action RSS Header. */
2196 int32_t
2197 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
2198 			struct ulp_rte_parser_params *param)
2199 {
2200 	const struct rte_flow_action_rss *rss;
2201 	struct ulp_rte_act_prop *ap = &param->act_prop;
2202 	uint64_t queue_list[BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE / sizeof(uint64_t)];
2203 	uint32_t idx = 0, id;
2204 
2205 	if (action_item == NULL || action_item->conf == NULL) {
2206 		BNXT_DRV_DBG(ERR, "Parse Err: invalid rss configuration\n");
2207 		return BNXT_TF_RC_ERROR;
2208 	}
2209 
2210 	rss = action_item->conf;
2211 	/* Copy the rss into the specific action properties */
2212 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_FUNC], &rss->func,
2213 	       BNXT_ULP_ACT_PROP_SZ_RSS_FUNC);
2214 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
2215 	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
2216 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
2217 	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
2218 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
2219 	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
2220 
2221 	if (rss->key_len != 0 && rss->key_len != BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
2222 		BNXT_DRV_DBG(ERR, "Parse Err: RSS key length must be 40 bytes\n");
2223 		return BNXT_TF_RC_ERROR;
2224 	}
2225 
2226 	/* User may specify only key length. In that case, rss->key will be NULL.
2227 	 * So, reject the flow if key_length is valid but rss->key is NULL.
2228 	 * Also, copy the RSS hash key only when rss->key is valid.
2229 	 */
2230 	if (rss->key_len != 0 && rss->key == NULL) {
2231 		BNXT_DRV_DBG(ERR,
2232 			    "Parse Err: A valid RSS key must be provided with a valid key len.\n");
2233 		return BNXT_TF_RC_ERROR;
2234 	}
2235 	if (rss->key)
2236 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key, rss->key_len);
2237 
2238 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM],
2239 	       &rss->queue_num, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE_NUM);
2240 
2241 	if (rss->queue_num >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) {
2242 		BNXT_DRV_DBG(ERR, "Parse Err: RSS queue num too big\n");
2243 		return BNXT_TF_RC_ERROR;
2244 	}
2245 
2246 	/* Queues converted into a bitmap format */
2247 	memset(queue_list, 0, sizeof(queue_list));
2248 	for (idx = 0; idx < rss->queue_num; idx++) {
2249 		id = rss->queue[idx];
2250 		if (id >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) {
2251 			BNXT_DRV_DBG(ERR, "Parse Err: RSS queue id too big\n");
2252 			return BNXT_TF_RC_ERROR;
2253 		}
2254 		if ((queue_list[id / ULP_INDEX_BITMAP_SIZE] >>
2255 		    ((ULP_INDEX_BITMAP_SIZE - 1) -
2256 		     (id % ULP_INDEX_BITMAP_SIZE)) & 1)) {
2257 			BNXT_DRV_DBG(ERR, "Parse Err: duplicate queue ids\n");
2258 			return BNXT_TF_RC_ERROR;
2259 		}
2260 		queue_list[id / ULP_INDEX_BITMAP_SIZE] |= (1UL <<
2261 		((ULP_INDEX_BITMAP_SIZE - 1) - (id % ULP_INDEX_BITMAP_SIZE)));
2262 	}
2263 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE],
2264 	       (uint8_t *)queue_list, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE);
2265 
2266 	/* set the RSS action header bit */
2267 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
2268 
2269 	return BNXT_TF_RC_SUCCESS;
2270 }
2271 
2272 /* Function to handle the parsing of RTE Flow item eth Header. */
2273 static void
2274 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
2275 			    const struct rte_flow_item_eth *eth_spec)
2276 {
2277 	struct ulp_rte_hdr_field *field;
2278 	uint32_t size;
2279 
2280 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
2281 	size = sizeof(eth_spec->hdr.dst_addr.addr_bytes);
2282 	field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.dst_addr.addr_bytes, size);
2283 
2284 	size = sizeof(eth_spec->hdr.src_addr.addr_bytes);
2285 	field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.src_addr.addr_bytes, size);
2286 
2287 	size = sizeof(eth_spec->hdr.ether_type);
2288 	field = ulp_rte_parser_fld_copy(field, &eth_spec->hdr.ether_type, size);
2289 
2290 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
2291 }
2292 
2293 /* Function to handle the parsing of RTE Flow item vlan Header. */
2294 static void
2295 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
2296 			     const struct rte_flow_item_vlan *vlan_spec,
2297 			     uint32_t inner)
2298 {
2299 	struct ulp_rte_hdr_field *field;
2300 	uint32_t size;
2301 
2302 	if (!inner) {
2303 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
2304 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
2305 			       BNXT_ULP_HDR_BIT_OO_VLAN);
2306 	} else {
2307 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
2308 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
2309 			       BNXT_ULP_HDR_BIT_OI_VLAN);
2310 	}
2311 
2312 	size = sizeof(vlan_spec->hdr.vlan_tci);
2313 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.vlan_tci, size);
2314 
2315 	size = sizeof(vlan_spec->hdr.eth_proto);
2316 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.eth_proto, size);
2317 }
2318 
2319 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
2320 static void
2321 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
2322 			     const struct rte_flow_item_ipv4 *ip)
2323 {
2324 	struct ulp_rte_hdr_field *field;
2325 	uint32_t size;
2326 	uint8_t val8;
2327 
2328 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
2329 	size = sizeof(ip->hdr.version_ihl);
2330 	if (!ip->hdr.version_ihl)
2331 		val8 = RTE_IPV4_VHL_DEF;
2332 	else
2333 		val8 = ip->hdr.version_ihl;
2334 	field = ulp_rte_parser_fld_copy(field, &val8, size);
2335 
2336 	size = sizeof(ip->hdr.type_of_service);
2337 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
2338 
2339 	size = sizeof(ip->hdr.packet_id);
2340 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
2341 
2342 	size = sizeof(ip->hdr.fragment_offset);
2343 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
2344 
2345 	size = sizeof(ip->hdr.time_to_live);
2346 	if (!ip->hdr.time_to_live)
2347 		val8 = BNXT_ULP_DEFAULT_TTL;
2348 	else
2349 		val8 = ip->hdr.time_to_live;
2350 	field = ulp_rte_parser_fld_copy(field, &val8, size);
2351 
2352 	size = sizeof(ip->hdr.next_proto_id);
2353 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
2354 
2355 	size = sizeof(ip->hdr.src_addr);
2356 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
2357 
2358 	size = sizeof(ip->hdr.dst_addr);
2359 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
2360 
2361 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
2362 }
2363 
2364 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
2365 static void
2366 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
2367 			     const struct rte_flow_item_ipv6 *ip)
2368 {
2369 	struct ulp_rte_hdr_field *field;
2370 	uint32_t size;
2371 	uint32_t val32;
2372 	uint8_t val8;
2373 
2374 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
2375 	size = sizeof(ip->hdr.vtc_flow);
2376 	if (!ip->hdr.vtc_flow)
2377 		val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
2378 	else
2379 		val32 = ip->hdr.vtc_flow;
2380 	field = ulp_rte_parser_fld_copy(field, &val32, size);
2381 
2382 	size = sizeof(ip->hdr.proto);
2383 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
2384 
2385 	size = sizeof(ip->hdr.hop_limits);
2386 	if (!ip->hdr.hop_limits)
2387 		val8 = BNXT_ULP_DEFAULT_TTL;
2388 	else
2389 		val8 = ip->hdr.hop_limits;
2390 	field = ulp_rte_parser_fld_copy(field, &val8, size);
2391 
2392 	size = sizeof(ip->hdr.src_addr);
2393 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
2394 
2395 	size = sizeof(ip->hdr.dst_addr);
2396 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
2397 
2398 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
2399 }
2400 
2401 /* Function to handle the parsing of RTE Flow item UDP Header. */
2402 static void
2403 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
2404 			    const struct rte_flow_item_udp *udp_spec)
2405 {
2406 	struct ulp_rte_hdr_field *field;
2407 	uint32_t size;
2408 	uint8_t type = IPPROTO_UDP;
2409 
2410 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
2411 	size = sizeof(udp_spec->hdr.src_port);
2412 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
2413 
2414 	size = sizeof(udp_spec->hdr.dst_port);
2415 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
2416 
2417 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
2418 
2419 	/* Update the ip header protocol */
2420 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
2421 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
2422 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
2423 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
2424 }
2425 
2426 /* Function to handle the parsing of RTE Flow item vxlan Header. */
2427 static void
2428 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
2429 			      struct rte_flow_item_vxlan *vxlan_spec)
2430 {
2431 	struct ulp_rte_hdr_field *field;
2432 	uint32_t size;
2433 
2434 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
2435 	size = sizeof(vxlan_spec->hdr.flags);
2436 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.flags, size);
2437 
2438 	size = sizeof(vxlan_spec->hdr.rsvd0);
2439 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd0, size);
2440 
2441 	size = sizeof(vxlan_spec->hdr.vni);
2442 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.vni, size);
2443 
2444 	size = sizeof(vxlan_spec->hdr.rsvd1);
2445 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd1, size);
2446 
2447 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
2448 }
2449 
2450 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
2451 int32_t
2452 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
2453 				struct ulp_rte_parser_params *params)
2454 {
2455 	const struct rte_flow_action_vxlan_encap *vxlan_encap;
2456 	const struct rte_flow_item *item;
2457 	const struct rte_flow_item_ipv4 *ipv4_spec;
2458 	const struct rte_flow_item_ipv6 *ipv6_spec;
2459 	struct rte_flow_item_vxlan vxlan_spec;
2460 	uint32_t vlan_num = 0, vlan_size = 0;
2461 	uint32_t ip_size = 0, ip_type = 0;
2462 	uint32_t vxlan_size = 0;
2463 	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
2464 	struct ulp_rte_act_prop *ap = &params->act_prop;
2465 
2466 	vxlan_encap = action_item->conf;
2467 	if (!vxlan_encap) {
2468 		BNXT_DRV_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
2469 		return BNXT_TF_RC_ERROR;
2470 	}
2471 
2472 	item = vxlan_encap->definition;
2473 	if (!item) {
2474 		BNXT_DRV_DBG(ERR, "Parse Error: definition arg is invalid\n");
2475 		return BNXT_TF_RC_ERROR;
2476 	}
2477 
2478 	if (!ulp_rte_item_skip_void(&item, 0))
2479 		return BNXT_TF_RC_ERROR;
2480 
2481 	/* must have ethernet header */
2482 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2483 		BNXT_DRV_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
2484 		return BNXT_TF_RC_ERROR;
2485 	}
2486 
2487 	/* Parse the ethernet header */
2488 	if (item->spec)
2489 		ulp_rte_enc_eth_hdr_handler(params, item->spec);
2490 
2491 	/* Goto the next item */
2492 	if (!ulp_rte_item_skip_void(&item, 1))
2493 		return BNXT_TF_RC_ERROR;
2494 
2495 	/* May have vlan header */
2496 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2497 		vlan_num++;
2498 		if (item->spec)
2499 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
2500 
2501 		if (!ulp_rte_item_skip_void(&item, 1))
2502 			return BNXT_TF_RC_ERROR;
2503 	}
2504 
2505 	/* may have two vlan headers */
2506 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2507 		vlan_num++;
2508 		if (item->spec)
2509 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
2510 
2511 		if (!ulp_rte_item_skip_void(&item, 1))
2512 			return BNXT_TF_RC_ERROR;
2513 	}
2514 
2515 	/* Update the vlan count and size of more than one */
2516 	if (vlan_num) {
2517 		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2518 		vlan_num = tfp_cpu_to_be_32(vlan_num);
2519 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2520 		       &vlan_num,
2521 		       sizeof(uint32_t));
2522 		vlan_size = tfp_cpu_to_be_32(vlan_size);
2523 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2524 		       &vlan_size,
2525 		       sizeof(uint32_t));
2526 	}
2527 
2528 	/* L3 must be IPv4, IPv6 */
2529 	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2530 		ipv4_spec = item->spec;
2531 		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2532 
2533 		/* Update the ip size details */
2534 		ip_size = tfp_cpu_to_be_32(ip_size);
2535 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2536 		       &ip_size, sizeof(uint32_t));
2537 
2538 		/* update the ip type */
2539 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2540 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2541 		       &ip_type, sizeof(uint32_t));
2542 
2543 		/* update the computed field to notify it is ipv4 header */
2544 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2545 				    1);
2546 		if (ipv4_spec)
2547 			ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2548 
2549 		if (!ulp_rte_item_skip_void(&item, 1))
2550 			return BNXT_TF_RC_ERROR;
2551 	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2552 		ipv6_spec = item->spec;
2553 		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2554 
2555 		/* Update the ip size details */
2556 		ip_size = tfp_cpu_to_be_32(ip_size);
2557 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2558 		       &ip_size, sizeof(uint32_t));
2559 
2560 		 /* update the ip type */
2561 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2562 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2563 		       &ip_type, sizeof(uint32_t));
2564 
2565 		/* update the computed field to notify it is ipv6 header */
2566 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2567 				    1);
2568 		if (ipv6_spec)
2569 			ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2570 
2571 		if (!ulp_rte_item_skip_void(&item, 1))
2572 			return BNXT_TF_RC_ERROR;
2573 	} else {
2574 		BNXT_DRV_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2575 		return BNXT_TF_RC_ERROR;
2576 	}
2577 
2578 	/* L4 is UDP */
2579 	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2580 		BNXT_DRV_DBG(ERR, "vxlan encap does not have udp\n");
2581 		return BNXT_TF_RC_ERROR;
2582 	}
2583 	if (item->spec)
2584 		ulp_rte_enc_udp_hdr_handler(params, item->spec);
2585 
2586 	if (!ulp_rte_item_skip_void(&item, 1))
2587 		return BNXT_TF_RC_ERROR;
2588 
2589 	/* Finally VXLAN */
2590 	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2591 		BNXT_DRV_DBG(ERR, "vxlan encap does not have vni\n");
2592 		return BNXT_TF_RC_ERROR;
2593 	}
2594 	vxlan_size = sizeof(struct rte_flow_item_vxlan);
2595 	/* copy the vxlan details */
2596 	memcpy(&vxlan_spec, item->spec, vxlan_size);
2597 	vxlan_spec.hdr.flags = 0x08;
2598 	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2599 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2600 	       &vxlan_size, sizeof(uint32_t));
2601 
2602 	ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2603 
2604 	/* update the hdr_bitmap with vxlan */
2605 	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2606 	return BNXT_TF_RC_SUCCESS;
2607 }
2608 
2609 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
2610 int32_t
2611 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2612 				__rte_unused,
2613 				struct ulp_rte_parser_params *params)
2614 {
2615 	/* update the hdr_bitmap with vxlan */
2616 	ULP_BITMAP_SET(params->act_bitmap.bits,
2617 		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2618 	/* Update computational field with tunnel decap info */
2619 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2620 	return BNXT_TF_RC_SUCCESS;
2621 }
2622 
2623 /* Function to handle the parsing of RTE Flow action drop Header. */
2624 int32_t
2625 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2626 			 struct ulp_rte_parser_params *params)
2627 {
2628 	/* Update the hdr_bitmap with drop */
2629 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2630 	return BNXT_TF_RC_SUCCESS;
2631 }
2632 
2633 /* Function to handle the parsing of RTE Flow action count. */
2634 int32_t
2635 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2636 			  struct ulp_rte_parser_params *params)
2637 {
2638 	const struct rte_flow_action_count *act_count;
2639 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
2640 
2641 	act_count = action_item->conf;
2642 	if (act_count) {
2643 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2644 		       &act_count->id,
2645 		       BNXT_ULP_ACT_PROP_SZ_COUNT);
2646 	}
2647 
2648 	/* Update the hdr_bitmap with count */
2649 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2650 	return BNXT_TF_RC_SUCCESS;
2651 }
2652 
2653 static bool ulp_rte_parser_is_portb_vfrep(struct ulp_rte_parser_params *param)
2654 {
2655 	return ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP);
2656 }
2657 
2658 /*
2659  * Swaps info related to multi-port:
2660  * common:
2661  *    BNXT_ULP_CF_IDX_MP_B_IS_VFREP, BNXT_ULP_CF_IDX_MP_A_IS_VFREP
2662  *    BNXT_ULP_CF_IDX_MP_PORT_A, BNXT_ULP_CF_IDX_MP_PORT_B
2663  *
2664  * ingress:
2665  *    BNXT_ULP_CF_IDX_MP_VNIC_B, BNXT_ULP_CF_IDX_MP_VNIC_A
2666  *
2667  * egress:
2668  *    BNXT_ULP_CF_IDX_MP_MDATA_B, BNXT_ULP_CF_IDX_MP_MDATA_A
2669  *    BNXT_ULP_CF_IDX_MP_VPORT_B, BNXT_ULP_CF_IDX_MP_VPORT_A
2670  *
2671  * Note: This is done as OVS could give us a non-VFREP port in port B, and we
2672  * cannot use that to mirror, so we swap out the ports so that a VFREP is now
2673  * in port B instead.
2674  */
2675 static int32_t
2676 ulp_rte_parser_normalize_port_info(struct ulp_rte_parser_params *param)
2677 {
2678 	uint16_t mp_port_a, mp_port_b, mp_mdata_a, mp_mdata_b,
2679 		 mp_vport_a, mp_vport_b, mp_vnic_a, mp_vnic_b,
2680 		 mp_is_vfrep_a, mp_is_vfrep_b;
2681 
2682 	mp_is_vfrep_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_A_IS_VFREP);
2683 	mp_is_vfrep_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP);
2684 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP, mp_is_vfrep_a);
2685 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_A_IS_VFREP, mp_is_vfrep_b);
2686 
2687 	mp_port_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_PORT_A);
2688 	mp_port_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_PORT_B);
2689 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_B, mp_port_a);
2690 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_A, mp_port_b);
2691 
2692 	mp_vport_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VPORT_A);
2693 	mp_vport_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VPORT_B);
2694 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VPORT_B, mp_vport_a);
2695 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VPORT_A, mp_vport_b);
2696 
2697 	mp_vnic_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VNIC_A);
2698 	mp_vnic_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VNIC_B);
2699 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VNIC_B, mp_vnic_a);
2700 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VNIC_A, mp_vnic_b);
2701 
2702 	mp_mdata_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_MDATA_A);
2703 	mp_mdata_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_MDATA_B);
2704 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_MDATA_B, mp_mdata_a);
2705 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_MDATA_A, mp_mdata_b);
2706 
2707 	return BNXT_TF_RC_SUCCESS;
2708 }
2709 
2710 
2711 /* Function to handle the parsing of action ports. */
2712 static int32_t
2713 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2714 			    uint32_t ifindex, bool multi_port,
2715 			    enum bnxt_ulp_direction_type act_dir)
2716 {
2717 	enum bnxt_ulp_direction_type dir;
2718 	uint16_t pid_s;
2719 	uint8_t *p_mdata;
2720 	uint32_t pid, port_index;
2721 	struct ulp_rte_act_prop *act = &param->act_prop;
2722 	enum bnxt_ulp_intf_type port_type;
2723 	uint32_t vnic_type;
2724 
2725 	/* Get the direction */
2726 	/* If action implicitly specifies direction, use the specification. */
2727 	dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
2728 		ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
2729 		act_dir;
2730 
2731 	port_type = ULP_COMP_FLD_IDX_RD(param,
2732 					BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2733 
2734 	/* Update flag if Port A/B type is VF-REP */
2735 	ULP_COMP_FLD_IDX_WR(param, multi_port ?
2736 					BNXT_ULP_CF_IDX_MP_B_IS_VFREP :
2737 					BNXT_ULP_CF_IDX_MP_A_IS_VFREP,
2738 			    (port_type == BNXT_ULP_INTF_TYPE_VF_REP) ? 1 : 0);
2739 
2740 	/* An egress flow where the action port is not another VF endpoint
2741 	 * requires a VPORT.
2742 	 */
2743 	if (dir == BNXT_ULP_DIR_EGRESS) {
2744 		/* For egress direction, fill vport */
2745 		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2746 			return BNXT_TF_RC_ERROR;
2747 
2748 		pid = pid_s;
2749 		pid = rte_cpu_to_be_32(pid);
2750 		if (!multi_port)
2751 			memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2752 			       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2753 
2754 		/* Fill metadata */
2755 		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
2756 			port_index  = ULP_COMP_FLD_IDX_RD(param, multi_port ?
2757 								 BNXT_ULP_CF_IDX_MP_PORT_B :
2758 								 BNXT_ULP_CF_IDX_MP_PORT_A);
2759 			if (ulp_port_db_port_meta_data_get(param->ulp_ctx,
2760 							   port_index, &p_mdata))
2761 				return BNXT_TF_RC_ERROR;
2762 			/*
2763 			 * Update appropriate port (A/B) metadata based on multi-port
2764 			 * indication
2765 			 */
2766 			ULP_COMP_FLD_IDX_WR(param,
2767 					    multi_port ?
2768 						BNXT_ULP_CF_IDX_MP_MDATA_B :
2769 						BNXT_ULP_CF_IDX_MP_MDATA_A,
2770 					    rte_cpu_to_be_16(*((uint16_t *)p_mdata)));
2771 		}
2772 		/*
2773 		 * Update appropriate port (A/B) VPORT based on multi-port
2774 		 * indication.
2775 		 */
2776 		ULP_COMP_FLD_IDX_WR(param,
2777 				    multi_port ?
2778 					BNXT_ULP_CF_IDX_MP_VPORT_B :
2779 					BNXT_ULP_CF_IDX_MP_VPORT_A,
2780 				    pid_s);
2781 
2782 		/* Setup the VF_TO_VF VNIC information */
2783 		if (!multi_port && port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
2784 			if (ulp_port_db_default_vnic_get(param->ulp_ctx,
2785 							 ifindex,
2786 							 BNXT_ULP_VF_FUNC_VNIC,
2787 							 &pid_s))
2788 				return BNXT_TF_RC_ERROR;
2789 			pid = pid_s;
2790 
2791 			/* Allows use of func_opcode with VNIC */
2792 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_VNIC, pid);
2793 		}
2794 	} else {
2795 		/* For ingress direction, fill vnic */
2796 		/*
2797 		 * Action               Destination
2798 		 * ------------------------------------
2799 		 * PORT_REPRESENTOR     Driver Function
2800 		 * ------------------------------------
2801 		 * REPRESENTED_PORT     VF
2802 		 * ------------------------------------
2803 		 * PORT_ID              VF
2804 		 */
2805 		if (act_dir != BNXT_ULP_DIR_INGRESS &&
2806 		    port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2807 			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2808 		else
2809 			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2810 
2811 		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2812 						 vnic_type, &pid_s))
2813 			return BNXT_TF_RC_ERROR;
2814 
2815 		pid = pid_s;
2816 
2817 		/* Allows use of func_opcode with VNIC */
2818 		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_VNIC, pid);
2819 
2820 		pid = rte_cpu_to_be_32(pid);
2821 		if (!multi_port)
2822 			memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2823 			       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2824 		/*
2825 		 * Update appropriate port (A/B) VNIC based on multi-port
2826 		 * indication.
2827 		 */
2828 		ULP_COMP_FLD_IDX_WR(param,
2829 				    multi_port ?
2830 					BNXT_ULP_CF_IDX_MP_VNIC_B :
2831 					BNXT_ULP_CF_IDX_MP_VNIC_A,
2832 				    pid_s);
2833 	}
2834 
2835 	if (multi_port && !ulp_rte_parser_is_portb_vfrep(param))
2836 		ulp_rte_parser_normalize_port_info(param);
2837 
2838 	/* Update the action port set bit */
2839 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2840 	return BNXT_TF_RC_SUCCESS;
2841 }
2842 
2843 /* Function to handle the parsing of RTE Flow action PF. */
2844 int32_t
2845 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2846 		       struct ulp_rte_parser_params *params)
2847 {
2848 	uint32_t port_id;
2849 	uint32_t ifindex;
2850 	enum bnxt_ulp_intf_type intf_type;
2851 
2852 	/* Get the port id of the current device */
2853 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2854 
2855 	/* Get the port db ifindex */
2856 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2857 					      &ifindex)) {
2858 		BNXT_DRV_DBG(ERR, "Invalid port id\n");
2859 		return BNXT_TF_RC_ERROR;
2860 	}
2861 
2862 	/* Check the port is PF port */
2863 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2864 	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2865 		BNXT_DRV_DBG(ERR, "Port is not a PF port\n");
2866 		return BNXT_TF_RC_ERROR;
2867 	}
2868 	/* Update the action properties */
2869 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2870 	return ulp_rte_parser_act_port_set(params, ifindex, false,
2871 					   BNXT_ULP_DIR_INVALID);
2872 }
2873 
2874 /* Function to handle the parsing of RTE Flow action VF. */
2875 int32_t
2876 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2877 		       struct ulp_rte_parser_params *params)
2878 {
2879 	const struct rte_flow_action_vf *vf_action;
2880 	enum bnxt_ulp_intf_type intf_type;
2881 	uint32_t ifindex;
2882 	struct bnxt *bp;
2883 
2884 	vf_action = action_item->conf;
2885 	if (!vf_action) {
2886 		BNXT_DRV_DBG(ERR, "ParseErr: Invalid Argument\n");
2887 		return BNXT_TF_RC_PARSE_ERR;
2888 	}
2889 
2890 	if (vf_action->original) {
2891 		BNXT_DRV_DBG(ERR, "ParseErr:VF Original not supported\n");
2892 		return BNXT_TF_RC_PARSE_ERR;
2893 	}
2894 
2895 	bp = bnxt_pmd_get_bp(params->port_id);
2896 	if (bp == NULL) {
2897 		BNXT_DRV_DBG(ERR, "Invalid bp\n");
2898 		return BNXT_TF_RC_ERROR;
2899 	}
2900 
2901 	/* vf_action->id is a logical number which in this case is an
2902 	 * offset from the first VF. So, to get the absolute VF id, the
2903 	 * offset must be added to the absolute first vf id of that port.
2904 	 */
2905 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2906 						 bp->first_vf_id +
2907 						 vf_action->id,
2908 						 &ifindex)) {
2909 		BNXT_DRV_DBG(ERR, "VF is not valid interface\n");
2910 		return BNXT_TF_RC_ERROR;
2911 	}
2912 	/* Check the port is VF port */
2913 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2914 	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2915 	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2916 		BNXT_DRV_DBG(ERR, "Port is not a VF port\n");
2917 		return BNXT_TF_RC_ERROR;
2918 	}
2919 
2920 	/* Update the action properties */
2921 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2922 	return ulp_rte_parser_act_port_set(params, ifindex, false,
2923 					   BNXT_ULP_DIR_INVALID);
2924 }
2925 
2926 /* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
2927 int32_t
2928 ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
2929 			 struct ulp_rte_parser_params *param)
2930 {
2931 	uint32_t ethdev_id;
2932 	uint32_t ifindex;
2933 	const struct rte_flow_action_port_id *port_id = act_item->conf;
2934 	uint32_t num_ports;
2935 	enum bnxt_ulp_intf_type intf_type;
2936 	enum bnxt_ulp_direction_type act_dir;
2937 
2938 	if (!act_item->conf) {
2939 		BNXT_DRV_DBG(ERR,
2940 				"ParseErr: Invalid Argument\n");
2941 		return BNXT_TF_RC_PARSE_ERR;
2942 	}
2943 	switch (act_item->type) {
2944 	case RTE_FLOW_ACTION_TYPE_PORT_ID: {
2945 		const struct rte_flow_action_port_id *port_id = act_item->conf;
2946 
2947 		if (port_id->original) {
2948 			BNXT_DRV_DBG(ERR,
2949 				    "ParseErr:Portid Original not supported\n");
2950 			return BNXT_TF_RC_PARSE_ERR;
2951 		}
2952 		ethdev_id = port_id->id;
2953 		act_dir = BNXT_ULP_DIR_INVALID;
2954 		break;
2955 	}
2956 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
2957 		const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2958 
2959 		ethdev_id = ethdev->port_id;
2960 		act_dir = BNXT_ULP_DIR_INGRESS;
2961 		break;
2962 	}
2963 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
2964 		const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2965 
2966 		ethdev_id = ethdev->port_id;
2967 		act_dir = BNXT_ULP_DIR_EGRESS;
2968 		break;
2969 	}
2970 	default:
2971 		BNXT_DRV_DBG(ERR, "Unknown port action\n");
2972 		return BNXT_TF_RC_ERROR;
2973 	}
2974 
2975 	num_ports  = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_NPORTS);
2976 
2977 	if (num_ports) {
2978 		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_B,
2979 				    port_id->id);
2980 		ULP_BITMAP_SET(param->act_bitmap.bits,
2981 			       BNXT_ULP_ACT_BIT_MULTIPLE_PORT);
2982 	} else {
2983 		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_A,
2984 				    port_id->id);
2985 	}
2986 
2987 	/* Get the port db ifindex */
2988 	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
2989 					      &ifindex)) {
2990 		BNXT_DRV_DBG(ERR, "Invalid port id\n");
2991 		return BNXT_TF_RC_ERROR;
2992 	}
2993 
2994 	/* Get the intf type */
2995 	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2996 	if (!intf_type) {
2997 		BNXT_DRV_DBG(ERR, "Invalid port type\n");
2998 		return BNXT_TF_RC_ERROR;
2999 	}
3000 
3001 	/* Set the action port */
3002 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
3003 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID,
3004 			    ethdev_id);
3005 
3006 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_NPORTS, ++num_ports);
3007 	return ulp_rte_parser_act_port_set(param, ifindex,
3008 					   ULP_BITMAP_ISSET(param->act_bitmap.bits,
3009 							    BNXT_ULP_ACT_BIT_MULTIPLE_PORT),
3010 					   act_dir);
3011 }
3012 
3013 /* Function to handle the parsing of RTE Flow action pop vlan. */
3014 int32_t
3015 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
3016 				struct ulp_rte_parser_params *params)
3017 {
3018 	/* Update the act_bitmap with pop */
3019 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
3020 	return BNXT_TF_RC_SUCCESS;
3021 }
3022 
3023 /* Function to handle the parsing of RTE Flow action push vlan. */
3024 int32_t
3025 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
3026 				 struct ulp_rte_parser_params *params)
3027 {
3028 	const struct rte_flow_action_of_push_vlan *push_vlan;
3029 	uint16_t ethertype;
3030 	struct ulp_rte_act_prop *act = &params->act_prop;
3031 
3032 	push_vlan = action_item->conf;
3033 	if (push_vlan) {
3034 		ethertype = push_vlan->ethertype;
3035 		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
3036 			BNXT_DRV_DBG(ERR,
3037 				    "Parse Err: Ethertype not supported\n");
3038 			return BNXT_TF_RC_PARSE_ERR;
3039 		}
3040 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
3041 		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
3042 		/* Update the hdr_bitmap with push vlan */
3043 		ULP_BITMAP_SET(params->act_bitmap.bits,
3044 			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
3045 		return BNXT_TF_RC_SUCCESS;
3046 	}
3047 	BNXT_DRV_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
3048 	return BNXT_TF_RC_ERROR;
3049 }
3050 
3051 /* Function to handle the parsing of RTE Flow action set vlan id. */
3052 int32_t
3053 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
3054 				    struct ulp_rte_parser_params *params)
3055 {
3056 	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
3057 	uint32_t vid;
3058 	struct ulp_rte_act_prop *act = &params->act_prop;
3059 
3060 	vlan_vid = action_item->conf;
3061 	if (vlan_vid && vlan_vid->vlan_vid) {
3062 		vid = vlan_vid->vlan_vid;
3063 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
3064 		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
3065 		/* Update the hdr_bitmap with vlan vid */
3066 		ULP_BITMAP_SET(params->act_bitmap.bits,
3067 			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
3068 		return BNXT_TF_RC_SUCCESS;
3069 	}
3070 	BNXT_DRV_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
3071 	return BNXT_TF_RC_ERROR;
3072 }
3073 
3074 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
3075 int32_t
3076 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
3077 				    struct ulp_rte_parser_params *params)
3078 {
3079 	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
3080 	uint8_t pcp;
3081 	struct ulp_rte_act_prop *act = &params->act_prop;
3082 
3083 	vlan_pcp = action_item->conf;
3084 	if (vlan_pcp) {
3085 		pcp = vlan_pcp->vlan_pcp;
3086 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
3087 		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
3088 		/* Update the hdr_bitmap with vlan vid */
3089 		ULP_BITMAP_SET(params->act_bitmap.bits,
3090 			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
3091 		return BNXT_TF_RC_SUCCESS;
3092 	}
3093 	BNXT_DRV_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
3094 	return BNXT_TF_RC_ERROR;
3095 }
3096 
3097 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
3098 int32_t
3099 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
3100 				 struct ulp_rte_parser_params *params)
3101 {
3102 	const struct rte_flow_action_set_ipv4 *set_ipv4;
3103 	struct ulp_rte_act_prop *act = &params->act_prop;
3104 
3105 	set_ipv4 = action_item->conf;
3106 	if (set_ipv4) {
3107 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
3108 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
3109 		/* Update the hdr_bitmap with set ipv4 src */
3110 		ULP_BITMAP_SET(params->act_bitmap.bits,
3111 			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
3112 		return BNXT_TF_RC_SUCCESS;
3113 	}
3114 	BNXT_DRV_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
3115 	return BNXT_TF_RC_ERROR;
3116 }
3117 
3118 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
3119 int32_t
3120 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
3121 				 struct ulp_rte_parser_params *params)
3122 {
3123 	const struct rte_flow_action_set_ipv4 *set_ipv4;
3124 	struct ulp_rte_act_prop *act = &params->act_prop;
3125 
3126 	set_ipv4 = action_item->conf;
3127 	if (set_ipv4) {
3128 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
3129 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
3130 		/* Update the hdr_bitmap with set ipv4 dst */
3131 		ULP_BITMAP_SET(params->act_bitmap.bits,
3132 			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
3133 		return BNXT_TF_RC_SUCCESS;
3134 	}
3135 	BNXT_DRV_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
3136 	return BNXT_TF_RC_ERROR;
3137 }
3138 
3139 /* Function to handle the parsing of RTE Flow action set ipv6 src.*/
3140 int32_t
3141 ulp_rte_set_ipv6_src_act_handler(const struct rte_flow_action *action_item,
3142 				 struct ulp_rte_parser_params *params)
3143 {
3144 	const struct rte_flow_action_set_ipv6 *set_ipv6;
3145 	struct ulp_rte_act_prop *act = &params->act_prop;
3146 
3147 	set_ipv6 = action_item->conf;
3148 	if (set_ipv6) {
3149 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC],
3150 		       &set_ipv6->ipv6_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV6_SRC);
3151 		/* Update the hdr_bitmap with set ipv4 src */
3152 		ULP_BITMAP_SET(params->act_bitmap.bits,
3153 			       BNXT_ULP_ACT_BIT_SET_IPV6_SRC);
3154 		return BNXT_TF_RC_SUCCESS;
3155 	}
3156 	BNXT_DRV_DBG(ERR, "Parse Error: set ipv6 src arg is invalid\n");
3157 	return BNXT_TF_RC_ERROR;
3158 }
3159 
3160 /* Function to handle the parsing of RTE Flow action set ipv6 dst.*/
3161 int32_t
3162 ulp_rte_set_ipv6_dst_act_handler(const struct rte_flow_action *action_item,
3163 				 struct ulp_rte_parser_params *params)
3164 {
3165 	const struct rte_flow_action_set_ipv6 *set_ipv6;
3166 	struct ulp_rte_act_prop *act = &params->act_prop;
3167 
3168 	set_ipv6 = action_item->conf;
3169 	if (set_ipv6) {
3170 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST],
3171 		       &set_ipv6->ipv6_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV6_DST);
3172 		/* Update the hdr_bitmap with set ipv6 dst */
3173 		ULP_BITMAP_SET(params->act_bitmap.bits,
3174 			       BNXT_ULP_ACT_BIT_SET_IPV6_DST);
3175 		return BNXT_TF_RC_SUCCESS;
3176 	}
3177 	BNXT_DRV_DBG(ERR, "Parse Error: set ipv6 dst arg is invalid\n");
3178 	return BNXT_TF_RC_ERROR;
3179 }
3180 
3181 /* Function to handle the parsing of RTE Flow action set tp src.*/
3182 int32_t
3183 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
3184 			       struct ulp_rte_parser_params *params)
3185 {
3186 	const struct rte_flow_action_set_tp *set_tp;
3187 	struct ulp_rte_act_prop *act = &params->act_prop;
3188 
3189 	set_tp = action_item->conf;
3190 	if (set_tp) {
3191 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
3192 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
3193 		/* Update the hdr_bitmap with set tp src */
3194 		ULP_BITMAP_SET(params->act_bitmap.bits,
3195 			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
3196 		return BNXT_TF_RC_SUCCESS;
3197 	}
3198 
3199 	BNXT_DRV_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
3200 	return BNXT_TF_RC_ERROR;
3201 }
3202 
3203 /* Function to handle the parsing of RTE Flow action set tp dst.*/
3204 int32_t
3205 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
3206 			       struct ulp_rte_parser_params *params)
3207 {
3208 	const struct rte_flow_action_set_tp *set_tp;
3209 	struct ulp_rte_act_prop *act = &params->act_prop;
3210 
3211 	set_tp = action_item->conf;
3212 	if (set_tp) {
3213 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
3214 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
3215 		/* Update the hdr_bitmap with set tp dst */
3216 		ULP_BITMAP_SET(params->act_bitmap.bits,
3217 			       BNXT_ULP_ACT_BIT_SET_TP_DST);
3218 		return BNXT_TF_RC_SUCCESS;
3219 	}
3220 
3221 	BNXT_DRV_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
3222 	return BNXT_TF_RC_ERROR;
3223 }
3224 
3225 /* Function to handle the parsing of RTE Flow action dec ttl.*/
3226 int32_t
3227 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
3228 			    struct ulp_rte_parser_params *params)
3229 {
3230 	/* Update the act_bitmap with dec ttl */
3231 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
3232 	return BNXT_TF_RC_SUCCESS;
3233 }
3234 
3235 /* Function to handle the parsing of RTE Flow action set ttl.*/
3236 int32_t
3237 ulp_rte_set_ttl_act_handler(const struct rte_flow_action *action_item,
3238 			    struct ulp_rte_parser_params *params)
3239 {
3240 	const struct rte_flow_action_set_ttl *set_ttl;
3241 	struct ulp_rte_act_prop *act = &params->act_prop;
3242 
3243 	set_ttl = action_item->conf;
3244 	if (set_ttl) {
3245 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TTL],
3246 		       &set_ttl->ttl_value, BNXT_ULP_ACT_PROP_SZ_SET_TTL);
3247 		/* Update the act_bitmap with dec ttl */
3248 		/* Note: NIC HW not support the set_ttl action, here using dec_ttl to simulate
3249 		 * the set_ttl action. And ensure the ttl field must be one more than the value
3250 		 * of action set_ttl.
3251 		 */
3252 		if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3_TTL) ==
3253 		    (uint32_t)(set_ttl->ttl_value + 1)) {
3254 			ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
3255 			return BNXT_TF_RC_SUCCESS;
3256 		}
3257 		BNXT_DRV_DBG(ERR, "Parse Error: set_ttl value not match with flow ttl field.\n");
3258 		return BNXT_TF_RC_ERROR;
3259 	}
3260 
3261 	BNXT_DRV_DBG(ERR, "Parse Error: set ttl arg is invalid.\n");
3262 	return BNXT_TF_RC_ERROR;
3263 }
3264 
3265 /* Function to handle the parsing of RTE Flow action JUMP */
3266 int32_t
3267 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item,
3268 			 struct ulp_rte_parser_params *params)
3269 {
3270 	const struct rte_flow_action_jump *jump_act;
3271 	struct ulp_rte_act_prop *act = &params->act_prop;
3272 	uint32_t group_id;
3273 
3274 	jump_act = action_item->conf;
3275 	if (jump_act) {
3276 		group_id = rte_cpu_to_be_32(jump_act->group);
3277 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_JUMP],
3278 		       &group_id, BNXT_ULP_ACT_PROP_SZ_JUMP);
3279 		ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
3280 	}
3281 	return BNXT_TF_RC_SUCCESS;
3282 }
3283 
3284 int32_t
3285 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
3286 			   struct ulp_rte_parser_params *params)
3287 {
3288 	const struct rte_flow_action_sample *sample;
3289 	int ret;
3290 
3291 	sample = action_item->conf;
3292 
3293 	/* if SAMPLE bit is set it means this sample action is nested within the
3294 	 * actions of another sample action; this is not allowed
3295 	 */
3296 	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
3297 			     BNXT_ULP_ACT_BIT_SAMPLE))
3298 		return BNXT_TF_RC_ERROR;
3299 
3300 	/* a sample action is only allowed as a shared action */
3301 	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
3302 			      BNXT_ULP_ACT_BIT_SHARED))
3303 		return BNXT_TF_RC_ERROR;
3304 
3305 	/* only a ratio of 1 i.e. 100% is supported */
3306 	if (sample->ratio != 1)
3307 		return BNXT_TF_RC_ERROR;
3308 
3309 	if (!sample->actions)
3310 		return BNXT_TF_RC_ERROR;
3311 
3312 	/* parse the nested actions for a sample action */
3313 	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
3314 	if (ret == BNXT_TF_RC_SUCCESS)
3315 		/* Update the act_bitmap with sample */
3316 		ULP_BITMAP_SET(params->act_bitmap.bits,
3317 			       BNXT_ULP_ACT_BIT_SAMPLE);
3318 
3319 	return ret;
3320 }
3321 
3322 int32_t
3323 ulp_rte_action_hdlr_handler(const struct rte_flow_action *action_item,
3324 			   struct ulp_rte_parser_params *params)
3325 {
3326 	const struct rte_flow_action_handle *handle;
3327 	struct bnxt_ulp_shared_act_info *act_info;
3328 	uint64_t action_bitmask;
3329 	uint32_t shared_action_type;
3330 	struct ulp_rte_act_prop *act = &params->act_prop;
3331 	uint64_t tmp64;
3332 	enum bnxt_ulp_direction_type dir, handle_dir;
3333 	uint32_t act_info_entries = 0;
3334 	int32_t ret;
3335 
3336 	handle = action_item->conf;
3337 
3338 	/* Have to use the computed direction since the params->dir_attr
3339 	 * can be different (transfer, ingress, egress)
3340 	 */
3341 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
3342 
3343 	/* direction of shared action must match direction of flow */
3344 	ret = bnxt_get_action_handle_direction(handle, &handle_dir);
3345 	if (unlikely(ret || dir != handle_dir)) {
3346 		BNXT_DRV_DBG(ERR, "Invalid shared handle or direction\n");
3347 		return BNXT_TF_RC_ERROR;
3348 	}
3349 
3350 	if (unlikely(bnxt_get_action_handle_type(handle, &shared_action_type))) {
3351 		BNXT_DRV_DBG(ERR, "Invalid shared handle\n");
3352 		return BNXT_TF_RC_ERROR;
3353 	}
3354 
3355 	act_info = bnxt_ulp_shared_act_info_get(&act_info_entries);
3356 	if (unlikely(shared_action_type >= act_info_entries || !act_info)) {
3357 		BNXT_DRV_DBG(ERR, "Invalid shared handle\n");
3358 		return BNXT_TF_RC_ERROR;
3359 	}
3360 
3361 	action_bitmask = act_info[shared_action_type].act_bitmask;
3362 
3363 	/* shared actions of the same type cannot be repeated */
3364 	if (unlikely(params->act_bitmap.bits & action_bitmask)) {
3365 		BNXT_DRV_DBG(ERR, "indirect actions cannot be repeated\n");
3366 		return BNXT_TF_RC_ERROR;
3367 	}
3368 
3369 	tmp64 = tfp_cpu_to_be_64((uint64_t)bnxt_get_action_handle_index(handle));
3370 
3371 	memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE],
3372 	       &tmp64, BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE);
3373 
3374 	ULP_BITMAP_SET(params->act_bitmap.bits, action_bitmask);
3375 
3376 	return BNXT_TF_RC_SUCCESS;
3377 }
3378 
3379 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
3380 int32_t
3381 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
3382 				   struct ulp_rte_parser_params *params)
3383 {
3384 	/* Set the F1 flow header bit */
3385 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
3386 	return ulp_rte_vxlan_decap_act_handler(action_item, params);
3387 }
3388 
3389 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
3390 int32_t
3391 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
3392 				       struct ulp_rte_parser_params *params)
3393 {
3394 	RTE_SET_USED(item);
3395 	/* Set the F2 flow header bit */
3396 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
3397 	return ulp_rte_vxlan_decap_act_handler(NULL, params);
3398 }
3399 
3400 /* Function to handle the parsing of RTE Flow action queue. */
3401 int32_t
3402 ulp_rte_queue_act_handler(const struct rte_flow_action *action_item,
3403 			  struct ulp_rte_parser_params *param)
3404 {
3405 	const struct rte_flow_action_queue *q_info;
3406 	struct ulp_rte_act_prop *ap = &param->act_prop;
3407 
3408 	if (action_item == NULL || action_item->conf == NULL) {
3409 		BNXT_DRV_DBG(ERR, "Parse Err: invalid queue configuration\n");
3410 		return BNXT_TF_RC_ERROR;
3411 	}
3412 
3413 	q_info = action_item->conf;
3414 	/* Copy the queue into the specific action properties */
3415 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX],
3416 	       &q_info->index, BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX);
3417 
3418 	/* set the queue action header bit */
3419 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE);
3420 
3421 	return BNXT_TF_RC_SUCCESS;
3422 }
3423 
3424 /* Function to handle the parsing of RTE Flow action meter. */
3425 int32_t
3426 ulp_rte_meter_act_handler(const struct rte_flow_action *action_item,
3427 			  struct ulp_rte_parser_params *params)
3428 {
3429 	const struct rte_flow_action_meter *meter;
3430 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
3431 	uint32_t tmp_meter_id;
3432 
3433 	if (unlikely(action_item == NULL || action_item->conf == NULL)) {
3434 		BNXT_DRV_DBG(ERR, "Parse Err: invalid meter configuration\n");
3435 		return BNXT_TF_RC_ERROR;
3436 	}
3437 
3438 	meter = action_item->conf;
3439 	/* validate the mtr_id and update the reference counter */
3440 	tmp_meter_id = tfp_cpu_to_be_32(meter->mtr_id);
3441 	memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER],
3442 	       &tmp_meter_id,
3443 	       BNXT_ULP_ACT_PROP_SZ_METER);
3444 
3445 	/* set the meter action header bit */
3446 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_METER);
3447 
3448 	return BNXT_TF_RC_SUCCESS;
3449 }
3450 
3451 /* Function to handle the parsing of RTE Flow action set mac src.*/
3452 int32_t
3453 ulp_rte_set_mac_src_act_handler(const struct rte_flow_action *action_item,
3454 				struct ulp_rte_parser_params *params)
3455 {
3456 	const struct rte_flow_action_set_mac *set_mac;
3457 	struct ulp_rte_act_prop *act = &params->act_prop;
3458 
3459 	set_mac = action_item->conf;
3460 	if (likely(set_mac)) {
3461 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC],
3462 		       set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC);
3463 		/* Update the hdr_bitmap with set mac src */
3464 		ULP_BITMAP_SET(params->act_bitmap.bits,
3465 			       BNXT_ULP_ACT_BIT_SET_MAC_SRC);
3466 		return BNXT_TF_RC_SUCCESS;
3467 	}
3468 	BNXT_DRV_DBG(ERR, "Parse Error: set mac src arg is invalid\n");
3469 	return BNXT_TF_RC_ERROR;
3470 }
3471 
3472 /* Function to handle the parsing of RTE Flow action set mac dst.*/
3473 int32_t
3474 ulp_rte_set_mac_dst_act_handler(const struct rte_flow_action *action_item,
3475 				struct ulp_rte_parser_params *params)
3476 {
3477 	const struct rte_flow_action_set_mac *set_mac;
3478 	struct ulp_rte_act_prop *act = &params->act_prop;
3479 
3480 	set_mac = action_item->conf;
3481 	if (likely(set_mac)) {
3482 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST],
3483 		       set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST);
3484 		/* Update the hdr_bitmap with set ipv4 dst */
3485 		ULP_BITMAP_SET(params->act_bitmap.bits,
3486 			       BNXT_ULP_ACT_BIT_SET_MAC_DST);
3487 		return BNXT_TF_RC_SUCCESS;
3488 	}
3489 	BNXT_DRV_DBG(ERR, "Parse Error: set mac dst arg is invalid\n");
3490 	return BNXT_TF_RC_ERROR;
3491 }
3492