xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c (revision 987f2ec9a0cfcb835cb62fe4c4abde6a584735a8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2023 Broadcom
3  * All rights reserved.
4  */
5 
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
15 #include "tfp.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
19 #include "ulp_tun.h"
20 #include "ulp_template_db_tbl.h"
21 
22 /* Local defines for the parsing functions */
23 #define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
24 #define ULP_VLAN_PRIORITY_MASK		0x700
25 #define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits*/
26 #define ULP_UDP_PORT_VXLAN		4789
27 #define ULP_UDP_PORT_VXLAN_MASK		0xFFFF
28 #define ULP_UDP_PORT_VXLAN_GPE		4790
29 #define ULP_UDP_PORT_VXLAN_GPE_MASK	0xFFFF
30 
31 /* Utility function to skip the void items. */
32 static inline int32_t
33 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
34 {
35 	if (!*item)
36 		return 0;
37 	if (increment)
38 		(*item)++;
39 	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
40 		(*item)++;
41 	if (*item)
42 		return 1;
43 	return 0;
44 }
45 
46 /* Utility function to copy field spec items */
47 static struct ulp_rte_hdr_field *
48 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
49 			const void *buffer,
50 			uint32_t size)
51 {
52 	field->size = size;
53 	memcpy(field->spec, buffer, field->size);
54 	field++;
55 	return field;
56 }
57 
58 /* Utility function to update the field_bitmap */
59 static void
60 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
61 				   uint32_t idx,
62 				   enum bnxt_ulp_prsr_action prsr_act)
63 {
64 	struct ulp_rte_hdr_field *field;
65 
66 	field = &params->hdr_field[idx];
67 	if (ulp_bitmap_notzero(field->mask, field->size)) {
68 		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
69 		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
70 			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
71 		/* Not exact match */
72 		if (!ulp_bitmap_is_ones(field->mask, field->size))
73 			ULP_COMP_FLD_IDX_WR(params,
74 					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
75 	} else {
76 		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
77 	}
78 }
79 
80 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
81 /* Utility function to copy field spec and masks items */
82 static void
83 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
84 		      uint32_t *idx,
85 		      uint32_t size,
86 		      const void *spec_buff,
87 		      const void *mask_buff,
88 		      enum bnxt_ulp_prsr_action prsr_act)
89 {
90 	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
91 
92 	/* update the field size */
93 	field->size = size;
94 
95 	/* copy the mask specifications only if mask is not null */
96 	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
97 		memcpy(field->mask, mask_buff, size);
98 		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
99 	}
100 
101 	/* copy the protocol specifications only if mask is not null*/
102 	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
103 		memcpy(field->spec, spec_buff, size);
104 
105 	/* Increment the index */
106 	*idx = *idx + 1;
107 }
108 
109 /* Utility function to copy field spec and masks items */
110 static int32_t
111 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
112 			       uint32_t *idx,
113 			       uint32_t size)
114 {
115 	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
116 		BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
117 		return -EINVAL;
118 	}
119 	*idx = params->field_idx;
120 	params->field_idx += size;
121 	return 0;
122 }
123 
124 /*
125  * Function to handle the parsing of RTE Flows and placing
126  * the RTE flow items into the ulp structures.
127  */
128 int32_t
129 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
130 			      struct ulp_rte_parser_params *params)
131 {
132 	const struct rte_flow_item *item = pattern;
133 	struct bnxt_ulp_rte_hdr_info *hdr_info;
134 
135 	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
136 
137 	/* Parse all the items in the pattern */
138 	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
139 		if (item->type >= (typeof(item->type))
140 		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
141 			if (item->type >=
142 			    (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
143 				goto hdr_parser_error;
144 			/* get the header information */
145 			hdr_info = &ulp_vendor_hdr_info[item->type -
146 				BNXT_RTE_FLOW_ITEM_TYPE_END];
147 		} else {
148 			if (item->type > RTE_FLOW_ITEM_TYPE_ECPRI)
149 				goto hdr_parser_error;
150 			hdr_info = &ulp_hdr_info[item->type];
151 		}
152 		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
153 			goto hdr_parser_error;
154 		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
155 			/* call the registered callback handler */
156 			if (hdr_info->proto_hdr_func) {
157 				if (hdr_info->proto_hdr_func(item, params) !=
158 				    BNXT_TF_RC_SUCCESS) {
159 					return BNXT_TF_RC_ERROR;
160 				}
161 			}
162 		}
163 		item++;
164 	}
165 	/* update the implied SVIF */
166 	return ulp_rte_parser_implicit_match_port_process(params);
167 
168 hdr_parser_error:
169 	BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
170 		    item->type);
171 	return BNXT_TF_RC_PARSE_ERR;
172 }
173 
174 /*
175  * Function to handle the parsing of RTE Flows and placing
176  * the RTE flow actions into the ulp structures.
177  */
178 int32_t
179 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
180 			      struct ulp_rte_parser_params *params)
181 {
182 	const struct rte_flow_action *action_item = actions;
183 	struct bnxt_ulp_rte_act_info *hdr_info;
184 
185 	/* Parse all the items in the pattern */
186 	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
187 		if (action_item->type >=
188 		    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
189 			if (action_item->type >=
190 			    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
191 				goto act_parser_error;
192 			/* get the header information from bnxt actinfo table */
193 			hdr_info = &ulp_vendor_act_info[action_item->type -
194 				BNXT_RTE_FLOW_ACTION_TYPE_END];
195 		} else {
196 			if (action_item->type > RTE_FLOW_ACTION_TYPE_INDIRECT)
197 				goto act_parser_error;
198 			/* get the header information from the act info table */
199 			hdr_info = &ulp_act_info[action_item->type];
200 		}
201 		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
202 			goto act_parser_error;
203 		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
204 			/* call the registered callback handler */
205 			if (hdr_info->proto_act_func) {
206 				if (hdr_info->proto_act_func(action_item,
207 							     params) !=
208 				    BNXT_TF_RC_SUCCESS) {
209 					return BNXT_TF_RC_ERROR;
210 				}
211 			}
212 		}
213 		action_item++;
214 	}
215 	/* update the implied port details */
216 	ulp_rte_parser_implicit_act_port_process(params);
217 	return BNXT_TF_RC_SUCCESS;
218 
219 act_parser_error:
220 	BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
221 		    action_item->type);
222 	return BNXT_TF_RC_ERROR;
223 }
224 
225 /*
226  * Function to handle the post processing of the computed
227  * fields for the interface.
228  */
229 static void
230 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
231 {
232 	uint32_t ifindex;
233 	uint16_t port_id, parif, svif;
234 	uint32_t mtype;
235 	enum bnxt_ulp_direction_type dir;
236 
237 	/* get the direction details */
238 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
239 
240 	/* read the port id details */
241 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
242 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
243 					      port_id,
244 					      &ifindex)) {
245 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
246 		return;
247 	}
248 
249 	if (dir == BNXT_ULP_DIR_INGRESS) {
250 		/* Set port PARIF */
251 		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
252 					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
253 			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
254 			return;
255 		}
256 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
257 				    parif);
258 		/* Set port SVIF */
259 		if (ulp_port_db_svif_get(params->ulp_ctx, ifindex,
260 					  BNXT_ULP_PHY_PORT_SVIF, &svif)) {
261 			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
262 			return;
263 		}
264 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_SVIF,
265 				    svif);
266 	} else {
267 		/* Get the match port type */
268 		mtype = ULP_COMP_FLD_IDX_RD(params,
269 					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
270 		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
271 			ULP_COMP_FLD_IDX_WR(params,
272 					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
273 					    1);
274 			/* Set VF func PARIF */
275 			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
276 						  BNXT_ULP_VF_FUNC_PARIF,
277 						  &parif)) {
278 				BNXT_TF_DBG(ERR,
279 					    "ParseErr:ifindex is not valid\n");
280 				return;
281 			}
282 			ULP_COMP_FLD_IDX_WR(params,
283 					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
284 					    parif);
285 
286 		} else {
287 			/* Set DRV func PARIF */
288 			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
289 						  BNXT_ULP_DRV_FUNC_PARIF,
290 						  &parif)) {
291 				BNXT_TF_DBG(ERR,
292 					    "ParseErr:ifindex is not valid\n");
293 				return;
294 			}
295 			ULP_COMP_FLD_IDX_WR(params,
296 					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
297 					    parif);
298 		}
299 		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
300 			ULP_COMP_FLD_IDX_WR(params,
301 					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
302 					    1);
303 		}
304 	}
305 }
306 
/*
 * Post-process a parsed (non-tunnel) flow.
 *
 * Derives additional bitmap/computed-field state from what the parsers
 * recorded: flow-direction bits, the VF-to-VF action bit, decrement-TTL
 * handling for tunnelled traffic, the merged header bitmap and the
 * flow id.  Always returns 0 (flow rejection is TBD).
 */
static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* Evaluate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	     match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
		if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_MULTIPLE_PORT)) {
			ULP_BITMAP_SET(params->act_bitmap.bits,
				       BNXT_ULP_ACT_BIT_VF_TO_VF);
		} else {
			/* With multiple ports, VF-to-VF holds only if both
			 * destination ports are VF representors.
			 */
			if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MP_A_IS_VFREP) &&
			    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MP_B_IS_VFREP))
				ULP_BITMAP_SET(params->act_bitmap.bits,
					       BNXT_ULP_ACT_BIT_VF_TO_VF);
			else
				ULP_BITMAP_RESET(params->act_bitmap.bits,
						 BNXT_ULP_ACT_BIT_VF_TO_VF);
		}
	}

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * Check that vxlan proto is included and vxlan decap
		 * action is not set then decrement tunnel ttl.
		 * Similarly add GRE and NVGRE in future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}
381 
382 /*
383  * Function to handle the post processing of the parsing details
384  */
385 void
386 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
387 {
388 	ulp_post_process_normal_flow(params);
389 }
390 
391 /*
392  * Function to compute the flow direction based on the match port details
393  */
394 static void
395 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
396 {
397 	enum bnxt_ulp_intf_type match_port_type;
398 
399 	/* Get the match port type */
400 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
401 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
402 
403 	/* If ingress flow and matchport is vf rep then dir is egress*/
404 	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
405 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
406 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
407 				    BNXT_ULP_DIR_EGRESS);
408 	} else {
409 		/* Assign the input direction */
410 		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
411 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
412 					    BNXT_ULP_DIR_INGRESS);
413 		else
414 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
415 					    BNXT_ULP_DIR_EGRESS);
416 	}
417 }
418 
/*
 * Record the source interface (SVIF) for the flow.
 *
 * Validates that no SVIF was set before (multiple source ports are not
 * supported), updates the match port type, computes the flow direction,
 * then selects which SVIF to read from the port DB:
 *  - ingress on a non-VF-rep port: the physical port SVIF;
 *  - VF representor (unless the item forced egress): the VF func SVIF;
 *  - otherwise: the driver function SVIF.
 * The SVIF spec/mask are stored big-endian in the reserved SVIF header
 * field and the CPU-order value is cached in the SVIF_FLAG comp field.
 * 'item_dir' overrides the computed direction when not DIR_INVALID.
 */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask,
			enum bnxt_ulp_direction_type item_dir)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set,multiple source not support'd\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction */
	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
		    item_dir != BNXT_ULP_DIR_EGRESS)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	/* NOTE(review): svif_get return value is ignored here; on failure
	 * 'svif' could be used uninitialized — confirm intent upstream.
	 */
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type, &svif);
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
475 
476 /* Function to handle the parsing of the RTE port id */
477 int32_t
478 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
479 {
480 	uint16_t port_id = 0;
481 	uint16_t svif_mask = 0xFFFF;
482 	uint32_t ifindex;
483 	int32_t rc = BNXT_TF_RC_ERROR;
484 
485 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
486 	    BNXT_ULP_INVALID_SVIF_VAL)
487 		return BNXT_TF_RC_SUCCESS;
488 
489 	/* SVIF not set. So get the port id */
490 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
491 
492 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
493 					      port_id,
494 					      &ifindex)) {
495 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
496 		return rc;
497 	}
498 
499 	/* Update the SVIF details */
500 	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
501 				     BNXT_ULP_DIR_INVALID);
502 	return rc;
503 }
504 
505 /* Function to handle the implicit action port id */
506 int32_t
507 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
508 {
509 	struct rte_flow_action action_item = {0};
510 	struct rte_flow_action_port_id port_id = {0};
511 
512 	/* Read the action port set bit */
513 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
514 		/* Already set, so just exit */
515 		return BNXT_TF_RC_SUCCESS;
516 	}
517 	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
518 	action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
519 	action_item.conf = &port_id;
520 
521 	/* Update the action port based on incoming port */
522 	ulp_rte_port_act_handler(&action_item, params);
523 
524 	/* Reset the action port set bit */
525 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
526 	return BNXT_TF_RC_SUCCESS;
527 }
528 
/*
 * Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT.
 *
 * Extracts the ethdev id and mask from the item, maps the item type to
 * a direction hint (PORT_ID: none, PORT_REPRESENTOR: ingress,
 * REPRESENTED_PORT: egress), converts the dev port to a ULP ifindex and
 * records the SVIF.  Both spec and mask must be present.
 */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type item_dir;
	uint16_t ethdev_id;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	if (!item->spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
		return rc;
	}
	if (!item->mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
		return rc;
	}

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
		const struct rte_flow_item_port_id *port_spec = item->spec;
		const struct rte_flow_item_port_id *port_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INVALID;
		ethdev_id = port_spec->id;
		mask = port_mask->id;

		/* A zero mask means "any port": flag the SVIF as ignorable
		 * but still carry a non-zero mask forward.
		 * NOTE(review): 0xff (not 0xffff) — looks intentional but
		 * verify against the template usage.
		 */
		if (!port_mask->id) {
			ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF_IGNORE);
			mask = 0xff;
		}
		break;
	}
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_EGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
		return rc;
	}

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}
597 
/*
 * Update the protocol header bitmap based on the L2 ethertype.
 *
 * Sets the inner/outer IPv4/IPv6, eCPRI or RoE header bits as dictated
 * by 'type' (big-endian ethertype).  A VLAN ethertype forces the local
 * has_vlan/has_vlan_mask copies (parameters are by-value) so the VTAG
 * computed fields below get updated.  'in_flag' selects the inner
 * header bits/fields over the outer ones.
 */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag,
			     uint32_t has_vlan, uint32_t has_vlan_mask)
{
#define ULP_RTE_ETHER_TYPE_ROE	0xfc3d

	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		/* VLAN ethertype implies a tag regardless of caller flags */
		has_vlan_mask = 1;
		has_vlan = 1;
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_ECPRI)) {
		/* Update the hdr_bitmap with eCPRI */
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				BNXT_ULP_HDR_BIT_O_ECPRI);
	} else if (type == tfp_cpu_to_be_16(ULP_RTE_ETHER_TYPE_ROE)) {
		/* Update the hdr_bitmap with RoE */
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				BNXT_ULP_HDR_BIT_O_ROE);
	}

	if (has_vlan_mask) {
		if (in_flag) {
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_HAS_VTAG,
					    has_vlan);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_VLAN_NO_IGNORE,
					    1);
		} else {
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_HAS_VTAG,
					    has_vlan);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_VLAN_NO_IGNORE,
					    1);
		}
	}
}
655 
656 /* Internal Function to identify broadcast or multicast packets */
657 static int32_t
658 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
659 {
660 	if (rte_is_multicast_ether_addr(eth_addr) ||
661 	    rte_is_broadcast_ether_addr(eth_addr)) {
662 		BNXT_TF_DBG(DEBUG,
663 			    "No support for bcast or mcast addr offload\n");
664 		return 1;
665 	}
666 	return 0;
667 }
668 
/*
 * Function to handle the parsing of RTE Flow item Ethernet Header.
 *
 * Rejects broadcast/multicast MACs unless the app supports them,
 * copies dst MAC, src MAC and ethertype into the header field array,
 * decides inner vs outer Ethernet from the already-seen header bits,
 * and propagates the ethertype/VLAN info to the L2 proto updater.
 */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t has_vlan = 0, has_vlan_mask = 0;

	/* Perform validations */
	if (eth_spec) {
		/* Avoid multicast and broadcast addr */
		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.dst_addr))
			return BNXT_TF_RC_PARSE_ERR;

		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.src_addr))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->hdr.ether_type;
		has_vlan = eth_spec->has_vlan;
	}
	if (eth_mask) {
		/* Mask off unmatched ethertype bits before classification */
		eth_type &= eth_mask->hdr.ether_type;
		has_vlan_mask = eth_mask->has_vlan;
	}

	/* Reserve the header field slots for the Ethernet fields */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ETH_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	dmac_idx = idx;
	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.dst_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.dst_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.dst_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.src_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.src_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.src_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ethertype match is ignored unless TOS/proto support is enabled */
	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.ether_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.ether_type),
			      ulp_deference_struct(eth_mask, hdr.ether_type),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap: if any outer L2/L3/L4 header was
	 * already seen, this Ethernet header must be the inner one.
	 */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
		/* Remember where the outer DMAC lives for tunnel offload */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
				    dmac_idx);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag,
				     has_vlan, has_vlan_mask);

	return BNXT_TF_RC_SUCCESS;
}
754 
/*
 * Function to handle the parsing of RTE Flow item Vlan Header.
 *
 * Splits the TCI into priority (top 3 bits) and VLAN id (low 12 bits),
 * widens exact-match sub-masks to full 16-bit exactness, copies
 * priority/tag/ethertype into the header field array, and then updates
 * the outer/inner single/double VLAN tag bookkeeping based on which
 * Ethernet headers have been seen and how many tags were counted so
 * far.  A VLAN item without a preceding Ethernet item is an error.
 */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap	*hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		/* Split TCI: PCP in the top 3 bits, VID in the low 12;
		 * both are stored back in network byte order.
		 */
		vlan_tag = ntohs(vlan_spec->hdr.vlan_tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->hdr.eth_proto;
	}

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->hdr.vlan_tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= 0xfff;

		/*
		 * the storage for priority and vlan tag is 2 bytes
		 * The mask of priority which is 3 bits if it is all 1's
		 * then make the rest bits 13 bits as 1's
		 * so that it is matched as exact match.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}

	/* Reserve the header field slots for the VLAN fields */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.vlan_tci);
	/*
	 * The priority field is ignored since OVS is setting it as
	 * wild card match and it is not supported. This is a work
	 * around and shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	/* Inner ethertype is classified below, not matched directly */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.eth_proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, hdr.eth_proto),
			      ulp_deference_struct(vlan_mask, hdr.eth_proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* First tag after the outer Ethernet header */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* Second (inner of the outer pair) tag */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* First tag after the inner Ethernet header */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* Second tag on the inner Ethernet header */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag, 1, 1);
	return BNXT_TF_RC_SUCCESS;
}
906 
907 /* Function to handle the update of proto header based on field values */
908 static void
909 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
910 			     uint8_t proto, uint32_t in_flag)
911 {
912 	if (proto == IPPROTO_UDP) {
913 		if (in_flag) {
914 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
915 				       BNXT_ULP_HDR_BIT_I_UDP);
916 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
917 		} else {
918 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
919 				       BNXT_ULP_HDR_BIT_O_UDP);
920 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
921 		}
922 	} else if (proto == IPPROTO_TCP) {
923 		if (in_flag) {
924 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
925 				       BNXT_ULP_HDR_BIT_I_TCP);
926 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
927 		} else {
928 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
929 				       BNXT_ULP_HDR_BIT_O_TCP);
930 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
931 		}
932 	} else if (proto == IPPROTO_GRE) {
933 		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
934 	} else if (proto == IPPROTO_ICMP) {
935 		if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
936 			ULP_BITMAP_SET(param->hdr_bitmap.bits,
937 				       BNXT_ULP_HDR_BIT_I_ICMP);
938 		else
939 			ULP_BITMAP_SET(param->hdr_bitmap.bits,
940 				       BNXT_ULP_HDR_BIT_O_ICMP);
941 	}
942 
943 	if (in_flag) {
944 		ULP_COMP_FLD_IDX_WR(param,
945 				    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
946 				    1);
947 		ULP_COMP_FLD_IDX_WR(param,
948 				    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
949 				    proto);
950 	} else {
951 		ULP_COMP_FLD_IDX_WR(param,
952 				    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
953 				    1);
954 		ULP_COMP_FLD_IDX_WR(param,
955 				    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
956 				    proto);
957 	}
958 }
959 
/*
 * Function to handle the parsing of RTE Flow item IPV4 Header.
 *
 * Copies the IPv4 spec/mask fields into params->hdr_field in wire order,
 * decides whether this is the outer or the inner L3 header, updates the
 * header bitmaps and computed fields accordingly, and increments the L3
 * header count.  Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	/* idx walks hdr_field; dip_idx remembers the dest-addr field index */
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint8_t proto_mask = 0;
	/* inner_flag is set when this header is parsed as the inner L3 */
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Validate there is room for all IPv4 fields in hdr_field */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields.  Field order must match the template layout, so
	 * every field is copied even when its mask is ignored.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);

	/*
	 * The tos field is ignored since OVS is setting it as wild card
	 * match and it is not supported. An application can enable tos support.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.type_of_service),
			      ulp_deference_struct(ipv4_mask,
						   hdr.type_of_service),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.total_length),
			      ulp_deference_struct(ipv4_mask, hdr.total_length),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
			      ULP_PRSR_ACT_DEFAULT);

	/* Fragment offset mask is always ignored for matching */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.fragment_offset),
			      ulp_deference_struct(ipv4_mask,
						   hdr.fragment_offset),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for matching templates */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.next_proto_id),
			      ulp_deference_struct(ipv4_mask,
						   hdr.next_proto_id),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	/* Keep the spec's next protocol; it is ANDed with the mask below */
	if (ipv4_spec)
		proto = ipv4_spec->hdr.next_proto_id;

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Remember where dest addr lands for the tunnel offload fixup */
	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
	if (ipv4_mask) {
		proto &= ipv4_mask->hdr.next_proto_id;
		proto_mask = ipv4_mask->hdr.next_proto_id;
	}

	/* Update the field protocol hdr bitmap */
	if (proto_mask)
		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1099 
/*
 * Function to handle the parsing of RTE Flow item IPV6 Header.
 *
 * Splits the big-endian vtc_flow word into version, traffic class and
 * flow label before copying all IPv6 fields into params->hdr_field in
 * wire order.  Decides outer vs inner L3, updates bitmaps and computed
 * fields, and increments the L3 header count.  Returns BNXT_TF_RC_SUCCESS
 * or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6	*ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6	*ipv6_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	/* idx walks hdr_field; dip_idx remembers the dest-addr field index */
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size, vtc_flow;
	/* vtc_flow sub-fields, each re-converted to big-endian for storage */
	uint32_t ver_spec = 0, ver_mask = 0;
	uint32_t tc_spec = 0, tc_mask = 0;
	uint32_t lab_spec = 0, lab_mask = 0;
	uint8_t proto = 0;
	uint8_t proto_mask = 0;
	/* inner_flag is set when this header is parsed as the inner L3 */
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Validate there is room for all IPv6 fields in hdr_field */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		vtc_flow = ntohl(ipv6_spec->hdr.vtc_flow);
		ver_spec = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
		tc_spec = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
		lab_spec = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));
		proto = ipv6_spec->hdr.proto;
	}

	if (ipv6_mask) {
		vtc_flow = ntohl(ipv6_mask->hdr.vtc_flow);
		ver_mask = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
		tc_mask = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
		lab_mask = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));

		/* Some of the PMD applications may set the protocol field
		 * in the IPv6 spec but don't set the mask. So, consider
		 * the mask in proto value calculation.
		 */
		proto &= ipv6_mask->hdr.proto;
		proto_mask = ipv6_mask->hdr.proto;
	}

	/* version, TC and flow label each occupy a full vtc_flow-sized slot */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
	ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
			      ULP_PRSR_ACT_DEFAULT);
	/*
	 * The TC and flow label field are ignored since OVS is
	 * setting it for match and it is not supported.
	 * This is a work around and
	 * shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);
	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for template matching */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.proto),
			      ulp_deference_struct(ipv6_mask, hdr.proto),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Remember where dest addr lands for the tunnel offload fixup */
	dip_idx =  idx;
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Update the field protocol hdr bitmap */
	if (proto_mask)
		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
1227 
/*
 * Function to handle the update of proto header based on field values.
 *
 * Sets the inner or outer TCP/UDP header bitmap bit (selected by hdr_bit)
 * and records the L4 ports, their masks and the implied L3 protocol id in
 * the computed fields.  The port arguments are big-endian as taken from
 * the rte_flow item (hence the rte_be_to_cpu_16 conversions before they
 * are stored).  An outer UDP destination port of 4789/4790 additionally
 * flags the flow as a VXLAN / VXLAN-GPE tunnel.
 */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
			     uint16_t src_port, uint16_t src_mask,
			     uint16_t dst_port, uint16_t dst_mask,
			     enum bnxt_ulp_hdr_bit hdr_bit)
{
	switch (hdr_bit) {
	case BNXT_ULP_HDR_BIT_I_UDP:
	case BNXT_ULP_HDR_BIT_I_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		/* FB (field bitmap) flags: nonzero port AND mask means the
		 * port actually participates in the match.
		 */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	case BNXT_ULP_HDR_BIT_O_UDP:
	case BNXT_ULP_HDR_BIT_O_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	default:
		break;
	}

	/* Outer UDP to the well-known VXLAN port implies a VXLAN tunnel */
	if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
	    tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
		ULP_BITMAP_SET(params->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	}

	/* Likewise port 4790 implies a VXLAN-GPE tunnel */
	if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
	    tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN_GPE)) {
		ULP_BITMAP_SET(params->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	}
}
1298 
1299 /* Function to handle the parsing of RTE Flow item UDP Header. */
1300 int32_t
1301 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1302 			struct ulp_rte_parser_params *params)
1303 {
1304 	const struct rte_flow_item_udp *udp_spec = item->spec;
1305 	const struct rte_flow_item_udp *udp_mask = item->mask;
1306 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1307 	uint32_t idx = 0;
1308 	uint32_t size;
1309 	uint16_t dport = 0, sport = 0;
1310 	uint16_t dport_mask = 0, sport_mask = 0;
1311 	uint32_t cnt;
1312 	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;
1313 
1314 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1315 	if (cnt == 2) {
1316 		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1317 		return BNXT_TF_RC_ERROR;
1318 	}
1319 
1320 	if (udp_spec) {
1321 		sport = udp_spec->hdr.src_port;
1322 		dport = udp_spec->hdr.dst_port;
1323 	}
1324 	if (udp_mask) {
1325 		sport_mask = udp_mask->hdr.src_port;
1326 		dport_mask = udp_mask->hdr.dst_port;
1327 	}
1328 
1329 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1330 					   BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1331 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1332 		return BNXT_TF_RC_ERROR;
1333 	}
1334 
1335 	/*
1336 	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1337 	 * header fields
1338 	 */
1339 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1340 	ulp_rte_prsr_fld_mask(params, &idx, size,
1341 			      ulp_deference_struct(udp_spec, hdr.src_port),
1342 			      ulp_deference_struct(udp_mask, hdr.src_port),
1343 			      ULP_PRSR_ACT_DEFAULT);
1344 
1345 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1346 	ulp_rte_prsr_fld_mask(params, &idx, size,
1347 			      ulp_deference_struct(udp_spec, hdr.dst_port),
1348 			      ulp_deference_struct(udp_mask, hdr.dst_port),
1349 			      ULP_PRSR_ACT_DEFAULT);
1350 
1351 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1352 	ulp_rte_prsr_fld_mask(params, &idx, size,
1353 			      ulp_deference_struct(udp_spec, hdr.dgram_len),
1354 			      ulp_deference_struct(udp_mask, hdr.dgram_len),
1355 			      ULP_PRSR_ACT_DEFAULT);
1356 
1357 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1358 	ulp_rte_prsr_fld_mask(params, &idx, size,
1359 			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1360 			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1361 			      ULP_PRSR_ACT_DEFAULT);
1362 
1363 	/* Set the udp header bitmap and computed l4 header bitmaps */
1364 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1365 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
1366 	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1367 		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;
1368 
1369 	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1370 				     dport_mask, out_l4);
1371 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1372 	return BNXT_TF_RC_SUCCESS;
1373 }
1374 
/*
 * Function to handle the parsing of RTE Flow item TCP Header.
 *
 * Copies the TCP spec/mask fields into params->hdr_field in wire order,
 * determines whether this is the outer or inner L4 header, and updates
 * the header bitmap, port computed fields and L4 header count via
 * ulp_rte_l4_proto_type_update().  Returns BNXT_TF_RC_SUCCESS or
 * BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	/* ports and masks are kept big-endian as taken from the item */
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t size;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;

	/* Only two L4 headers (outer + inner) are supported per flow */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}
	if (tcp_mask) {
		sport_mask = tcp_mask->hdr.src_port;
		dport_mask = tcp_mask->hdr.dst_port;
	}

	/* Validate there is room for all TCP fields in hdr_field */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_TCP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields; field order must match the template layout.
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.src_port),
			      ulp_deference_struct(tcp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.dst_port),
			      ulp_deference_struct(tcp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.data_off),
			      ulp_deference_struct(tcp_mask, hdr.data_off),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.rx_win),
			      ulp_deference_struct(tcp_mask, hdr.rx_win),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.cksum),
			      ulp_deference_struct(tcp_mask, hdr.cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			      ULP_PRSR_ACT_DEFAULT);

	/* A prior outer L4 header or a tunnel makes this the inner TCP */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1480 
1481 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1482 int32_t
1483 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1484 			  struct ulp_rte_parser_params *params)
1485 {
1486 	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1487 	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1488 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1489 	uint32_t idx = 0;
1490 	uint16_t dport;
1491 	uint32_t size;
1492 
1493 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1494 					   BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1495 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1496 		return BNXT_TF_RC_ERROR;
1497 	}
1498 
1499 	/*
1500 	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1501 	 * header fields
1502 	 */
1503 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.flags);
1504 	ulp_rte_prsr_fld_mask(params, &idx, size,
1505 			      ulp_deference_struct(vxlan_spec, hdr.flags),
1506 			      ulp_deference_struct(vxlan_mask, hdr.flags),
1507 			      ULP_PRSR_ACT_DEFAULT);
1508 
1509 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd0);
1510 	ulp_rte_prsr_fld_mask(params, &idx, size,
1511 			      ulp_deference_struct(vxlan_spec, hdr.rsvd0),
1512 			      ulp_deference_struct(vxlan_mask, hdr.rsvd0),
1513 			      ULP_PRSR_ACT_DEFAULT);
1514 
1515 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.vni);
1516 	ulp_rte_prsr_fld_mask(params, &idx, size,
1517 			      ulp_deference_struct(vxlan_spec, hdr.vni),
1518 			      ulp_deference_struct(vxlan_mask, hdr.vni),
1519 			      ULP_PRSR_ACT_DEFAULT);
1520 
1521 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd1);
1522 	ulp_rte_prsr_fld_mask(params, &idx, size,
1523 			      ulp_deference_struct(vxlan_spec, hdr.rsvd1),
1524 			      ulp_deference_struct(vxlan_mask, hdr.rsvd1),
1525 			      ULP_PRSR_ACT_DEFAULT);
1526 
1527 	/* Update the hdr_bitmap with vxlan */
1528 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1529 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1530 
1531 	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
1532 	if (!dport) {
1533 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1534 				    ULP_UDP_PORT_VXLAN);
1535 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1536 				    ULP_UDP_PORT_VXLAN_MASK);
1537 	}
1538 
1539 	return BNXT_TF_RC_SUCCESS;
1540 }
1541 
/*
 * Function to handle the parsing of RTE Flow item Vxlan GPE Header.
 *
 * Copies the VXLAN-GPE spec/mask fields into params->hdr_field in wire
 * order, flags the flow as an L3 tunnel, and defaults the outer L4
 * destination port to 4790 if no outer UDP destination port was matched.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_vxlan_gpe_hdr_handler(const struct rte_flow_item *item,
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan_gpe *vxlan_gpe_spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *vxlan_gpe_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport;
	uint32_t size;

	/* Validate there is room for all VXLAN-GPE fields in hdr_field */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_VXLAN_GPE_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vxlan gpe into hdr_field using vxlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_gpe_spec, flags),
			      ulp_deference_struct(vxlan_gpe_mask, flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->rsvd0);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_gpe_spec, rsvd0),
			      ulp_deference_struct(vxlan_gpe_mask, rsvd0),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->protocol);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_gpe_spec, protocol),
			      ulp_deference_struct(vxlan_gpe_mask, protocol),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->vni);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_gpe_spec, vni),
			      ulp_deference_struct(vxlan_gpe_mask, vni),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan_gpe *)NULL)->rsvd1);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_gpe_spec, rsvd1),
			      ulp_deference_struct(vxlan_gpe_mask, rsvd1),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with vxlan gpe*/
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);

	/* Default the outer L4 dest port to the VXLAN-GPE port when unset */
	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
	if (!dport) {
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    ULP_UDP_PORT_VXLAN_GPE);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    ULP_UDP_PORT_VXLAN_GPE_MASK);
	}

	return BNXT_TF_RC_SUCCESS;
}
1608 
1609 /* Function to handle the parsing of RTE Flow item GRE Header. */
1610 int32_t
1611 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1612 			struct ulp_rte_parser_params *params)
1613 {
1614 	const struct rte_flow_item_gre *gre_spec = item->spec;
1615 	const struct rte_flow_item_gre *gre_mask = item->mask;
1616 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1617 	uint32_t idx = 0;
1618 	uint32_t size;
1619 
1620 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1621 					   BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1622 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1623 		return BNXT_TF_RC_ERROR;
1624 	}
1625 
1626 	size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1627 	ulp_rte_prsr_fld_mask(params, &idx, size,
1628 			      ulp_deference_struct(gre_spec, c_rsvd0_ver),
1629 			      ulp_deference_struct(gre_mask, c_rsvd0_ver),
1630 			      ULP_PRSR_ACT_DEFAULT);
1631 
1632 	size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1633 	ulp_rte_prsr_fld_mask(params, &idx, size,
1634 			      ulp_deference_struct(gre_spec, protocol),
1635 			      ulp_deference_struct(gre_mask, protocol),
1636 			      ULP_PRSR_ACT_DEFAULT);
1637 
1638 	/* Update the hdr_bitmap with GRE */
1639 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1640 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1641 	return BNXT_TF_RC_SUCCESS;
1642 }
1643 
/*
 * Function to handle the parsing of RTE Flow item ANY.
 * The ANY item matches everything, so there are no fields to copy and no
 * bitmap to update; the handler simply reports success.
 */
int32_t
ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
1651 
/* Function to handle the parsing of RTE Flow item ICMP Header. */
int32_t
ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp *icmp_spec = item->spec;
	const struct rte_flow_item_icmp *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	/* Reserve hdr_field slots for the ICMP fields; on success idx points
	 * at the first slot for this protocol.
	 */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Copy spec/mask for each ICMP header field in template order:
	 * type, code, checksum, identifier, sequence number.
	 * A NULL spec or mask pointer yields a zero field via
	 * ulp_deference_struct.
	 */
	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_type),
			      ulp_deference_struct(icmp_mask, hdr.icmp_type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_code),
			      ulp_deference_struct(icmp_mask, hdr.icmp_code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
			      ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_ident),
			      ulp_deference_struct(icmp_mask, hdr.icmp_ident),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
			      ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with ICMP: inner if a tunnel header was
	 * already seen on this flow, outer otherwise.
	 */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
1706 
/* Function to handle the parsing of RTE Flow item ICMP6 Header. */
int32_t
ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	/* Reserve hdr_field slots; reuses the ICMP slot count — presumably
	 * ICMP and ICMP6 share the same template layout (TODO confirm).
	 */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Copy spec/mask for type, code and checksum; NULL spec/mask give
	 * zero fields via ulp_deference_struct.
	 */
	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, type),
			      ulp_deference_struct(icmp_mask, type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, code),
			      ulp_deference_struct(icmp_mask, code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, checksum),
			      ulp_deference_struct(icmp_mask, checksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* ICMPv6 on top of an outer IPv4 header is an invalid combination. */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
		BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the hdr_bitmap with ICMP: inner if a tunnel header was
	 * already seen on this flow, outer otherwise.
	 */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
1754 
/* Function to handle the parsing of RTE Flow item ECPRI Header. */
int32_t
ulp_rte_ecpri_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ecpri *ecpri_spec = item->spec;
	const struct rte_flow_item_ecpri *ecpri_mask = item->mask;
	/* Local copies: the incoming item is const and must not be edited. */
	struct rte_flow_item_ecpri l_ecpri_spec, l_ecpri_mask;
	struct rte_flow_item_ecpri *p_ecpri_spec = &l_ecpri_spec;
	struct rte_flow_item_ecpri *p_ecpri_mask = &l_ecpri_mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, cnt;
	uint32_t size;

	/* Reserve hdr_field slots for the eCPRI fields. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ECPRI_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Figure out if eCPRI is within L4(UDP), unsupported, for now */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt >= 1) {
		BNXT_TF_DBG(ERR, "Parse Err: L4 header stack >= 2 not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* No spec/mask means nothing to match on; just flag the protocol. */
	if (!ecpri_spec || !ecpri_mask)
		goto parser_set_ecpri_hdr_bit;

	memcpy(p_ecpri_spec, ecpri_spec, sizeof(*ecpri_spec));
	memcpy(p_ecpri_mask, ecpri_mask, sizeof(*ecpri_mask));

	/* Convert the common word to host order so the bit-fields below can
	 * be edited; converted back to big-endian before the field copy.
	 */
	p_ecpri_spec->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_spec->hdr.common.u32);
	p_ecpri_mask->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_mask->hdr.common.u32);

	/*
	 * Init eCPRI spec+mask to correct defaults, also clear masks of fields
	 * we ignore in the TCAM.
	 */

	l_ecpri_spec.hdr.common.size = 0;
	l_ecpri_spec.hdr.common.c = 0;
	l_ecpri_spec.hdr.common.res = 0;
	l_ecpri_spec.hdr.common.revision = 1;
	l_ecpri_mask.hdr.common.size = 0;
	l_ecpri_mask.hdr.common.c = 1;
	l_ecpri_mask.hdr.common.res = 0;
	l_ecpri_mask.hdr.common.revision = 0xf;

	/* Per message type, mask out (or zero) the sub-fields that are not
	 * matched in hardware.
	 */
	switch (p_ecpri_spec->hdr.common.type) {
	case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		l_ecpri_mask.hdr.type0.seq_id = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_BIT_SEQ:
		l_ecpri_mask.hdr.type1.seq_id = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		l_ecpri_mask.hdr.type2.seq_id = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_GEN_DATA:
		l_ecpri_mask.hdr.type3.seq_id = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_RM_ACC:
		l_ecpri_mask.hdr.type4.rr = 0;
		l_ecpri_mask.hdr.type4.rw = 0;
		l_ecpri_mask.hdr.type4.rma_id = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_DLY_MSR:
		l_ecpri_spec.hdr.type5.act_type = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_RMT_RST:
		l_ecpri_spec.hdr.type6.rst_op = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_EVT_IND:
		l_ecpri_spec.hdr.type7.evt_type = 0;
		l_ecpri_spec.hdr.type7.seq = 0;
		l_ecpri_spec.hdr.type7.number = 0;
		break;

	default:
		break;
	}

	/* Restore network byte order before handing off to the field copy. */
	p_ecpri_spec->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_spec->hdr.common.u32);
	p_ecpri_mask->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_mask->hdr.common.u32);

	/* Type */
	size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.common.u32);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(p_ecpri_spec, hdr.common.u32),
			      ulp_deference_struct(p_ecpri_mask, hdr.common.u32),
			      ULP_PRSR_ACT_DEFAULT);

	/* PC/RTC/MSR_ID */
	size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.dummy[0]);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(p_ecpri_spec, hdr.dummy),
			      ulp_deference_struct(p_ecpri_mask, hdr.dummy),
			      ULP_PRSR_ACT_DEFAULT);

parser_set_ecpri_hdr_bit:
	/* Update the hdr_bitmap with eCPRI */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ECPRI);
	return BNXT_TF_RC_SUCCESS;
}
1868 
/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	/* VOID items carry no data; silently accept them. */
	return BNXT_TF_RC_SUCCESS;
}
1876 
/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	/* VOID actions carry no data; silently accept them. */
	return BNXT_TF_RC_SUCCESS;
}
1884 
1885 /* Function to handle the parsing of RTE Flow action Mark Header. */
1886 int32_t
1887 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1888 			 struct ulp_rte_parser_params *param)
1889 {
1890 	const struct rte_flow_action_mark *mark;
1891 	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1892 	uint32_t mark_id;
1893 
1894 	mark = action_item->conf;
1895 	if (mark) {
1896 		mark_id = tfp_cpu_to_be_32(mark->id);
1897 		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1898 		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1899 
1900 		/* Update the hdr_bitmap with vxlan */
1901 		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1902 		return BNXT_TF_RC_SUCCESS;
1903 	}
1904 	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1905 	return BNXT_TF_RC_ERROR;
1906 }
1907 
1908 /* Function to handle the parsing of RTE Flow action RSS Header. */
1909 int32_t
1910 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1911 			struct ulp_rte_parser_params *param)
1912 {
1913 	const struct rte_flow_action_rss *rss;
1914 	struct ulp_rte_act_prop *ap = &param->act_prop;
1915 	uint64_t queue_list[BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE / sizeof(uint64_t)];
1916 	uint32_t idx = 0, id;
1917 
1918 	if (action_item == NULL || action_item->conf == NULL) {
1919 		BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1920 		return BNXT_TF_RC_ERROR;
1921 	}
1922 
1923 	rss = action_item->conf;
1924 	/* Copy the rss into the specific action properties */
1925 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1926 	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1927 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1928 	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1929 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1930 	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
1931 
1932 	if (rss->key_len != 0 && rss->key_len != BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1933 		BNXT_TF_DBG(ERR, "Parse Err: RSS key length must be 40 bytes\n");
1934 		return BNXT_TF_RC_ERROR;
1935 	}
1936 
1937 	/* User may specify only key length. In that case, rss->key will be NULL.
1938 	 * So, reject the flow if key_length is valid but rss->key is NULL.
1939 	 * Also, copy the RSS hash key only when rss->key is valid.
1940 	 */
1941 	if (rss->key_len != 0 && rss->key == NULL) {
1942 		BNXT_TF_DBG(ERR,
1943 			    "Parse Err: A valid RSS key must be provided with a valid key len.\n");
1944 		return BNXT_TF_RC_ERROR;
1945 	}
1946 	if (rss->key)
1947 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key, rss->key_len);
1948 
1949 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM],
1950 	       &rss->queue_num, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE_NUM);
1951 
1952 	if (rss->queue_num >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) {
1953 		BNXT_TF_DBG(ERR, "Parse Err: RSS queue num too big\n");
1954 		return BNXT_TF_RC_ERROR;
1955 	}
1956 
1957 	/* Queues converted into a bitmap format */
1958 	memset(queue_list, 0, sizeof(queue_list));
1959 	for (idx = 0; idx < rss->queue_num; idx++) {
1960 		id = rss->queue[idx];
1961 		if (id >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) {
1962 			BNXT_TF_DBG(ERR, "Parse Err: RSS queue id too big\n");
1963 			return BNXT_TF_RC_ERROR;
1964 		}
1965 		if ((queue_list[id / ULP_INDEX_BITMAP_SIZE] >>
1966 		    ((ULP_INDEX_BITMAP_SIZE - 1) -
1967 		     (id % ULP_INDEX_BITMAP_SIZE)) & 1)) {
1968 			BNXT_TF_DBG(ERR, "Parse Err: duplicate queue ids\n");
1969 			return BNXT_TF_RC_ERROR;
1970 		}
1971 		queue_list[id / ULP_INDEX_BITMAP_SIZE] |= (1UL <<
1972 		((ULP_INDEX_BITMAP_SIZE - 1) - (id % ULP_INDEX_BITMAP_SIZE)));
1973 	}
1974 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE],
1975 	       (uint8_t *)queue_list, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE);
1976 
1977 	/* set the RSS action header bit */
1978 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1979 
1980 	return BNXT_TF_RC_SUCCESS;
1981 }
1982 
1983 /* Function to handle the parsing of RTE Flow item eth Header. */
1984 static void
1985 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1986 			    const struct rte_flow_item_eth *eth_spec)
1987 {
1988 	struct ulp_rte_hdr_field *field;
1989 	uint32_t size;
1990 
1991 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1992 	size = sizeof(eth_spec->hdr.dst_addr.addr_bytes);
1993 	field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.dst_addr.addr_bytes, size);
1994 
1995 	size = sizeof(eth_spec->hdr.src_addr.addr_bytes);
1996 	field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.src_addr.addr_bytes, size);
1997 
1998 	size = sizeof(eth_spec->hdr.ether_type);
1999 	field = ulp_rte_parser_fld_copy(field, &eth_spec->hdr.ether_type, size);
2000 
2001 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
2002 }
2003 
2004 /* Function to handle the parsing of RTE Flow item vlan Header. */
2005 static void
2006 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
2007 			     const struct rte_flow_item_vlan *vlan_spec,
2008 			     uint32_t inner)
2009 {
2010 	struct ulp_rte_hdr_field *field;
2011 	uint32_t size;
2012 
2013 	if (!inner) {
2014 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
2015 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
2016 			       BNXT_ULP_HDR_BIT_OO_VLAN);
2017 	} else {
2018 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
2019 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
2020 			       BNXT_ULP_HDR_BIT_OI_VLAN);
2021 	}
2022 
2023 	size = sizeof(vlan_spec->hdr.vlan_tci);
2024 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.vlan_tci, size);
2025 
2026 	size = sizeof(vlan_spec->hdr.eth_proto);
2027 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.eth_proto, size);
2028 }
2029 
2030 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
2031 static void
2032 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
2033 			     const struct rte_flow_item_ipv4 *ip)
2034 {
2035 	struct ulp_rte_hdr_field *field;
2036 	uint32_t size;
2037 	uint8_t val8;
2038 
2039 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
2040 	size = sizeof(ip->hdr.version_ihl);
2041 	if (!ip->hdr.version_ihl)
2042 		val8 = RTE_IPV4_VHL_DEF;
2043 	else
2044 		val8 = ip->hdr.version_ihl;
2045 	field = ulp_rte_parser_fld_copy(field, &val8, size);
2046 
2047 	size = sizeof(ip->hdr.type_of_service);
2048 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
2049 
2050 	size = sizeof(ip->hdr.packet_id);
2051 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
2052 
2053 	size = sizeof(ip->hdr.fragment_offset);
2054 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
2055 
2056 	size = sizeof(ip->hdr.time_to_live);
2057 	if (!ip->hdr.time_to_live)
2058 		val8 = BNXT_ULP_DEFAULT_TTL;
2059 	else
2060 		val8 = ip->hdr.time_to_live;
2061 	field = ulp_rte_parser_fld_copy(field, &val8, size);
2062 
2063 	size = sizeof(ip->hdr.next_proto_id);
2064 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
2065 
2066 	size = sizeof(ip->hdr.src_addr);
2067 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
2068 
2069 	size = sizeof(ip->hdr.dst_addr);
2070 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
2071 
2072 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
2073 }
2074 
2075 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
2076 static void
2077 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
2078 			     const struct rte_flow_item_ipv6 *ip)
2079 {
2080 	struct ulp_rte_hdr_field *field;
2081 	uint32_t size;
2082 	uint32_t val32;
2083 	uint8_t val8;
2084 
2085 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
2086 	size = sizeof(ip->hdr.vtc_flow);
2087 	if (!ip->hdr.vtc_flow)
2088 		val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
2089 	else
2090 		val32 = ip->hdr.vtc_flow;
2091 	field = ulp_rte_parser_fld_copy(field, &val32, size);
2092 
2093 	size = sizeof(ip->hdr.proto);
2094 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
2095 
2096 	size = sizeof(ip->hdr.hop_limits);
2097 	if (!ip->hdr.hop_limits)
2098 		val8 = BNXT_ULP_DEFAULT_TTL;
2099 	else
2100 		val8 = ip->hdr.hop_limits;
2101 	field = ulp_rte_parser_fld_copy(field, &val8, size);
2102 
2103 	size = sizeof(ip->hdr.src_addr);
2104 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
2105 
2106 	size = sizeof(ip->hdr.dst_addr);
2107 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
2108 
2109 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
2110 }
2111 
2112 /* Function to handle the parsing of RTE Flow item UDP Header. */
2113 static void
2114 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
2115 			    const struct rte_flow_item_udp *udp_spec)
2116 {
2117 	struct ulp_rte_hdr_field *field;
2118 	uint32_t size;
2119 	uint8_t type = IPPROTO_UDP;
2120 
2121 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
2122 	size = sizeof(udp_spec->hdr.src_port);
2123 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
2124 
2125 	size = sizeof(udp_spec->hdr.dst_port);
2126 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
2127 
2128 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
2129 
2130 	/* Update thhe ip header protocol */
2131 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
2132 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
2133 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
2134 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
2135 }
2136 
2137 /* Function to handle the parsing of RTE Flow item vxlan Header. */
2138 static void
2139 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
2140 			      struct rte_flow_item_vxlan *vxlan_spec)
2141 {
2142 	struct ulp_rte_hdr_field *field;
2143 	uint32_t size;
2144 
2145 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
2146 	size = sizeof(vxlan_spec->hdr.flags);
2147 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.flags, size);
2148 
2149 	size = sizeof(vxlan_spec->hdr.rsvd0);
2150 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd0, size);
2151 
2152 	size = sizeof(vxlan_spec->hdr.vni);
2153 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.vni, size);
2154 
2155 	size = sizeof(vxlan_spec->hdr.rsvd1);
2156 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd1, size);
2157 
2158 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
2159 }
2160 
2161 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
2162 int32_t
2163 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
2164 				struct ulp_rte_parser_params *params)
2165 {
2166 	const struct rte_flow_action_vxlan_encap *vxlan_encap;
2167 	const struct rte_flow_item *item;
2168 	const struct rte_flow_item_ipv4 *ipv4_spec;
2169 	const struct rte_flow_item_ipv6 *ipv6_spec;
2170 	struct rte_flow_item_vxlan vxlan_spec;
2171 	uint32_t vlan_num = 0, vlan_size = 0;
2172 	uint32_t ip_size = 0, ip_type = 0;
2173 	uint32_t vxlan_size = 0;
2174 	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
2175 	struct ulp_rte_act_prop *ap = &params->act_prop;
2176 
2177 	vxlan_encap = action_item->conf;
2178 	if (!vxlan_encap) {
2179 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
2180 		return BNXT_TF_RC_ERROR;
2181 	}
2182 
2183 	item = vxlan_encap->definition;
2184 	if (!item) {
2185 		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
2186 		return BNXT_TF_RC_ERROR;
2187 	}
2188 
2189 	if (!ulp_rte_item_skip_void(&item, 0))
2190 		return BNXT_TF_RC_ERROR;
2191 
2192 	/* must have ethernet header */
2193 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2194 		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
2195 		return BNXT_TF_RC_ERROR;
2196 	}
2197 
2198 	/* Parse the ethernet header */
2199 	if (item->spec)
2200 		ulp_rte_enc_eth_hdr_handler(params, item->spec);
2201 
2202 	/* Goto the next item */
2203 	if (!ulp_rte_item_skip_void(&item, 1))
2204 		return BNXT_TF_RC_ERROR;
2205 
2206 	/* May have vlan header */
2207 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2208 		vlan_num++;
2209 		if (item->spec)
2210 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
2211 
2212 		if (!ulp_rte_item_skip_void(&item, 1))
2213 			return BNXT_TF_RC_ERROR;
2214 	}
2215 
2216 	/* may have two vlan headers */
2217 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2218 		vlan_num++;
2219 		if (item->spec)
2220 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
2221 
2222 		if (!ulp_rte_item_skip_void(&item, 1))
2223 			return BNXT_TF_RC_ERROR;
2224 	}
2225 
2226 	/* Update the vlan count and size of more than one */
2227 	if (vlan_num) {
2228 		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2229 		vlan_num = tfp_cpu_to_be_32(vlan_num);
2230 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2231 		       &vlan_num,
2232 		       sizeof(uint32_t));
2233 		vlan_size = tfp_cpu_to_be_32(vlan_size);
2234 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2235 		       &vlan_size,
2236 		       sizeof(uint32_t));
2237 	}
2238 
2239 	/* L3 must be IPv4, IPv6 */
2240 	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2241 		ipv4_spec = item->spec;
2242 		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2243 
2244 		/* Update the ip size details */
2245 		ip_size = tfp_cpu_to_be_32(ip_size);
2246 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2247 		       &ip_size, sizeof(uint32_t));
2248 
2249 		/* update the ip type */
2250 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2251 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2252 		       &ip_type, sizeof(uint32_t));
2253 
2254 		/* update the computed field to notify it is ipv4 header */
2255 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2256 				    1);
2257 		if (ipv4_spec)
2258 			ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2259 
2260 		if (!ulp_rte_item_skip_void(&item, 1))
2261 			return BNXT_TF_RC_ERROR;
2262 	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2263 		ipv6_spec = item->spec;
2264 		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2265 
2266 		/* Update the ip size details */
2267 		ip_size = tfp_cpu_to_be_32(ip_size);
2268 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2269 		       &ip_size, sizeof(uint32_t));
2270 
2271 		 /* update the ip type */
2272 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2273 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2274 		       &ip_type, sizeof(uint32_t));
2275 
2276 		/* update the computed field to notify it is ipv6 header */
2277 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2278 				    1);
2279 		if (ipv6_spec)
2280 			ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2281 
2282 		if (!ulp_rte_item_skip_void(&item, 1))
2283 			return BNXT_TF_RC_ERROR;
2284 	} else {
2285 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2286 		return BNXT_TF_RC_ERROR;
2287 	}
2288 
2289 	/* L4 is UDP */
2290 	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2291 		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
2292 		return BNXT_TF_RC_ERROR;
2293 	}
2294 	if (item->spec)
2295 		ulp_rte_enc_udp_hdr_handler(params, item->spec);
2296 
2297 	if (!ulp_rte_item_skip_void(&item, 1))
2298 		return BNXT_TF_RC_ERROR;
2299 
2300 	/* Finally VXLAN */
2301 	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2302 		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
2303 		return BNXT_TF_RC_ERROR;
2304 	}
2305 	vxlan_size = sizeof(struct rte_flow_item_vxlan);
2306 	/* copy the vxlan details */
2307 	memcpy(&vxlan_spec, item->spec, vxlan_size);
2308 	vxlan_spec.hdr.flags = 0x08;
2309 	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2310 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2311 	       &vxlan_size, sizeof(uint32_t));
2312 
2313 	ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2314 
2315 	/* update the hdr_bitmap with vxlan */
2316 	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2317 	return BNXT_TF_RC_SUCCESS;
2318 }
2319 
/* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the action bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
	/* Update computational field with tunnel decap info */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
	return BNXT_TF_RC_SUCCESS;
}
2333 
/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the action bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}
2343 
2344 /* Function to handle the parsing of RTE Flow action count. */
2345 int32_t
2346 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2347 			  struct ulp_rte_parser_params *params)
2348 {
2349 	const struct rte_flow_action_count *act_count;
2350 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
2351 
2352 	act_count = action_item->conf;
2353 	if (act_count) {
2354 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2355 		       &act_count->id,
2356 		       BNXT_ULP_ACT_PROP_SZ_COUNT);
2357 	}
2358 
2359 	/* Update the hdr_bitmap with count */
2360 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2361 	return BNXT_TF_RC_SUCCESS;
2362 }
2363 
/* Return non-zero when multi-port "port B" is flagged as a VF representor. */
static bool ulp_rte_parser_is_portb_vfrep(struct ulp_rte_parser_params *param)
{
	return ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP);
}
2368 
2369 /*
2370  * Swaps info related to multi-port:
2371  * common:
2372  *    BNXT_ULP_CF_IDX_MP_B_IS_VFREP, BNXT_ULP_CF_IDX_MP_A_IS_VFREP
2373  *    BNXT_ULP_CF_IDX_MP_PORT_A, BNXT_ULP_CF_IDX_MP_PORT_B
2374  *
2375  * ingress:
2376  *    BNXT_ULP_CF_IDX_MP_VNIC_B, BNXT_ULP_CF_IDX_MP_VNIC_A
2377  *
2378  * egress:
2379  *    BNXT_ULP_CF_IDX_MP_MDATA_B, BNXT_ULP_CF_IDX_MP_MDATA_A
2380  *    BNXT_ULP_CF_IDX_MP_VPORT_B, BNXT_ULP_CF_IDX_MP_VPORT_A
2381  *
2382  * Note: This is done as OVS could give us a non-VFREP port in port B, and we
2383  * cannot use that to mirror, so we swap out the ports so that a VFREP is now
2384  * in port B instead.
2385  */
2386 static int32_t
2387 ulp_rte_parser_normalize_port_info(struct ulp_rte_parser_params *param)
2388 {
2389 	uint16_t mp_port_a, mp_port_b, mp_mdata_a, mp_mdata_b,
2390 		 mp_vport_a, mp_vport_b, mp_vnic_a, mp_vnic_b,
2391 		 mp_is_vfrep_a, mp_is_vfrep_b;
2392 
2393 	mp_is_vfrep_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_A_IS_VFREP);
2394 	mp_is_vfrep_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP);
2395 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_B_IS_VFREP, mp_is_vfrep_a);
2396 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_A_IS_VFREP, mp_is_vfrep_b);
2397 
2398 	mp_port_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_PORT_A);
2399 	mp_port_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_PORT_B);
2400 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_B, mp_port_a);
2401 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_A, mp_port_b);
2402 
2403 	mp_vport_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VPORT_A);
2404 	mp_vport_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VPORT_B);
2405 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VPORT_B, mp_vport_a);
2406 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VPORT_A, mp_vport_b);
2407 
2408 	mp_vnic_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VNIC_A);
2409 	mp_vnic_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_VNIC_B);
2410 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VNIC_B, mp_vnic_a);
2411 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_VNIC_A, mp_vnic_b);
2412 
2413 	mp_mdata_a = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_MDATA_A);
2414 	mp_mdata_b = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_MDATA_B);
2415 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_MDATA_B, mp_mdata_a);
2416 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_MDATA_A, mp_mdata_b);
2417 
2418 	return BNXT_TF_RC_SUCCESS;
2419 }
2420 
2421 
/* Function to handle the parsing of action ports.
 * Resolves the destination (vport for egress, vnic for ingress) for the
 * port identified by ifindex and stores it either in the single-port
 * action properties or, for multi-port (mirror) use, in the port-B
 * computed fields. act_dir overrides the flow direction when the action
 * itself implies one.
 */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex, bool multi_port,
			    enum bnxt_ulp_direction_type act_dir)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint8_t *p_mdata;
	uint32_t pid, port_index;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction */
	/* If action implicitly specifies direction, use the specification. */
	dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
		ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
		act_dir;

	port_type = ULP_COMP_FLD_IDX_RD(param,
					BNXT_ULP_CF_IDX_ACT_PORT_TYPE);

	/* Update flag if Port A/B type is VF-REP */
	ULP_COMP_FLD_IDX_WR(param, multi_port ?
					BNXT_ULP_CF_IDX_MP_B_IS_VFREP :
					BNXT_ULP_CF_IDX_MP_A_IS_VFREP,
			    (port_type == BNXT_ULP_INTF_TYPE_VF_REP) ? 1 : 0);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		/* For egress direction, fill vport */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		/* Single-port destinations go into the action property;
		 * multi-port destinations are kept in computed fields only.
		 */
		if (!multi_port)
			memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
			       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);

		/* Fill metadata */
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
			port_index  = ULP_COMP_FLD_IDX_RD(param, multi_port ?
								 BNXT_ULP_CF_IDX_MP_PORT_B :
								 BNXT_ULP_CF_IDX_MP_PORT_A);
			if (ulp_port_db_port_meta_data_get(param->ulp_ctx,
							   port_index, &p_mdata))
				return BNXT_TF_RC_ERROR;
			/*
			 * Update appropriate port (A/B) metadata based on multi-port
			 * indication
			 */
			ULP_COMP_FLD_IDX_WR(param,
					    multi_port ?
						BNXT_ULP_CF_IDX_MP_MDATA_B :
						BNXT_ULP_CF_IDX_MP_MDATA_A,
					    rte_cpu_to_be_16(*((uint16_t *)p_mdata)));
		}
		/*
		 * Update appropriate port (A/B) VPORT based on multi-port
		 * indication.
		 */
		ULP_COMP_FLD_IDX_WR(param,
				    multi_port ?
					BNXT_ULP_CF_IDX_MP_VPORT_B :
					BNXT_ULP_CF_IDX_MP_VPORT_A,
				    pid_s);
	} else {
		/* For ingress direction, fill vnic */
		/*
		 * Action               Destination
		 * ------------------------------------
		 * PORT_REPRESENTOR     Driver Function
		 * ------------------------------------
		 * REPRESENTED_PORT     VF
		 * ------------------------------------
		 * PORT_ID              VF
		 */
		if (act_dir != BNXT_ULP_DIR_INGRESS &&
		    port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		if (!multi_port)
			memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
			       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
		/*
		 * Update appropriate port (A/B) VNIC based on multi-port
		 * indication.
		 */
		ULP_COMP_FLD_IDX_WR(param,
				    multi_port ?
					BNXT_ULP_CF_IDX_MP_VNIC_B :
					BNXT_ULP_CF_IDX_MP_VNIC_A,
				    pid_s);
	}

	/* Mirroring requires a VF-REP in port B; swap A/B if needed. */
	if (multi_port && !ulp_rte_parser_is_portb_vfrep(param))
		ulp_rte_parser_normalize_port_info(param);

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}
2532 
2533 /* Function to handle the parsing of RTE Flow action PF. */
2534 int32_t
2535 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2536 		       struct ulp_rte_parser_params *params)
2537 {
2538 	uint32_t port_id;
2539 	uint32_t ifindex;
2540 	enum bnxt_ulp_intf_type intf_type;
2541 
2542 	/* Get the port id of the current device */
2543 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2544 
2545 	/* Get the port db ifindex */
2546 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2547 					      &ifindex)) {
2548 		BNXT_TF_DBG(ERR, "Invalid port id\n");
2549 		return BNXT_TF_RC_ERROR;
2550 	}
2551 
2552 	/* Check the port is PF port */
2553 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2554 	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2555 		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2556 		return BNXT_TF_RC_ERROR;
2557 	}
2558 	/* Update the action properties */
2559 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2560 	return ulp_rte_parser_act_port_set(params, ifindex, false,
2561 					   BNXT_ULP_DIR_INVALID);
2562 }
2563 
2564 /* Function to handle the parsing of RTE Flow action VF. */
2565 int32_t
2566 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2567 		       struct ulp_rte_parser_params *params)
2568 {
2569 	const struct rte_flow_action_vf *vf_action;
2570 	enum bnxt_ulp_intf_type intf_type;
2571 	uint32_t ifindex;
2572 	struct bnxt *bp;
2573 
2574 	vf_action = action_item->conf;
2575 	if (!vf_action) {
2576 		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2577 		return BNXT_TF_RC_PARSE_ERR;
2578 	}
2579 
2580 	if (vf_action->original) {
2581 		BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2582 		return BNXT_TF_RC_PARSE_ERR;
2583 	}
2584 
2585 	bp = bnxt_pmd_get_bp(params->port_id);
2586 	if (bp == NULL) {
2587 		BNXT_TF_DBG(ERR, "Invalid bp\n");
2588 		return BNXT_TF_RC_ERROR;
2589 	}
2590 
2591 	/* vf_action->id is a logical number which in this case is an
2592 	 * offset from the first VF. So, to get the absolute VF id, the
2593 	 * offset must be added to the absolute first vf id of that port.
2594 	 */
2595 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2596 						 bp->first_vf_id +
2597 						 vf_action->id,
2598 						 &ifindex)) {
2599 		BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2600 		return BNXT_TF_RC_ERROR;
2601 	}
2602 	/* Check the port is VF port */
2603 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2604 	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2605 	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2606 		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2607 		return BNXT_TF_RC_ERROR;
2608 	}
2609 
2610 	/* Update the action properties */
2611 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2612 	return ulp_rte_parser_act_port_set(params, ifindex, false,
2613 					   BNXT_ULP_DIR_INVALID);
2614 }
2615 
2616 /* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
2617 int32_t
2618 ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
2619 			 struct ulp_rte_parser_params *param)
2620 {
2621 	uint32_t ethdev_id;
2622 	uint32_t ifindex;
2623 	const struct rte_flow_action_port_id *port_id = act_item->conf;
2624 	uint32_t num_ports;
2625 	enum bnxt_ulp_intf_type intf_type;
2626 	enum bnxt_ulp_direction_type act_dir;
2627 
2628 	if (!act_item->conf) {
2629 		BNXT_TF_DBG(ERR,
2630 				"ParseErr: Invalid Argument\n");
2631 		return BNXT_TF_RC_PARSE_ERR;
2632 	}
2633 	switch (act_item->type) {
2634 	case RTE_FLOW_ACTION_TYPE_PORT_ID: {
2635 		const struct rte_flow_action_port_id *port_id = act_item->conf;
2636 
2637 		if (port_id->original) {
2638 			BNXT_TF_DBG(ERR,
2639 				    "ParseErr:Portid Original not supported\n");
2640 			return BNXT_TF_RC_PARSE_ERR;
2641 		}
2642 		ethdev_id = port_id->id;
2643 		act_dir = BNXT_ULP_DIR_INVALID;
2644 		break;
2645 	}
2646 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
2647 		const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2648 
2649 		ethdev_id = ethdev->port_id;
2650 		act_dir = BNXT_ULP_DIR_INGRESS;
2651 		break;
2652 	}
2653 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
2654 		const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2655 
2656 		ethdev_id = ethdev->port_id;
2657 		act_dir = BNXT_ULP_DIR_EGRESS;
2658 		break;
2659 	}
2660 	default:
2661 		BNXT_TF_DBG(ERR, "Unknown port action\n");
2662 		return BNXT_TF_RC_ERROR;
2663 	}
2664 
2665 	num_ports  = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_MP_NPORTS);
2666 
2667 	if (num_ports) {
2668 		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_B,
2669 				    port_id->id);
2670 		ULP_BITMAP_SET(param->act_bitmap.bits,
2671 			       BNXT_ULP_ACT_BIT_MULTIPLE_PORT);
2672 	} else {
2673 		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_PORT_A,
2674 				    port_id->id);
2675 	}
2676 
2677 	/* Get the port db ifindex */
2678 	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
2679 					      &ifindex)) {
2680 		BNXT_TF_DBG(ERR, "Invalid port id\n");
2681 		return BNXT_TF_RC_ERROR;
2682 	}
2683 
2684 	/* Get the intf type */
2685 	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2686 	if (!intf_type) {
2687 		BNXT_TF_DBG(ERR, "Invalid port type\n");
2688 		return BNXT_TF_RC_ERROR;
2689 	}
2690 
2691 	/* Set the action port */
2692 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2693 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID,
2694 			    ethdev_id);
2695 
2696 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_MP_NPORTS, ++num_ports);
2697 	return ulp_rte_parser_act_port_set(param, ifindex,
2698 					   ULP_BITMAP_ISSET(param->act_bitmap.bits,
2699 							    BNXT_ULP_ACT_BIT_MULTIPLE_PORT),
2700 					   act_dir);
2701 }
2702 
2703 /* Function to handle the parsing of RTE Flow action pop vlan. */
2704 int32_t
2705 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2706 				struct ulp_rte_parser_params *params)
2707 {
2708 	/* Update the act_bitmap with pop */
2709 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2710 	return BNXT_TF_RC_SUCCESS;
2711 }
2712 
2713 /* Function to handle the parsing of RTE Flow action push vlan. */
2714 int32_t
2715 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2716 				 struct ulp_rte_parser_params *params)
2717 {
2718 	const struct rte_flow_action_of_push_vlan *push_vlan;
2719 	uint16_t ethertype;
2720 	struct ulp_rte_act_prop *act = &params->act_prop;
2721 
2722 	push_vlan = action_item->conf;
2723 	if (push_vlan) {
2724 		ethertype = push_vlan->ethertype;
2725 		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2726 			BNXT_TF_DBG(ERR,
2727 				    "Parse Err: Ethertype not supported\n");
2728 			return BNXT_TF_RC_PARSE_ERR;
2729 		}
2730 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2731 		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2732 		/* Update the hdr_bitmap with push vlan */
2733 		ULP_BITMAP_SET(params->act_bitmap.bits,
2734 			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
2735 		return BNXT_TF_RC_SUCCESS;
2736 	}
2737 	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2738 	return BNXT_TF_RC_ERROR;
2739 }
2740 
2741 /* Function to handle the parsing of RTE Flow action set vlan id. */
2742 int32_t
2743 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2744 				    struct ulp_rte_parser_params *params)
2745 {
2746 	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2747 	uint32_t vid;
2748 	struct ulp_rte_act_prop *act = &params->act_prop;
2749 
2750 	vlan_vid = action_item->conf;
2751 	if (vlan_vid && vlan_vid->vlan_vid) {
2752 		vid = vlan_vid->vlan_vid;
2753 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2754 		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2755 		/* Update the hdr_bitmap with vlan vid */
2756 		ULP_BITMAP_SET(params->act_bitmap.bits,
2757 			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2758 		return BNXT_TF_RC_SUCCESS;
2759 	}
2760 	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2761 	return BNXT_TF_RC_ERROR;
2762 }
2763 
2764 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2765 int32_t
2766 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2767 				    struct ulp_rte_parser_params *params)
2768 {
2769 	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2770 	uint8_t pcp;
2771 	struct ulp_rte_act_prop *act = &params->act_prop;
2772 
2773 	vlan_pcp = action_item->conf;
2774 	if (vlan_pcp) {
2775 		pcp = vlan_pcp->vlan_pcp;
2776 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2777 		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2778 		/* Update the hdr_bitmap with vlan vid */
2779 		ULP_BITMAP_SET(params->act_bitmap.bits,
2780 			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2781 		return BNXT_TF_RC_SUCCESS;
2782 	}
2783 	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2784 	return BNXT_TF_RC_ERROR;
2785 }
2786 
2787 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2788 int32_t
2789 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2790 				 struct ulp_rte_parser_params *params)
2791 {
2792 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2793 	struct ulp_rte_act_prop *act = &params->act_prop;
2794 
2795 	set_ipv4 = action_item->conf;
2796 	if (set_ipv4) {
2797 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2798 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2799 		/* Update the hdr_bitmap with set ipv4 src */
2800 		ULP_BITMAP_SET(params->act_bitmap.bits,
2801 			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2802 		return BNXT_TF_RC_SUCCESS;
2803 	}
2804 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2805 	return BNXT_TF_RC_ERROR;
2806 }
2807 
2808 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2809 int32_t
2810 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2811 				 struct ulp_rte_parser_params *params)
2812 {
2813 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2814 	struct ulp_rte_act_prop *act = &params->act_prop;
2815 
2816 	set_ipv4 = action_item->conf;
2817 	if (set_ipv4) {
2818 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2819 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2820 		/* Update the hdr_bitmap with set ipv4 dst */
2821 		ULP_BITMAP_SET(params->act_bitmap.bits,
2822 			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2823 		return BNXT_TF_RC_SUCCESS;
2824 	}
2825 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2826 	return BNXT_TF_RC_ERROR;
2827 }
2828 
2829 /* Function to handle the parsing of RTE Flow action set tp src.*/
2830 int32_t
2831 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2832 			       struct ulp_rte_parser_params *params)
2833 {
2834 	const struct rte_flow_action_set_tp *set_tp;
2835 	struct ulp_rte_act_prop *act = &params->act_prop;
2836 
2837 	set_tp = action_item->conf;
2838 	if (set_tp) {
2839 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2840 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2841 		/* Update the hdr_bitmap with set tp src */
2842 		ULP_BITMAP_SET(params->act_bitmap.bits,
2843 			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
2844 		return BNXT_TF_RC_SUCCESS;
2845 	}
2846 
2847 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2848 	return BNXT_TF_RC_ERROR;
2849 }
2850 
2851 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2852 int32_t
2853 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2854 			       struct ulp_rte_parser_params *params)
2855 {
2856 	const struct rte_flow_action_set_tp *set_tp;
2857 	struct ulp_rte_act_prop *act = &params->act_prop;
2858 
2859 	set_tp = action_item->conf;
2860 	if (set_tp) {
2861 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2862 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2863 		/* Update the hdr_bitmap with set tp dst */
2864 		ULP_BITMAP_SET(params->act_bitmap.bits,
2865 			       BNXT_ULP_ACT_BIT_SET_TP_DST);
2866 		return BNXT_TF_RC_SUCCESS;
2867 	}
2868 
2869 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2870 	return BNXT_TF_RC_ERROR;
2871 }
2872 
2873 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2874 int32_t
2875 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2876 			    struct ulp_rte_parser_params *params)
2877 {
2878 	/* Update the act_bitmap with dec ttl */
2879 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2880 	return BNXT_TF_RC_SUCCESS;
2881 }
2882 
2883 /* Function to handle the parsing of RTE Flow action JUMP */
2884 int32_t
2885 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2886 			 struct ulp_rte_parser_params *params)
2887 {
2888 	/* Update the act_bitmap with dec ttl */
2889 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2890 	return BNXT_TF_RC_SUCCESS;
2891 }
2892 
2893 int32_t
2894 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2895 			   struct ulp_rte_parser_params *params)
2896 {
2897 	const struct rte_flow_action_sample *sample;
2898 	int ret;
2899 
2900 	sample = action_item->conf;
2901 
2902 	/* if SAMPLE bit is set it means this sample action is nested within the
2903 	 * actions of another sample action; this is not allowed
2904 	 */
2905 	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2906 			     BNXT_ULP_ACT_BIT_SAMPLE))
2907 		return BNXT_TF_RC_ERROR;
2908 
2909 	/* a sample action is only allowed as a shared action */
2910 	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2911 			      BNXT_ULP_ACT_BIT_SHARED))
2912 		return BNXT_TF_RC_ERROR;
2913 
2914 	/* only a ratio of 1 i.e. 100% is supported */
2915 	if (sample->ratio != 1)
2916 		return BNXT_TF_RC_ERROR;
2917 
2918 	if (!sample->actions)
2919 		return BNXT_TF_RC_ERROR;
2920 
2921 	/* parse the nested actions for a sample action */
2922 	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2923 	if (ret == BNXT_TF_RC_SUCCESS)
2924 		/* Update the act_bitmap with sample */
2925 		ULP_BITMAP_SET(params->act_bitmap.bits,
2926 			       BNXT_ULP_ACT_BIT_SAMPLE);
2927 
2928 	return ret;
2929 }
2930 
2931 int32_t
2932 ulp_rte_action_hdlr_handler(const struct rte_flow_action *action_item,
2933 			   struct ulp_rte_parser_params *params)
2934 {
2935 	const struct rte_flow_action_handle *handle;
2936 	struct bnxt_ulp_shared_act_info *act_info;
2937 	uint64_t action_bitmask;
2938 	uint32_t shared_action_type;
2939 	struct ulp_rte_act_prop *act = &params->act_prop;
2940 	uint64_t tmp64;
2941 	enum bnxt_ulp_direction_type dir, handle_dir;
2942 	uint32_t act_info_entries = 0;
2943 	int32_t ret;
2944 
2945 	handle = action_item->conf;
2946 
2947 	/* Have to use the computed direction since the params->dir_attr
2948 	 * can be different (transfer, ingress, egress)
2949 	 */
2950 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
2951 
2952 	/* direction of shared action must match direction of flow */
2953 	ret = bnxt_get_action_handle_direction(handle, &handle_dir);
2954 	if (ret || dir != handle_dir) {
2955 		BNXT_TF_DBG(ERR, "Invalid shared handle or direction\n");
2956 		return BNXT_TF_RC_ERROR;
2957 	}
2958 
2959 	if (bnxt_get_action_handle_type(handle, &shared_action_type)) {
2960 		BNXT_TF_DBG(ERR, "Invalid shared handle\n");
2961 		return BNXT_TF_RC_ERROR;
2962 	}
2963 
2964 	act_info = bnxt_ulp_shared_act_info_get(&act_info_entries);
2965 	if (shared_action_type >= act_info_entries || !act_info) {
2966 		BNXT_TF_DBG(ERR, "Invalid shared handle\n");
2967 		return BNXT_TF_RC_ERROR;
2968 	}
2969 
2970 	action_bitmask = act_info[shared_action_type].act_bitmask;
2971 
2972 	/* shared actions of the same type cannot be repeated */
2973 	if (params->act_bitmap.bits & action_bitmask) {
2974 		BNXT_TF_DBG(ERR, "indirect actions cannot be repeated\n");
2975 		return BNXT_TF_RC_ERROR;
2976 	}
2977 
2978 	tmp64 = tfp_cpu_to_be_64((uint64_t)bnxt_get_action_handle_index(handle));
2979 
2980 	memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE],
2981 	       &tmp64, BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE);
2982 
2983 	ULP_BITMAP_SET(params->act_bitmap.bits, action_bitmask);
2984 
2985 	return BNXT_TF_RC_SUCCESS;
2986 }
2987 
2988 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
2989 int32_t
2990 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2991 				   struct ulp_rte_parser_params *params)
2992 {
2993 	/* Set the F1 flow header bit */
2994 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2995 	return ulp_rte_vxlan_decap_act_handler(action_item, params);
2996 }
2997 
2998 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
2999 int32_t
3000 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
3001 				       struct ulp_rte_parser_params *params)
3002 {
3003 	RTE_SET_USED(item);
3004 	/* Set the F2 flow header bit */
3005 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
3006 	return ulp_rte_vxlan_decap_act_handler(NULL, params);
3007 }
3008 
3009 /* Function to handle the parsing of RTE Flow action queue. */
3010 int32_t
3011 ulp_rte_queue_act_handler(const struct rte_flow_action *action_item,
3012 			  struct ulp_rte_parser_params *param)
3013 {
3014 	const struct rte_flow_action_queue *q_info;
3015 	struct ulp_rte_act_prop *ap = &param->act_prop;
3016 
3017 	if (action_item == NULL || action_item->conf == NULL) {
3018 		BNXT_TF_DBG(ERR, "Parse Err: invalid queue configuration\n");
3019 		return BNXT_TF_RC_ERROR;
3020 	}
3021 
3022 	q_info = action_item->conf;
3023 	/* Copy the queue into the specific action properties */
3024 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX],
3025 	       &q_info->index, BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX);
3026 
3027 	/* set the queue action header bit */
3028 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE);
3029 
3030 	return BNXT_TF_RC_SUCCESS;
3031 }
3032 
3033 /* Function to handle the parsing of RTE Flow action meter. */
3034 int32_t
3035 ulp_rte_meter_act_handler(const struct rte_flow_action *action_item,
3036 			  struct ulp_rte_parser_params *params)
3037 {
3038 	const struct rte_flow_action_meter *meter;
3039 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
3040 	uint32_t tmp_meter_id;
3041 
3042 	if (action_item == NULL || action_item->conf == NULL) {
3043 		BNXT_TF_DBG(ERR, "Parse Err: invalid meter configuration\n");
3044 		return BNXT_TF_RC_ERROR;
3045 	}
3046 
3047 	meter = action_item->conf;
3048 	if (meter) {
3049 		/* validate the mtr_id and update the reference counter */
3050 		tmp_meter_id = tfp_cpu_to_be_32(meter->mtr_id);
3051 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER],
3052 		       &tmp_meter_id,
3053 		       BNXT_ULP_ACT_PROP_SZ_METER);
3054 	}
3055 
3056 	/* set the meter action header bit */
3057 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_METER);
3058 
3059 	return BNXT_TF_RC_SUCCESS;
3060 }
3061 
3062 /* Function to handle the parsing of RTE Flow action set mac src.*/
3063 int32_t
3064 ulp_rte_set_mac_src_act_handler(const struct rte_flow_action *action_item,
3065 				struct ulp_rte_parser_params *params)
3066 {
3067 	const struct rte_flow_action_set_mac *set_mac;
3068 	struct ulp_rte_act_prop *act = &params->act_prop;
3069 
3070 	set_mac = action_item->conf;
3071 	if (set_mac) {
3072 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC],
3073 		       set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC);
3074 		/* Update the hdr_bitmap with set mac src */
3075 		ULP_BITMAP_SET(params->act_bitmap.bits,
3076 			       BNXT_ULP_ACT_BIT_SET_MAC_SRC);
3077 		return BNXT_TF_RC_SUCCESS;
3078 	}
3079 	BNXT_TF_DBG(ERR, "Parse Error: set mac src arg is invalid\n");
3080 	return BNXT_TF_RC_ERROR;
3081 }
3082 
3083 /* Function to handle the parsing of RTE Flow action set mac dst.*/
3084 int32_t
3085 ulp_rte_set_mac_dst_act_handler(const struct rte_flow_action *action_item,
3086 				struct ulp_rte_parser_params *params)
3087 {
3088 	const struct rte_flow_action_set_mac *set_mac;
3089 	struct ulp_rte_act_prop *act = &params->act_prop;
3090 
3091 	set_mac = action_item->conf;
3092 	if (set_mac) {
3093 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST],
3094 		       set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST);
3095 		/* Update the hdr_bitmap with set ipv4 dst */
3096 		ULP_BITMAP_SET(params->act_bitmap.bits,
3097 			       BNXT_ULP_ACT_BIT_SET_MAC_DST);
3098 		return BNXT_TF_RC_SUCCESS;
3099 	}
3100 	BNXT_TF_DBG(ERR, "Parse Error: set mac dst arg is invalid\n");
3101 	return BNXT_TF_RC_ERROR;
3102 }
3103