xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c (revision 13f8de927ae3cb63e6d784b7e2d9078bc909700c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
15 #include "tfp.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
19 #include "ulp_tun.h"
20 #include "ulp_template_db_tbl.h"
21 
/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
/* NOTE(review): compared against htons(tci >> 13); the 0x700 value assumes a
 * little-endian host where htons(7) == 0x0700 — confirm on big-endian.
 */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits*/
#define ULP_UDP_PORT_VXLAN		4789 /* IANA-assigned VXLAN UDP port */
27 
28 /* Utility function to skip the void items. */
29 static inline int32_t
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
31 {
32 	if (!*item)
33 		return 0;
34 	if (increment)
35 		(*item)++;
36 	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
37 		(*item)++;
38 	if (*item)
39 		return 1;
40 	return 0;
41 }
42 
43 /* Utility function to copy field spec items */
44 static struct ulp_rte_hdr_field *
45 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
46 			const void *buffer,
47 			uint32_t size)
48 {
49 	field->size = size;
50 	memcpy(field->spec, buffer, field->size);
51 	field++;
52 	return field;
53 }
54 
55 /* Utility function to update the field_bitmap */
56 static void
57 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
58 				   uint32_t idx,
59 				   enum bnxt_ulp_prsr_action prsr_act)
60 {
61 	struct ulp_rte_hdr_field *field;
62 
63 	field = &params->hdr_field[idx];
64 	if (ulp_bitmap_notzero(field->mask, field->size)) {
65 		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
66 		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
67 			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
68 		/* Not exact match */
69 		if (!ulp_bitmap_is_ones(field->mask, field->size))
70 			ULP_COMP_FLD_IDX_WR(params,
71 					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
72 	} else {
73 		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
74 	}
75 }
76 
/* Safe optional-field access: &x->y when x is non-NULL, else NULL. */
#define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
/*
 * Copy one protocol field's spec and mask into hdr_field[*idx].
 *
 * The mask is copied (and the field bitmaps updated) only when a mask
 * buffer is provided and ULP_PRSR_ACT_MASK_IGNORE is not set.  The spec is
 * copied only when both spec and mask are provided AND the mask is
 * non-zero — a zero mask means the field cannot influence the match, so
 * the spec is intentionally dropped.  *idx is advanced by one either way
 * so field slots stay aligned with the protocol layout.
 */
static void
ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
		      uint32_t *idx,
		      uint32_t size,
		      const void *spec_buff,
		      const void *mask_buff,
		      enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];

	/* update the field size */
	field->size = size;

	/* copy the mask specifications only if mask is not null */
	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
		memcpy(field->mask, mask_buff, size);
		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
	}

	/* copy the protocol specifications only if mask is not null*/
	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
		memcpy(field->spec, spec_buff, size);

	/* Increment the index */
	*idx = *idx + 1;
}
105 
106 /* Utility function to copy field spec and masks items */
107 static int32_t
108 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
109 			       uint32_t *idx,
110 			       uint32_t size)
111 {
112 	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
113 		BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
114 		return -EINVAL;
115 	}
116 	*idx = params->field_idx;
117 	params->field_idx += size;
118 	return 0;
119 }
120 
/*
 * Parse an RTE flow item pattern into the ulp parser params.
 *
 * Walks every item until RTE_FLOW_ITEM_TYPE_END, dispatching each one to
 * the handler registered in ulp_hdr_info[] (standard items) or
 * ulp_vendor_hdr_info[] (bnxt vendor-specific items), then applies the
 * implied SVIF match for the incoming port.
 *
 * Returns BNXT_TF_RC_SUCCESS, BNXT_TF_RC_ERROR when a handler fails, or
 * BNXT_TF_RC_PARSE_ERR for an unsupported item type.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	/* Field slots below SVIF_NUM are reserved for the SVIF match. */
	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Set the computed flags for no vlan tags before parsing */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->type >= (typeof(item->type))
		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
			/* Vendor-specific item range */
			if (item->type >=
			    (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
				goto hdr_parser_error;
			/* get the header information */
			hdr_info = &ulp_vendor_hdr_info[item->type -
				BNXT_RTE_FLOW_ITEM_TYPE_END];
		} else {
			if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
				goto hdr_parser_error;
			hdr_info = &ulp_hdr_info[item->type];
		}
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			goto hdr_parser_error;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);

hdr_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
		    item->type);
	return BNXT_TF_RC_PARSE_ERR;
}
174 
/*
 * Parse an RTE flow action list into the ulp parser params.
 *
 * Walks every action until RTE_FLOW_ACTION_TYPE_END, dispatching each one
 * to the handler registered in ulp_act_info[] (standard actions) or
 * ulp_vendor_act_info[] (bnxt vendor-specific actions), then applies the
 * implicit action port when no explicit port action was seen.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the items in the pattern */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		if (action_item->type >=
		    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
			/* Vendor-specific action range */
			if (action_item->type >=
			    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
				goto act_parser_error;
			/* get the header information from bnxt actinfo table */
			hdr_info = &ulp_vendor_act_info[action_item->type -
				BNXT_RTE_FLOW_ACTION_TYPE_END];
		} else {
			if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
				goto act_parser_error;
			/* get the header information from the act info table */
			hdr_info = &ulp_act_info[action_item->type];
		}
		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			goto act_parser_error;
		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;

act_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
		    action_item->type);
	return BNXT_TF_RC_ERROR;
}
225 
/*
 * Post-process the computed fields for the incoming interface.
 *
 * Resolves the incoming dpdk port to a ulp ifindex, then fills in the
 * PARIF computed fields: the physical-port PARIF for ingress flows, or the
 * VF-function / driver-function PARIF for egress flows depending on the
 * match port type.  Also flags VF-rep / PF match ports.  Errors are logged
 * and the update is silently skipped (void return).
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
					    1);
		}
	}
}
299 
/*
 * Post-process a normal (non-tunnel) flow after parsing.
 *
 * Sets the direction bit in the header/action bitmaps, computes the
 * VF-to-VF flag, resolves whether a DEC_TTL action applies to the tunnel
 * or the inner packet, merges the field-derived header bits, records the
 * flow id and updates the interface computed fields.  Always returns 0.
 */
static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * Check that vxlan proto is included and vxlan decap
		 * action is not set then decrement tunnel ttl.
		 * Similarly add GRE and NVGRE in future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}
361 
/*
 * Entry point for post-processing of the parsed flow details.
 * The helper's return value is informational only and is ignored here.
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	(void)ulp_post_process_normal_flow(params);
}
370 
371 /*
372  * Function to compute the flow direction based on the match port details
373  */
374 static void
375 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
376 {
377 	enum bnxt_ulp_intf_type match_port_type;
378 
379 	/* Get the match port type */
380 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
381 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
382 
383 	/* If ingress flow and matchport is vf rep then dir is egress*/
384 	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
385 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
386 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
387 				    BNXT_ULP_DIR_EGRESS);
388 	} else {
389 		/* Assign the input direction */
390 		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
391 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
392 					    BNXT_ULP_DIR_INGRESS);
393 		else
394 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
395 					    BNXT_ULP_DIR_EGRESS);
396 	}
397 }
398 
/*
 * Program the SVIF match in hdr_field from the given ulp interface index.
 *
 * Rejects a second SVIF source (only one port match per flow is allowed),
 * records the match port type, computes the flow direction, then selects
 * which SVIF to read from the port database: the physical-port SVIF for
 * ingress on a non-VF-rep port, the VF-function SVIF for a VF-rep port
 * (unless the item forces egress), otherwise the driver-function SVIF.
 * The big-endian SVIF value and the caller's mask are written into the
 * reserved SVIF hdr_field slot and the SVIF_FLAG computed field.
 *
 * item_dir may be BNXT_ULP_DIR_INVALID to use the computed direction.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask,
			enum bnxt_ulp_direction_type item_dir)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set,multiple source not support'd\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction; an explicit item direction wins */
	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
		    item_dir != BNXT_ULP_DIR_EGRESS)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
			     &svif);
	/* hdr_field spec holds the SVIF in network byte order */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
456 
457 /* Function to handle the parsing of the RTE port id */
458 int32_t
459 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
460 {
461 	uint16_t port_id = 0;
462 	uint16_t svif_mask = 0xFFFF;
463 	uint32_t ifindex;
464 	int32_t rc = BNXT_TF_RC_ERROR;
465 
466 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
467 	    BNXT_ULP_INVALID_SVIF_VAL)
468 		return BNXT_TF_RC_SUCCESS;
469 
470 	/* SVIF not set. So get the port id */
471 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
472 
473 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
474 					      port_id,
475 					      &ifindex)) {
476 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
477 		return rc;
478 	}
479 
480 	/* Update the SVIF details */
481 	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
482 				     BNXT_ULP_DIR_INVALID);
483 	return rc;
484 }
485 
486 /* Function to handle the implicit action port id */
487 int32_t
488 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
489 {
490 	struct rte_flow_action action_item = {0};
491 	struct rte_flow_action_port_id port_id = {0};
492 
493 	/* Read the action port set bit */
494 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
495 		/* Already set, so just exit */
496 		return BNXT_TF_RC_SUCCESS;
497 	}
498 	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
499 	action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
500 	action_item.conf = &port_id;
501 
502 	/* Update the action port based on incoming port */
503 	ulp_rte_port_act_handler(&action_item, params);
504 
505 	/* Reset the action port set bit */
506 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
507 	return BNXT_TF_RC_SUCCESS;
508 }
509 
510 /* Function to handle the parsing of RTE Flow item VF Header. */
511 int32_t
512 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
513 		       struct ulp_rte_parser_params *params)
514 {
515 	const struct rte_flow_item_vf *vf_spec = item->spec;
516 	const struct rte_flow_item_vf *vf_mask = item->mask;
517 	uint16_t mask = 0;
518 	uint32_t ifindex;
519 	int32_t rc = BNXT_TF_RC_PARSE_ERR;
520 
521 	/* Get VF rte_flow_item for Port details */
522 	if (!vf_spec) {
523 		BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
524 		return rc;
525 	}
526 	if (!vf_mask) {
527 		BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
528 		return rc;
529 	}
530 	mask = vf_mask->id;
531 
532 	/* perform the conversion from VF Func id to bnxt ifindex */
533 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
534 						 vf_spec->id,
535 						 &ifindex)) {
536 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
537 		return rc;
538 	}
539 	/* Update the SVIF details */
540 	return ulp_rte_parser_svif_set(params, ifindex, mask,
541 				       BNXT_ULP_DIR_INVALID);
542 }
543 
/*
 * Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT.
 *
 * Extracts the ethdev id and mask from the item-specific struct, maps the
 * item type to a forced direction (PORT_REPRESENTOR => ingress,
 * REPRESENTED_PORT => egress, PORT_ID => none), resolves the dpdk port to
 * a ulp ifindex and programs the SVIF match.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_PARSE_ERR.
 */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type item_dir;
	uint16_t ethdev_id;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;
	uint32_t ifindex;

	if (!item->spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
		return rc;
	}
	if (!item->mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
		return rc;
	}

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
		const struct rte_flow_item_port_id *port_spec = item->spec;
		const struct rte_flow_item_port_id *port_mask = item->mask;

		/* No direction forced; use the computed one. */
		item_dir = BNXT_ULP_DIR_INVALID;
		ethdev_id = port_spec->id;
		mask = port_mask->id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_EGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
		return rc;
	}

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}
607 
/*
 * Parse the RTE Flow PHY_PORT item and program the SVIF match.
 *
 * Valid only for ingress flows.  Looks up the physical port SVIF in the
 * port db and writes it (big-endian) into the SVIF hdr_field slot.  When
 * the mask is zero the item is treated as "any phy port": the control
 * port must match the flow's device port, the SVIF_IGNORE header bit is
 * set and the stored mask forced to all-ones.
 *
 * Returns BNXT_TF_RC_SUCCESS, BNXT_TF_RC_ERROR or BNXT_TF_RC_PARSE_ERR.
 */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_phy_port *port_spec = item->spec;
	const struct rte_flow_item_phy_port *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_ERROR;
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;

	/* Copy the rte_flow_item for phy port into hdr_field */
	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
		return rc;
	}
	mask = port_mask->index;

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);

	/* Compute the Hw direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Direction validation */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for ingress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
					   &svif);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Update the SVIF details (stored in network byte order) */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	if (!mask) {
		uint32_t port_id = 0;
		uint16_t phy_port = 0;

		/* Validate the control port */
		port_id = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_DEV_PORT_ID);
		if (ulp_port_db_phy_port_get(params->ulp_ctx,
					     port_id, &phy_port) ||
		    (uint16_t)port_spec->index != phy_port) {
			BNXT_TF_DBG(ERR, "Mismatch of control and phy_port\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* Zero mask: ignore SVIF in the match, store all-ones mask */
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_SVIF_IGNORE);
		memset(hdr_field->mask, 0xFF, sizeof(mask));
	}
	return BNXT_TF_RC_SUCCESS;
}
682 
683 /* Function to handle the update of proto header based on field values */
684 static void
685 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
686 			     uint16_t type, uint32_t in_flag)
687 {
688 	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
689 		if (in_flag) {
690 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
691 				       BNXT_ULP_HDR_BIT_I_IPV4);
692 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
693 		} else {
694 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
695 				       BNXT_ULP_HDR_BIT_O_IPV4);
696 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
697 		}
698 	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
699 		if (in_flag) {
700 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
701 				       BNXT_ULP_HDR_BIT_I_IPV6);
702 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
703 		} else {
704 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
705 				       BNXT_ULP_HDR_BIT_O_IPV6);
706 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
707 		}
708 	}
709 }
710 
711 /* Internal Function to identify broadcast or multicast packets */
712 static int32_t
713 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
714 {
715 	if (rte_is_multicast_ether_addr(eth_addr) ||
716 	    rte_is_broadcast_ether_addr(eth_addr)) {
717 		BNXT_TF_DBG(DEBUG,
718 			    "No support for bcast or mcast addr offload\n");
719 		return 1;
720 	}
721 	return 0;
722 }
723 
/*
 * Parse the RTE Flow ETH item into hdr_field slots.
 *
 * Rejects broadcast/multicast DMAC or SMAC (offload unsupported), copies
 * dst MAC, src MAC and ethertype into consecutive field slots, then marks
 * the header as inner (I_ETH) when an outer L2/L3/L4 header bit is already
 * set, or outer (O_ETH) otherwise.  For the outer header the DMAC field
 * index is recorded for tunnel offload use.  The ethertype spec value also
 * drives the implied inner/outer IPv4/IPv6 bits.
 *
 * Returns BNXT_TF_RC_SUCCESS, BNXT_TF_RC_PARSE_ERR or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/* Perform validations */
	if (eth_spec) {
		/* Todo: work around to avoid multicast and broadcast addr */
		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
			return BNXT_TF_RC_PARSE_ERR;

		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->type;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ETH_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	dmac_idx = idx;
	size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, dst.addr_bytes),
			      ulp_deference_struct(eth_mask, dst.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, src.addr_bytes),
			      ulp_deference_struct(eth_mask, src.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ethertype is copied but excluded from the exact-match bitmap */
	size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, type),
			      ulp_deference_struct(eth_mask, type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
				    dmac_idx);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
799 
/*
 * Parse the RTE Flow VLAN item into hdr_field slots.
 *
 * Splits the TCI into priority (PCP) and 12-bit VLAN id, widening an
 * exact priority/tag mask to a full 16-bit exact match, copies priority,
 * tag and inner ethertype into consecutive field slots, then advances the
 * outer/inner VLAN tag-count state machine (OO -> OI before an inner eth,
 * IO -> II after one) and sets the matching computed fields and header
 * bits.  A VLAN item seen without a preceding ETH item is an error.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap	*hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		/* Split TCI into PCP (top 3 bits) and 12-bit VLAN id */
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->inner_type;
	}

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= 0xfff;

		/*
		 * the storage for priority and vlan tag is 2 bytes
		 * The mask of priority which is 3 bits if it is all 1's
		 * then make the rest bits 13 bits as 1's
		 * so that it is matched as exact match.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
	/*
	 * The priority field is ignored since OVS is setting it as
	 * wild card match and it is not supported. This is a work
	 * around and shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	/* Inner ethertype is copied but excluded from the exact match */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, inner_type),
			      ulp_deference_struct(vlan_mask, inner_type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* First tag after the outer eth: outer-outer VLAN */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* Second tag after the outer eth: outer-inner VLAN */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* First tag after the inner eth: inner-outer VLAN */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* Second tag after the inner eth: inner-inner VLAN */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
	return BNXT_TF_RC_SUCCESS;
}
951 
952 /* Function to handle the update of proto header based on field values */
953 static void
954 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
955 			     uint8_t proto, uint32_t in_flag)
956 {
957 	if (proto == IPPROTO_UDP) {
958 		if (in_flag) {
959 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
960 				       BNXT_ULP_HDR_BIT_I_UDP);
961 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
962 		} else {
963 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
964 				       BNXT_ULP_HDR_BIT_O_UDP);
965 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
966 		}
967 	} else if (proto == IPPROTO_TCP) {
968 		if (in_flag) {
969 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
970 				       BNXT_ULP_HDR_BIT_I_TCP);
971 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
972 		} else {
973 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
974 				       BNXT_ULP_HDR_BIT_O_TCP);
975 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
976 		}
977 	} else if (proto == IPPROTO_GRE) {
978 		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
979 	} else if (proto == IPPROTO_ICMP) {
980 		if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
981 			ULP_BITMAP_SET(param->hdr_bitmap.bits,
982 				       BNXT_ULP_HDR_BIT_I_ICMP);
983 		else
984 			ULP_BITMAP_SET(param->hdr_bitmap.bits,
985 				       BNXT_ULP_HDR_BIT_O_ICMP);
986 	}
987 	if (proto) {
988 		if (in_flag) {
989 			ULP_COMP_FLD_IDX_WR(param,
990 					    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
991 					    1);
992 			ULP_COMP_FLD_IDX_WR(param,
993 					    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
994 					    proto);
995 		} else {
996 			ULP_COMP_FLD_IDX_WR(param,
997 					    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
998 					    1);
999 			ULP_COMP_FLD_IDX_WR(param,
1000 					    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1001 					    proto);
1002 		}
1003 	}
1004 }
1005 
/*
 * Function to handle the parsing of RTE Flow item IPV4 Header.
 *
 * Copies every IPv4 header field spec/mask pair into params->hdr_field
 * in template field order (the order of the ulp_rte_prsr_fld_mask calls
 * below is contractually tied to the matcher templates — do not reorder),
 * sets the inner/outer L3 header bitmaps and computed fields, and
 * propagates the next protocol id to the L4 bitmap update.
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L3 header
 * is seen or the header-field array would overflow.
 */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header; only two levels supported */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Reserve idx space for all IPv4 fields up front. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);

	/*
	 * The tos field is ignored since OVS is setting it as wild card
	 * match and it is not supported. This is a work around and
	 * shall be addressed in the future.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.type_of_service),
			      ulp_deference_struct(ipv4_mask,
						   hdr.type_of_service),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.total_length),
			      ulp_deference_struct(ipv4_mask, hdr.total_length),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
			      ULP_PRSR_ACT_DEFAULT);

	/* Fragment offset mask is ignored, same workaround as the tos. */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.fragment_offset),
			      ulp_deference_struct(ipv4_mask,
						   hdr.fragment_offset),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for matching templates */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.next_proto_id),
			      ulp_deference_struct(ipv4_mask,
						   hdr.next_proto_id),
			      ULP_PRSR_ACT_MATCH_IGNORE);
	if (ipv4_spec)
		proto = ipv4_spec->hdr.next_proto_id;

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Remember the dest IP field index for tunnel offload below. */
	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
	if (ipv4_mask)
		proto &= ipv4_mask->hdr.next_proto_id;

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1139 
/*
 * Function to handle the parsing of RTE Flow item IPV6 Header.
 *
 * Splits vtc_flow into version / traffic class / flow label, copies the
 * IPv6 header fields into params->hdr_field in template field order (the
 * call order below is tied to the matcher templates — do not reorder),
 * sets the inner/outer L3 bitmaps, and propagates the next-header proto.
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L3 header
 * is seen or the header-field array would overflow.
 */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6	*ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6	*ipv6_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint32_t ver_spec = 0, ver_mask = 0;
	uint32_t tc_spec = 0, tc_mask = 0;
	uint32_t lab_spec = 0, lab_mask = 0;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header; only two levels supported */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Reserve idx space for all IPv6 fields up front. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
		tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
		lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
		proto = ipv6_spec->hdr.proto;
	}

	if (ipv6_mask) {
		ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);

		/* Some of the PMD applications may set the protocol field
		 * in the IPv6 spec but don't set the mask. So, consider
		 * the mask in proto value calculation.
		 */
		proto &= ipv6_mask->hdr.proto;
	}

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
	ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
			      ULP_PRSR_ACT_DEFAULT);
	/*
	 * The TC and flow label field are ignored since OVS is
	 * setting it for match and it is not supported.
	 * This is a work around and
	 * shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);
	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for template matching */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.proto),
			      ulp_deference_struct(ipv6_mask, hdr.proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Remember the dest IP field index for tunnel offload below. */
	dip_idx =  idx;
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
1260 
/*
 * Function to handle the update of proto header based on field values.
 *
 * Records the L4 (UDP/TCP) ports, port masks and the derived computed
 * fields for the inner or outer header selected by hdr_bit, and back-fills
 * the corresponding L3 protocol id.  Port values arrive in network byte
 * order (as supplied by rte_flow) and are stored CPU-endian.
 */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
			     uint16_t src_port, uint16_t src_mask,
			     uint16_t dst_port, uint16_t dst_mask,
			     enum bnxt_ulp_hdr_bit hdr_bit)
{
	switch (hdr_bit) {
	case BNXT_ULP_HDR_BIT_I_UDP:
	case BNXT_ULP_HDR_BIT_I_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		/* FB flags: non-zero only when the masked port is set. */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	case BNXT_ULP_HDR_BIT_O_UDP:
	case BNXT_ULP_HDR_BIT_O_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	default:
		break;
	}

	/* Outer UDP to port 4789 implies a VXLAN tunnel.
	 * NOTE(review): dst_mask is not consulted here, so a wildcarded
	 * dst_port that happens to equal 4789 also sets the tunnel flag —
	 * confirm this is intended.
	 */
	if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
	    tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
		ULP_BITMAP_SET(params->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	}
}
1324 
/*
 * Function to handle the parsing of RTE Flow item UDP Header.
 *
 * Copies the UDP header field spec/mask pairs into params->hdr_field in
 * template field order and delegates the bitmap/computed-field updates
 * (including VXLAN detection on the outer dst port) to
 * ulp_rte_l4_proto_type_update().
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L4 header
 * is seen or the header-field array would overflow.
 */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;

	/* validate there is no 3rd L4 header; only two levels supported */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Ports stay in network byte order here; converted later. */
	if (udp_spec) {
		sport = udp_spec->hdr.src_port;
		dport = udp_spec->hdr.dst_port;
	}
	if (udp_mask) {
		sport_mask = udp_mask->hdr.src_port;
		dport_mask = udp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_UDP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.src_port),
			      ulp_deference_struct(udp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dst_port),
			      ulp_deference_struct(udp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_len),
			      ulp_deference_struct(udp_mask, hdr.dgram_len),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1399 
/*
 * Function to handle the parsing of RTE Flow item TCP Header.
 *
 * Copies the TCP header field spec/mask pairs into params->hdr_field in
 * template field order and delegates the bitmap/computed-field updates to
 * ulp_rte_l4_proto_type_update().
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L4 header
 * is seen or the header-field array would overflow.
 */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t size;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;

	/* validate there is no 3rd L4 header; only two levels supported */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Ports stay in network byte order here; converted later. */
	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}
	if (tcp_mask) {
		sport_mask = tcp_mask->hdr.src_port;
		dport_mask = tcp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_TCP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.src_port),
			      ulp_deference_struct(tcp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.dst_port),
			      ulp_deference_struct(tcp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.data_off),
			      ulp_deference_struct(tcp_mask, hdr.data_off),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.rx_win),
			      ulp_deference_struct(tcp_mask, hdr.rx_win),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.cksum),
			      ulp_deference_struct(tcp_mask, hdr.cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1504 
1505 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1506 int32_t
1507 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1508 			  struct ulp_rte_parser_params *params)
1509 {
1510 	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1511 	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1512 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1513 	uint32_t idx = 0;
1514 	uint32_t size;
1515 
1516 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1517 					   BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1518 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1519 		return BNXT_TF_RC_ERROR;
1520 	}
1521 
1522 	/*
1523 	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1524 	 * header fields
1525 	 */
1526 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1527 	ulp_rte_prsr_fld_mask(params, &idx, size,
1528 			      ulp_deference_struct(vxlan_spec, flags),
1529 			      ulp_deference_struct(vxlan_mask, flags),
1530 			      ULP_PRSR_ACT_DEFAULT);
1531 
1532 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1533 	ulp_rte_prsr_fld_mask(params, &idx, size,
1534 			      ulp_deference_struct(vxlan_spec, rsvd0),
1535 			      ulp_deference_struct(vxlan_mask, rsvd0),
1536 			      ULP_PRSR_ACT_DEFAULT);
1537 
1538 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1539 	ulp_rte_prsr_fld_mask(params, &idx, size,
1540 			      ulp_deference_struct(vxlan_spec, vni),
1541 			      ulp_deference_struct(vxlan_mask, vni),
1542 			      ULP_PRSR_ACT_DEFAULT);
1543 
1544 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1545 	ulp_rte_prsr_fld_mask(params, &idx, size,
1546 			      ulp_deference_struct(vxlan_spec, rsvd1),
1547 			      ulp_deference_struct(vxlan_mask, rsvd1),
1548 			      ULP_PRSR_ACT_DEFAULT);
1549 
1550 	/* Update the hdr_bitmap with vxlan */
1551 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1552 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1553 	return BNXT_TF_RC_SUCCESS;
1554 }
1555 
1556 /* Function to handle the parsing of RTE Flow item GRE Header. */
1557 int32_t
1558 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1559 			struct ulp_rte_parser_params *params)
1560 {
1561 	const struct rte_flow_item_gre *gre_spec = item->spec;
1562 	const struct rte_flow_item_gre *gre_mask = item->mask;
1563 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1564 	uint32_t idx = 0;
1565 	uint32_t size;
1566 
1567 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1568 					   BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1569 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1570 		return BNXT_TF_RC_ERROR;
1571 	}
1572 
1573 	size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1574 	ulp_rte_prsr_fld_mask(params, &idx, size,
1575 			      ulp_deference_struct(gre_spec, c_rsvd0_ver),
1576 			      ulp_deference_struct(gre_mask, c_rsvd0_ver),
1577 			      ULP_PRSR_ACT_DEFAULT);
1578 
1579 	size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1580 	ulp_rte_prsr_fld_mask(params, &idx, size,
1581 			      ulp_deference_struct(gre_spec, protocol),
1582 			      ulp_deference_struct(gre_mask, protocol),
1583 			      ULP_PRSR_ACT_DEFAULT);
1584 
1585 	/* Update the hdr_bitmap with GRE */
1586 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1587 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1588 	return BNXT_TF_RC_SUCCESS;
1589 }
1590 
/* Function to handle the parsing of RTE Flow item ANY.
 * ANY matches everything; nothing is programmed, so simply accept it.
 */
int32_t
ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
1598 
/*
 * Function to handle the parsing of RTE Flow item ICMP Header.
 *
 * Copies the ICMP type/code/checksum/ident/seq spec/mask pairs into the
 * header-field array in template field order, then sets the inner or
 * outer ICMP header bit depending on whether the flow was already
 * recognized as an L3 tunnel.
 */
int32_t
ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp *icmp_spec = item->spec;
	const struct rte_flow_item_icmp *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	/* Make sure the field array can hold all ICMP fields. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_type),
			      ulp_deference_struct(icmp_mask, hdr.icmp_type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_code),
			      ulp_deference_struct(icmp_mask, hdr.icmp_code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
			      ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_ident),
			      ulp_deference_struct(icmp_mask, hdr.icmp_ident),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
			      ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with inner/outer ICMP per tunnel state */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
1653 
/*
 * Function to handle the parsing of RTE Flow item ICMP6 Header.
 *
 * Copies the ICMPv6 type/code/checksum spec/mask pairs into the
 * header-field array, rejects ICMPv6 over an outer IPv4 header, and
 * sets the inner or outer ICMP header bit based on the tunnel state.
 */
int32_t
ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	/* Make sure the field array can hold all ICMP fields. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, type),
			      ulp_deference_struct(icmp_mask, type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, code),
			      ulp_deference_struct(icmp_mask, code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, checksum),
			      ulp_deference_struct(icmp_mask, checksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* ICMPv6 cannot follow an outer IPv4 header. */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
		BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the hdr_bitmap with inner/outer ICMP per tunnel state */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
1701 
/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	/* VOID items carry no match data, so there is nothing to parse. */
	return BNXT_TF_RC_SUCCESS;
}
1709 
/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	/* VOID actions carry no configuration, so there is nothing to do. */
	return BNXT_TF_RC_SUCCESS;
}
1717 
1718 /* Function to handle the parsing of RTE Flow action Mark Header. */
1719 int32_t
1720 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1721 			 struct ulp_rte_parser_params *param)
1722 {
1723 	const struct rte_flow_action_mark *mark;
1724 	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1725 	uint32_t mark_id;
1726 
1727 	mark = action_item->conf;
1728 	if (mark) {
1729 		mark_id = tfp_cpu_to_be_32(mark->id);
1730 		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1731 		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1732 
1733 		/* Update the hdr_bitmap with vxlan */
1734 		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1735 		return BNXT_TF_RC_SUCCESS;
1736 	}
1737 	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1738 	return BNXT_TF_RC_ERROR;
1739 }
1740 
1741 /* Function to handle the parsing of RTE Flow action RSS Header. */
1742 int32_t
1743 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1744 			struct ulp_rte_parser_params *param)
1745 {
1746 	const struct rte_flow_action_rss *rss;
1747 	struct ulp_rte_act_prop *ap = &param->act_prop;
1748 
1749 	if (action_item == NULL || action_item->conf == NULL) {
1750 		BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1751 		return BNXT_TF_RC_ERROR;
1752 	}
1753 
1754 	rss = action_item->conf;
1755 	/* Copy the rss into the specific action properties */
1756 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1757 	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1758 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1759 	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1760 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1761 	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
1762 
1763 	if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1764 		BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1765 		return BNXT_TF_RC_ERROR;
1766 	}
1767 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1768 	       rss->key_len);
1769 
1770 	/* set the RSS action header bit */
1771 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1772 
1773 	return BNXT_TF_RC_SUCCESS;
1774 }
1775 
1776 /* Function to handle the parsing of RTE Flow item eth Header. */
1777 static void
1778 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1779 			    const struct rte_flow_item_eth *eth_spec)
1780 {
1781 	struct ulp_rte_hdr_field *field;
1782 	uint32_t size;
1783 
1784 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1785 	size = sizeof(eth_spec->dst.addr_bytes);
1786 	field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
1787 
1788 	size = sizeof(eth_spec->src.addr_bytes);
1789 	field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
1790 
1791 	size = sizeof(eth_spec->type);
1792 	field = ulp_rte_parser_fld_copy(field, &eth_spec->type, size);
1793 
1794 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
1795 }
1796 
1797 /* Function to handle the parsing of RTE Flow item vlan Header. */
1798 static void
1799 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1800 			     const struct rte_flow_item_vlan *vlan_spec,
1801 			     uint32_t inner)
1802 {
1803 	struct ulp_rte_hdr_field *field;
1804 	uint32_t size;
1805 
1806 	if (!inner) {
1807 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1808 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1809 			       BNXT_ULP_HDR_BIT_OO_VLAN);
1810 	} else {
1811 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1812 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1813 			       BNXT_ULP_HDR_BIT_OI_VLAN);
1814 	}
1815 
1816 	size = sizeof(vlan_spec->tci);
1817 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
1818 
1819 	size = sizeof(vlan_spec->inner_type);
1820 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
1821 }
1822 
1823 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
1824 static void
1825 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1826 			     const struct rte_flow_item_ipv4 *ip)
1827 {
1828 	struct ulp_rte_hdr_field *field;
1829 	uint32_t size;
1830 	uint8_t val8;
1831 
1832 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
1833 	size = sizeof(ip->hdr.version_ihl);
1834 	if (!ip->hdr.version_ihl)
1835 		val8 = RTE_IPV4_VHL_DEF;
1836 	else
1837 		val8 = ip->hdr.version_ihl;
1838 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1839 
1840 	size = sizeof(ip->hdr.type_of_service);
1841 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1842 
1843 	size = sizeof(ip->hdr.packet_id);
1844 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1845 
1846 	size = sizeof(ip->hdr.fragment_offset);
1847 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
1848 
1849 	size = sizeof(ip->hdr.time_to_live);
1850 	if (!ip->hdr.time_to_live)
1851 		val8 = BNXT_ULP_DEFAULT_TTL;
1852 	else
1853 		val8 = ip->hdr.time_to_live;
1854 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1855 
1856 	size = sizeof(ip->hdr.next_proto_id);
1857 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1858 
1859 	size = sizeof(ip->hdr.src_addr);
1860 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1861 
1862 	size = sizeof(ip->hdr.dst_addr);
1863 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1864 
1865 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1866 }
1867 
1868 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
1869 static void
1870 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1871 			     const struct rte_flow_item_ipv6 *ip)
1872 {
1873 	struct ulp_rte_hdr_field *field;
1874 	uint32_t size;
1875 	uint32_t val32;
1876 	uint8_t val8;
1877 
1878 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
1879 	size = sizeof(ip->hdr.vtc_flow);
1880 	if (!ip->hdr.vtc_flow)
1881 		val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
1882 	else
1883 		val32 = ip->hdr.vtc_flow;
1884 	field = ulp_rte_parser_fld_copy(field, &val32, size);
1885 
1886 	size = sizeof(ip->hdr.proto);
1887 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
1888 
1889 	size = sizeof(ip->hdr.hop_limits);
1890 	if (!ip->hdr.hop_limits)
1891 		val8 = BNXT_ULP_DEFAULT_TTL;
1892 	else
1893 		val8 = ip->hdr.hop_limits;
1894 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1895 
1896 	size = sizeof(ip->hdr.src_addr);
1897 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1898 
1899 	size = sizeof(ip->hdr.dst_addr);
1900 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1901 
1902 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
1903 }
1904 
1905 /* Function to handle the parsing of RTE Flow item UDP Header. */
1906 static void
1907 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
1908 			    const struct rte_flow_item_udp *udp_spec)
1909 {
1910 	struct ulp_rte_hdr_field *field;
1911 	uint32_t size;
1912 	uint8_t type = IPPROTO_UDP;
1913 
1914 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
1915 	size = sizeof(udp_spec->hdr.src_port);
1916 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
1917 
1918 	size = sizeof(udp_spec->hdr.dst_port);
1919 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
1920 
1921 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
1922 
1923 	/* Update thhe ip header protocol */
1924 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
1925 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1926 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
1927 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1928 }
1929 
1930 /* Function to handle the parsing of RTE Flow item vxlan Header. */
1931 static void
1932 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
1933 			      struct rte_flow_item_vxlan *vxlan_spec)
1934 {
1935 	struct ulp_rte_hdr_field *field;
1936 	uint32_t size;
1937 
1938 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
1939 	size = sizeof(vxlan_spec->flags);
1940 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);
1941 
1942 	size = sizeof(vxlan_spec->rsvd0);
1943 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);
1944 
1945 	size = sizeof(vxlan_spec->vni);
1946 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);
1947 
1948 	size = sizeof(vxlan_spec->rsvd1);
1949 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);
1950 
1951 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1952 }
1953 
1954 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1955 int32_t
1956 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1957 				struct ulp_rte_parser_params *params)
1958 {
1959 	const struct rte_flow_action_vxlan_encap *vxlan_encap;
1960 	const struct rte_flow_item *item;
1961 	const struct rte_flow_item_ipv4 *ipv4_spec;
1962 	const struct rte_flow_item_ipv6 *ipv6_spec;
1963 	struct rte_flow_item_vxlan vxlan_spec;
1964 	uint32_t vlan_num = 0, vlan_size = 0;
1965 	uint32_t ip_size = 0, ip_type = 0;
1966 	uint32_t vxlan_size = 0;
1967 	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1968 	struct ulp_rte_act_prop *ap = &params->act_prop;
1969 
1970 	vxlan_encap = action_item->conf;
1971 	if (!vxlan_encap) {
1972 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1973 		return BNXT_TF_RC_ERROR;
1974 	}
1975 
1976 	item = vxlan_encap->definition;
1977 	if (!item) {
1978 		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1979 		return BNXT_TF_RC_ERROR;
1980 	}
1981 
1982 	if (!ulp_rte_item_skip_void(&item, 0))
1983 		return BNXT_TF_RC_ERROR;
1984 
1985 	/* must have ethernet header */
1986 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1987 		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1988 		return BNXT_TF_RC_ERROR;
1989 	}
1990 
1991 	/* Parse the ethernet header */
1992 	if (item->spec)
1993 		ulp_rte_enc_eth_hdr_handler(params, item->spec);
1994 
1995 	/* Goto the next item */
1996 	if (!ulp_rte_item_skip_void(&item, 1))
1997 		return BNXT_TF_RC_ERROR;
1998 
1999 	/* May have vlan header */
2000 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2001 		vlan_num++;
2002 		if (item->spec)
2003 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
2004 
2005 		if (!ulp_rte_item_skip_void(&item, 1))
2006 			return BNXT_TF_RC_ERROR;
2007 	}
2008 
2009 	/* may have two vlan headers */
2010 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2011 		vlan_num++;
2012 		if (item->spec)
2013 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
2014 
2015 		if (!ulp_rte_item_skip_void(&item, 1))
2016 			return BNXT_TF_RC_ERROR;
2017 	}
2018 
2019 	/* Update the vlan count and size of more than one */
2020 	if (vlan_num) {
2021 		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2022 		vlan_num = tfp_cpu_to_be_32(vlan_num);
2023 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2024 		       &vlan_num,
2025 		       sizeof(uint32_t));
2026 		vlan_size = tfp_cpu_to_be_32(vlan_size);
2027 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2028 		       &vlan_size,
2029 		       sizeof(uint32_t));
2030 	}
2031 
2032 	/* L3 must be IPv4, IPv6 */
2033 	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2034 		ipv4_spec = item->spec;
2035 		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2036 
2037 		/* Update the ip size details */
2038 		ip_size = tfp_cpu_to_be_32(ip_size);
2039 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2040 		       &ip_size, sizeof(uint32_t));
2041 
2042 		/* update the ip type */
2043 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2044 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2045 		       &ip_type, sizeof(uint32_t));
2046 
2047 		/* update the computed field to notify it is ipv4 header */
2048 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2049 				    1);
2050 		if (ipv4_spec)
2051 			ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2052 
2053 		if (!ulp_rte_item_skip_void(&item, 1))
2054 			return BNXT_TF_RC_ERROR;
2055 	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2056 		ipv6_spec = item->spec;
2057 		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2058 
2059 		/* Update the ip size details */
2060 		ip_size = tfp_cpu_to_be_32(ip_size);
2061 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2062 		       &ip_size, sizeof(uint32_t));
2063 
2064 		 /* update the ip type */
2065 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2066 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2067 		       &ip_type, sizeof(uint32_t));
2068 
2069 		/* update the computed field to notify it is ipv6 header */
2070 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2071 				    1);
2072 		if (ipv6_spec)
2073 			ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2074 
2075 		if (!ulp_rte_item_skip_void(&item, 1))
2076 			return BNXT_TF_RC_ERROR;
2077 	} else {
2078 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2079 		return BNXT_TF_RC_ERROR;
2080 	}
2081 
2082 	/* L4 is UDP */
2083 	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2084 		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
2085 		return BNXT_TF_RC_ERROR;
2086 	}
2087 	if (item->spec)
2088 		ulp_rte_enc_udp_hdr_handler(params, item->spec);
2089 
2090 	if (!ulp_rte_item_skip_void(&item, 1))
2091 		return BNXT_TF_RC_ERROR;
2092 
2093 	/* Finally VXLAN */
2094 	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2095 		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
2096 		return BNXT_TF_RC_ERROR;
2097 	}
2098 	vxlan_size = sizeof(struct rte_flow_item_vxlan);
2099 	/* copy the vxlan details */
2100 	memcpy(&vxlan_spec, item->spec, vxlan_size);
2101 	vxlan_spec.flags = 0x08;
2102 	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2103 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2104 	       &vxlan_size, sizeof(uint32_t));
2105 
2106 	ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2107 
2108 	/* update the hdr_bitmap with vxlan */
2109 	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2110 	return BNXT_TF_RC_SUCCESS;
2111 }
2112 
/* Function to handle the parsing of RTE Flow action vxlan_decap Header */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
	/* Update computational field with tunnel decap info */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
	return BNXT_TF_RC_SUCCESS;
}
2126 
/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Drop takes no configuration; just flag the action bitmap. */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}
2136 
2137 /* Function to handle the parsing of RTE Flow action count. */
2138 int32_t
2139 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2140 			  struct ulp_rte_parser_params *params)
2141 {
2142 	const struct rte_flow_action_count *act_count;
2143 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
2144 
2145 	act_count = action_item->conf;
2146 	if (act_count) {
2147 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2148 		       &act_count->id,
2149 		       BNXT_ULP_ACT_PROP_SZ_COUNT);
2150 	}
2151 
2152 	/* Update the hdr_bitmap with count */
2153 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2154 	return BNXT_TF_RC_SUCCESS;
2155 }
2156 
/* Function to handle the parsing of action ports.
 * Resolves the destination for a port action: for egress flows it fills
 * the vport action property, otherwise it fills the vnic property.
 * act_dir may force the direction; BNXT_ULP_DIR_INVALID means "derive it
 * from the flow's computed direction field".
 */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex,
			    enum bnxt_ulp_direction_type act_dir)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint32_t pid;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction */
	/* If action implicitly specifies direction, use the specification. */
	dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
		ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
		act_dir;
	port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	if (dir == BNXT_ULP_DIR_EGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		/* For egress direction, fill vport */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		/* Store the vport id big-endian in the action properties. */
		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	} else {
		/* For ingress direction, fill vnic */
		/*
		 * Action		Destination
		 * ------------------------------------
		 * PORT_REPRESENTOR	Driver Function
		 * ------------------------------------
		 * REPRESENTED_PORT	VF
		 * ------------------------------------
		 * PORT_ID		VF
		 */
		if (act_dir != BNXT_ULP_DIR_INGRESS &&
		    port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		/* Store the vnic id big-endian in the action properties. */
		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}
2217 
2218 /* Function to handle the parsing of RTE Flow action PF. */
2219 int32_t
2220 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2221 		       struct ulp_rte_parser_params *params)
2222 {
2223 	uint32_t port_id;
2224 	uint32_t ifindex;
2225 	enum bnxt_ulp_intf_type intf_type;
2226 
2227 	/* Get the port id of the current device */
2228 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2229 
2230 	/* Get the port db ifindex */
2231 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2232 					      &ifindex)) {
2233 		BNXT_TF_DBG(ERR, "Invalid port id\n");
2234 		return BNXT_TF_RC_ERROR;
2235 	}
2236 
2237 	/* Check the port is PF port */
2238 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2239 	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2240 		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2241 		return BNXT_TF_RC_ERROR;
2242 	}
2243 	/* Update the action properties */
2244 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2245 	return ulp_rte_parser_act_port_set(params, ifindex,
2246 					   BNXT_ULP_DIR_INVALID);
2247 }
2248 
/* Function to handle the parsing of RTE Flow action VF.
 * Validates the VF action config, converts the logical VF id into a port
 * db ifindex and delegates the destination setup to
 * ulp_rte_parser_act_port_set().
 */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vf *vf_action;
	enum bnxt_ulp_intf_type intf_type;
	uint32_t ifindex;
	struct bnxt *bp;

	vf_action = action_item->conf;
	if (!vf_action) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* The "original" attribute is not supported by this driver. */
	if (vf_action->original) {
		BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	bp = bnxt_pmd_get_bp(params->port_id);
	if (bp == NULL) {
		BNXT_TF_DBG(ERR, "Invalid bp\n");
		return BNXT_TF_RC_ERROR;
	}

	/* vf_action->id is a logical number which in this case is an
	 * offset from the first VF. So, to get the absolute VF id, the
	 * offset must be added to the absolute first vf id of that port.
	 */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 bp->first_vf_id +
						 vf_action->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "VF is not valid interface\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Check the port is VF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex,
					   BNXT_ULP_DIR_INVALID);
}
2300 
/* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT.
 * Extracts the target ethdev id from the action-specific conf struct,
 * derives the implied direction (PORT_REPRESENTOR -> ingress,
 * REPRESENTED_PORT -> egress, PORT_ID -> derived from the flow), and
 * hands off to ulp_rte_parser_act_port_set().
 */
int32_t
ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
			 struct ulp_rte_parser_params *param)
{
	uint32_t ethdev_id;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;
	enum bnxt_ulp_direction_type act_dir;

	if (!act_item->conf) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	switch (act_item->type) {
	case RTE_FLOW_ACTION_TYPE_PORT_ID: {
		const struct rte_flow_action_port_id *port_id = act_item->conf;

		/* The "original" attribute is not supported. */
		if (port_id->original) {
			BNXT_TF_DBG(ERR,
				    "ParseErr:Portid Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		ethdev_id = port_id->id;
		act_dir = BNXT_ULP_DIR_INVALID;
		break;
	}
	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_action_ethdev *ethdev = act_item->conf;

		ethdev_id = ethdev->port_id;
		act_dir = BNXT_ULP_DIR_INGRESS;
		break;
	}
	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_action_ethdev *ethdev = act_item->conf;

		ethdev_id = ethdev->port_id;
		act_dir = BNXT_ULP_DIR_EGRESS;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "Unknown port action\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the intf type */
	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
	if (!intf_type) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Set the action port */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
}
2366 
/* Function to handle the parsing of RTE Flow action phy_port.
 * Only valid for egress flows; fills the vport action property with the
 * physical port's vport id.
 */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
{
	const struct rte_flow_action_phy_port *phy_port;
	uint32_t pid;
	int32_t rc;
	uint16_t pid_s;
	enum bnxt_ulp_direction_type dir;

	phy_port = action_item->conf;
	if (!phy_port) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* The "original" attribute is not supported. */
	if (phy_port->original) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Port Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir != BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for egress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
					    &pid_s);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		/* NOTE(review): siblings return BNXT_TF_RC_* codes here;
		 * -EINVAL looks inconsistent — confirm callers treat any
		 * non-SUCCESS value the same before changing it.
		 */
		return -EINVAL;
	}

	/* Store the vport id big-endian in the action properties. */
	pid = pid_s;
	pid = rte_cpu_to_be_32(pid);
	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);
	return BNXT_TF_RC_SUCCESS;
}
2415 
/* Function to handle the parsing of RTE Flow action pop vlan. */
int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Pop vlan takes no configuration; just flag the action bitmap. */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
	return BNXT_TF_RC_SUCCESS;
}
2425 
2426 /* Function to handle the parsing of RTE Flow action push vlan. */
2427 int32_t
2428 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2429 				 struct ulp_rte_parser_params *params)
2430 {
2431 	const struct rte_flow_action_of_push_vlan *push_vlan;
2432 	uint16_t ethertype;
2433 	struct ulp_rte_act_prop *act = &params->act_prop;
2434 
2435 	push_vlan = action_item->conf;
2436 	if (push_vlan) {
2437 		ethertype = push_vlan->ethertype;
2438 		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2439 			BNXT_TF_DBG(ERR,
2440 				    "Parse Err: Ethertype not supported\n");
2441 			return BNXT_TF_RC_PARSE_ERR;
2442 		}
2443 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2444 		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2445 		/* Update the hdr_bitmap with push vlan */
2446 		ULP_BITMAP_SET(params->act_bitmap.bits,
2447 			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
2448 		return BNXT_TF_RC_SUCCESS;
2449 	}
2450 	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2451 	return BNXT_TF_RC_ERROR;
2452 }
2453 
2454 /* Function to handle the parsing of RTE Flow action set vlan id. */
2455 int32_t
2456 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2457 				    struct ulp_rte_parser_params *params)
2458 {
2459 	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2460 	uint32_t vid;
2461 	struct ulp_rte_act_prop *act = &params->act_prop;
2462 
2463 	vlan_vid = action_item->conf;
2464 	if (vlan_vid && vlan_vid->vlan_vid) {
2465 		vid = vlan_vid->vlan_vid;
2466 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2467 		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2468 		/* Update the hdr_bitmap with vlan vid */
2469 		ULP_BITMAP_SET(params->act_bitmap.bits,
2470 			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2471 		return BNXT_TF_RC_SUCCESS;
2472 	}
2473 	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2474 	return BNXT_TF_RC_ERROR;
2475 }
2476 
2477 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2478 int32_t
2479 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2480 				    struct ulp_rte_parser_params *params)
2481 {
2482 	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2483 	uint8_t pcp;
2484 	struct ulp_rte_act_prop *act = &params->act_prop;
2485 
2486 	vlan_pcp = action_item->conf;
2487 	if (vlan_pcp) {
2488 		pcp = vlan_pcp->vlan_pcp;
2489 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2490 		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2491 		/* Update the hdr_bitmap with vlan vid */
2492 		ULP_BITMAP_SET(params->act_bitmap.bits,
2493 			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2494 		return BNXT_TF_RC_SUCCESS;
2495 	}
2496 	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2497 	return BNXT_TF_RC_ERROR;
2498 }
2499 
2500 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2501 int32_t
2502 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2503 				 struct ulp_rte_parser_params *params)
2504 {
2505 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2506 	struct ulp_rte_act_prop *act = &params->act_prop;
2507 
2508 	set_ipv4 = action_item->conf;
2509 	if (set_ipv4) {
2510 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2511 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2512 		/* Update the hdr_bitmap with set ipv4 src */
2513 		ULP_BITMAP_SET(params->act_bitmap.bits,
2514 			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2515 		return BNXT_TF_RC_SUCCESS;
2516 	}
2517 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2518 	return BNXT_TF_RC_ERROR;
2519 }
2520 
2521 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2522 int32_t
2523 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2524 				 struct ulp_rte_parser_params *params)
2525 {
2526 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2527 	struct ulp_rte_act_prop *act = &params->act_prop;
2528 
2529 	set_ipv4 = action_item->conf;
2530 	if (set_ipv4) {
2531 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2532 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2533 		/* Update the hdr_bitmap with set ipv4 dst */
2534 		ULP_BITMAP_SET(params->act_bitmap.bits,
2535 			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2536 		return BNXT_TF_RC_SUCCESS;
2537 	}
2538 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2539 	return BNXT_TF_RC_ERROR;
2540 }
2541 
2542 /* Function to handle the parsing of RTE Flow action set tp src.*/
2543 int32_t
2544 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2545 			       struct ulp_rte_parser_params *params)
2546 {
2547 	const struct rte_flow_action_set_tp *set_tp;
2548 	struct ulp_rte_act_prop *act = &params->act_prop;
2549 
2550 	set_tp = action_item->conf;
2551 	if (set_tp) {
2552 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2553 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2554 		/* Update the hdr_bitmap with set tp src */
2555 		ULP_BITMAP_SET(params->act_bitmap.bits,
2556 			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
2557 		return BNXT_TF_RC_SUCCESS;
2558 	}
2559 
2560 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2561 	return BNXT_TF_RC_ERROR;
2562 }
2563 
2564 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2565 int32_t
2566 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2567 			       struct ulp_rte_parser_params *params)
2568 {
2569 	const struct rte_flow_action_set_tp *set_tp;
2570 	struct ulp_rte_act_prop *act = &params->act_prop;
2571 
2572 	set_tp = action_item->conf;
2573 	if (set_tp) {
2574 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2575 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2576 		/* Update the hdr_bitmap with set tp dst */
2577 		ULP_BITMAP_SET(params->act_bitmap.bits,
2578 			       BNXT_ULP_ACT_BIT_SET_TP_DST);
2579 		return BNXT_TF_RC_SUCCESS;
2580 	}
2581 
2582 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2583 	return BNXT_TF_RC_ERROR;
2584 }
2585 
2586 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2587 int32_t
2588 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2589 			    struct ulp_rte_parser_params *params)
2590 {
2591 	/* Update the act_bitmap with dec ttl */
2592 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2593 	return BNXT_TF_RC_SUCCESS;
2594 }
2595 
/* Function to handle the parsing of RTE Flow action JUMP */
int32_t
ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with jump (fixed stale "dec ttl" comment) */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
	return BNXT_TF_RC_SUCCESS;
}
2605 
2606 int32_t
2607 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2608 			   struct ulp_rte_parser_params *params)
2609 {
2610 	const struct rte_flow_action_sample *sample;
2611 	int ret;
2612 
2613 	sample = action_item->conf;
2614 
2615 	/* if SAMPLE bit is set it means this sample action is nested within the
2616 	 * actions of another sample action; this is not allowed
2617 	 */
2618 	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2619 			     BNXT_ULP_ACT_BIT_SAMPLE))
2620 		return BNXT_TF_RC_ERROR;
2621 
2622 	/* a sample action is only allowed as a shared action */
2623 	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2624 			      BNXT_ULP_ACT_BIT_SHARED))
2625 		return BNXT_TF_RC_ERROR;
2626 
2627 	/* only a ratio of 1 i.e. 100% is supported */
2628 	if (sample->ratio != 1)
2629 		return BNXT_TF_RC_ERROR;
2630 
2631 	if (!sample->actions)
2632 		return BNXT_TF_RC_ERROR;
2633 
2634 	/* parse the nested actions for a sample action */
2635 	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2636 	if (ret == BNXT_TF_RC_SUCCESS)
2637 		/* Update the act_bitmap with sample */
2638 		ULP_BITMAP_SET(params->act_bitmap.bits,
2639 			       BNXT_ULP_ACT_BIT_SAMPLE);
2640 
2641 	return ret;
2642 }
2643 
2644 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
2645 int32_t
2646 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2647 				   struct ulp_rte_parser_params *params)
2648 {
2649 	/* Set the F1 flow header bit */
2650 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2651 	return ulp_rte_vxlan_decap_act_handler(action_item, params);
2652 }
2653 
2654 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
2655 int32_t
2656 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
2657 				       struct ulp_rte_parser_params *params)
2658 {
2659 	RTE_SET_USED(item);
2660 	/* Set the F2 flow header bit */
2661 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
2662 	return ulp_rte_vxlan_decap_act_handler(NULL, params);
2663 }
2664