xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c (revision 7b08003b5df2d64564f331da8cae764bcd7c8925)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5 
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
12 #include "tfp.h"
13 #include "ulp_port_db.h"
14 
15 /* Local defines for the parsing functions */
16 #define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
17 #define ULP_VLAN_PRIORITY_MASK		0x700
18 #define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits*/
19 #define ULP_UDP_PORT_VXLAN		4789
20 
21 /* Utility function to skip the void items. */
22 static inline int32_t
23 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
24 {
25 	if (!*item)
26 		return 0;
27 	if (increment)
28 		(*item)++;
29 	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 		(*item)++;
31 	if (*item)
32 		return 1;
33 	return 0;
34 }
35 
36 /* Utility function to update the field_bitmap */
37 static void
38 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
39 				   uint32_t idx)
40 {
41 	struct ulp_rte_hdr_field *field;
42 
43 	field = &params->hdr_field[idx];
44 	if (ulp_bitmap_notzero(field->mask, field->size)) {
45 		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
46 		/* Not exact match */
47 		if (!ulp_bitmap_is_ones(field->mask, field->size))
48 			ULP_BITMAP_SET(params->fld_bitmap.bits,
49 				       BNXT_ULP_MATCH_TYPE_BITMASK_WM);
50 	} else {
51 		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
52 	}
53 }
54 
55 /* Utility function to copy field spec items */
56 static struct ulp_rte_hdr_field *
57 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
58 			const void *buffer,
59 			uint32_t size)
60 {
61 	field->size = size;
62 	memcpy(field->spec, buffer, field->size);
63 	field++;
64 	return field;
65 }
66 
67 /* Utility function to copy field masks items */
68 static void
69 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
70 		       uint32_t *idx,
71 		       const void *buffer,
72 		       uint32_t size)
73 {
74 	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
75 
76 	memcpy(field->mask, buffer, size);
77 	ulp_rte_parser_field_bitmap_update(params, *idx);
78 	*idx = *idx + 1;
79 }
80 
81 /*
82  * Function to handle the parsing of RTE Flows and placing
83  * the RTE flow items into the ulp structures.
84  */
85 int32_t
86 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
87 			      struct ulp_rte_parser_params *params)
88 {
89 	const struct rte_flow_item *item = pattern;
90 	struct bnxt_ulp_rte_hdr_info *hdr_info;
91 
92 	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
93 
94 	/* Set the computed flags for no vlan tags before parsing */
95 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
96 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
97 
98 	/* Parse all the items in the pattern */
99 	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
100 		/* get the header information from the flow_hdr_info table */
101 		hdr_info = &ulp_hdr_info[item->type];
102 		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
103 			BNXT_TF_DBG(ERR,
104 				    "Truflow parser does not support type %d\n",
105 				    item->type);
106 			return BNXT_TF_RC_PARSE_ERR;
107 		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
108 			/* call the registered callback handler */
109 			if (hdr_info->proto_hdr_func) {
110 				if (hdr_info->proto_hdr_func(item, params) !=
111 				    BNXT_TF_RC_SUCCESS) {
112 					return BNXT_TF_RC_ERROR;
113 				}
114 			}
115 		}
116 		item++;
117 	}
118 	/* update the implied SVIF */
119 	return ulp_rte_parser_implicit_match_port_process(params);
120 }
121 
122 /*
123  * Function to handle the parsing of RTE Flows and placing
124  * the RTE flow actions into the ulp structures.
125  */
126 int32_t
127 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
128 			      struct ulp_rte_parser_params *params)
129 {
130 	const struct rte_flow_action *action_item = actions;
131 	struct bnxt_ulp_rte_act_info *hdr_info;
132 
133 	/* Parse all the items in the pattern */
134 	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
135 		/* get the header information from the flow_hdr_info table */
136 		hdr_info = &ulp_act_info[action_item->type];
137 		if (hdr_info->act_type ==
138 		    BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
139 			BNXT_TF_DBG(ERR,
140 				    "Truflow parser does not support act %u\n",
141 				    action_item->type);
142 			return BNXT_TF_RC_ERROR;
143 		} else if (hdr_info->act_type ==
144 		    BNXT_ULP_ACT_TYPE_SUPPORTED) {
145 			/* call the registered callback handler */
146 			if (hdr_info->proto_act_func) {
147 				if (hdr_info->proto_act_func(action_item,
148 							     params) !=
149 				    BNXT_TF_RC_SUCCESS) {
150 					return BNXT_TF_RC_ERROR;
151 				}
152 			}
153 		}
154 		action_item++;
155 	}
156 	/* update the implied port details */
157 	ulp_rte_parser_implicit_act_port_process(params);
158 	return BNXT_TF_RC_SUCCESS;
159 }
160 
161 /*
162  * Function to handle the post processing of the parsing details
163  */
164 int32_t
165 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
166 {
167 	enum bnxt_ulp_direction_type dir;
168 	enum bnxt_ulp_intf_type match_port_type, act_port_type;
169 	uint32_t act_port_set;
170 
171 	/* Get the computed details */
172 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
173 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
174 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
175 	act_port_type = ULP_COMP_FLD_IDX_RD(params,
176 					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
177 	act_port_set = ULP_COMP_FLD_IDX_RD(params,
178 					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
179 
180 	/* set the flow direction in the proto and action header */
181 	if (dir == BNXT_ULP_DIR_EGRESS) {
182 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
183 			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
184 		ULP_BITMAP_SET(params->act_bitmap.bits,
185 			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
186 	}
187 
188 	/* calculate the VF to VF flag */
189 	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
190 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
191 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
192 
193 	/* Update the decrement ttl computational fields */
194 	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
195 			     BNXT_ULP_ACTION_BIT_DEC_TTL)) {
196 		/*
197 		 * Check that vxlan proto is included and vxlan decap
198 		 * action is not set then decrement tunnel ttl.
199 		 * Similarly add GRE and NVGRE in future.
200 		 */
201 		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
202 				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
203 		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
204 				      BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
205 			ULP_COMP_FLD_IDX_WR(params,
206 					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
207 		} else {
208 			ULP_COMP_FLD_IDX_WR(params,
209 					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
210 		}
211 	}
212 
213 	/* Merge the hdr_fp_bit into the proto header bit */
214 	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
215 
216 	/* TBD: Handle the flow rejection scenarios */
217 	return 0;
218 }
219 
220 /*
221  * Function to compute the flow direction based on the match port details
222  */
223 static void
224 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
225 {
226 	enum bnxt_ulp_intf_type match_port_type;
227 
228 	/* Get the match port type */
229 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
230 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
231 
232 	/* If ingress flow and matchport is vf rep then dir is egress*/
233 	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
234 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
235 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
236 				    BNXT_ULP_DIR_EGRESS);
237 	} else {
238 		/* Assign the input direction */
239 		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
240 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
241 					    BNXT_ULP_DIR_INGRESS);
242 		else
243 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
244 					    BNXT_ULP_DIR_EGRESS);
245 	}
246 }
247 
248 /* Function to handle the parsing of RTE Flow item PF Header. */
249 static int32_t
250 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
251 			uint32_t ifindex,
252 			uint16_t mask)
253 {
254 	uint16_t svif;
255 	enum bnxt_ulp_direction_type dir;
256 	struct ulp_rte_hdr_field *hdr_field;
257 	enum bnxt_ulp_svif_type svif_type;
258 	enum bnxt_ulp_intf_type port_type;
259 
260 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
261 	    BNXT_ULP_INVALID_SVIF_VAL) {
262 		BNXT_TF_DBG(ERR,
263 			    "SVIF already set,multiple source not support'd\n");
264 		return BNXT_TF_RC_ERROR;
265 	}
266 
267 	/* Get port type details */
268 	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
269 	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
270 		BNXT_TF_DBG(ERR, "Invalid port type\n");
271 		return BNXT_TF_RC_ERROR;
272 	}
273 
274 	/* Update the match port type */
275 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
276 
277 	/* compute the direction */
278 	bnxt_ulp_rte_parser_direction_compute(params);
279 
280 	/* Get the computed direction */
281 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
282 	if (dir == BNXT_ULP_DIR_INGRESS) {
283 		svif_type = BNXT_ULP_PHY_PORT_SVIF;
284 	} else {
285 		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
286 			svif_type = BNXT_ULP_VF_FUNC_SVIF;
287 		else
288 			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
289 	}
290 	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
291 			     &svif);
292 	svif = rte_cpu_to_be_16(svif);
293 	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
294 	memcpy(hdr_field->spec, &svif, sizeof(svif));
295 	memcpy(hdr_field->mask, &mask, sizeof(mask));
296 	hdr_field->size = sizeof(svif);
297 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
298 			    rte_be_to_cpu_16(svif));
299 	return BNXT_TF_RC_SUCCESS;
300 }
301 
302 /* Function to handle the parsing of the RTE port id */
303 int32_t
304 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
305 {
306 	uint16_t port_id = 0;
307 	uint16_t svif_mask = 0xFFFF;
308 	uint32_t ifindex;
309 	int32_t rc = BNXT_TF_RC_ERROR;
310 
311 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
312 	    BNXT_ULP_INVALID_SVIF_VAL)
313 		return BNXT_TF_RC_SUCCESS;
314 
315 	/* SVIF not set. So get the port id */
316 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
317 
318 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
319 					      port_id,
320 					      &ifindex)) {
321 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
322 		return rc;
323 	}
324 
325 	/* Update the SVIF details */
326 	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
327 	return rc;
328 }
329 
330 /* Function to handle the implicit action port id */
331 int32_t
332 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
333 {
334 	struct rte_flow_action action_item = {0};
335 	struct rte_flow_action_port_id port_id = {0};
336 
337 	/* Read the action port set bit */
338 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
339 		/* Already set, so just exit */
340 		return BNXT_TF_RC_SUCCESS;
341 	}
342 	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
343 	action_item.conf = &port_id;
344 
345 	/* Update the action port based on incoming port */
346 	ulp_rte_port_id_act_handler(&action_item, params);
347 
348 	/* Reset the action port set bit */
349 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
350 	return BNXT_TF_RC_SUCCESS;
351 }
352 
353 /* Function to handle the parsing of RTE Flow item PF Header. */
354 int32_t
355 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
356 		       struct ulp_rte_parser_params *params)
357 {
358 	uint16_t port_id = 0;
359 	uint16_t svif_mask = 0xFFFF;
360 	uint32_t ifindex;
361 
362 	/* Get the implicit port id */
363 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
364 
365 	/* perform the conversion from dpdk port to bnxt ifindex */
366 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
367 					      port_id,
368 					      &ifindex)) {
369 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
370 		return BNXT_TF_RC_ERROR;
371 	}
372 
373 	/* Update the SVIF details */
374 	return  ulp_rte_parser_svif_set(params, ifindex, svif_mask);
375 }
376 
377 /* Function to handle the parsing of RTE Flow item VF Header. */
378 int32_t
379 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
380 		       struct ulp_rte_parser_params *params)
381 {
382 	const struct rte_flow_item_vf *vf_spec = item->spec;
383 	const struct rte_flow_item_vf *vf_mask = item->mask;
384 	uint16_t mask = 0;
385 	uint32_t ifindex;
386 	int32_t rc = BNXT_TF_RC_PARSE_ERR;
387 
388 	/* Get VF rte_flow_item for Port details */
389 	if (!vf_spec) {
390 		BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
391 		return rc;
392 	}
393 	if (!vf_mask) {
394 		BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
395 		return rc;
396 	}
397 	mask = vf_mask->id;
398 
399 	/* perform the conversion from VF Func id to bnxt ifindex */
400 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
401 						 vf_spec->id,
402 						 &ifindex)) {
403 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
404 		return rc;
405 	}
406 	/* Update the SVIF details */
407 	return ulp_rte_parser_svif_set(params, ifindex, mask);
408 }
409 
410 /* Function to handle the parsing of RTE Flow item port id  Header. */
411 int32_t
412 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
413 			    struct ulp_rte_parser_params *params)
414 {
415 	const struct rte_flow_item_port_id *port_spec = item->spec;
416 	const struct rte_flow_item_port_id *port_mask = item->mask;
417 	uint16_t mask = 0;
418 	int32_t rc = BNXT_TF_RC_PARSE_ERR;
419 	uint32_t ifindex;
420 
421 	if (!port_spec) {
422 		BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
423 		return rc;
424 	}
425 	if (!port_mask) {
426 		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
427 		return rc;
428 	}
429 	mask = port_mask->id;
430 
431 	/* perform the conversion from dpdk port to bnxt ifindex */
432 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
433 					      port_spec->id,
434 					      &ifindex)) {
435 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
436 		return rc;
437 	}
438 	/* Update the SVIF details */
439 	return ulp_rte_parser_svif_set(params, ifindex, mask);
440 }
441 
442 /* Function to handle the parsing of RTE Flow item phy port Header. */
443 int32_t
444 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
445 			     struct ulp_rte_parser_params *params)
446 {
447 	const struct rte_flow_item_phy_port *port_spec = item->spec;
448 	const struct rte_flow_item_phy_port *port_mask = item->mask;
449 	uint16_t mask = 0;
450 	int32_t rc = BNXT_TF_RC_ERROR;
451 	uint16_t svif;
452 	enum bnxt_ulp_direction_type dir;
453 	struct ulp_rte_hdr_field *hdr_field;
454 
455 	/* Copy the rte_flow_item for phy port into hdr_field */
456 	if (!port_spec) {
457 		BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
458 		return rc;
459 	}
460 	if (!port_mask) {
461 		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
462 		return rc;
463 	}
464 	mask = port_mask->index;
465 
466 	/* Update the match port type */
467 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
468 			    BNXT_ULP_INTF_TYPE_PHY_PORT);
469 
470 	/* Compute the Hw direction */
471 	bnxt_ulp_rte_parser_direction_compute(params);
472 
473 	/* Direction validation */
474 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
475 	if (dir == BNXT_ULP_DIR_EGRESS) {
476 		BNXT_TF_DBG(ERR,
477 			    "Parse Err:Phy ports are valid only for ingress\n");
478 		return BNXT_TF_RC_PARSE_ERR;
479 	}
480 
481 	/* Get the physical port details from port db */
482 	rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
483 					   &svif);
484 	if (rc) {
485 		BNXT_TF_DBG(ERR, "Failed to get port details\n");
486 		return BNXT_TF_RC_PARSE_ERR;
487 	}
488 
489 	/* Update the SVIF details */
490 	svif = rte_cpu_to_be_16(svif);
491 	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
492 	memcpy(hdr_field->spec, &svif, sizeof(svif));
493 	memcpy(hdr_field->mask, &mask, sizeof(mask));
494 	hdr_field->size = sizeof(svif);
495 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
496 			    rte_be_to_cpu_16(svif));
497 	return BNXT_TF_RC_SUCCESS;
498 }
499 
500 /* Function to handle the update of proto header based on field values */
501 static void
502 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
503 			     uint16_t type, uint32_t in_flag)
504 {
505 	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
506 		if (in_flag) {
507 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
508 				       BNXT_ULP_HDR_BIT_I_IPV4);
509 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
510 		} else {
511 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
512 				       BNXT_ULP_HDR_BIT_O_IPV4);
513 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
514 		}
515 	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
516 		if (in_flag) {
517 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
518 				       BNXT_ULP_HDR_BIT_I_IPV6);
519 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
520 		} else {
521 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
522 				       BNXT_ULP_HDR_BIT_O_IPV6);
523 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
524 		}
525 	}
526 }
527 
/*
 * Function to handle the parsing of RTE Flow item Ethernet Header.
 * Copies dst mac, src mac and ether type spec/mask into consecutive
 * hdr_field slots, reserves the vlan field slots, and marks the header
 * as outer or inner ethernet in the protocol bitmap.
 */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields. The copy order (dst, src, type) must match the
	 * template's field layout.
	 */
	if (eth_spec) {
		size = sizeof(eth_spec->dst.addr_bytes);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						eth_spec->dst.addr_bytes,
						size);
		size = sizeof(eth_spec->src.addr_bytes);
		field = ulp_rte_parser_fld_copy(field,
						eth_spec->src.addr_bytes,
						size);
		field = ulp_rte_parser_fld_copy(field,
						&eth_spec->type,
						sizeof(eth_spec->type));
		/* Keep the (big-endian) ether type for the L3 bitmap below */
		eth_type = eth_spec->type;
	}
	if (eth_mask) {
		/* Masks follow the same dst, src, type order as the spec */
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
				       sizeof(eth_mask->dst.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
				       sizeof(eth_mask->src.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
				       sizeof(eth_mask->type));
	}
	/* Add number of vlan header elements: reserve slots for vlan tags */
	params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
	params->vlan_idx = params->field_idx;
	params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
		/* A second eth item in the pattern is the inner header */
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
584 
/*
 * Function to handle the parsing of RTE Flow item Vlan Header.
 * Splits the TCI into priority and vlan id fields, copies spec/mask
 * into the reserved vlan slots, and tracks up to two outer and two
 * inner vlan tags via the computed fields and header bitmap.
 */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap	*hdr_bit;
	uint32_t idx = params->vlan_idx;
	uint16_t vlan_tag, priority;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	if (vlan_spec) {
		/* TCI is big endian: top 3 bits PCP, low 12 bits VID */
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);

		/* Store priority, tag and inner ether type, in that order */
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&priority,
						sizeof(priority));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_tag,
						sizeof(vlan_tag));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_spec->inner_type,
						sizeof(vlan_spec->inner_type));
		eth_type = vlan_spec->inner_type;
	}

	if (vlan_mask) {
		vlan_tag = ntohs(vlan_mask->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= 0xfff;

		/*
		 * the storage for priority and vlan tag is 2 bytes
		 * The mask of priority which is 3 bits if it is all 1's
		 * then make the rest bits 13 bits as 1's
		 * so that it is matched as exact match.
		 */
		if (priority == ULP_VLAN_PRIORITY_MASK)
			priority |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag == ULP_VLAN_TAG_MASK)
			vlan_tag |= ~ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);

		/* Masks go into the same slots, same order as the spec */
		ulp_rte_prsr_mask_copy(params, &idx, &priority,
				       sizeof(priority));
		ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
				       sizeof(vlan_tag));
		ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
				       sizeof(vlan_mask->inner_type));
	}
	/* Set the vlan index to new incremented value */
	params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/*
	 * Update the hdr_bitmap of the vlans. The four cases below are,
	 * in order: first outer tag, second outer tag, first inner tag,
	 * second inner tag. Anything else (e.g. vlan before any eth
	 * item, or a third tag) is a parse error.
	 */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* Update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
	return BNXT_TF_RC_SUCCESS;
}
712 
713 /* Function to handle the update of proto header based on field values */
714 static void
715 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
716 			     uint8_t proto, uint32_t in_flag)
717 {
718 	if (proto == IPPROTO_UDP) {
719 		if (in_flag) {
720 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
721 				       BNXT_ULP_HDR_BIT_I_UDP);
722 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
723 		} else {
724 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
725 				       BNXT_ULP_HDR_BIT_O_UDP);
726 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
727 		}
728 	} else if (proto == IPPROTO_TCP) {
729 		if (in_flag) {
730 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
731 				       BNXT_ULP_HDR_BIT_I_TCP);
732 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
733 		} else {
734 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
735 				       BNXT_ULP_HDR_BIT_O_TCP);
736 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
737 		}
738 	}
739 }
740 
/*
 * Function to handle the parsing of RTE Flow item IPV4 Header.
 * Copies each ipv4 header field spec/mask into consecutive hdr_field
 * slots (the order must match the template layout), marks the header
 * as outer or inner L3, and bumps the L3 header count. At most two L3
 * headers (outer + inner) are supported.
 */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	if (ipv4_spec) {
		size = sizeof(ipv4_spec->hdr.version_ihl);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&ipv4_spec->hdr.version_ihl,
						size);
		size = sizeof(ipv4_spec->hdr.type_of_service);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.type_of_service,
						size);
		size = sizeof(ipv4_spec->hdr.total_length);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.total_length,
						size);
		size = sizeof(ipv4_spec->hdr.packet_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.packet_id,
						size);
		size = sizeof(ipv4_spec->hdr.fragment_offset);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.fragment_offset,
						size);
		size = sizeof(ipv4_spec->hdr.time_to_live);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.time_to_live,
						size);
		size = sizeof(ipv4_spec->hdr.next_proto_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.next_proto_id,
						size);
		/* Keep the L4 protocol for the fast-path bitmap below */
		proto = ipv4_spec->hdr.next_proto_id;
		size = sizeof(ipv4_spec->hdr.hdr_checksum);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.hdr_checksum,
						size);
		size = sizeof(ipv4_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.src_addr,
						size);
		size = sizeof(ipv4_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.dst_addr,
						size);
	}
	if (ipv4_mask) {
		/* Masks go into the same slots, same order as the spec */
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.version_ihl,
				       sizeof(ipv4_mask->hdr.version_ihl));
#ifdef ULP_DONT_IGNORE_TOS
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.type_of_service,
				       sizeof(ipv4_mask->hdr.type_of_service));
#else
		/*
		 * The tos field is ignored since OVS is setting it as wild card
		 * match and it is not supported. This is a work around and
		 * shall be addressed in the future.
		 */
		idx += 1;
#endif

		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.total_length,
				       sizeof(ipv4_mask->hdr.total_length));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.packet_id,
				       sizeof(ipv4_mask->hdr.packet_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.fragment_offset,
				       sizeof(ipv4_mask->hdr.fragment_offset));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.time_to_live,
				       sizeof(ipv4_mask->hdr.time_to_live));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.next_proto_id,
				       sizeof(ipv4_mask->hdr.next_proto_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.hdr_checksum,
				       sizeof(ipv4_mask->hdr.hdr_checksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.src_addr,
				       sizeof(ipv4_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.dst_addr,
				       sizeof(ipv4_mask->hdr.dst_addr));
	}
	/* Add the number of ipv4 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		/* An outer L3 already exists, so this is the inner header */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
871 
/*
 * Function to handle the parsing of RTE Flow item IPV6 Header.
 *
 * Copies the ipv6 spec and mask fields into params->hdr_field, sets the
 * inner/outer ipv6 header and L3 bitmaps, and records the next-header
 * protocol for the protocol header bitmap update.
 */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6	*ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6	*ipv6_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t vtcf, vtcf_mask;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		/*
		 * The version, traffic class and flow label sub-fields are
		 * extracted from vtc_flow; each is stored in its own
		 * vtc_flow-sized (4 byte) hdr_field entry.
		 */
		size = sizeof(ipv6_spec->hdr.vtc_flow);

		vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&vtcf,
						size);

		vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(field,
						&vtcf,
						size);

		vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(field,
						&vtcf,
						size);

		size = sizeof(ipv6_spec->hdr.payload_len);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.payload_len,
						size);
		size = sizeof(ipv6_spec->hdr.proto);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.proto,
						size);
		/* Remember next-header for the proto bitmap update below */
		proto = ipv6_spec->hdr.proto;
		size = sizeof(ipv6_spec->hdr.hop_limits);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.hop_limits,
						size);
		size = sizeof(ipv6_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.src_addr,
						size);
		size = sizeof(ipv6_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.dst_addr,
						size);
	}
	if (ipv6_mask) {
		/* Masks are copied in the same order as the spec fields */
		size = sizeof(ipv6_mask->hdr.vtc_flow);

		vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		vtcf_mask =
			BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.payload_len,
				       sizeof(ipv6_mask->hdr.payload_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.proto,
				       sizeof(ipv6_mask->hdr.proto));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.hop_limits,
				       sizeof(ipv6_mask->hdr.hop_limits));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.src_addr,
				       sizeof(ipv6_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.dst_addr,
				       sizeof(ipv6_mask->hdr.dst_addr));
	}
	/* add number of ipv6 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;

	/*
	 * Set the ipv6 header bitmap and computed l3 header bitmaps.
	 * If an outer L3 header was already parsed, this one is the
	 * inner (tunneled) header.
	 */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
994 
995 /* Function to handle the update of proto header based on field values */
996 static void
997 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
998 			     uint16_t dst_port)
999 {
1000 	if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1001 		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1002 			       BNXT_ULP_HDR_BIT_T_VXLAN);
1003 }
1004 
/*
 * Function to handle the parsing of RTE Flow item UDP Header.
 *
 * Copies the udp spec/mask fields into params->hdr_field, sets the
 * inner/outer udp and L4 bitmaps, and checks the destination port for
 * VXLAN tunnel detection on the outer header.
 */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint16_t dst_port = 0;
	uint32_t cnt;

	/* validate there is no 3rd L4 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	if (udp_spec) {
		size = sizeof(udp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&udp_spec->hdr.src_port,
						size);
		size = sizeof(udp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dst_port,
						size);
		/* Saved for the VXLAN port check below */
		dst_port = udp_spec->hdr.dst_port;
		size = sizeof(udp_spec->hdr.dgram_len);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_len,
						size);
		size = sizeof(udp_spec->hdr.dgram_cksum);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_cksum,
						size);
	}
	if (udp_mask) {
		/* Masks are copied in the same order as the spec fields */
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.src_port,
				       sizeof(udp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dst_port,
				       sizeof(udp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_len,
				       sizeof(udp_mask->hdr.dgram_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_cksum,
				       sizeof(udp_mask->hdr.dgram_cksum));
	}

	/* Add number of UDP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;

	/*
	 * Set the udp header bitmap and computed l4 header bitmaps.
	 * An outer L4 header already parsed means this is the inner one.
	 */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		/* Update the field protocol hdr bitmap (VXLAN detection) */
		ulp_rte_l4_proto_type_update(params, dst_port);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1080 
/*
 * Function to handle the parsing of RTE Flow item TCP Header.
 *
 * Copies the tcp spec/mask fields into params->hdr_field and sets the
 * inner/outer tcp and L4 bitmaps.
 */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t cnt;

	/* validate there is no 3rd L4 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	if (tcp_spec) {
		size = sizeof(tcp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&tcp_spec->hdr.src_port,
						size);
		size = sizeof(tcp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.dst_port,
						size);
		size = sizeof(tcp_spec->hdr.sent_seq);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.sent_seq,
						size);
		size = sizeof(tcp_spec->hdr.recv_ack);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.recv_ack,
						size);
		size = sizeof(tcp_spec->hdr.data_off);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.data_off,
						size);
		size = sizeof(tcp_spec->hdr.tcp_flags);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_flags,
						size);
		size = sizeof(tcp_spec->hdr.rx_win);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.rx_win,
						size);
		size = sizeof(tcp_spec->hdr.cksum);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.cksum,
						size);
		size = sizeof(tcp_spec->hdr.tcp_urp);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_urp,
						size);
	} else {
		/*
		 * NOTE(review): with a NULL spec the local idx is advanced
		 * past the TCP spec slots before the mask copies below; the
		 * UDP/IPv4/IPv6 handlers do not do this — confirm the offset
		 * is intentional.
		 */
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	if (tcp_mask) {
		/* Masks are copied in the same order as the spec fields */
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.src_port,
				       sizeof(tcp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.dst_port,
				       sizeof(tcp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.sent_seq,
				       sizeof(tcp_mask->hdr.sent_seq));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.recv_ack,
				       sizeof(tcp_mask->hdr.recv_ack));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.data_off,
				       sizeof(tcp_mask->hdr.data_off));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_flags,
				       sizeof(tcp_mask->hdr.tcp_flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.rx_win,
				       sizeof(tcp_mask->hdr.rx_win));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.cksum,
				       sizeof(tcp_mask->hdr.cksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_urp,
				       sizeof(tcp_mask->hdr.tcp_urp));
	}
	/* add number of TCP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;

	/*
	 * Set the tcp header bitmap and computed l4 header bitmaps.
	 * An outer L4 header already parsed means this is the inner one.
	 */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1189 
1190 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1191 int32_t
1192 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1193 			  struct ulp_rte_parser_params *params)
1194 {
1195 	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1196 	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1197 	struct ulp_rte_hdr_field *field;
1198 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1199 	uint32_t idx = params->field_idx;
1200 	uint32_t size;
1201 
1202 	/*
1203 	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1204 	 * header fields
1205 	 */
1206 	if (vxlan_spec) {
1207 		size = sizeof(vxlan_spec->flags);
1208 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1209 						&vxlan_spec->flags,
1210 						size);
1211 		size = sizeof(vxlan_spec->rsvd0);
1212 		field = ulp_rte_parser_fld_copy(field,
1213 						&vxlan_spec->rsvd0,
1214 						size);
1215 		size = sizeof(vxlan_spec->vni);
1216 		field = ulp_rte_parser_fld_copy(field,
1217 						&vxlan_spec->vni,
1218 						size);
1219 		size = sizeof(vxlan_spec->rsvd1);
1220 		field = ulp_rte_parser_fld_copy(field,
1221 						&vxlan_spec->rsvd1,
1222 						size);
1223 	}
1224 	if (vxlan_mask) {
1225 		ulp_rte_prsr_mask_copy(params, &idx,
1226 				       &vxlan_mask->flags,
1227 				       sizeof(vxlan_mask->flags));
1228 		ulp_rte_prsr_mask_copy(params, &idx,
1229 				       &vxlan_mask->rsvd0,
1230 				       sizeof(vxlan_mask->rsvd0));
1231 		ulp_rte_prsr_mask_copy(params, &idx,
1232 				       &vxlan_mask->vni,
1233 				       sizeof(vxlan_mask->vni));
1234 		ulp_rte_prsr_mask_copy(params, &idx,
1235 				       &vxlan_mask->rsvd1,
1236 				       sizeof(vxlan_mask->rsvd1));
1237 	}
1238 	/* Add number of vxlan header elements */
1239 	params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1240 
1241 	/* Update the hdr_bitmap with vxlan */
1242 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1243 	return BNXT_TF_RC_SUCCESS;
1244 }
1245 
/*
 * Function to handle the parsing of RTE Flow item void Header.
 * VOID items carry no match data, so there is nothing to parse.
 */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
1253 
/*
 * Function to handle the parsing of RTE Flow action void Header.
 * VOID actions carry no configuration, so there is nothing to parse.
 */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
1261 
/*
 * Function to handle the parsing of RTE Flow action Mark Header.
 *
 * Stores the mark id (big-endian) in the action properties and sets the
 * mark action bit.
 */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_mark *mark;
	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
	uint32_t mark_id;

	mark = action_item->conf;
	if (mark) {
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
1284 
/*
 * Function to handle the parsing of RTE Flow action RSS Header.
 * Only the presence of the action is recorded; the rss configuration
 * itself is not consumed here.
 */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_parser_params *param)
{
	const struct rte_flow_action_rss *rss = action_item->conf;

	if (rss) {
		/* Update the act_bitmap with rss */
		ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
1300 
/*
 * Function to handle the parsing of RTE Flow action vxlan_encap Header.
 *
 * Walks the encap item definition, which must be the sequence
 * ETH [VLAN [VLAN]] (IPV4|IPV6) UDP VXLAN (VOID items are skipped),
 * and fills the corresponding encap action properties.
 */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	uint8_t *buff;
	/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
	const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
				    0x00, 0x40, 0x11};
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Copy the outer destination and source MACs */
	eth_spec = item->spec;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->dst.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);

	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->src.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
		ulp_encap_buffer_copy(buff,
				      item->spec,
				      sizeof(struct rte_flow_item_vlan));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* may have two vlan headers; the second tag follows the first */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
		       sizeof(struct rte_flow_item_vlan)],
		       item->spec,
		       sizeof(struct rte_flow_item_vlan));
		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}
	/* Update the vlan count and size of more than one */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4, IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/*
		 * copy the ipv4 details; when the spec's ver/hlen/tos bytes
		 * are all zero, substitute the default IPv4 header template
		 */
		if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv4_hdr,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		} else {
			const uint8_t *tmp_buff;

			/* Store id/proto bytes first, then ver/hlen/tos */
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
			     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
			ulp_encap_buffer_copy(buff,
					      &ipv4_spec->hdr.version_ihl,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
		}
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
		    BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
		    BNXT_ULP_ENCAP_IPV4_ID_PROTO];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
				      BNXT_ULP_ENCAP_IPV4_DEST_IP);

		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.src_addr,
				      BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv4 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
				    1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* copy the ipv6 details */
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
		       ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		 /* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv6 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
				    1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	/* copy the udp details */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	/* 0x08 sets the VNI-valid (I) flag of the VXLAN header (RFC 7348) */
	vxlan_spec.flags = 0x08;
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
			      (const uint8_t *)&vxlan_spec,
			      vxlan_size);
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	/* update the act_bitmap with vxlan encap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}
1508 
/* Function to handle the parsing of RTE Flow action vxlan_decap. */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
	return BNXT_TF_RC_SUCCESS;
}
1520 
/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}
1530 
1531 /* Function to handle the parsing of RTE Flow action count. */
1532 int32_t
1533 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1534 			  struct ulp_rte_parser_params *params)
1535 
1536 {
1537 	const struct rte_flow_action_count *act_count;
1538 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
1539 
1540 	act_count = action_item->conf;
1541 	if (act_count) {
1542 		if (act_count->shared) {
1543 			BNXT_TF_DBG(ERR,
1544 				    "Parse Error:Shared count not supported\n");
1545 			return BNXT_TF_RC_PARSE_ERR;
1546 		}
1547 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1548 		       &act_count->id,
1549 		       BNXT_ULP_ACT_PROP_SZ_COUNT);
1550 	}
1551 
1552 	/* Update the hdr_bitmap with count */
1553 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1554 	return BNXT_TF_RC_SUCCESS;
1555 }
1556 
/*
 * Function to handle the parsing of action ports.
 *
 * Fills the destination port action property for the interface given by
 * ifindex: the vport for egress flows, or the function vnic for ingress
 * flows, then marks the action-port-set computed field.
 */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint32_t pid;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction */
	dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		/* For egress direction, fill vport (stored big-endian) */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	} else {
		/*
		 * For ingress direction, fill vnic. VF representors use the
		 * VF function vnic; all other port types use the driver
		 * function vnic.
		 */
		port_type = ULP_COMP_FLD_IDX_RD(param,
						BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}
1603 
1604 /* Function to handle the parsing of RTE Flow action PF. */
1605 int32_t
1606 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1607 		       struct ulp_rte_parser_params *params)
1608 {
1609 	uint32_t port_id;
1610 	uint32_t ifindex;
1611 	enum bnxt_ulp_intf_type intf_type;
1612 
1613 	/* Get the port id of the current device */
1614 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1615 
1616 	/* Get the port db ifindex */
1617 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1618 					      &ifindex)) {
1619 		BNXT_TF_DBG(ERR, "Invalid port id\n");
1620 		return BNXT_TF_RC_ERROR;
1621 	}
1622 
1623 	/* Check the port is PF port */
1624 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1625 	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1626 		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1627 		return BNXT_TF_RC_ERROR;
1628 	}
1629 	/* Update the action properties */
1630 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1631 	return ulp_rte_parser_act_port_set(params, ifindex);
1632 }
1633 
1634 /* Function to handle the parsing of RTE Flow action VF. */
1635 int32_t
1636 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1637 		       struct ulp_rte_parser_params *params)
1638 {
1639 	const struct rte_flow_action_vf *vf_action;
1640 	uint32_t ifindex;
1641 	enum bnxt_ulp_intf_type intf_type;
1642 
1643 	vf_action = action_item->conf;
1644 	if (!vf_action) {
1645 		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1646 		return BNXT_TF_RC_PARSE_ERR;
1647 	}
1648 
1649 	if (vf_action->original) {
1650 		BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1651 		return BNXT_TF_RC_PARSE_ERR;
1652 	}
1653 
1654 	/* Check the port is VF port */
1655 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1656 						 &ifindex)) {
1657 		BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1658 		return BNXT_TF_RC_ERROR;
1659 	}
1660 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1661 	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1662 	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1663 		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1664 		return BNXT_TF_RC_ERROR;
1665 	}
1666 
1667 	/* Update the action properties */
1668 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1669 	return ulp_rte_parser_act_port_set(params, ifindex);
1670 }
1671 
1672 /* Function to handle the parsing of RTE Flow action port_id. */
1673 int32_t
1674 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1675 			    struct ulp_rte_parser_params *param)
1676 {
1677 	const struct rte_flow_action_port_id *port_id = act_item->conf;
1678 	uint32_t ifindex;
1679 	enum bnxt_ulp_intf_type intf_type;
1680 
1681 	if (!port_id) {
1682 		BNXT_TF_DBG(ERR,
1683 			    "ParseErr: Invalid Argument\n");
1684 		return BNXT_TF_RC_PARSE_ERR;
1685 	}
1686 	if (port_id->original) {
1687 		BNXT_TF_DBG(ERR,
1688 			    "ParseErr:Portid Original not supported\n");
1689 		return BNXT_TF_RC_PARSE_ERR;
1690 	}
1691 
1692 	/* Get the port db ifindex */
1693 	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1694 					      &ifindex)) {
1695 		BNXT_TF_DBG(ERR, "Invalid port id\n");
1696 		return BNXT_TF_RC_ERROR;
1697 	}
1698 
1699 	/* Get the intf type */
1700 	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1701 	if (!intf_type) {
1702 		BNXT_TF_DBG(ERR, "Invalid port type\n");
1703 		return BNXT_TF_RC_ERROR;
1704 	}
1705 
1706 	/* Set the action port */
1707 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1708 	return ulp_rte_parser_act_port_set(param, ifindex);
1709 }
1710 
/*
 * Function to handle the parsing of RTE Flow action phy_port.
 *
 * Looks up the vport for the physical port index and stores it as the
 * action destination; valid only on egress flows.
 */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_parser_params *prm)
{
	const struct rte_flow_action_phy_port *phy_port;
	uint32_t pid;
	int32_t rc;
	uint16_t pid_s;
	enum bnxt_ulp_direction_type dir;

	phy_port = action_item->conf;
	if (!phy_port) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* The "original" attribute is not supported */
	if (phy_port->original) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Port Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	/* Physical port destinations are only valid on egress flows */
	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir != BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for egress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
					    &pid_s);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		/*
		 * NOTE(review): returns -EINVAL while sibling handlers use
		 * BNXT_TF_RC_* codes — confirm callers handle both.
		 */
		return -EINVAL;
	}

	/* Store the vport in big-endian in the action properties */
	pid = pid_s;
	pid = rte_cpu_to_be_32(pid);
	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);
	return BNXT_TF_RC_SUCCESS;
}
1759 
/* Function to handle the parsing of RTE Flow action pop vlan. */
int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with pop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
	return BNXT_TF_RC_SUCCESS;
}
1769 
1770 /* Function to handle the parsing of RTE Flow action push vlan. */
1771 int32_t
1772 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1773 				 struct ulp_rte_parser_params *params)
1774 {
1775 	const struct rte_flow_action_of_push_vlan *push_vlan;
1776 	uint16_t ethertype;
1777 	struct ulp_rte_act_prop *act = &params->act_prop;
1778 
1779 	push_vlan = action_item->conf;
1780 	if (push_vlan) {
1781 		ethertype = push_vlan->ethertype;
1782 		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1783 			BNXT_TF_DBG(ERR,
1784 				    "Parse Err: Ethertype not supported\n");
1785 			return BNXT_TF_RC_PARSE_ERR;
1786 		}
1787 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1788 		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1789 		/* Update the hdr_bitmap with push vlan */
1790 		ULP_BITMAP_SET(params->act_bitmap.bits,
1791 			       BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1792 		return BNXT_TF_RC_SUCCESS;
1793 	}
1794 	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1795 	return BNXT_TF_RC_ERROR;
1796 }
1797 
1798 /* Function to handle the parsing of RTE Flow action set vlan id. */
1799 int32_t
1800 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1801 				    struct ulp_rte_parser_params *params)
1802 {
1803 	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1804 	uint32_t vid;
1805 	struct ulp_rte_act_prop *act = &params->act_prop;
1806 
1807 	vlan_vid = action_item->conf;
1808 	if (vlan_vid && vlan_vid->vlan_vid) {
1809 		vid = vlan_vid->vlan_vid;
1810 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1811 		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1812 		/* Update the hdr_bitmap with vlan vid */
1813 		ULP_BITMAP_SET(params->act_bitmap.bits,
1814 			       BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1815 		return BNXT_TF_RC_SUCCESS;
1816 	}
1817 	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1818 	return BNXT_TF_RC_ERROR;
1819 }
1820 
1821 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1822 int32_t
1823 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1824 				    struct ulp_rte_parser_params *params)
1825 {
1826 	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1827 	uint8_t pcp;
1828 	struct ulp_rte_act_prop *act = &params->act_prop;
1829 
1830 	vlan_pcp = action_item->conf;
1831 	if (vlan_pcp) {
1832 		pcp = vlan_pcp->vlan_pcp;
1833 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1834 		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1835 		/* Update the hdr_bitmap with vlan vid */
1836 		ULP_BITMAP_SET(params->act_bitmap.bits,
1837 			       BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1838 		return BNXT_TF_RC_SUCCESS;
1839 	}
1840 	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1841 	return BNXT_TF_RC_ERROR;
1842 }
1843 
1844 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
1845 int32_t
1846 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
1847 				 struct ulp_rte_parser_params *params)
1848 {
1849 	const struct rte_flow_action_set_ipv4 *set_ipv4;
1850 	struct ulp_rte_act_prop *act = &params->act_prop;
1851 
1852 	set_ipv4 = action_item->conf;
1853 	if (set_ipv4) {
1854 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
1855 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
1856 		/* Update the hdr_bitmap with set ipv4 src */
1857 		ULP_BITMAP_SET(params->act_bitmap.bits,
1858 			       BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
1859 		return BNXT_TF_RC_SUCCESS;
1860 	}
1861 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
1862 	return BNXT_TF_RC_ERROR;
1863 }
1864 
1865 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
1866 int32_t
1867 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
1868 				 struct ulp_rte_parser_params *params)
1869 {
1870 	const struct rte_flow_action_set_ipv4 *set_ipv4;
1871 	struct ulp_rte_act_prop *act = &params->act_prop;
1872 
1873 	set_ipv4 = action_item->conf;
1874 	if (set_ipv4) {
1875 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
1876 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
1877 		/* Update the hdr_bitmap with set ipv4 dst */
1878 		ULP_BITMAP_SET(params->act_bitmap.bits,
1879 			       BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
1880 		return BNXT_TF_RC_SUCCESS;
1881 	}
1882 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
1883 	return BNXT_TF_RC_ERROR;
1884 }
1885 
1886 /* Function to handle the parsing of RTE Flow action set tp src.*/
1887 int32_t
1888 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
1889 			       struct ulp_rte_parser_params *params)
1890 {
1891 	const struct rte_flow_action_set_tp *set_tp;
1892 	struct ulp_rte_act_prop *act = &params->act_prop;
1893 
1894 	set_tp = action_item->conf;
1895 	if (set_tp) {
1896 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
1897 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
1898 		/* Update the hdr_bitmap with set tp src */
1899 		ULP_BITMAP_SET(params->act_bitmap.bits,
1900 			       BNXT_ULP_ACTION_BIT_SET_TP_SRC);
1901 		return BNXT_TF_RC_SUCCESS;
1902 	}
1903 
1904 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1905 	return BNXT_TF_RC_ERROR;
1906 }
1907 
1908 /* Function to handle the parsing of RTE Flow action set tp dst.*/
1909 int32_t
1910 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
1911 			       struct ulp_rte_parser_params *params)
1912 {
1913 	const struct rte_flow_action_set_tp *set_tp;
1914 	struct ulp_rte_act_prop *act = &params->act_prop;
1915 
1916 	set_tp = action_item->conf;
1917 	if (set_tp) {
1918 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
1919 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
1920 		/* Update the hdr_bitmap with set tp dst */
1921 		ULP_BITMAP_SET(params->act_bitmap.bits,
1922 			       BNXT_ULP_ACTION_BIT_SET_TP_DST);
1923 		return BNXT_TF_RC_SUCCESS;
1924 	}
1925 
1926 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1927 	return BNXT_TF_RC_ERROR;
1928 }
1929 
1930 /* Function to handle the parsing of RTE Flow action dec ttl.*/
1931 int32_t
1932 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
1933 			    struct ulp_rte_parser_params *params)
1934 {
1935 	/* Update the act_bitmap with dec ttl */
1936 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
1937 	return BNXT_TF_RC_SUCCESS;
1938 }
1939