xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c (revision bc700b6767278e49c4ea9c08bb43c0fd9ca3e70d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5 
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
12 #include "tfp.h"
13 #include "ulp_port_db.h"
14 
15 /* Local defines for the parsing functions */
16 #define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
17 #define ULP_VLAN_PRIORITY_MASK		0x700
18 #define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits*/
19 #define ULP_UDP_PORT_VXLAN		4789
20 
21 /* Utility function to skip the void items. */
22 static inline int32_t
23 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
24 {
25 	if (!*item)
26 		return 0;
27 	if (increment)
28 		(*item)++;
29 	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
30 		(*item)++;
31 	if (*item)
32 		return 1;
33 	return 0;
34 }
35 
36 /* Utility function to update the field_bitmap */
37 static void
38 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
39 				   uint32_t idx)
40 {
41 	struct ulp_rte_hdr_field *field;
42 
43 	field = &params->hdr_field[idx];
44 	if (ulp_bitmap_notzero(field->mask, field->size)) {
45 		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
46 		/* Not exact match */
47 		if (!ulp_bitmap_is_ones(field->mask, field->size))
48 			ULP_BITMAP_SET(params->fld_bitmap.bits,
49 				       BNXT_ULP_MATCH_TYPE_BITMASK_WM);
50 	} else {
51 		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
52 	}
53 }
54 
55 /* Utility function to copy field spec items */
56 static struct ulp_rte_hdr_field *
57 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
58 			const void *buffer,
59 			uint32_t size)
60 {
61 	field->size = size;
62 	memcpy(field->spec, buffer, field->size);
63 	field++;
64 	return field;
65 }
66 
67 /* Utility function to copy field masks items */
68 static void
69 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
70 		       uint32_t *idx,
71 		       const void *buffer,
72 		       uint32_t size)
73 {
74 	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
75 
76 	memcpy(field->mask, buffer, size);
77 	ulp_rte_parser_field_bitmap_update(params, *idx);
78 	*idx = *idx + 1;
79 }
80 
81 /*
82  * Function to handle the parsing of RTE Flows and placing
83  * the RTE flow items into the ulp structures.
84  */
85 int32_t
86 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
87 			      struct ulp_rte_parser_params *params)
88 {
89 	const struct rte_flow_item *item = pattern;
90 	struct bnxt_ulp_rte_hdr_info *hdr_info;
91 
92 	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
93 
94 	/* Set the computed flags for no vlan tags before parsing */
95 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
96 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
97 
98 	/* Parse all the items in the pattern */
99 	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
100 		/* get the header information from the flow_hdr_info table */
101 		hdr_info = &ulp_hdr_info[item->type];
102 		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
103 			BNXT_TF_DBG(ERR,
104 				    "Truflow parser does not support type %d\n",
105 				    item->type);
106 			return BNXT_TF_RC_PARSE_ERR;
107 		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
108 			/* call the registered callback handler */
109 			if (hdr_info->proto_hdr_func) {
110 				if (hdr_info->proto_hdr_func(item, params) !=
111 				    BNXT_TF_RC_SUCCESS) {
112 					return BNXT_TF_RC_ERROR;
113 				}
114 			}
115 		}
116 		item++;
117 	}
118 	/* update the implied SVIF */
119 	return ulp_rte_parser_implicit_match_port_process(params);
120 }
121 
122 /*
123  * Function to handle the parsing of RTE Flows and placing
124  * the RTE flow actions into the ulp structures.
125  */
126 int32_t
127 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
128 			      struct ulp_rte_parser_params *params)
129 {
130 	const struct rte_flow_action *action_item = actions;
131 	struct bnxt_ulp_rte_act_info *hdr_info;
132 
133 	/* Parse all the items in the pattern */
134 	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
135 		/* get the header information from the flow_hdr_info table */
136 		hdr_info = &ulp_act_info[action_item->type];
137 		if (hdr_info->act_type ==
138 		    BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
139 			BNXT_TF_DBG(ERR,
140 				    "Truflow parser does not support act %u\n",
141 				    action_item->type);
142 			return BNXT_TF_RC_ERROR;
143 		} else if (hdr_info->act_type ==
144 		    BNXT_ULP_ACT_TYPE_SUPPORTED) {
145 			/* call the registered callback handler */
146 			if (hdr_info->proto_act_func) {
147 				if (hdr_info->proto_act_func(action_item,
148 							     params) !=
149 				    BNXT_TF_RC_SUCCESS) {
150 					return BNXT_TF_RC_ERROR;
151 				}
152 			}
153 		}
154 		action_item++;
155 	}
156 	/* update the implied port details */
157 	ulp_rte_parser_implicit_act_port_process(params);
158 	return BNXT_TF_RC_SUCCESS;
159 }
160 
/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 *
 * Selects which PARIF computed field to populate based on the flow
 * direction and the matched port type:
 *   - ingress: physical-port PARIF
 *   - egress, VF representor match: VF-function PARIF plus the loopback
 *     PARIF, and the MATCH_PORT_IS_VFREP flag
 *   - egress, any other match: driver-function PARIF
 * On any port-db lookup failure the function logs and returns without
 * updating the computed fields.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params,
				      BNXT_ULP_CF_IDX_INCOMING_IF);
	/* Convert the dpdk port id to the ulp port database index. */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

			/* populate the loopback parif */
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
					    BNXT_ULP_SYM_VF_FUNC_PARIF);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
	}
}
235 
/*
 * Function to handle the post processing of the parsing details
 *
 * Runs after both the item and action parsers: stamps the flow
 * direction into the header/action bitmaps, derives the VF-to-VF flag,
 * classifies a DEC_TTL action as tunnel vs. non-tunnel TTL decrement,
 * merges the field-derived protocol bits into the header bitmap and
 * finally updates the interface PARIF computed fields.
 * Always returns 0 (flow rejection handling is TBD).
 */
int32_t
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type dir;
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACTION_BIT_DEC_TTL)) {
		/*
		 * Check that vxlan proto is included and vxlan decap
		 * action is not set then decrement tunnel ttl.
		 * Similarly add GRE and NVGRE in future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
			/* Tunnel stays intact: decrement the tunnel TTL. */
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			/* Plain (inner) TTL decrement. */
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}
297 
298 /*
299  * Function to compute the flow direction based on the match port details
300  */
301 static void
302 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
303 {
304 	enum bnxt_ulp_intf_type match_port_type;
305 
306 	/* Get the match port type */
307 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
308 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
309 
310 	/* If ingress flow and matchport is vf rep then dir is egress*/
311 	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
312 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
313 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
314 				    BNXT_ULP_DIR_EGRESS);
315 	} else {
316 		/* Assign the input direction */
317 		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
318 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
319 					    BNXT_ULP_DIR_INGRESS);
320 		else
321 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
322 					    BNXT_ULP_DIR_EGRESS);
323 	}
324 }
325 
326 /* Function to handle the parsing of RTE Flow item PF Header. */
327 static int32_t
328 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
329 			uint32_t ifindex,
330 			uint16_t mask)
331 {
332 	uint16_t svif;
333 	enum bnxt_ulp_direction_type dir;
334 	struct ulp_rte_hdr_field *hdr_field;
335 	enum bnxt_ulp_svif_type svif_type;
336 	enum bnxt_ulp_intf_type port_type;
337 
338 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
339 	    BNXT_ULP_INVALID_SVIF_VAL) {
340 		BNXT_TF_DBG(ERR,
341 			    "SVIF already set,multiple source not support'd\n");
342 		return BNXT_TF_RC_ERROR;
343 	}
344 
345 	/* Get port type details */
346 	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
347 	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
348 		BNXT_TF_DBG(ERR, "Invalid port type\n");
349 		return BNXT_TF_RC_ERROR;
350 	}
351 
352 	/* Update the match port type */
353 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
354 
355 	/* compute the direction */
356 	bnxt_ulp_rte_parser_direction_compute(params);
357 
358 	/* Get the computed direction */
359 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
360 	if (dir == BNXT_ULP_DIR_INGRESS) {
361 		svif_type = BNXT_ULP_PHY_PORT_SVIF;
362 	} else {
363 		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
364 			svif_type = BNXT_ULP_VF_FUNC_SVIF;
365 		else
366 			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
367 	}
368 	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
369 			     &svif);
370 	svif = rte_cpu_to_be_16(svif);
371 	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
372 	memcpy(hdr_field->spec, &svif, sizeof(svif));
373 	memcpy(hdr_field->mask, &mask, sizeof(mask));
374 	hdr_field->size = sizeof(svif);
375 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
376 			    rte_be_to_cpu_16(svif));
377 	return BNXT_TF_RC_SUCCESS;
378 }
379 
380 /* Function to handle the parsing of the RTE port id */
381 int32_t
382 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
383 {
384 	uint16_t port_id = 0;
385 	uint16_t svif_mask = 0xFFFF;
386 	uint32_t ifindex;
387 	int32_t rc = BNXT_TF_RC_ERROR;
388 
389 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
390 	    BNXT_ULP_INVALID_SVIF_VAL)
391 		return BNXT_TF_RC_SUCCESS;
392 
393 	/* SVIF not set. So get the port id */
394 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
395 
396 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
397 					      port_id,
398 					      &ifindex)) {
399 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
400 		return rc;
401 	}
402 
403 	/* Update the SVIF details */
404 	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
405 	return rc;
406 }
407 
408 /* Function to handle the implicit action port id */
409 int32_t
410 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
411 {
412 	struct rte_flow_action action_item = {0};
413 	struct rte_flow_action_port_id port_id = {0};
414 
415 	/* Read the action port set bit */
416 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
417 		/* Already set, so just exit */
418 		return BNXT_TF_RC_SUCCESS;
419 	}
420 	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
421 	action_item.conf = &port_id;
422 
423 	/* Update the action port based on incoming port */
424 	ulp_rte_port_id_act_handler(&action_item, params);
425 
426 	/* Reset the action port set bit */
427 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
428 	return BNXT_TF_RC_SUCCESS;
429 }
430 
431 /* Function to handle the parsing of RTE Flow item PF Header. */
432 int32_t
433 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
434 		       struct ulp_rte_parser_params *params)
435 {
436 	uint16_t port_id = 0;
437 	uint16_t svif_mask = 0xFFFF;
438 	uint32_t ifindex;
439 
440 	/* Get the implicit port id */
441 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
442 
443 	/* perform the conversion from dpdk port to bnxt ifindex */
444 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
445 					      port_id,
446 					      &ifindex)) {
447 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
448 		return BNXT_TF_RC_ERROR;
449 	}
450 
451 	/* Update the SVIF details */
452 	return  ulp_rte_parser_svif_set(params, ifindex, svif_mask);
453 }
454 
455 /* Function to handle the parsing of RTE Flow item VF Header. */
456 int32_t
457 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
458 		       struct ulp_rte_parser_params *params)
459 {
460 	const struct rte_flow_item_vf *vf_spec = item->spec;
461 	const struct rte_flow_item_vf *vf_mask = item->mask;
462 	uint16_t mask = 0;
463 	uint32_t ifindex;
464 	int32_t rc = BNXT_TF_RC_PARSE_ERR;
465 
466 	/* Get VF rte_flow_item for Port details */
467 	if (!vf_spec) {
468 		BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
469 		return rc;
470 	}
471 	if (!vf_mask) {
472 		BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
473 		return rc;
474 	}
475 	mask = vf_mask->id;
476 
477 	/* perform the conversion from VF Func id to bnxt ifindex */
478 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
479 						 vf_spec->id,
480 						 &ifindex)) {
481 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
482 		return rc;
483 	}
484 	/* Update the SVIF details */
485 	return ulp_rte_parser_svif_set(params, ifindex, mask);
486 }
487 
488 /* Function to handle the parsing of RTE Flow item port id  Header. */
489 int32_t
490 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
491 			    struct ulp_rte_parser_params *params)
492 {
493 	const struct rte_flow_item_port_id *port_spec = item->spec;
494 	const struct rte_flow_item_port_id *port_mask = item->mask;
495 	uint16_t mask = 0;
496 	int32_t rc = BNXT_TF_RC_PARSE_ERR;
497 	uint32_t ifindex;
498 
499 	if (!port_spec) {
500 		BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
501 		return rc;
502 	}
503 	if (!port_mask) {
504 		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
505 		return rc;
506 	}
507 	mask = port_mask->id;
508 
509 	/* perform the conversion from dpdk port to bnxt ifindex */
510 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
511 					      port_spec->id,
512 					      &ifindex)) {
513 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
514 		return rc;
515 	}
516 	/* Update the SVIF details */
517 	return ulp_rte_parser_svif_set(params, ifindex, mask);
518 }
519 
/*
 * Function to handle the parsing of RTE Flow item phy port Header.
 *
 * Validates spec/mask, forces the match port type to PHY_PORT,
 * recomputes the direction (phy ports are ingress-only), looks up the
 * physical port's SVIF and writes it into the SVIF header field.
 * Returns BNXT_TF_RC_SUCCESS, BNXT_TF_RC_ERROR, or BNXT_TF_RC_PARSE_ERR.
 */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
			     struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_phy_port *port_spec = item->spec;
	const struct rte_flow_item_phy_port *port_mask = item->mask;
	uint16_t mask = 0;
	int32_t rc = BNXT_TF_RC_ERROR;
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;

	/* Copy the rte_flow_item for phy port into hdr_field */
	if (!port_spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
		return rc;
	}
	if (!port_mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
		return rc;
	}
	/* NOTE(review): index is wider than mask, so the value is
	 * truncated to 16 bits here — matches the 16-bit SVIF field,
	 * but confirm upper mask bits are never meaningful.
	 */
	mask = port_mask->index;

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
			    BNXT_ULP_INTF_TYPE_PHY_PORT);

	/* Compute the Hw direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Direction validation */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_EGRESS) {
		BNXT_TF_DBG(ERR,
			    "Parse Err:Phy ports are valid only for ingress\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Get the physical port details from port db */
	rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
					   &svif);
	if (rc) {
		BNXT_TF_DBG(ERR, "Failed to get port details\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* Update the SVIF details: spec is stored big-endian, the flag
	 * keeps the CPU-order value for duplicate detection.
	 */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
577 
578 /* Function to handle the update of proto header based on field values */
579 static void
580 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
581 			     uint16_t type, uint32_t in_flag)
582 {
583 	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
584 		if (in_flag) {
585 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
586 				       BNXT_ULP_HDR_BIT_I_IPV4);
587 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
588 		} else {
589 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
590 				       BNXT_ULP_HDR_BIT_O_IPV4);
591 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
592 		}
593 	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
594 		if (in_flag) {
595 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
596 				       BNXT_ULP_HDR_BIT_I_IPV6);
597 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
598 		} else {
599 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
600 				       BNXT_ULP_HDR_BIT_O_IPV6);
601 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
602 		}
603 	}
604 }
605 
/*
 * Function to handle the parsing of RTE Flow item Ethernet Header.
 *
 * Copies dst MAC, src MAC and ethertype spec/mask into consecutive
 * hdr_field slots starting at params->field_idx, reserves the vlan
 * field slots, and sets the inner or outer ETH bit depending on
 * whether an outer ETH was already seen. The ethertype is then fed to
 * the L2 proto updater to pre-set IPv4/IPv6 bits.
 * NOTE: the spec copies and the mask copies must stay in the same
 * field order — the mask path advances idx independently.
 */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint16_t eth_type = 0;	/* big-endian, as supplied by rte_flow */
	uint32_t inner_flag = 0;

	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	if (eth_spec) {
		size = sizeof(eth_spec->dst.addr_bytes);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						eth_spec->dst.addr_bytes,
						size);
		size = sizeof(eth_spec->src.addr_bytes);
		field = ulp_rte_parser_fld_copy(field,
						eth_spec->src.addr_bytes,
						size);
		field = ulp_rte_parser_fld_copy(field,
						&eth_spec->type,
						sizeof(eth_spec->type));
		eth_type = eth_spec->type;
	}
	if (eth_mask) {
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
				       sizeof(eth_mask->dst.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
				       sizeof(eth_mask->src.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
				       sizeof(eth_mask->type));
	}
	/* Add number of vlan header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
	params->vlan_idx = params->field_idx;
	params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;

	/* Update the protocol hdr bitmap: second ETH item means inner. */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
662 
/*
 * Function to handle the parsing of RTE Flow item Vlan Header.
 *
 * Splits the TCI into priority and tag fields at params->vlan_idx,
 * then classifies the tag as outer-outer / outer-inner / inner-outer /
 * inner-inner based on which ETH headers have been seen and the
 * current vtag counts, updating the corresponding computed fields and
 * header bits. Up to two outer and two inner tags are supported.
 */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap	*hdr_bit;
	uint32_t idx = params->vlan_idx;
	uint16_t vlan_tag, priority;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;	/* big-endian inner ethertype */
	uint32_t inner_flag = 0;

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	if (vlan_spec) {
		/* Split the BE TCI into PCP (top 3 bits) and VID. */
		vlan_tag = ntohs(vlan_spec->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);

		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&priority,
						sizeof(priority));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_tag,
						sizeof(vlan_tag));
		field = ulp_rte_parser_fld_copy(field,
						&vlan_spec->inner_type,
						sizeof(vlan_spec->inner_type));
		eth_type = vlan_spec->inner_type;
	}

	if (vlan_mask) {
		vlan_tag = ntohs(vlan_mask->tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= 0xfff;

		/*
		 * the storage for priority and vlan tag is 2 bytes
		 * The mask of priority which is 3 bits if it is all 1's
		 * then make the rest bits 13 bits as 1's
		 * so that it is matched as exact match.
		 */
		/* NOTE(review): this compare is on the htons()'d value,
		 * so it matches 0x700 only on little-endian hosts —
		 * TODO confirm intended for big-endian targets.
		 */
		if (priority == ULP_VLAN_PRIORITY_MASK)
			priority |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag == ULP_VLAN_TAG_MASK)
			vlan_tag |= ~ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);

		/*
		 * The priority field is ignored since OVS is setting it as
		 * wild card match and it is not supported. This is a work
		 * around and shall be addressed in the future.
		 */
		idx += 1;

		ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
				       sizeof(vlan_tag));
		ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
				       sizeof(vlan_mask->inner_type));
	}
	/* Set the vlan index to new incremented value */
	params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* First tag after the outer ETH: outer-outer vlan. */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* Second tag on the outer ETH: outer-inner vlan. */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* First tag after the inner ETH: inner-outer vlan. */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* Second tag on the inner ETH: inner-inner vlan. */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
	return BNXT_TF_RC_SUCCESS;
}
795 
796 /* Function to handle the update of proto header based on field values */
797 static void
798 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
799 			     uint8_t proto, uint32_t in_flag)
800 {
801 	if (proto == IPPROTO_UDP) {
802 		if (in_flag) {
803 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
804 				       BNXT_ULP_HDR_BIT_I_UDP);
805 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
806 		} else {
807 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
808 				       BNXT_ULP_HDR_BIT_O_UDP);
809 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
810 		}
811 	} else if (proto == IPPROTO_TCP) {
812 		if (in_flag) {
813 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
814 				       BNXT_ULP_HDR_BIT_I_TCP);
815 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
816 		} else {
817 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
818 				       BNXT_ULP_HDR_BIT_O_TCP);
819 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
820 		}
821 	}
822 }
823 
/*
 * Function to handle the parsing of RTE Flow item IPV4 Header.
 *
 * Copies each IPv4 header field's spec and mask into consecutive
 * hdr_field slots starting at params->field_idx (at most two L3
 * headers per flow), sets the inner or outer IPv4 header bit, feeds
 * next_proto_id to the L4 pre-setter and bumps the L3 header count.
 * NOTE: the spec copies and the mask copies must stay in the same
 * field order — the mask path advances idx independently.
 */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint8_t proto = 0;	/* next_proto_id from the spec, if any */
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	if (ipv4_spec) {
		size = sizeof(ipv4_spec->hdr.version_ihl);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&ipv4_spec->hdr.version_ihl,
						size);
		size = sizeof(ipv4_spec->hdr.type_of_service);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.type_of_service,
						size);
		size = sizeof(ipv4_spec->hdr.total_length);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.total_length,
						size);
		size = sizeof(ipv4_spec->hdr.packet_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.packet_id,
						size);
		size = sizeof(ipv4_spec->hdr.fragment_offset);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.fragment_offset,
						size);
		size = sizeof(ipv4_spec->hdr.time_to_live);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.time_to_live,
						size);
		size = sizeof(ipv4_spec->hdr.next_proto_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.next_proto_id,
						size);
		proto = ipv4_spec->hdr.next_proto_id;
		size = sizeof(ipv4_spec->hdr.hdr_checksum);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.hdr_checksum,
						size);
		size = sizeof(ipv4_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.src_addr,
						size);
		size = sizeof(ipv4_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.dst_addr,
						size);
	}
	if (ipv4_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.version_ihl,
				       sizeof(ipv4_mask->hdr.version_ihl));
		/*
		 * The tos field is ignored since OVS is setting it as wild card
		 * match and it is not supported. This is a work around and
		 * shall be addressed in the future.
		 */
		idx += 1;

		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.total_length,
				       sizeof(ipv4_mask->hdr.total_length));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.packet_id,
				       sizeof(ipv4_mask->hdr.packet_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.fragment_offset,
				       sizeof(ipv4_mask->hdr.fragment_offset));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.time_to_live,
				       sizeof(ipv4_mask->hdr.time_to_live));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.next_proto_id,
				       sizeof(ipv4_mask->hdr.next_proto_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.hdr_checksum,
				       sizeof(ipv4_mask->hdr.hdr_checksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.src_addr,
				       sizeof(ipv4_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.dst_addr,
				       sizeof(ipv4_mask->hdr.dst_addr));
	}
	/* Add the number of ipv4 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;

	/* Set the ipv4 header bitmap and computed l3 header bitmaps:
	 * a prior outer L3 makes this one the inner header.
	 */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
948 
/*
 * Function to handle the parsing of RTE Flow item IPV6 Header.
 *
 * Copies the IPv6 spec/mask fields into params->hdr_field starting at
 * params->field_idx (spec copies advance a local field pointer, mask
 * copies advance the local idx), then bumps field_idx by
 * BNXT_ULP_PROTO_HDR_IPV6_NUM and updates the outer/inner L3 bitmaps.
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR on a third L3 header.
 */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6	*ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6	*ipv6_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t vtcf, vtcf_mask;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		size = sizeof(ipv6_spec->hdr.vtc_flow);

		/*
		 * The 32-bit vtc_flow word is split into three separate
		 * hdr_field entries: version, traffic class and flow label.
		 * Each extracted value is stored using the full vtc_flow
		 * size (the same 'size' is reused for all three copies).
		 */
		vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&vtcf,
						size);

		vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(field,
						&vtcf,
						size);

		vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(field,
						&vtcf,
						size);

		size = sizeof(ipv6_spec->hdr.payload_len);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.payload_len,
						size);
		size = sizeof(ipv6_spec->hdr.proto);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.proto,
						size);
		/* Remember next-protocol for the L4 bitmap update below */
		proto = ipv6_spec->hdr.proto;
		size = sizeof(ipv6_spec->hdr.hop_limits);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.hop_limits,
						size);
		size = sizeof(ipv6_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.src_addr,
						size);
		size = sizeof(ipv6_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.dst_addr,
						size);
	}
	if (ipv6_mask) {
		size = sizeof(ipv6_mask->hdr.vtc_flow);

		/* Masks mirror the spec copies above, in the same order */
		vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		vtcf_mask =
			BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);

		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.payload_len,
				       sizeof(ipv6_mask->hdr.payload_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.proto,
				       sizeof(ipv6_mask->hdr.proto));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.hop_limits,
				       sizeof(ipv6_mask->hdr.hop_limits));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.src_addr,
				       sizeof(ipv6_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.dst_addr,
				       sizeof(ipv6_mask->hdr.dst_addr));
	}
	/* add number of ipv6 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		/* An outer L3 already exists, so this is the inner L3 */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
1071 
1072 /* Function to handle the update of proto header based on field values */
1073 static void
1074 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1075 			     uint16_t dst_port)
1076 {
1077 	if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1078 		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1079 			       BNXT_ULP_HDR_BIT_T_VXLAN);
1080 }
1081 
/*
 * Function to handle the parsing of RTE Flow item UDP Header.
 *
 * Copies the UDP spec/mask fields into params->hdr_field starting at
 * params->field_idx, advances field_idx by BNXT_ULP_PROTO_HDR_UDP_NUM,
 * and updates the outer/inner L4 bitmaps.  For an outer UDP header the
 * destination port is also checked for VXLAN tunnel detection.
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR on a third L4 header.
 */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint16_t dst_port = 0;
	uint32_t cnt;

	/* validate there is no 3rd L4 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	if (udp_spec) {
		size = sizeof(udp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&udp_spec->hdr.src_port,
						size);
		size = sizeof(udp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dst_port,
						size);
		/* Saved (big-endian) for the VXLAN port check below */
		dst_port = udp_spec->hdr.dst_port;
		size = sizeof(udp_spec->hdr.dgram_len);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_len,
						size);
		size = sizeof(udp_spec->hdr.dgram_cksum);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_cksum,
						size);
	}
	if (udp_mask) {
		/* Masks mirror the spec copies above, in the same order */
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.src_port,
				       sizeof(udp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dst_port,
				       sizeof(udp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_len,
				       sizeof(udp_mask->hdr.dgram_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_cksum,
				       sizeof(udp_mask->hdr.dgram_cksum));
	}

	/* Add number of UDP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		/* An outer L4 already exists, so this is the inner L4 */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		/* Update the field protocol hdr bitmap */
		ulp_rte_l4_proto_type_update(params, dst_port);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1157 
/*
 * Function to handle the parsing of RTE Flow item TCP Header.
 *
 * Copies the TCP spec/mask fields into params->hdr_field starting at
 * params->field_idx, advances field_idx by BNXT_ULP_PROTO_HDR_TCP_NUM,
 * and updates the outer/inner L4 bitmaps.  Returns BNXT_TF_RC_SUCCESS,
 * or BNXT_TF_RC_ERROR on a third L4 header.
 */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t cnt;

	/* validate there is no 3rd L4 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	if (tcp_spec) {
		size = sizeof(tcp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&tcp_spec->hdr.src_port,
						size);
		size = sizeof(tcp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.dst_port,
						size);
		size = sizeof(tcp_spec->hdr.sent_seq);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.sent_seq,
						size);
		size = sizeof(tcp_spec->hdr.recv_ack);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.recv_ack,
						size);
		size = sizeof(tcp_spec->hdr.data_off);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.data_off,
						size);
		size = sizeof(tcp_spec->hdr.tcp_flags);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_flags,
						size);
		size = sizeof(tcp_spec->hdr.rx_win);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.rx_win,
						size);
		size = sizeof(tcp_spec->hdr.cksum);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.cksum,
						size);
		size = sizeof(tcp_spec->hdr.tcp_urp);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_urp,
						size);
	} else {
		/*
		 * NOTE(review): when there is no spec, idx is advanced past
		 * all the TCP field slots before the mask copies below, so a
		 * mask-only TCP item stores its masks beyond the TCP field
		 * range.  The sibling UDP/IPv4/IPv6 handlers do not do this
		 * — confirm this offset is intentional.
		 */
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	if (tcp_mask) {
		/* Masks mirror the spec copies above, in the same order */
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.src_port,
				       sizeof(tcp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.dst_port,
				       sizeof(tcp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.sent_seq,
				       sizeof(tcp_mask->hdr.sent_seq));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.recv_ack,
				       sizeof(tcp_mask->hdr.recv_ack));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.data_off,
				       sizeof(tcp_mask->hdr.data_off));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_flags,
				       sizeof(tcp_mask->hdr.tcp_flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.rx_win,
				       sizeof(tcp_mask->hdr.rx_win));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.cksum,
				       sizeof(tcp_mask->hdr.cksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_urp,
				       sizeof(tcp_mask->hdr.tcp_urp));
	}
	/* add number of TCP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		/* An outer L4 already exists, so this is the inner L4 */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1266 
/*
 * Function to handle the parsing of RTE Flow item Vxlan Header.
 *
 * Copies the VXLAN spec/mask fields (flags, rsvd0, vni, rsvd1) into
 * params->hdr_field starting at params->field_idx, advances field_idx
 * by BNXT_ULP_PROTO_HDR_VXLAN_NUM, and sets the VXLAN tunnel bit in the
 * header bitmap.  Always returns BNXT_TF_RC_SUCCESS.
 */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	if (vxlan_spec) {
		size = sizeof(vxlan_spec->flags);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&vxlan_spec->flags,
						size);
		size = sizeof(vxlan_spec->rsvd0);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->rsvd0,
						size);
		size = sizeof(vxlan_spec->vni);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->vni,
						size);
		size = sizeof(vxlan_spec->rsvd1);
		field = ulp_rte_parser_fld_copy(field,
						&vxlan_spec->rsvd1,
						size);
	}
	if (vxlan_mask) {
		/* Masks mirror the spec copies above, in the same order */
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->flags,
				       sizeof(vxlan_mask->flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->rsvd0,
				       sizeof(vxlan_mask->rsvd0));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->vni,
				       sizeof(vxlan_mask->vni));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vxlan_mask->rsvd1,
				       sizeof(vxlan_mask->rsvd1));
	}
	/* Add number of vxlan header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;

	/* Update the hdr_bitmap with vxlan */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	return BNXT_TF_RC_SUCCESS;
}
1322 
1323 /* Function to handle the parsing of RTE Flow item void Header */
1324 int32_t
1325 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1326 			 struct ulp_rte_parser_params *params __rte_unused)
1327 {
1328 	return BNXT_TF_RC_SUCCESS;
1329 }
1330 
1331 /* Function to handle the parsing of RTE Flow action void Header. */
1332 int32_t
1333 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1334 			 struct ulp_rte_parser_params *params __rte_unused)
1335 {
1336 	return BNXT_TF_RC_SUCCESS;
1337 }
1338 
1339 /* Function to handle the parsing of RTE Flow action Mark Header. */
1340 int32_t
1341 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1342 			 struct ulp_rte_parser_params *param)
1343 {
1344 	const struct rte_flow_action_mark *mark;
1345 	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1346 	uint32_t mark_id;
1347 
1348 	mark = action_item->conf;
1349 	if (mark) {
1350 		mark_id = tfp_cpu_to_be_32(mark->id);
1351 		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1352 		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1353 
1354 		/* Update the hdr_bitmap with vxlan */
1355 		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1356 		return BNXT_TF_RC_SUCCESS;
1357 	}
1358 	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1359 	return BNXT_TF_RC_ERROR;
1360 }
1361 
1362 /* Function to handle the parsing of RTE Flow action RSS Header. */
1363 int32_t
1364 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1365 			struct ulp_rte_parser_params *param)
1366 {
1367 	const struct rte_flow_action_rss *rss = action_item->conf;
1368 
1369 	if (rss) {
1370 		/* Update the hdr_bitmap with vxlan */
1371 		ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1372 		return BNXT_TF_RC_SUCCESS;
1373 	}
1374 	BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1375 	return BNXT_TF_RC_ERROR;
1376 }
1377 
/*
 * Function to handle the parsing of RTE Flow action vxlan_encap Header.
 *
 * Walks the encap definition item chain, which must be (VOIDs allowed
 * between items): ETH [VLAN [VLAN]] (IPV4|IPV6) UDP VXLAN.  Each layer
 * is copied into the corresponding encap slots of params->act_prop and
 * the vxlan-encap bit is set in the action bitmap.  Returns
 * BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR on any malformed definition.
 */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	uint8_t *buff;
	/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
	const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
				    0x00, 0x40, 0x11};
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* copy the outer L2 destination and source MACs */
	eth_spec = item->spec;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->dst.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);

	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->src.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
		ulp_encap_buffer_copy(buff,
				      item->spec,
				      sizeof(struct rte_flow_item_vlan));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* may have two vlan headers */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		/* Second tag is stored right after the first in the VTAG slot */
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
		       sizeof(struct rte_flow_item_vlan)],
		       item->spec,
		       sizeof(struct rte_flow_item_vlan));
		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}
	/* Update the vlan count and size of more than one */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4, IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* copy the ipv4 details */
		if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
			/* ver/hlen/tos not given: use the default header */
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv4_hdr,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		} else {
			const uint8_t *tmp_buff;

			/* id/proto bytes first, then ver/hlen/tos */
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
			     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
			ulp_encap_buffer_copy(buff,
					      &ipv4_spec->hdr.version_ihl,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
		}
		/* destination address follows the header bytes above */
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
		    BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
		    BNXT_ULP_ENCAP_IPV4_ID_PROTO];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
				      BNXT_ULP_ENCAP_IPV4_DEST_IP);

		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.src_addr,
				      BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv4 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
				    1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* copy the ipv6 details */
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
		       ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		 /* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		/* update the computed field to notify it is ipv6 header */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
				    1);

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	/* copy the udp details */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	/* force the valid-VNI flag bit in the VXLAN header */
	vxlan_spec.flags = 0x08;
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
			      (const uint8_t *)&vxlan_spec,
			      vxlan_size);
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	/* update the act_bitmap with vxlan encap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}
1585 
1586 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1587 int32_t
1588 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1589 				__rte_unused,
1590 				struct ulp_rte_parser_params *params)
1591 {
1592 	/* update the hdr_bitmap with vxlan */
1593 	ULP_BITMAP_SET(params->act_bitmap.bits,
1594 		       BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1595 	return BNXT_TF_RC_SUCCESS;
1596 }
1597 
1598 /* Function to handle the parsing of RTE Flow action drop Header. */
1599 int32_t
1600 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1601 			 struct ulp_rte_parser_params *params)
1602 {
1603 	/* Update the hdr_bitmap with drop */
1604 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1605 	return BNXT_TF_RC_SUCCESS;
1606 }
1607 
1608 /* Function to handle the parsing of RTE Flow action count. */
1609 int32_t
1610 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1611 			  struct ulp_rte_parser_params *params)
1612 
1613 {
1614 	const struct rte_flow_action_count *act_count;
1615 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
1616 
1617 	act_count = action_item->conf;
1618 	if (act_count) {
1619 		if (act_count->shared) {
1620 			BNXT_TF_DBG(ERR,
1621 				    "Parse Error:Shared count not supported\n");
1622 			return BNXT_TF_RC_PARSE_ERR;
1623 		}
1624 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1625 		       &act_count->id,
1626 		       BNXT_ULP_ACT_PROP_SZ_COUNT);
1627 	}
1628 
1629 	/* Update the hdr_bitmap with count */
1630 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1631 	return BNXT_TF_RC_SUCCESS;
1632 }
1633 
1634 /* Function to handle the parsing of action ports. */
1635 static int32_t
1636 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1637 			    uint32_t ifindex)
1638 {
1639 	enum bnxt_ulp_direction_type dir;
1640 	uint16_t pid_s;
1641 	uint32_t pid;
1642 	struct ulp_rte_act_prop *act = &param->act_prop;
1643 	enum bnxt_ulp_intf_type port_type;
1644 	uint32_t vnic_type;
1645 
1646 	/* Get the direction */
1647 	dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1648 	if (dir == BNXT_ULP_DIR_EGRESS) {
1649 		/* For egress direction, fill vport */
1650 		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1651 			return BNXT_TF_RC_ERROR;
1652 
1653 		pid = pid_s;
1654 		pid = rte_cpu_to_be_32(pid);
1655 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1656 		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1657 	} else {
1658 		/* For ingress direction, fill vnic */
1659 		port_type = ULP_COMP_FLD_IDX_RD(param,
1660 						BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1661 		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1662 			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1663 		else
1664 			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1665 
1666 		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1667 						 vnic_type, &pid_s))
1668 			return BNXT_TF_RC_ERROR;
1669 
1670 		pid = pid_s;
1671 		pid = rte_cpu_to_be_32(pid);
1672 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1673 		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1674 	}
1675 
1676 	/* Update the action port set bit */
1677 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1678 	return BNXT_TF_RC_SUCCESS;
1679 }
1680 
1681 /* Function to handle the parsing of RTE Flow action PF. */
1682 int32_t
1683 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1684 		       struct ulp_rte_parser_params *params)
1685 {
1686 	uint32_t port_id;
1687 	uint32_t ifindex;
1688 	enum bnxt_ulp_intf_type intf_type;
1689 
1690 	/* Get the port id of the current device */
1691 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1692 
1693 	/* Get the port db ifindex */
1694 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1695 					      &ifindex)) {
1696 		BNXT_TF_DBG(ERR, "Invalid port id\n");
1697 		return BNXT_TF_RC_ERROR;
1698 	}
1699 
1700 	/* Check the port is PF port */
1701 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1702 	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1703 		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1704 		return BNXT_TF_RC_ERROR;
1705 	}
1706 	/* Update the action properties */
1707 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1708 	return ulp_rte_parser_act_port_set(params, ifindex);
1709 }
1710 
1711 /* Function to handle the parsing of RTE Flow action VF. */
1712 int32_t
1713 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1714 		       struct ulp_rte_parser_params *params)
1715 {
1716 	const struct rte_flow_action_vf *vf_action;
1717 	uint32_t ifindex;
1718 	enum bnxt_ulp_intf_type intf_type;
1719 
1720 	vf_action = action_item->conf;
1721 	if (!vf_action) {
1722 		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1723 		return BNXT_TF_RC_PARSE_ERR;
1724 	}
1725 
1726 	if (vf_action->original) {
1727 		BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1728 		return BNXT_TF_RC_PARSE_ERR;
1729 	}
1730 
1731 	/* Check the port is VF port */
1732 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1733 						 &ifindex)) {
1734 		BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1735 		return BNXT_TF_RC_ERROR;
1736 	}
1737 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1738 	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1739 	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1740 		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1741 		return BNXT_TF_RC_ERROR;
1742 	}
1743 
1744 	/* Update the action properties */
1745 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1746 	return ulp_rte_parser_act_port_set(params, ifindex);
1747 }
1748 
1749 /* Function to handle the parsing of RTE Flow action port_id. */
1750 int32_t
1751 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1752 			    struct ulp_rte_parser_params *param)
1753 {
1754 	const struct rte_flow_action_port_id *port_id = act_item->conf;
1755 	uint32_t ifindex;
1756 	enum bnxt_ulp_intf_type intf_type;
1757 
1758 	if (!port_id) {
1759 		BNXT_TF_DBG(ERR,
1760 			    "ParseErr: Invalid Argument\n");
1761 		return BNXT_TF_RC_PARSE_ERR;
1762 	}
1763 	if (port_id->original) {
1764 		BNXT_TF_DBG(ERR,
1765 			    "ParseErr:Portid Original not supported\n");
1766 		return BNXT_TF_RC_PARSE_ERR;
1767 	}
1768 
1769 	/* Get the port db ifindex */
1770 	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1771 					      &ifindex)) {
1772 		BNXT_TF_DBG(ERR, "Invalid port id\n");
1773 		return BNXT_TF_RC_ERROR;
1774 	}
1775 
1776 	/* Get the intf type */
1777 	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1778 	if (!intf_type) {
1779 		BNXT_TF_DBG(ERR, "Invalid port type\n");
1780 		return BNXT_TF_RC_ERROR;
1781 	}
1782 
1783 	/* Set the action port */
1784 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1785 	return ulp_rte_parser_act_port_set(param, ifindex);
1786 }
1787 
1788 /* Function to handle the parsing of RTE Flow action phy_port. */
1789 int32_t
1790 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1791 			     struct ulp_rte_parser_params *prm)
1792 {
1793 	const struct rte_flow_action_phy_port *phy_port;
1794 	uint32_t pid;
1795 	int32_t rc;
1796 	uint16_t pid_s;
1797 	enum bnxt_ulp_direction_type dir;
1798 
1799 	phy_port = action_item->conf;
1800 	if (!phy_port) {
1801 		BNXT_TF_DBG(ERR,
1802 			    "ParseErr: Invalid Argument\n");
1803 		return BNXT_TF_RC_PARSE_ERR;
1804 	}
1805 
1806 	if (phy_port->original) {
1807 		BNXT_TF_DBG(ERR,
1808 			    "Parse Err:Port Original not supported\n");
1809 		return BNXT_TF_RC_PARSE_ERR;
1810 	}
1811 	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1812 	if (dir != BNXT_ULP_DIR_EGRESS) {
1813 		BNXT_TF_DBG(ERR,
1814 			    "Parse Err:Phy ports are valid only for egress\n");
1815 		return BNXT_TF_RC_PARSE_ERR;
1816 	}
1817 	/* Get the physical port details from port db */
1818 	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1819 					    &pid_s);
1820 	if (rc) {
1821 		BNXT_TF_DBG(ERR, "Failed to get port details\n");
1822 		return -EINVAL;
1823 	}
1824 
1825 	pid = pid_s;
1826 	pid = rte_cpu_to_be_32(pid);
1827 	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1828 	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1829 
1830 	/* Update the action port set bit */
1831 	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1832 	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1833 			    BNXT_ULP_INTF_TYPE_PHY_PORT);
1834 	return BNXT_TF_RC_SUCCESS;
1835 }
1836 
1837 /* Function to handle the parsing of RTE Flow action pop vlan. */
1838 int32_t
1839 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1840 				struct ulp_rte_parser_params *params)
1841 {
1842 	/* Update the act_bitmap with pop */
1843 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1844 	return BNXT_TF_RC_SUCCESS;
1845 }
1846 
1847 /* Function to handle the parsing of RTE Flow action push vlan. */
1848 int32_t
1849 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1850 				 struct ulp_rte_parser_params *params)
1851 {
1852 	const struct rte_flow_action_of_push_vlan *push_vlan;
1853 	uint16_t ethertype;
1854 	struct ulp_rte_act_prop *act = &params->act_prop;
1855 
1856 	push_vlan = action_item->conf;
1857 	if (push_vlan) {
1858 		ethertype = push_vlan->ethertype;
1859 		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1860 			BNXT_TF_DBG(ERR,
1861 				    "Parse Err: Ethertype not supported\n");
1862 			return BNXT_TF_RC_PARSE_ERR;
1863 		}
1864 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1865 		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1866 		/* Update the hdr_bitmap with push vlan */
1867 		ULP_BITMAP_SET(params->act_bitmap.bits,
1868 			       BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1869 		return BNXT_TF_RC_SUCCESS;
1870 	}
1871 	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1872 	return BNXT_TF_RC_ERROR;
1873 }
1874 
1875 /* Function to handle the parsing of RTE Flow action set vlan id. */
1876 int32_t
1877 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1878 				    struct ulp_rte_parser_params *params)
1879 {
1880 	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1881 	uint32_t vid;
1882 	struct ulp_rte_act_prop *act = &params->act_prop;
1883 
1884 	vlan_vid = action_item->conf;
1885 	if (vlan_vid && vlan_vid->vlan_vid) {
1886 		vid = vlan_vid->vlan_vid;
1887 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1888 		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1889 		/* Update the hdr_bitmap with vlan vid */
1890 		ULP_BITMAP_SET(params->act_bitmap.bits,
1891 			       BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1892 		return BNXT_TF_RC_SUCCESS;
1893 	}
1894 	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1895 	return BNXT_TF_RC_ERROR;
1896 }
1897 
1898 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1899 int32_t
1900 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1901 				    struct ulp_rte_parser_params *params)
1902 {
1903 	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1904 	uint8_t pcp;
1905 	struct ulp_rte_act_prop *act = &params->act_prop;
1906 
1907 	vlan_pcp = action_item->conf;
1908 	if (vlan_pcp) {
1909 		pcp = vlan_pcp->vlan_pcp;
1910 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1911 		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1912 		/* Update the hdr_bitmap with vlan vid */
1913 		ULP_BITMAP_SET(params->act_bitmap.bits,
1914 			       BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1915 		return BNXT_TF_RC_SUCCESS;
1916 	}
1917 	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1918 	return BNXT_TF_RC_ERROR;
1919 }
1920 
1921 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
1922 int32_t
1923 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
1924 				 struct ulp_rte_parser_params *params)
1925 {
1926 	const struct rte_flow_action_set_ipv4 *set_ipv4;
1927 	struct ulp_rte_act_prop *act = &params->act_prop;
1928 
1929 	set_ipv4 = action_item->conf;
1930 	if (set_ipv4) {
1931 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
1932 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
1933 		/* Update the hdr_bitmap with set ipv4 src */
1934 		ULP_BITMAP_SET(params->act_bitmap.bits,
1935 			       BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
1936 		return BNXT_TF_RC_SUCCESS;
1937 	}
1938 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
1939 	return BNXT_TF_RC_ERROR;
1940 }
1941 
1942 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
1943 int32_t
1944 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
1945 				 struct ulp_rte_parser_params *params)
1946 {
1947 	const struct rte_flow_action_set_ipv4 *set_ipv4;
1948 	struct ulp_rte_act_prop *act = &params->act_prop;
1949 
1950 	set_ipv4 = action_item->conf;
1951 	if (set_ipv4) {
1952 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
1953 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
1954 		/* Update the hdr_bitmap with set ipv4 dst */
1955 		ULP_BITMAP_SET(params->act_bitmap.bits,
1956 			       BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
1957 		return BNXT_TF_RC_SUCCESS;
1958 	}
1959 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
1960 	return BNXT_TF_RC_ERROR;
1961 }
1962 
1963 /* Function to handle the parsing of RTE Flow action set tp src.*/
1964 int32_t
1965 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
1966 			       struct ulp_rte_parser_params *params)
1967 {
1968 	const struct rte_flow_action_set_tp *set_tp;
1969 	struct ulp_rte_act_prop *act = &params->act_prop;
1970 
1971 	set_tp = action_item->conf;
1972 	if (set_tp) {
1973 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
1974 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
1975 		/* Update the hdr_bitmap with set tp src */
1976 		ULP_BITMAP_SET(params->act_bitmap.bits,
1977 			       BNXT_ULP_ACTION_BIT_SET_TP_SRC);
1978 		return BNXT_TF_RC_SUCCESS;
1979 	}
1980 
1981 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
1982 	return BNXT_TF_RC_ERROR;
1983 }
1984 
1985 /* Function to handle the parsing of RTE Flow action set tp dst.*/
1986 int32_t
1987 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
1988 			       struct ulp_rte_parser_params *params)
1989 {
1990 	const struct rte_flow_action_set_tp *set_tp;
1991 	struct ulp_rte_act_prop *act = &params->act_prop;
1992 
1993 	set_tp = action_item->conf;
1994 	if (set_tp) {
1995 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
1996 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
1997 		/* Update the hdr_bitmap with set tp dst */
1998 		ULP_BITMAP_SET(params->act_bitmap.bits,
1999 			       BNXT_ULP_ACTION_BIT_SET_TP_DST);
2000 		return BNXT_TF_RC_SUCCESS;
2001 	}
2002 
2003 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2004 	return BNXT_TF_RC_ERROR;
2005 }
2006 
2007 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2008 int32_t
2009 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2010 			    struct ulp_rte_parser_params *params)
2011 {
2012 	/* Update the act_bitmap with dec ttl */
2013 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2014 	return BNXT_TF_RC_SUCCESS;
2015 }
2016