xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <rte_vxlan.h>
7 #include "bnxt.h"
8 #include "ulp_template_db_enum.h"
9 #include "ulp_template_struct.h"
10 #include "bnxt_ulp.h"
11 #include "bnxt_tf_common.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
15 #include "tfp.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
19 #include "ulp_tun.h"
20 
21 /* Local defines for the parsing functions */
22 #define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
23 #define ULP_VLAN_PRIORITY_MASK		0x700
24 #define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits*/
25 #define ULP_UDP_PORT_VXLAN		4789
26 
27 /* Utility function to skip the void items. */
28 static inline int32_t
29 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
30 {
31 	if (!*item)
32 		return 0;
33 	if (increment)
34 		(*item)++;
35 	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
36 		(*item)++;
37 	if (*item)
38 		return 1;
39 	return 0;
40 }
41 
42 /* Utility function to update the field_bitmap */
43 static void
44 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
45 				   uint32_t idx)
46 {
47 	struct ulp_rte_hdr_field *field;
48 
49 	field = &params->hdr_field[idx];
50 	if (ulp_bitmap_notzero(field->mask, field->size)) {
51 		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
52 		/* Not exact match */
53 		if (!ulp_bitmap_is_ones(field->mask, field->size))
54 			ULP_BITMAP_SET(params->fld_bitmap.bits,
55 				       BNXT_ULP_MATCH_TYPE_BITMASK_WM);
56 	} else {
57 		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
58 	}
59 }
60 
61 /* Utility function to copy field spec items */
62 static struct ulp_rte_hdr_field *
63 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
64 			const void *buffer,
65 			uint32_t size)
66 {
67 	field->size = size;
68 	memcpy(field->spec, buffer, field->size);
69 	field++;
70 	return field;
71 }
72 
73 /* Utility function to copy field masks items */
74 static void
75 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
76 		       uint32_t *idx,
77 		       const void *buffer,
78 		       uint32_t size)
79 {
80 	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
81 
82 	memcpy(field->mask, buffer, size);
83 	ulp_rte_parser_field_bitmap_update(params, *idx);
84 	*idx = *idx + 1;
85 }
86 
87 /* Utility function to ignore field masks items */
88 static void
89 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
90 			 uint32_t *idx,
91 			 const void *buffer __rte_unused,
92 			 uint32_t size __rte_unused)
93 {
94 	*idx = *idx + 1;
95 }
96 
97 /*
98  * Function to handle the parsing of RTE Flows and placing
99  * the RTE flow items into the ulp structures.
100  */
101 int32_t
102 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
103 			      struct ulp_rte_parser_params *params)
104 {
105 	const struct rte_flow_item *item = pattern;
106 	struct bnxt_ulp_rte_hdr_info *hdr_info;
107 
108 	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
109 
110 	/* Set the computed flags for no vlan tags before parsing */
111 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
112 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
113 
114 	/* Parse all the items in the pattern */
115 	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
116 		/* get the header information from the flow_hdr_info table */
117 		hdr_info = &ulp_hdr_info[item->type];
118 		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
119 			BNXT_TF_DBG(ERR,
120 				    "Truflow parser does not support type %d\n",
121 				    item->type);
122 			return BNXT_TF_RC_PARSE_ERR;
123 		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
124 			/* call the registered callback handler */
125 			if (hdr_info->proto_hdr_func) {
126 				if (hdr_info->proto_hdr_func(item, params) !=
127 				    BNXT_TF_RC_SUCCESS) {
128 					return BNXT_TF_RC_ERROR;
129 				}
130 			}
131 		}
132 		item++;
133 	}
134 	/* update the implied SVIF */
135 	return ulp_rte_parser_implicit_match_port_process(params);
136 }
137 
138 /*
139  * Function to handle the parsing of RTE Flows and placing
140  * the RTE flow actions into the ulp structures.
141  */
142 int32_t
143 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
144 			      struct ulp_rte_parser_params *params)
145 {
146 	const struct rte_flow_action *action_item = actions;
147 	struct bnxt_ulp_rte_act_info *hdr_info;
148 
149 	/* Parse all the items in the pattern */
150 	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
151 		/* get the header information from the flow_hdr_info table */
152 		hdr_info = &ulp_act_info[action_item->type];
153 		if (hdr_info->act_type ==
154 		    BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
155 			BNXT_TF_DBG(ERR,
156 				    "Truflow parser does not support act %u\n",
157 				    action_item->type);
158 			return BNXT_TF_RC_ERROR;
159 		} else if (hdr_info->act_type ==
160 		    BNXT_ULP_ACT_TYPE_SUPPORTED) {
161 			/* call the registered callback handler */
162 			if (hdr_info->proto_act_func) {
163 				if (hdr_info->proto_act_func(action_item,
164 							     params) !=
165 				    BNXT_TF_RC_SUCCESS) {
166 					return BNXT_TF_RC_ERROR;
167 				}
168 			}
169 		}
170 		action_item++;
171 	}
172 	/* update the implied port details */
173 	ulp_rte_parser_implicit_act_port_process(params);
174 	return BNXT_TF_RC_SUCCESS;
175 }
176 
177 /*
178  * Function to handle the post processing of the computed
179  * fields for the interface.
180  */
181 static void
182 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
183 {
184 	uint32_t ifindex;
185 	uint16_t port_id, parif;
186 	uint32_t mtype;
187 	enum bnxt_ulp_direction_type dir;
188 
189 	/* get the direction details */
190 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
191 
192 	/* read the port id details */
193 	port_id = ULP_COMP_FLD_IDX_RD(params,
194 				      BNXT_ULP_CF_IDX_INCOMING_IF);
195 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
196 					      port_id,
197 					      &ifindex)) {
198 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
199 		return;
200 	}
201 
202 	if (dir == BNXT_ULP_DIR_INGRESS) {
203 		/* Set port PARIF */
204 		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
205 					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
206 			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
207 			return;
208 		}
209 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
210 				    parif);
211 	} else {
212 		/* Get the match port type */
213 		mtype = ULP_COMP_FLD_IDX_RD(params,
214 					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
215 		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
216 			ULP_COMP_FLD_IDX_WR(params,
217 					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
218 					    1);
219 			/* Set VF func PARIF */
220 			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
221 						  BNXT_ULP_VF_FUNC_PARIF,
222 						  &parif)) {
223 				BNXT_TF_DBG(ERR,
224 					    "ParseErr:ifindex is not valid\n");
225 				return;
226 			}
227 			ULP_COMP_FLD_IDX_WR(params,
228 					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
229 					    parif);
230 
231 			/* populate the loopback parif */
232 			ULP_COMP_FLD_IDX_WR(params,
233 					    BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
234 					    BNXT_ULP_SYM_VF_FUNC_PARIF);
235 
236 		} else {
237 			/* Set DRV func PARIF */
238 			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
239 						  BNXT_ULP_DRV_FUNC_PARIF,
240 						  &parif)) {
241 				BNXT_TF_DBG(ERR,
242 					    "ParseErr:ifindex is not valid\n");
243 				return;
244 			}
245 			ULP_COMP_FLD_IDX_WR(params,
246 					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
247 					    parif);
248 		}
249 	}
250 }
251 
252 static int32_t
253 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
254 {
255 	enum bnxt_ulp_intf_type match_port_type, act_port_type;
256 	enum bnxt_ulp_direction_type dir;
257 	uint32_t act_port_set;
258 
259 	/* Get the computed details */
260 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
261 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
262 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263 	act_port_type = ULP_COMP_FLD_IDX_RD(params,
264 					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
265 	act_port_set = ULP_COMP_FLD_IDX_RD(params,
266 					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
267 
268 	/* set the flow direction in the proto and action header */
269 	if (dir == BNXT_ULP_DIR_EGRESS) {
270 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
271 			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
272 		ULP_BITMAP_SET(params->act_bitmap.bits,
273 			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
274 	}
275 
276 	/* calculate the VF to VF flag */
277 	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
278 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
279 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
280 
281 	/* Update the decrement ttl computational fields */
282 	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
283 			     BNXT_ULP_ACTION_BIT_DEC_TTL)) {
284 		/*
285 		 * Check that vxlan proto is included and vxlan decap
286 		 * action is not set then decrement tunnel ttl.
287 		 * Similarly add GRE and NVGRE in future.
288 		 */
289 		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
290 				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
291 		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
292 				      BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
293 			ULP_COMP_FLD_IDX_WR(params,
294 					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
295 		} else {
296 			ULP_COMP_FLD_IDX_WR(params,
297 					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
298 		}
299 	}
300 
301 	/* Merge the hdr_fp_bit into the proto header bit */
302 	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
303 
304 	/* Update the computed interface parameters */
305 	bnxt_ulp_comp_fld_intf_update(params);
306 
307 	/* TBD: Handle the flow rejection scenarios */
308 	return 0;
309 }
310 
311 /*
312  * Function to handle the post processing of the parsing details
313  */
314 int32_t
315 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
316 {
317 	ulp_post_process_normal_flow(params);
318 	return ulp_post_process_tun_flow(params);
319 }
320 
321 /*
322  * Function to compute the flow direction based on the match port details
323  */
324 static void
325 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
326 {
327 	enum bnxt_ulp_intf_type match_port_type;
328 
329 	/* Get the match port type */
330 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
331 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
332 
333 	/* If ingress flow and matchport is vf rep then dir is egress*/
334 	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
335 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
336 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
337 				    BNXT_ULP_DIR_EGRESS);
338 	} else {
339 		/* Assign the input direction */
340 		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
341 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
342 					    BNXT_ULP_DIR_INGRESS);
343 		else
344 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
345 					    BNXT_ULP_DIR_EGRESS);
346 	}
347 }
348 
349 /* Function to handle the parsing of RTE Flow item PF Header. */
350 static int32_t
351 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
352 			uint32_t ifindex,
353 			uint16_t mask)
354 {
355 	uint16_t svif;
356 	enum bnxt_ulp_direction_type dir;
357 	struct ulp_rte_hdr_field *hdr_field;
358 	enum bnxt_ulp_svif_type svif_type;
359 	enum bnxt_ulp_intf_type port_type;
360 
361 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
362 	    BNXT_ULP_INVALID_SVIF_VAL) {
363 		BNXT_TF_DBG(ERR,
364 			    "SVIF already set,multiple source not support'd\n");
365 		return BNXT_TF_RC_ERROR;
366 	}
367 
368 	/* Get port type details */
369 	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
370 	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
371 		BNXT_TF_DBG(ERR, "Invalid port type\n");
372 		return BNXT_TF_RC_ERROR;
373 	}
374 
375 	/* Update the match port type */
376 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
377 
378 	/* compute the direction */
379 	bnxt_ulp_rte_parser_direction_compute(params);
380 
381 	/* Get the computed direction */
382 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
383 	if (dir == BNXT_ULP_DIR_INGRESS) {
384 		svif_type = BNXT_ULP_PHY_PORT_SVIF;
385 	} else {
386 		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
387 			svif_type = BNXT_ULP_VF_FUNC_SVIF;
388 		else
389 			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
390 	}
391 	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
392 			     &svif);
393 	svif = rte_cpu_to_be_16(svif);
394 	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
395 	memcpy(hdr_field->spec, &svif, sizeof(svif));
396 	memcpy(hdr_field->mask, &mask, sizeof(mask));
397 	hdr_field->size = sizeof(svif);
398 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
399 			    rte_be_to_cpu_16(svif));
400 	return BNXT_TF_RC_SUCCESS;
401 }
402 
403 /* Function to handle the parsing of the RTE port id */
404 int32_t
405 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
406 {
407 	uint16_t port_id = 0;
408 	uint16_t svif_mask = 0xFFFF;
409 	uint32_t ifindex;
410 	int32_t rc = BNXT_TF_RC_ERROR;
411 
412 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
413 	    BNXT_ULP_INVALID_SVIF_VAL)
414 		return BNXT_TF_RC_SUCCESS;
415 
416 	/* SVIF not set. So get the port id */
417 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
418 
419 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
420 					      port_id,
421 					      &ifindex)) {
422 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
423 		return rc;
424 	}
425 
426 	/* Update the SVIF details */
427 	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
428 	return rc;
429 }
430 
431 /* Function to handle the implicit action port id */
432 int32_t
433 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
434 {
435 	struct rte_flow_action action_item = {0};
436 	struct rte_flow_action_port_id port_id = {0};
437 
438 	/* Read the action port set bit */
439 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
440 		/* Already set, so just exit */
441 		return BNXT_TF_RC_SUCCESS;
442 	}
443 	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
444 	action_item.conf = &port_id;
445 
446 	/* Update the action port based on incoming port */
447 	ulp_rte_port_id_act_handler(&action_item, params);
448 
449 	/* Reset the action port set bit */
450 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
451 	return BNXT_TF_RC_SUCCESS;
452 }
453 
454 /* Function to handle the parsing of RTE Flow item PF Header. */
455 int32_t
456 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
457 		       struct ulp_rte_parser_params *params)
458 {
459 	uint16_t port_id = 0;
460 	uint16_t svif_mask = 0xFFFF;
461 	uint32_t ifindex;
462 
463 	/* Get the implicit port id */
464 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
465 
466 	/* perform the conversion from dpdk port to bnxt ifindex */
467 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
468 					      port_id,
469 					      &ifindex)) {
470 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
471 		return BNXT_TF_RC_ERROR;
472 	}
473 
474 	/* Update the SVIF details */
475 	return  ulp_rte_parser_svif_set(params, ifindex, svif_mask);
476 }
477 
478 /* Function to handle the parsing of RTE Flow item VF Header. */
479 int32_t
480 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
481 		       struct ulp_rte_parser_params *params)
482 {
483 	const struct rte_flow_item_vf *vf_spec = item->spec;
484 	const struct rte_flow_item_vf *vf_mask = item->mask;
485 	uint16_t mask = 0;
486 	uint32_t ifindex;
487 	int32_t rc = BNXT_TF_RC_PARSE_ERR;
488 
489 	/* Get VF rte_flow_item for Port details */
490 	if (!vf_spec) {
491 		BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
492 		return rc;
493 	}
494 	if (!vf_mask) {
495 		BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
496 		return rc;
497 	}
498 	mask = vf_mask->id;
499 
500 	/* perform the conversion from VF Func id to bnxt ifindex */
501 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
502 						 vf_spec->id,
503 						 &ifindex)) {
504 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
505 		return rc;
506 	}
507 	/* Update the SVIF details */
508 	return ulp_rte_parser_svif_set(params, ifindex, mask);
509 }
510 
511 /* Function to handle the parsing of RTE Flow item port id  Header. */
512 int32_t
513 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
514 			    struct ulp_rte_parser_params *params)
515 {
516 	const struct rte_flow_item_port_id *port_spec = item->spec;
517 	const struct rte_flow_item_port_id *port_mask = item->mask;
518 	uint16_t mask = 0;
519 	int32_t rc = BNXT_TF_RC_PARSE_ERR;
520 	uint32_t ifindex;
521 
522 	if (!port_spec) {
523 		BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
524 		return rc;
525 	}
526 	if (!port_mask) {
527 		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
528 		return rc;
529 	}
530 	mask = port_mask->id;
531 
532 	/* perform the conversion from dpdk port to bnxt ifindex */
533 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
534 					      port_spec->id,
535 					      &ifindex)) {
536 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
537 		return rc;
538 	}
539 	/* Update the SVIF details */
540 	return ulp_rte_parser_svif_set(params, ifindex, mask);
541 }
542 
543 /* Function to handle the parsing of RTE Flow item phy port Header. */
544 int32_t
545 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
546 			     struct ulp_rte_parser_params *params)
547 {
548 	const struct rte_flow_item_phy_port *port_spec = item->spec;
549 	const struct rte_flow_item_phy_port *port_mask = item->mask;
550 	uint16_t mask = 0;
551 	int32_t rc = BNXT_TF_RC_ERROR;
552 	uint16_t svif;
553 	enum bnxt_ulp_direction_type dir;
554 	struct ulp_rte_hdr_field *hdr_field;
555 
556 	/* Copy the rte_flow_item for phy port into hdr_field */
557 	if (!port_spec) {
558 		BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
559 		return rc;
560 	}
561 	if (!port_mask) {
562 		BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
563 		return rc;
564 	}
565 	mask = port_mask->index;
566 
567 	/* Update the match port type */
568 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
569 			    BNXT_ULP_INTF_TYPE_PHY_PORT);
570 
571 	/* Compute the Hw direction */
572 	bnxt_ulp_rte_parser_direction_compute(params);
573 
574 	/* Direction validation */
575 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
576 	if (dir == BNXT_ULP_DIR_EGRESS) {
577 		BNXT_TF_DBG(ERR,
578 			    "Parse Err:Phy ports are valid only for ingress\n");
579 		return BNXT_TF_RC_PARSE_ERR;
580 	}
581 
582 	/* Get the physical port details from port db */
583 	rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
584 					   &svif);
585 	if (rc) {
586 		BNXT_TF_DBG(ERR, "Failed to get port details\n");
587 		return BNXT_TF_RC_PARSE_ERR;
588 	}
589 
590 	/* Update the SVIF details */
591 	svif = rte_cpu_to_be_16(svif);
592 	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
593 	memcpy(hdr_field->spec, &svif, sizeof(svif));
594 	memcpy(hdr_field->mask, &mask, sizeof(mask));
595 	hdr_field->size = sizeof(svif);
596 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
597 			    rte_be_to_cpu_16(svif));
598 	return BNXT_TF_RC_SUCCESS;
599 }
600 
601 /* Function to handle the update of proto header based on field values */
602 static void
603 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
604 			     uint16_t type, uint32_t in_flag)
605 {
606 	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
607 		if (in_flag) {
608 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
609 				       BNXT_ULP_HDR_BIT_I_IPV4);
610 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
611 		} else {
612 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
613 				       BNXT_ULP_HDR_BIT_O_IPV4);
614 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
615 		}
616 	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
617 		if (in_flag) {
618 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
619 				       BNXT_ULP_HDR_BIT_I_IPV6);
620 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
621 		} else {
622 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
623 				       BNXT_ULP_HDR_BIT_O_IPV6);
624 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
625 		}
626 	}
627 }
628 
629 /* Internal Function to identify broadcast or multicast packets */
630 static int32_t
631 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
632 {
633 	if (rte_is_multicast_ether_addr(eth_addr) ||
634 	    rte_is_broadcast_ether_addr(eth_addr)) {
635 		BNXT_TF_DBG(DEBUG,
636 			    "No support for bcast or mcast addr offload\n");
637 		return 1;
638 	}
639 	return 0;
640 }
641 
642 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
643 int32_t
644 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
645 			struct ulp_rte_parser_params *params)
646 {
647 	const struct rte_flow_item_eth *eth_spec = item->spec;
648 	const struct rte_flow_item_eth *eth_mask = item->mask;
649 	struct ulp_rte_hdr_field *field;
650 	uint32_t idx = params->field_idx;
651 	uint32_t size;
652 	uint16_t eth_type = 0;
653 	uint32_t inner_flag = 0;
654 
655 	/*
656 	 * Copy the rte_flow_item for eth into hdr_field using ethernet
657 	 * header fields
658 	 */
659 	if (eth_spec) {
660 		size = sizeof(eth_spec->dst.addr_bytes);
661 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
662 						eth_spec->dst.addr_bytes,
663 						size);
664 		/* Todo: work around to avoid multicast and broadcast addr */
665 		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
666 			return BNXT_TF_RC_PARSE_ERR;
667 
668 		size = sizeof(eth_spec->src.addr_bytes);
669 		field = ulp_rte_parser_fld_copy(field,
670 						eth_spec->src.addr_bytes,
671 						size);
672 		/* Todo: work around to avoid multicast and broadcast addr */
673 		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
674 			return BNXT_TF_RC_PARSE_ERR;
675 
676 		field = ulp_rte_parser_fld_copy(field,
677 						&eth_spec->type,
678 						sizeof(eth_spec->type));
679 		eth_type = eth_spec->type;
680 	}
681 	if (eth_mask) {
682 		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
683 				       sizeof(eth_mask->dst.addr_bytes));
684 		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
685 				       sizeof(eth_mask->src.addr_bytes));
686 		ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
687 				       sizeof(eth_mask->type));
688 	}
689 	/* Add number of vlan header elements */
690 	params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
691 	params->vlan_idx = params->field_idx;
692 	params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
693 
694 	/* Update the protocol hdr bitmap */
695 	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
696 			     BNXT_ULP_HDR_BIT_O_ETH) ||
697 	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
698 			     BNXT_ULP_HDR_BIT_O_IPV4) ||
699 	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
700 			     BNXT_ULP_HDR_BIT_O_IPV6) ||
701 	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
702 			     BNXT_ULP_HDR_BIT_O_UDP) ||
703 	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
704 			     BNXT_ULP_HDR_BIT_O_TCP)) {
705 		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
706 		inner_flag = 1;
707 	} else {
708 		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
709 	}
710 	/* Update the field protocol hdr bitmap */
711 	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
712 
713 	return BNXT_TF_RC_SUCCESS;
714 }
715 
716 /* Function to handle the parsing of RTE Flow item Vlan Header. */
717 int32_t
718 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
719 			 struct ulp_rte_parser_params *params)
720 {
721 	const struct rte_flow_item_vlan *vlan_spec = item->spec;
722 	const struct rte_flow_item_vlan *vlan_mask = item->mask;
723 	struct ulp_rte_hdr_field *field;
724 	struct ulp_rte_hdr_bitmap	*hdr_bit;
725 	uint32_t idx = params->vlan_idx;
726 	uint16_t vlan_tag, priority;
727 	uint32_t outer_vtag_num;
728 	uint32_t inner_vtag_num;
729 	uint16_t eth_type = 0;
730 	uint32_t inner_flag = 0;
731 
732 	/*
733 	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
734 	 * header fields
735 	 */
736 	if (vlan_spec) {
737 		vlan_tag = ntohs(vlan_spec->tci);
738 		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
739 		vlan_tag &= ULP_VLAN_TAG_MASK;
740 		vlan_tag = htons(vlan_tag);
741 
742 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
743 						&priority,
744 						sizeof(priority));
745 		field = ulp_rte_parser_fld_copy(field,
746 						&vlan_tag,
747 						sizeof(vlan_tag));
748 		field = ulp_rte_parser_fld_copy(field,
749 						&vlan_spec->inner_type,
750 						sizeof(vlan_spec->inner_type));
751 		eth_type = vlan_spec->inner_type;
752 	}
753 
754 	if (vlan_mask) {
755 		vlan_tag = ntohs(vlan_mask->tci);
756 		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
757 		vlan_tag &= 0xfff;
758 
759 		/*
760 		 * the storage for priority and vlan tag is 2 bytes
761 		 * The mask of priority which is 3 bits if it is all 1's
762 		 * then make the rest bits 13 bits as 1's
763 		 * so that it is matched as exact match.
764 		 */
765 		if (priority == ULP_VLAN_PRIORITY_MASK)
766 			priority |= ~ULP_VLAN_PRIORITY_MASK;
767 		if (vlan_tag == ULP_VLAN_TAG_MASK)
768 			vlan_tag |= ~ULP_VLAN_TAG_MASK;
769 		vlan_tag = htons(vlan_tag);
770 
771 		/*
772 		 * The priority field is ignored since OVS is setting it as
773 		 * wild card match and it is not supported. This is a work
774 		 * around and shall be addressed in the future.
775 		 */
776 		ulp_rte_prsr_mask_ignore(params, &idx, &priority,
777 					 sizeof(priority));
778 
779 		ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
780 				       sizeof(vlan_tag));
781 		ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
782 				       sizeof(vlan_mask->inner_type));
783 	}
784 	/* Set the vlan index to new incremented value */
785 	params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
786 
787 	/* Get the outer tag and inner tag counts */
788 	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
789 					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
790 	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
791 					     BNXT_ULP_CF_IDX_I_VTAG_NUM);
792 
793 	/* Update the hdr_bitmap of the vlans */
794 	hdr_bit = &params->hdr_bitmap;
795 	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
796 	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
797 	    !outer_vtag_num) {
798 		/* Update the vlan tag num */
799 		outer_vtag_num++;
800 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
801 				    outer_vtag_num);
802 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
803 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
804 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
805 			       BNXT_ULP_HDR_BIT_OO_VLAN);
806 	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
807 		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
808 		   outer_vtag_num == 1) {
809 		/* update the vlan tag num */
810 		outer_vtag_num++;
811 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
812 				    outer_vtag_num);
813 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
814 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
815 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
816 			       BNXT_ULP_HDR_BIT_OI_VLAN);
817 	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
818 		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
819 		   !inner_vtag_num) {
820 		/* update the vlan tag num */
821 		inner_vtag_num++;
822 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
823 				    inner_vtag_num);
824 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
825 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
826 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
827 			       BNXT_ULP_HDR_BIT_IO_VLAN);
828 		inner_flag = 1;
829 	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
830 		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
831 		   inner_vtag_num == 1) {
832 		/* update the vlan tag num */
833 		inner_vtag_num++;
834 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
835 				    inner_vtag_num);
836 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
837 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
838 		ULP_BITMAP_SET(params->hdr_bitmap.bits,
839 			       BNXT_ULP_HDR_BIT_II_VLAN);
840 		inner_flag = 1;
841 	} else {
842 		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
843 		return BNXT_TF_RC_ERROR;
844 	}
845 	/* Update the field protocol hdr bitmap */
846 	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
847 	return BNXT_TF_RC_SUCCESS;
848 }
849 
850 /* Function to handle the update of proto header based on field values */
851 static void
852 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
853 			     uint8_t proto, uint32_t in_flag)
854 {
855 	if (proto == IPPROTO_UDP) {
856 		if (in_flag) {
857 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
858 				       BNXT_ULP_HDR_BIT_I_UDP);
859 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
860 		} else {
861 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
862 				       BNXT_ULP_HDR_BIT_O_UDP);
863 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
864 		}
865 	} else if (proto == IPPROTO_TCP) {
866 		if (in_flag) {
867 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
868 				       BNXT_ULP_HDR_BIT_I_TCP);
869 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
870 		} else {
871 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
872 				       BNXT_ULP_HDR_BIT_O_TCP);
873 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
874 		}
875 	}
876 }
877 
878 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
879 int32_t
880 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
881 			 struct ulp_rte_parser_params *params)
882 {
883 	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
884 	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
885 	struct ulp_rte_hdr_field *field;
886 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
887 	uint32_t idx = params->field_idx;
888 	uint32_t size;
889 	uint8_t proto = 0;
890 	uint32_t inner_flag = 0;
891 	uint32_t cnt;
892 
893 	/* validate there are no 3rd L3 header */
894 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
895 	if (cnt == 2) {
896 		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
897 		return BNXT_TF_RC_ERROR;
898 	}
899 
900 	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
901 			      BNXT_ULP_HDR_BIT_O_ETH) &&
902 	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
903 			      BNXT_ULP_HDR_BIT_I_ETH)) {
904 		/* Since F2 flow does not include eth item, when parser detects
905 		 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
906 		 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
907 		 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
908 		 * This will allow the parser post processor to update the
909 		 * t_dmac in hdr_field[o_eth.dmac]
910 		 */
911 		idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
912 			BNXT_ULP_PROTO_HDR_VLAN_NUM);
913 		params->field_idx = idx;
914 	}
915 
916 	/*
917 	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
918 	 * header fields
919 	 */
920 	if (ipv4_spec) {
921 		size = sizeof(ipv4_spec->hdr.version_ihl);
922 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
923 						&ipv4_spec->hdr.version_ihl,
924 						size);
925 		size = sizeof(ipv4_spec->hdr.type_of_service);
926 		field = ulp_rte_parser_fld_copy(field,
927 						&ipv4_spec->hdr.type_of_service,
928 						size);
929 		size = sizeof(ipv4_spec->hdr.total_length);
930 		field = ulp_rte_parser_fld_copy(field,
931 						&ipv4_spec->hdr.total_length,
932 						size);
933 		size = sizeof(ipv4_spec->hdr.packet_id);
934 		field = ulp_rte_parser_fld_copy(field,
935 						&ipv4_spec->hdr.packet_id,
936 						size);
937 		size = sizeof(ipv4_spec->hdr.fragment_offset);
938 		field = ulp_rte_parser_fld_copy(field,
939 						&ipv4_spec->hdr.fragment_offset,
940 						size);
941 		size = sizeof(ipv4_spec->hdr.time_to_live);
942 		field = ulp_rte_parser_fld_copy(field,
943 						&ipv4_spec->hdr.time_to_live,
944 						size);
945 		size = sizeof(ipv4_spec->hdr.next_proto_id);
946 		field = ulp_rte_parser_fld_copy(field,
947 						&ipv4_spec->hdr.next_proto_id,
948 						size);
949 		proto = ipv4_spec->hdr.next_proto_id;
950 		size = sizeof(ipv4_spec->hdr.hdr_checksum);
951 		field = ulp_rte_parser_fld_copy(field,
952 						&ipv4_spec->hdr.hdr_checksum,
953 						size);
954 		size = sizeof(ipv4_spec->hdr.src_addr);
955 		field = ulp_rte_parser_fld_copy(field,
956 						&ipv4_spec->hdr.src_addr,
957 						size);
958 		size = sizeof(ipv4_spec->hdr.dst_addr);
959 		field = ulp_rte_parser_fld_copy(field,
960 						&ipv4_spec->hdr.dst_addr,
961 						size);
962 	}
963 	if (ipv4_mask) {
964 		ulp_rte_prsr_mask_copy(params, &idx,
965 				       &ipv4_mask->hdr.version_ihl,
966 				       sizeof(ipv4_mask->hdr.version_ihl));
967 		/*
968 		 * The tos field is ignored since OVS is setting it as wild card
969 		 * match and it is not supported. This is a work around and
970 		 * shall be addressed in the future.
971 		 */
972 		ulp_rte_prsr_mask_ignore(params, &idx,
973 					 &ipv4_mask->hdr.type_of_service,
974 					 sizeof(ipv4_mask->hdr.type_of_service)
975 					 );
976 
977 		ulp_rte_prsr_mask_copy(params, &idx,
978 				       &ipv4_mask->hdr.total_length,
979 				       sizeof(ipv4_mask->hdr.total_length));
980 		ulp_rte_prsr_mask_copy(params, &idx,
981 				       &ipv4_mask->hdr.packet_id,
982 				       sizeof(ipv4_mask->hdr.packet_id));
983 		ulp_rte_prsr_mask_copy(params, &idx,
984 				       &ipv4_mask->hdr.fragment_offset,
985 				       sizeof(ipv4_mask->hdr.fragment_offset));
986 		ulp_rte_prsr_mask_copy(params, &idx,
987 				       &ipv4_mask->hdr.time_to_live,
988 				       sizeof(ipv4_mask->hdr.time_to_live));
989 		ulp_rte_prsr_mask_copy(params, &idx,
990 				       &ipv4_mask->hdr.next_proto_id,
991 				       sizeof(ipv4_mask->hdr.next_proto_id));
992 		ulp_rte_prsr_mask_copy(params, &idx,
993 				       &ipv4_mask->hdr.hdr_checksum,
994 				       sizeof(ipv4_mask->hdr.hdr_checksum));
995 		ulp_rte_prsr_mask_copy(params, &idx,
996 				       &ipv4_mask->hdr.src_addr,
997 				       sizeof(ipv4_mask->hdr.src_addr));
998 		ulp_rte_prsr_mask_copy(params, &idx,
999 				       &ipv4_mask->hdr.dst_addr,
1000 				       sizeof(ipv4_mask->hdr.dst_addr));
1001 	}
1002 	/* Add the number of ipv4 header elements */
1003 	params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1004 
1005 	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
1006 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1007 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1008 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1009 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1010 		inner_flag = 1;
1011 	} else {
1012 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1013 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1014 	}
1015 
1016 	/* Some of the PMD applications may set the protocol field
1017 	 * in the IPv4 spec but don't set the mask. So, consider
1018 	 * the mask in the proto value calculation.
1019 	 */
1020 	if (ipv4_mask)
1021 		proto &= ipv4_mask->hdr.next_proto_id;
1022 
1023 	/* Update the field protocol hdr bitmap */
1024 	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1025 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1026 	return BNXT_TF_RC_SUCCESS;
1027 }
1028 
1029 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1030 int32_t
1031 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1032 			 struct ulp_rte_parser_params *params)
1033 {
1034 	const struct rte_flow_item_ipv6	*ipv6_spec = item->spec;
1035 	const struct rte_flow_item_ipv6	*ipv6_mask = item->mask;
1036 	struct ulp_rte_hdr_field *field;
1037 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1038 	uint32_t idx = params->field_idx;
1039 	uint32_t size;
1040 	uint32_t vtcf, vtcf_mask;
1041 	uint8_t proto = 0;
1042 	uint32_t inner_flag = 0;
1043 	uint32_t cnt;
1044 
1045 	/* validate there are no 3rd L3 header */
1046 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1047 	if (cnt == 2) {
1048 		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1049 		return BNXT_TF_RC_ERROR;
1050 	}
1051 
1052 	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1053 			      BNXT_ULP_HDR_BIT_O_ETH) &&
1054 	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1055 			      BNXT_ULP_HDR_BIT_I_ETH)) {
1056 		/* Since F2 flow does not include eth item, when parser detects
1057 		 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1058 		 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
1059 		 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1060 		 * This will allow the parser post processor to update the
1061 		 * t_dmac in hdr_field[o_eth.dmac]
1062 		 */
1063 		idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1064 			BNXT_ULP_PROTO_HDR_VLAN_NUM);
1065 		params->field_idx = idx;
1066 	}
1067 
1068 	/*
1069 	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1070 	 * header fields
1071 	 */
1072 	if (ipv6_spec) {
1073 		size = sizeof(ipv6_spec->hdr.vtc_flow);
1074 
1075 		vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1076 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1077 						&vtcf,
1078 						size);
1079 
1080 		vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1081 		field = ulp_rte_parser_fld_copy(field,
1082 						&vtcf,
1083 						size);
1084 
1085 		vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1086 		field = ulp_rte_parser_fld_copy(field,
1087 						&vtcf,
1088 						size);
1089 
1090 		size = sizeof(ipv6_spec->hdr.payload_len);
1091 		field = ulp_rte_parser_fld_copy(field,
1092 						&ipv6_spec->hdr.payload_len,
1093 						size);
1094 		size = sizeof(ipv6_spec->hdr.proto);
1095 		field = ulp_rte_parser_fld_copy(field,
1096 						&ipv6_spec->hdr.proto,
1097 						size);
1098 		proto = ipv6_spec->hdr.proto;
1099 		size = sizeof(ipv6_spec->hdr.hop_limits);
1100 		field = ulp_rte_parser_fld_copy(field,
1101 						&ipv6_spec->hdr.hop_limits,
1102 						size);
1103 		size = sizeof(ipv6_spec->hdr.src_addr);
1104 		field = ulp_rte_parser_fld_copy(field,
1105 						&ipv6_spec->hdr.src_addr,
1106 						size);
1107 		size = sizeof(ipv6_spec->hdr.dst_addr);
1108 		field = ulp_rte_parser_fld_copy(field,
1109 						&ipv6_spec->hdr.dst_addr,
1110 						size);
1111 	}
1112 	if (ipv6_mask) {
1113 		size = sizeof(ipv6_mask->hdr.vtc_flow);
1114 
1115 		vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1116 		ulp_rte_prsr_mask_copy(params, &idx,
1117 				       &vtcf_mask,
1118 				       size);
1119 		/*
1120 		 * The TC and flow label field are ignored since OVS is
1121 		 * setting it for match and it is not supported.
1122 		 * This is a work around and
1123 		 * shall be addressed in the future.
1124 		 */
1125 		vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1126 		ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1127 		vtcf_mask =
1128 			BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1129 		ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1130 
1131 		ulp_rte_prsr_mask_copy(params, &idx,
1132 				       &ipv6_mask->hdr.payload_len,
1133 				       sizeof(ipv6_mask->hdr.payload_len));
1134 		ulp_rte_prsr_mask_copy(params, &idx,
1135 				       &ipv6_mask->hdr.proto,
1136 				       sizeof(ipv6_mask->hdr.proto));
1137 		ulp_rte_prsr_mask_copy(params, &idx,
1138 				       &ipv6_mask->hdr.hop_limits,
1139 				       sizeof(ipv6_mask->hdr.hop_limits));
1140 		ulp_rte_prsr_mask_copy(params, &idx,
1141 				       &ipv6_mask->hdr.src_addr,
1142 				       sizeof(ipv6_mask->hdr.src_addr));
1143 		ulp_rte_prsr_mask_copy(params, &idx,
1144 				       &ipv6_mask->hdr.dst_addr,
1145 				       sizeof(ipv6_mask->hdr.dst_addr));
1146 	}
1147 	/* add number of ipv6 header elements */
1148 	params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1149 
1150 	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
1151 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1152 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1153 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1154 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1155 		inner_flag = 1;
1156 	} else {
1157 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1158 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1159 	}
1160 
1161 	/* Some of the PMD applications may set the protocol field
1162 	 * in the IPv6 spec but don't set the mask. So, consider
1163 	 * the mask in proto value calculation.
1164 	 */
1165 	if (ipv6_mask)
1166 		proto &= ipv6_mask->hdr.proto;
1167 
1168 	/* Update the field protocol hdr bitmap */
1169 	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1170 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1171 
1172 	return BNXT_TF_RC_SUCCESS;
1173 }
1174 
1175 /* Function to handle the update of proto header based on field values */
1176 static void
1177 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1178 			     uint16_t dst_port)
1179 {
1180 	if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1181 		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1182 			       BNXT_ULP_HDR_BIT_T_VXLAN);
1183 		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1184 	}
1185 }
1186 
1187 /* Function to handle the parsing of RTE Flow item UDP Header. */
1188 int32_t
1189 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1190 			struct ulp_rte_parser_params *params)
1191 {
1192 	const struct rte_flow_item_udp *udp_spec = item->spec;
1193 	const struct rte_flow_item_udp *udp_mask = item->mask;
1194 	struct ulp_rte_hdr_field *field;
1195 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1196 	uint32_t idx = params->field_idx;
1197 	uint32_t size;
1198 	uint16_t dst_port = 0;
1199 	uint32_t cnt;
1200 
1201 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1202 	if (cnt == 2) {
1203 		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1204 		return BNXT_TF_RC_ERROR;
1205 	}
1206 
1207 	/*
1208 	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1209 	 * header fields
1210 	 */
1211 	if (udp_spec) {
1212 		size = sizeof(udp_spec->hdr.src_port);
1213 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1214 						&udp_spec->hdr.src_port,
1215 						size);
1216 
1217 		size = sizeof(udp_spec->hdr.dst_port);
1218 		field = ulp_rte_parser_fld_copy(field,
1219 						&udp_spec->hdr.dst_port,
1220 						size);
1221 		dst_port = udp_spec->hdr.dst_port;
1222 		size = sizeof(udp_spec->hdr.dgram_len);
1223 		field = ulp_rte_parser_fld_copy(field,
1224 						&udp_spec->hdr.dgram_len,
1225 						size);
1226 		size = sizeof(udp_spec->hdr.dgram_cksum);
1227 		field = ulp_rte_parser_fld_copy(field,
1228 						&udp_spec->hdr.dgram_cksum,
1229 						size);
1230 	}
1231 	if (udp_mask) {
1232 		ulp_rte_prsr_mask_copy(params, &idx,
1233 				       &udp_mask->hdr.src_port,
1234 				       sizeof(udp_mask->hdr.src_port));
1235 		ulp_rte_prsr_mask_copy(params, &idx,
1236 				       &udp_mask->hdr.dst_port,
1237 				       sizeof(udp_mask->hdr.dst_port));
1238 		ulp_rte_prsr_mask_copy(params, &idx,
1239 				       &udp_mask->hdr.dgram_len,
1240 				       sizeof(udp_mask->hdr.dgram_len));
1241 		ulp_rte_prsr_mask_copy(params, &idx,
1242 				       &udp_mask->hdr.dgram_cksum,
1243 				       sizeof(udp_mask->hdr.dgram_cksum));
1244 	}
1245 
1246 	/* Add number of UDP header elements */
1247 	params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1248 
1249 	/* Set the udp header bitmap and computed l4 header bitmaps */
1250 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1251 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1252 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1253 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1254 	} else {
1255 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1256 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1257 		/* Update the field protocol hdr bitmap */
1258 		ulp_rte_l4_proto_type_update(params, dst_port);
1259 	}
1260 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1261 	return BNXT_TF_RC_SUCCESS;
1262 }
1263 
1264 /* Function to handle the parsing of RTE Flow item TCP Header. */
1265 int32_t
1266 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1267 			struct ulp_rte_parser_params *params)
1268 {
1269 	const struct rte_flow_item_tcp *tcp_spec = item->spec;
1270 	const struct rte_flow_item_tcp *tcp_mask = item->mask;
1271 	struct ulp_rte_hdr_field *field;
1272 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1273 	uint32_t idx = params->field_idx;
1274 	uint32_t size;
1275 	uint32_t cnt;
1276 
1277 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1278 	if (cnt == 2) {
1279 		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1280 		return BNXT_TF_RC_ERROR;
1281 	}
1282 
1283 	/*
1284 	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1285 	 * header fields
1286 	 */
1287 	if (tcp_spec) {
1288 		size = sizeof(tcp_spec->hdr.src_port);
1289 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1290 						&tcp_spec->hdr.src_port,
1291 						size);
1292 		size = sizeof(tcp_spec->hdr.dst_port);
1293 		field = ulp_rte_parser_fld_copy(field,
1294 						&tcp_spec->hdr.dst_port,
1295 						size);
1296 		size = sizeof(tcp_spec->hdr.sent_seq);
1297 		field = ulp_rte_parser_fld_copy(field,
1298 						&tcp_spec->hdr.sent_seq,
1299 						size);
1300 		size = sizeof(tcp_spec->hdr.recv_ack);
1301 		field = ulp_rte_parser_fld_copy(field,
1302 						&tcp_spec->hdr.recv_ack,
1303 						size);
1304 		size = sizeof(tcp_spec->hdr.data_off);
1305 		field = ulp_rte_parser_fld_copy(field,
1306 						&tcp_spec->hdr.data_off,
1307 						size);
1308 		size = sizeof(tcp_spec->hdr.tcp_flags);
1309 		field = ulp_rte_parser_fld_copy(field,
1310 						&tcp_spec->hdr.tcp_flags,
1311 						size);
1312 		size = sizeof(tcp_spec->hdr.rx_win);
1313 		field = ulp_rte_parser_fld_copy(field,
1314 						&tcp_spec->hdr.rx_win,
1315 						size);
1316 		size = sizeof(tcp_spec->hdr.cksum);
1317 		field = ulp_rte_parser_fld_copy(field,
1318 						&tcp_spec->hdr.cksum,
1319 						size);
1320 		size = sizeof(tcp_spec->hdr.tcp_urp);
1321 		field = ulp_rte_parser_fld_copy(field,
1322 						&tcp_spec->hdr.tcp_urp,
1323 						size);
1324 	} else {
1325 		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1326 	}
1327 
1328 	if (tcp_mask) {
1329 		ulp_rte_prsr_mask_copy(params, &idx,
1330 				       &tcp_mask->hdr.src_port,
1331 				       sizeof(tcp_mask->hdr.src_port));
1332 		ulp_rte_prsr_mask_copy(params, &idx,
1333 				       &tcp_mask->hdr.dst_port,
1334 				       sizeof(tcp_mask->hdr.dst_port));
1335 		ulp_rte_prsr_mask_copy(params, &idx,
1336 				       &tcp_mask->hdr.sent_seq,
1337 				       sizeof(tcp_mask->hdr.sent_seq));
1338 		ulp_rte_prsr_mask_copy(params, &idx,
1339 				       &tcp_mask->hdr.recv_ack,
1340 				       sizeof(tcp_mask->hdr.recv_ack));
1341 		ulp_rte_prsr_mask_copy(params, &idx,
1342 				       &tcp_mask->hdr.data_off,
1343 				       sizeof(tcp_mask->hdr.data_off));
1344 		ulp_rte_prsr_mask_copy(params, &idx,
1345 				       &tcp_mask->hdr.tcp_flags,
1346 				       sizeof(tcp_mask->hdr.tcp_flags));
1347 		ulp_rte_prsr_mask_copy(params, &idx,
1348 				       &tcp_mask->hdr.rx_win,
1349 				       sizeof(tcp_mask->hdr.rx_win));
1350 		ulp_rte_prsr_mask_copy(params, &idx,
1351 				       &tcp_mask->hdr.cksum,
1352 				       sizeof(tcp_mask->hdr.cksum));
1353 		ulp_rte_prsr_mask_copy(params, &idx,
1354 				       &tcp_mask->hdr.tcp_urp,
1355 				       sizeof(tcp_mask->hdr.tcp_urp));
1356 	}
1357 	/* add number of TCP header elements */
1358 	params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1359 
1360 	/* Set the udp header bitmap and computed l4 header bitmaps */
1361 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1362 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1363 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1364 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1365 	} else {
1366 		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1367 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1368 	}
1369 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1370 	return BNXT_TF_RC_SUCCESS;
1371 }
1372 
1373 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1374 int32_t
1375 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1376 			  struct ulp_rte_parser_params *params)
1377 {
1378 	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1379 	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1380 	struct ulp_rte_hdr_field *field;
1381 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1382 	uint32_t idx = params->field_idx;
1383 	uint32_t size;
1384 
1385 	/*
1386 	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1387 	 * header fields
1388 	 */
1389 	if (vxlan_spec) {
1390 		size = sizeof(vxlan_spec->flags);
1391 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1392 						&vxlan_spec->flags,
1393 						size);
1394 		size = sizeof(vxlan_spec->rsvd0);
1395 		field = ulp_rte_parser_fld_copy(field,
1396 						&vxlan_spec->rsvd0,
1397 						size);
1398 		size = sizeof(vxlan_spec->vni);
1399 		field = ulp_rte_parser_fld_copy(field,
1400 						&vxlan_spec->vni,
1401 						size);
1402 		size = sizeof(vxlan_spec->rsvd1);
1403 		field = ulp_rte_parser_fld_copy(field,
1404 						&vxlan_spec->rsvd1,
1405 						size);
1406 	}
1407 	if (vxlan_mask) {
1408 		ulp_rte_prsr_mask_copy(params, &idx,
1409 				       &vxlan_mask->flags,
1410 				       sizeof(vxlan_mask->flags));
1411 		ulp_rte_prsr_mask_copy(params, &idx,
1412 				       &vxlan_mask->rsvd0,
1413 				       sizeof(vxlan_mask->rsvd0));
1414 		ulp_rte_prsr_mask_copy(params, &idx,
1415 				       &vxlan_mask->vni,
1416 				       sizeof(vxlan_mask->vni));
1417 		ulp_rte_prsr_mask_copy(params, &idx,
1418 				       &vxlan_mask->rsvd1,
1419 				       sizeof(vxlan_mask->rsvd1));
1420 	}
1421 	/* Add number of vxlan header elements */
1422 	params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1423 
1424 	/* Update the hdr_bitmap with vxlan */
1425 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1426 	return BNXT_TF_RC_SUCCESS;
1427 }
1428 
1429 /* Function to handle the parsing of RTE Flow item void Header */
1430 int32_t
1431 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1432 			 struct ulp_rte_parser_params *params __rte_unused)
1433 {
1434 	return BNXT_TF_RC_SUCCESS;
1435 }
1436 
1437 /* Function to handle the parsing of RTE Flow action void Header. */
1438 int32_t
1439 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1440 			 struct ulp_rte_parser_params *params __rte_unused)
1441 {
1442 	return BNXT_TF_RC_SUCCESS;
1443 }
1444 
1445 /* Function to handle the parsing of RTE Flow action Mark Header. */
1446 int32_t
1447 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1448 			 struct ulp_rte_parser_params *param)
1449 {
1450 	const struct rte_flow_action_mark *mark;
1451 	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1452 	uint32_t mark_id;
1453 
1454 	mark = action_item->conf;
1455 	if (mark) {
1456 		mark_id = tfp_cpu_to_be_32(mark->id);
1457 		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1458 		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1459 
1460 		/* Update the hdr_bitmap with vxlan */
1461 		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1462 		return BNXT_TF_RC_SUCCESS;
1463 	}
1464 	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1465 	return BNXT_TF_RC_ERROR;
1466 }
1467 
1468 /* Function to handle the parsing of RTE Flow action RSS Header. */
1469 int32_t
1470 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1471 			struct ulp_rte_parser_params *param)
1472 {
1473 	const struct rte_flow_action_rss *rss = action_item->conf;
1474 
1475 	if (rss) {
1476 		/* Update the hdr_bitmap with vxlan */
1477 		ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1478 		return BNXT_TF_RC_SUCCESS;
1479 	}
1480 	BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1481 	return BNXT_TF_RC_ERROR;
1482 }
1483 
1484 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1485 int32_t
1486 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1487 				struct ulp_rte_parser_params *params)
1488 {
1489 	const struct rte_flow_action_vxlan_encap *vxlan_encap;
1490 	const struct rte_flow_item *item;
1491 	const struct rte_flow_item_eth *eth_spec;
1492 	const struct rte_flow_item_ipv4 *ipv4_spec;
1493 	const struct rte_flow_item_ipv6 *ipv6_spec;
1494 	struct rte_flow_item_vxlan vxlan_spec;
1495 	uint32_t vlan_num = 0, vlan_size = 0;
1496 	uint32_t ip_size = 0, ip_type = 0;
1497 	uint32_t vxlan_size = 0;
1498 	uint8_t *buff;
1499 	/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1500 	const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1501 				    0x00, 0x40, 0x11};
1502 	/* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1503 	const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1504 				0x00, 0x11, 0xf6};
1505 	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1506 	struct ulp_rte_act_prop *ap = &params->act_prop;
1507 	const uint8_t *tmp_buff;
1508 
1509 	vxlan_encap = action_item->conf;
1510 	if (!vxlan_encap) {
1511 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1512 		return BNXT_TF_RC_ERROR;
1513 	}
1514 
1515 	item = vxlan_encap->definition;
1516 	if (!item) {
1517 		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1518 		return BNXT_TF_RC_ERROR;
1519 	}
1520 
1521 	if (!ulp_rte_item_skip_void(&item, 0))
1522 		return BNXT_TF_RC_ERROR;
1523 
1524 	/* must have ethernet header */
1525 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1526 		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1527 		return BNXT_TF_RC_ERROR;
1528 	}
1529 	eth_spec = item->spec;
1530 	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1531 	ulp_encap_buffer_copy(buff,
1532 			      eth_spec->dst.addr_bytes,
1533 			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1534 			      ULP_BUFFER_ALIGN_8_BYTE);
1535 
1536 	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1537 	ulp_encap_buffer_copy(buff,
1538 			      eth_spec->src.addr_bytes,
1539 			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1540 			      ULP_BUFFER_ALIGN_8_BYTE);
1541 
1542 	/* Goto the next item */
1543 	if (!ulp_rte_item_skip_void(&item, 1))
1544 		return BNXT_TF_RC_ERROR;
1545 
1546 	/* May have vlan header */
1547 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1548 		vlan_num++;
1549 		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1550 		ulp_encap_buffer_copy(buff,
1551 				      item->spec,
1552 				      sizeof(struct rte_vlan_hdr),
1553 				      ULP_BUFFER_ALIGN_8_BYTE);
1554 
1555 		if (!ulp_rte_item_skip_void(&item, 1))
1556 			return BNXT_TF_RC_ERROR;
1557 	}
1558 
1559 	/* may have two vlan headers */
1560 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1561 		vlan_num++;
1562 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1563 		       sizeof(struct rte_vlan_hdr)],
1564 		       item->spec,
1565 		       sizeof(struct rte_vlan_hdr));
1566 		if (!ulp_rte_item_skip_void(&item, 1))
1567 			return BNXT_TF_RC_ERROR;
1568 	}
1569 	/* Update the vlan count and size of more than one */
1570 	if (vlan_num) {
1571 		vlan_size = vlan_num * sizeof(struct rte_vlan_hdr);
1572 		vlan_num = tfp_cpu_to_be_32(vlan_num);
1573 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1574 		       &vlan_num,
1575 		       sizeof(uint32_t));
1576 		vlan_size = tfp_cpu_to_be_32(vlan_size);
1577 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1578 		       &vlan_size,
1579 		       sizeof(uint32_t));
1580 	}
1581 
1582 	/* L3 must be IPv4, IPv6 */
1583 	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1584 		ipv4_spec = item->spec;
1585 		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1586 
1587 		/* copy the ipv4 details */
1588 		if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1589 					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1590 			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1591 			ulp_encap_buffer_copy(buff,
1592 					      def_ipv4_hdr,
1593 					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1594 					      BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1595 					      ULP_BUFFER_ALIGN_8_BYTE);
1596 		} else {
1597 			/* Total length being ignored in the ip hdr. */
1598 			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1599 			tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1600 			ulp_encap_buffer_copy(buff,
1601 					      tmp_buff,
1602 					      BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1603 					      ULP_BUFFER_ALIGN_8_BYTE);
1604 			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1605 			     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1606 			ulp_encap_buffer_copy(buff,
1607 					      &ipv4_spec->hdr.version_ihl,
1608 					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1609 					      ULP_BUFFER_ALIGN_8_BYTE);
1610 		}
1611 
1612 		/* Update the dst ip address in ip encap buffer */
1613 		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1614 		    BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1615 		    BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1616 		ulp_encap_buffer_copy(buff,
1617 				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1618 				      sizeof(ipv4_spec->hdr.dst_addr),
1619 				      ULP_BUFFER_ALIGN_8_BYTE);
1620 
1621 		/* Update the src ip address */
1622 		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1623 			BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1624 			sizeof(ipv4_spec->hdr.src_addr)];
1625 		ulp_encap_buffer_copy(buff,
1626 				      (const uint8_t *)&ipv4_spec->hdr.src_addr,
1627 				      sizeof(ipv4_spec->hdr.src_addr),
1628 				      ULP_BUFFER_ALIGN_8_BYTE);
1629 
1630 		/* Update the ip size details */
1631 		ip_size = tfp_cpu_to_be_32(ip_size);
1632 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1633 		       &ip_size, sizeof(uint32_t));
1634 
1635 		/* update the ip type */
1636 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1637 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1638 		       &ip_type, sizeof(uint32_t));
1639 
1640 		/* update the computed field to notify it is ipv4 header */
1641 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1642 				    1);
1643 
1644 		if (!ulp_rte_item_skip_void(&item, 1))
1645 			return BNXT_TF_RC_ERROR;
1646 	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1647 		ipv6_spec = item->spec;
1648 		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1649 
1650 		/* copy the ipv6 details */
1651 		tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1652 		if (ulp_buffer_is_empty(tmp_buff,
1653 					BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1654 			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1655 			ulp_encap_buffer_copy(buff,
1656 					      def_ipv6_hdr,
1657 					      sizeof(def_ipv6_hdr),
1658 					      ULP_BUFFER_ALIGN_8_BYTE);
1659 		} else {
1660 			/* The payload length being ignored in the ip hdr. */
1661 			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1662 			tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1663 			ulp_encap_buffer_copy(buff,
1664 					      tmp_buff,
1665 					      BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1666 					      ULP_BUFFER_ALIGN_8_BYTE);
1667 			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1668 				BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1669 				BNXT_ULP_ENCAP_IPV6_DO];
1670 			tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1671 			ulp_encap_buffer_copy(buff,
1672 					      tmp_buff,
1673 					      BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1674 					      ULP_BUFFER_ALIGN_8_BYTE);
1675 		}
1676 		/* Update the dst ip address in ip encap buffer */
1677 		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1678 			sizeof(def_ipv6_hdr)];
1679 		ulp_encap_buffer_copy(buff,
1680 				      (const uint8_t *)ipv6_spec->hdr.dst_addr,
1681 				      sizeof(ipv6_spec->hdr.dst_addr),
1682 				      ULP_BUFFER_ALIGN_8_BYTE);
1683 
1684 		/* Update the src ip address */
1685 		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1686 		ulp_encap_buffer_copy(buff,
1687 				      (const uint8_t *)ipv6_spec->hdr.src_addr,
1688 				      sizeof(ipv6_spec->hdr.src_addr),
1689 				      ULP_BUFFER_ALIGN_16_BYTE);
1690 
1691 		/* Update the ip size details */
1692 		ip_size = tfp_cpu_to_be_32(ip_size);
1693 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1694 		       &ip_size, sizeof(uint32_t));
1695 
1696 		 /* update the ip type */
1697 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1698 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1699 		       &ip_type, sizeof(uint32_t));
1700 
1701 		/* update the computed field to notify it is ipv6 header */
1702 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1703 				    1);
1704 
1705 		if (!ulp_rte_item_skip_void(&item, 1))
1706 			return BNXT_TF_RC_ERROR;
1707 	} else {
1708 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1709 		return BNXT_TF_RC_ERROR;
1710 	}
1711 
1712 	/* L4 is UDP */
1713 	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1714 		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1715 		return BNXT_TF_RC_ERROR;
1716 	}
1717 	/* copy the udp details */
1718 	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1719 			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1720 			      ULP_BUFFER_ALIGN_8_BYTE);
1721 
1722 	if (!ulp_rte_item_skip_void(&item, 1))
1723 		return BNXT_TF_RC_ERROR;
1724 
1725 	/* Finally VXLAN */
1726 	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1727 		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1728 		return BNXT_TF_RC_ERROR;
1729 	}
1730 	vxlan_size = sizeof(struct rte_vxlan_hdr);
1731 	/* copy the vxlan details */
1732 	memcpy(&vxlan_spec, item->spec, vxlan_size);
1733 	vxlan_spec.flags = 0x08;
1734 	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1735 	if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1736 		ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1737 				      vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1738 	} else {
1739 		ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1740 				      vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1741 		ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1742 				      (const uint8_t *)&vxlan_spec.vni,
1743 				      vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1744 	}
1745 	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1746 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1747 	       &vxlan_size, sizeof(uint32_t));
1748 
1749 	/* update the hdr_bitmap with vxlan */
1750 	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1751 	return BNXT_TF_RC_SUCCESS;
1752 }
1753 
1754 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1755 int32_t
1756 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1757 				__rte_unused,
1758 				struct ulp_rte_parser_params *params)
1759 {
1760 	/* update the hdr_bitmap with vxlan */
1761 	ULP_BITMAP_SET(params->act_bitmap.bits,
1762 		       BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1763 	/* Update computational field with tunnel decap info */
1764 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1765 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1766 	return BNXT_TF_RC_SUCCESS;
1767 }
1768 
1769 /* Function to handle the parsing of RTE Flow action drop Header. */
1770 int32_t
1771 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1772 			 struct ulp_rte_parser_params *params)
1773 {
1774 	/* Update the hdr_bitmap with drop */
1775 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1776 	return BNXT_TF_RC_SUCCESS;
1777 }
1778 
1779 /* Function to handle the parsing of RTE Flow action count. */
1780 int32_t
1781 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1782 			  struct ulp_rte_parser_params *params)
1783 
1784 {
1785 	const struct rte_flow_action_count *act_count;
1786 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
1787 
1788 	act_count = action_item->conf;
1789 	if (act_count) {
1790 		if (act_count->shared) {
1791 			BNXT_TF_DBG(ERR,
1792 				    "Parse Error:Shared count not supported\n");
1793 			return BNXT_TF_RC_PARSE_ERR;
1794 		}
1795 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1796 		       &act_count->id,
1797 		       BNXT_ULP_ACT_PROP_SZ_COUNT);
1798 	}
1799 
1800 	/* Update the hdr_bitmap with count */
1801 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1802 	return BNXT_TF_RC_SUCCESS;
1803 }
1804 
1805 /* Function to handle the parsing of action ports. */
1806 static int32_t
1807 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1808 			    uint32_t ifindex)
1809 {
1810 	enum bnxt_ulp_direction_type dir;
1811 	uint16_t pid_s;
1812 	uint32_t pid;
1813 	struct ulp_rte_act_prop *act = &param->act_prop;
1814 	enum bnxt_ulp_intf_type port_type;
1815 	uint32_t vnic_type;
1816 
1817 	/* Get the direction */
1818 	dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1819 	if (dir == BNXT_ULP_DIR_EGRESS) {
1820 		/* For egress direction, fill vport */
1821 		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1822 			return BNXT_TF_RC_ERROR;
1823 
1824 		pid = pid_s;
1825 		pid = rte_cpu_to_be_32(pid);
1826 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1827 		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1828 	} else {
1829 		/* For ingress direction, fill vnic */
1830 		port_type = ULP_COMP_FLD_IDX_RD(param,
1831 						BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1832 		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1833 			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1834 		else
1835 			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1836 
1837 		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1838 						 vnic_type, &pid_s))
1839 			return BNXT_TF_RC_ERROR;
1840 
1841 		pid = pid_s;
1842 		pid = rte_cpu_to_be_32(pid);
1843 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1844 		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1845 	}
1846 
1847 	/* Update the action port set bit */
1848 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1849 	return BNXT_TF_RC_SUCCESS;
1850 }
1851 
1852 /* Function to handle the parsing of RTE Flow action PF. */
1853 int32_t
1854 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1855 		       struct ulp_rte_parser_params *params)
1856 {
1857 	uint32_t port_id;
1858 	uint32_t ifindex;
1859 	enum bnxt_ulp_intf_type intf_type;
1860 
1861 	/* Get the port id of the current device */
1862 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1863 
1864 	/* Get the port db ifindex */
1865 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1866 					      &ifindex)) {
1867 		BNXT_TF_DBG(ERR, "Invalid port id\n");
1868 		return BNXT_TF_RC_ERROR;
1869 	}
1870 
1871 	/* Check the port is PF port */
1872 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1873 	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1874 		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1875 		return BNXT_TF_RC_ERROR;
1876 	}
1877 	/* Update the action properties */
1878 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1879 	return ulp_rte_parser_act_port_set(params, ifindex);
1880 }
1881 
1882 /* Function to handle the parsing of RTE Flow action VF. */
1883 int32_t
1884 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1885 		       struct ulp_rte_parser_params *params)
1886 {
1887 	const struct rte_flow_action_vf *vf_action;
1888 	uint32_t ifindex;
1889 	enum bnxt_ulp_intf_type intf_type;
1890 
1891 	vf_action = action_item->conf;
1892 	if (!vf_action) {
1893 		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1894 		return BNXT_TF_RC_PARSE_ERR;
1895 	}
1896 
1897 	if (vf_action->original) {
1898 		BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1899 		return BNXT_TF_RC_PARSE_ERR;
1900 	}
1901 
1902 	/* Check the port is VF port */
1903 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1904 						 &ifindex)) {
1905 		BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1906 		return BNXT_TF_RC_ERROR;
1907 	}
1908 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1909 	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1910 	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1911 		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1912 		return BNXT_TF_RC_ERROR;
1913 	}
1914 
1915 	/* Update the action properties */
1916 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1917 	return ulp_rte_parser_act_port_set(params, ifindex);
1918 }
1919 
1920 /* Function to handle the parsing of RTE Flow action port_id. */
1921 int32_t
1922 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1923 			    struct ulp_rte_parser_params *param)
1924 {
1925 	const struct rte_flow_action_port_id *port_id = act_item->conf;
1926 	uint32_t ifindex;
1927 	enum bnxt_ulp_intf_type intf_type;
1928 
1929 	if (!port_id) {
1930 		BNXT_TF_DBG(ERR,
1931 			    "ParseErr: Invalid Argument\n");
1932 		return BNXT_TF_RC_PARSE_ERR;
1933 	}
1934 	if (port_id->original) {
1935 		BNXT_TF_DBG(ERR,
1936 			    "ParseErr:Portid Original not supported\n");
1937 		return BNXT_TF_RC_PARSE_ERR;
1938 	}
1939 
1940 	/* Get the port db ifindex */
1941 	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1942 					      &ifindex)) {
1943 		BNXT_TF_DBG(ERR, "Invalid port id\n");
1944 		return BNXT_TF_RC_ERROR;
1945 	}
1946 
1947 	/* Get the intf type */
1948 	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1949 	if (!intf_type) {
1950 		BNXT_TF_DBG(ERR, "Invalid port type\n");
1951 		return BNXT_TF_RC_ERROR;
1952 	}
1953 
1954 	/* Set the action port */
1955 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1956 	return ulp_rte_parser_act_port_set(param, ifindex);
1957 }
1958 
1959 /* Function to handle the parsing of RTE Flow action phy_port. */
1960 int32_t
1961 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1962 			     struct ulp_rte_parser_params *prm)
1963 {
1964 	const struct rte_flow_action_phy_port *phy_port;
1965 	uint32_t pid;
1966 	int32_t rc;
1967 	uint16_t pid_s;
1968 	enum bnxt_ulp_direction_type dir;
1969 
1970 	phy_port = action_item->conf;
1971 	if (!phy_port) {
1972 		BNXT_TF_DBG(ERR,
1973 			    "ParseErr: Invalid Argument\n");
1974 		return BNXT_TF_RC_PARSE_ERR;
1975 	}
1976 
1977 	if (phy_port->original) {
1978 		BNXT_TF_DBG(ERR,
1979 			    "Parse Err:Port Original not supported\n");
1980 		return BNXT_TF_RC_PARSE_ERR;
1981 	}
1982 	dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1983 	if (dir != BNXT_ULP_DIR_EGRESS) {
1984 		BNXT_TF_DBG(ERR,
1985 			    "Parse Err:Phy ports are valid only for egress\n");
1986 		return BNXT_TF_RC_PARSE_ERR;
1987 	}
1988 	/* Get the physical port details from port db */
1989 	rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1990 					    &pid_s);
1991 	if (rc) {
1992 		BNXT_TF_DBG(ERR, "Failed to get port details\n");
1993 		return -EINVAL;
1994 	}
1995 
1996 	pid = pid_s;
1997 	pid = rte_cpu_to_be_32(pid);
1998 	memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1999 	       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2000 
2001 	/* Update the action port set bit */
2002 	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2003 	ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2004 			    BNXT_ULP_INTF_TYPE_PHY_PORT);
2005 	return BNXT_TF_RC_SUCCESS;
2006 }
2007 
2008 /* Function to handle the parsing of RTE Flow action pop vlan. */
2009 int32_t
2010 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2011 				struct ulp_rte_parser_params *params)
2012 {
2013 	/* Update the act_bitmap with pop */
2014 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
2015 	return BNXT_TF_RC_SUCCESS;
2016 }
2017 
2018 /* Function to handle the parsing of RTE Flow action push vlan. */
2019 int32_t
2020 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2021 				 struct ulp_rte_parser_params *params)
2022 {
2023 	const struct rte_flow_action_of_push_vlan *push_vlan;
2024 	uint16_t ethertype;
2025 	struct ulp_rte_act_prop *act = &params->act_prop;
2026 
2027 	push_vlan = action_item->conf;
2028 	if (push_vlan) {
2029 		ethertype = push_vlan->ethertype;
2030 		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2031 			BNXT_TF_DBG(ERR,
2032 				    "Parse Err: Ethertype not supported\n");
2033 			return BNXT_TF_RC_PARSE_ERR;
2034 		}
2035 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2036 		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2037 		/* Update the hdr_bitmap with push vlan */
2038 		ULP_BITMAP_SET(params->act_bitmap.bits,
2039 			       BNXT_ULP_ACTION_BIT_PUSH_VLAN);
2040 		return BNXT_TF_RC_SUCCESS;
2041 	}
2042 	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2043 	return BNXT_TF_RC_ERROR;
2044 }
2045 
2046 /* Function to handle the parsing of RTE Flow action set vlan id. */
2047 int32_t
2048 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2049 				    struct ulp_rte_parser_params *params)
2050 {
2051 	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2052 	uint32_t vid;
2053 	struct ulp_rte_act_prop *act = &params->act_prop;
2054 
2055 	vlan_vid = action_item->conf;
2056 	if (vlan_vid && vlan_vid->vlan_vid) {
2057 		vid = vlan_vid->vlan_vid;
2058 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2059 		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2060 		/* Update the hdr_bitmap with vlan vid */
2061 		ULP_BITMAP_SET(params->act_bitmap.bits,
2062 			       BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
2063 		return BNXT_TF_RC_SUCCESS;
2064 	}
2065 	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2066 	return BNXT_TF_RC_ERROR;
2067 }
2068 
2069 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2070 int32_t
2071 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2072 				    struct ulp_rte_parser_params *params)
2073 {
2074 	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2075 	uint8_t pcp;
2076 	struct ulp_rte_act_prop *act = &params->act_prop;
2077 
2078 	vlan_pcp = action_item->conf;
2079 	if (vlan_pcp) {
2080 		pcp = vlan_pcp->vlan_pcp;
2081 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2082 		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2083 		/* Update the hdr_bitmap with vlan vid */
2084 		ULP_BITMAP_SET(params->act_bitmap.bits,
2085 			       BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
2086 		return BNXT_TF_RC_SUCCESS;
2087 	}
2088 	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2089 	return BNXT_TF_RC_ERROR;
2090 }
2091 
2092 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2093 int32_t
2094 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2095 				 struct ulp_rte_parser_params *params)
2096 {
2097 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2098 	struct ulp_rte_act_prop *act = &params->act_prop;
2099 
2100 	set_ipv4 = action_item->conf;
2101 	if (set_ipv4) {
2102 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2103 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2104 		/* Update the hdr_bitmap with set ipv4 src */
2105 		ULP_BITMAP_SET(params->act_bitmap.bits,
2106 			       BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
2107 		return BNXT_TF_RC_SUCCESS;
2108 	}
2109 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2110 	return BNXT_TF_RC_ERROR;
2111 }
2112 
2113 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2114 int32_t
2115 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2116 				 struct ulp_rte_parser_params *params)
2117 {
2118 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2119 	struct ulp_rte_act_prop *act = &params->act_prop;
2120 
2121 	set_ipv4 = action_item->conf;
2122 	if (set_ipv4) {
2123 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2124 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2125 		/* Update the hdr_bitmap with set ipv4 dst */
2126 		ULP_BITMAP_SET(params->act_bitmap.bits,
2127 			       BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
2128 		return BNXT_TF_RC_SUCCESS;
2129 	}
2130 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2131 	return BNXT_TF_RC_ERROR;
2132 }
2133 
2134 /* Function to handle the parsing of RTE Flow action set tp src.*/
2135 int32_t
2136 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2137 			       struct ulp_rte_parser_params *params)
2138 {
2139 	const struct rte_flow_action_set_tp *set_tp;
2140 	struct ulp_rte_act_prop *act = &params->act_prop;
2141 
2142 	set_tp = action_item->conf;
2143 	if (set_tp) {
2144 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2145 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2146 		/* Update the hdr_bitmap with set tp src */
2147 		ULP_BITMAP_SET(params->act_bitmap.bits,
2148 			       BNXT_ULP_ACTION_BIT_SET_TP_SRC);
2149 		return BNXT_TF_RC_SUCCESS;
2150 	}
2151 
2152 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2153 	return BNXT_TF_RC_ERROR;
2154 }
2155 
2156 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2157 int32_t
2158 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2159 			       struct ulp_rte_parser_params *params)
2160 {
2161 	const struct rte_flow_action_set_tp *set_tp;
2162 	struct ulp_rte_act_prop *act = &params->act_prop;
2163 
2164 	set_tp = action_item->conf;
2165 	if (set_tp) {
2166 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2167 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2168 		/* Update the hdr_bitmap with set tp dst */
2169 		ULP_BITMAP_SET(params->act_bitmap.bits,
2170 			       BNXT_ULP_ACTION_BIT_SET_TP_DST);
2171 		return BNXT_TF_RC_SUCCESS;
2172 	}
2173 
2174 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2175 	return BNXT_TF_RC_ERROR;
2176 }
2177 
2178 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2179 int32_t
2180 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2181 			    struct ulp_rte_parser_params *params)
2182 {
2183 	/* Update the act_bitmap with dec ttl */
2184 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2185 	return BNXT_TF_RC_SUCCESS;
2186 }
2187 
2188 /* Function to handle the parsing of RTE Flow action JUMP */
2189 int32_t
2190 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2191 			    struct ulp_rte_parser_params *params)
2192 {
2193 	/* Update the act_bitmap with dec ttl */
2194 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
2195 	return BNXT_TF_RC_SUCCESS;
2196 }
2197