xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c (revision d9e70b1d1d52ad8dbc29733419c58005f57671dc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2023 Broadcom
3  * All rights reserved.
4  */
5 
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
15 #include "tfp.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
19 #include "ulp_tun.h"
20 #include "ulp_template_db_tbl.h"
21 
22 /* Local defines for the parsing functions */
23 #define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
24 #define ULP_VLAN_PRIORITY_MASK		0x700
25 #define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits*/
26 #define ULP_UDP_PORT_VXLAN		4789
27 
28 /* Utility function to skip the void items. */
29 static inline int32_t
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
31 {
32 	if (!*item)
33 		return 0;
34 	if (increment)
35 		(*item)++;
36 	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
37 		(*item)++;
38 	if (*item)
39 		return 1;
40 	return 0;
41 }
42 
43 /* Utility function to copy field spec items */
44 static struct ulp_rte_hdr_field *
45 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
46 			const void *buffer,
47 			uint32_t size)
48 {
49 	field->size = size;
50 	memcpy(field->spec, buffer, field->size);
51 	field++;
52 	return field;
53 }
54 
55 /* Utility function to update the field_bitmap */
56 static void
57 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
58 				   uint32_t idx,
59 				   enum bnxt_ulp_prsr_action prsr_act)
60 {
61 	struct ulp_rte_hdr_field *field;
62 
63 	field = &params->hdr_field[idx];
64 	if (ulp_bitmap_notzero(field->mask, field->size)) {
65 		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
66 		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
67 			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
68 		/* Not exact match */
69 		if (!ulp_bitmap_is_ones(field->mask, field->size))
70 			ULP_COMP_FLD_IDX_WR(params,
71 					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
72 	} else {
73 		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
74 	}
75 }
76 
77 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
78 /* Utility function to copy field spec and masks items */
79 static void
80 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
81 		      uint32_t *idx,
82 		      uint32_t size,
83 		      const void *spec_buff,
84 		      const void *mask_buff,
85 		      enum bnxt_ulp_prsr_action prsr_act)
86 {
87 	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
88 
89 	/* update the field size */
90 	field->size = size;
91 
92 	/* copy the mask specifications only if mask is not null */
93 	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
94 		memcpy(field->mask, mask_buff, size);
95 		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
96 	}
97 
98 	/* copy the protocol specifications only if mask is not null*/
99 	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
100 		memcpy(field->spec, spec_buff, size);
101 
102 	/* Increment the index */
103 	*idx = *idx + 1;
104 }
105 
/*
 * Reserve 'size' consecutive hdr_field slots for the current protocol
 * header.  On success, *idx is set to the first reserved slot index and
 * params->field_idx is advanced past the reservation.
 *
 * Returns 0 on success, -EINVAL when the reservation would run past
 * BNXT_ULP_PROTO_HDR_MAX.
 */
static int32_t
ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
			       uint32_t *idx,
			       uint32_t size)
{
	/* Bounds check before handing out the slot range */
	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
		BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
		return -EINVAL;
	}
	*idx = params->field_idx;
	params->field_idx += size;
	return 0;
}
120 
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 *
 * Walks the pattern list until RTE_FLOW_ITEM_TYPE_END, looking each item
 * up either in the standard ulp_hdr_info table or, for BNXT
 * vendor-specific item types (>= BNXT_RTE_FLOW_ITEM_TYPE_END), in the
 * ulp_vendor_hdr_info table, and dispatching to the registered per-item
 * callback.  Finishes by applying the implicit source-port (SVIF) match.
 *
 * Returns BNXT_TF_RC_SUCCESS, BNXT_TF_RC_ERROR on a callback failure, or
 * BNXT_TF_RC_PARSE_ERR for item types the parser does not support.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	/* First hdr_field slots are reserved for the SVIF match */
	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->type >= (typeof(item->type))
		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
			/* Vendor-specific item range */
			if (item->type >=
			    (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
				goto hdr_parser_error;
			/* get the header information */
			hdr_info = &ulp_vendor_hdr_info[item->type -
				BNXT_RTE_FLOW_ITEM_TYPE_END];
		} else {
			/* Standard item range; anything newer is unknown */
			if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
				goto hdr_parser_error;
			hdr_info = &ulp_hdr_info[item->type];
		}
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			goto hdr_parser_error;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);

hdr_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
		    item->type);
	return BNXT_TF_RC_PARSE_ERR;
}
170 
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 *
 * Mirrors bnxt_ulp_rte_parser_hdr_parse() for the action list: walks
 * actions until RTE_FLOW_ACTION_TYPE_END, dispatching each action to its
 * registered callback from ulp_act_info (standard actions) or
 * ulp_vendor_act_info (BNXT vendor actions), then applies the implicit
 * destination-port action.
 *
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR on unsupported actions
 * or callback failure.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action *action_item = actions;
	struct bnxt_ulp_rte_act_info *hdr_info;

	/* Parse all the items in the pattern */
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		if (action_item->type >=
		    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
			/* Vendor-specific action range */
			if (action_item->type >=
			    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
				goto act_parser_error;
			/* get the header information from bnxt actinfo table */
			hdr_info = &ulp_vendor_act_info[action_item->type -
				BNXT_RTE_FLOW_ACTION_TYPE_END];
		} else {
			/* Standard action range; anything newer is unknown */
			if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
				goto act_parser_error;
			/* get the header information from the act info table */
			hdr_info = &ulp_act_info[action_item->type];
		}
		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
			goto act_parser_error;
		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_act_func) {
				if (hdr_info->proto_act_func(action_item,
							     params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		action_item++;
	}
	/* update the implied port details */
	ulp_rte_parser_implicit_act_port_process(params);
	return BNXT_TF_RC_SUCCESS;

act_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
		    action_item->type);
	return BNXT_TF_RC_ERROR;
}
221 
/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 *
 * Based on the computed flow direction and the match port type, looks up
 * the appropriate PARIF (physical-port, VF-function or driver-function)
 * from the port database and stores it in the computed fields.  Also
 * flags whether the match port is a VF representor or the PF.  Errors
 * are logged and the computed fields are simply left unset.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	} else {
		/* Egress: PARIF choice depends on the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
					    1);
		}
	}
}
295 
/*
 * Post-process a normal (non-tunnel-offload) flow after parsing.
 *
 * Derives additional computed fields from what the parser recorded:
 * direction bitmaps, the VF-to-VF flag, decrement-TTL classification
 * (tunnel vs non-tunnel), merges the fast-path header bits into the main
 * header bitmap, records the flow id and updates the interface-related
 * computed fields.  Always returns 0.
 */
static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * Check that vxlan proto is included and vxlan decap
		 * action is not set then decrement tunnel ttl.
		 * Similarly add GRE and NVGRE in future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}
357 
/*
 * Function to handle the post processing of the parsing details.
 * Thin public wrapper around ulp_post_process_normal_flow(); the return
 * value is intentionally ignored since that helper always succeeds.
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
}
366 
367 /*
368  * Function to compute the flow direction based on the match port details
369  */
370 static void
371 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
372 {
373 	enum bnxt_ulp_intf_type match_port_type;
374 
375 	/* Get the match port type */
376 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
377 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
378 
379 	/* If ingress flow and matchport is vf rep then dir is egress*/
380 	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
381 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
382 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
383 				    BNXT_ULP_DIR_EGRESS);
384 	} else {
385 		/* Assign the input direction */
386 		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
387 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
388 					    BNXT_ULP_DIR_INGRESS);
389 		else
390 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
391 					    BNXT_ULP_DIR_EGRESS);
392 	}
393 }
394 
/*
 * Record the source interface (SVIF) match for the flow.
 *
 * Resolves the port type for 'ifindex', computes the flow direction,
 * selects the appropriate SVIF flavor (physical-port, VF-function or
 * driver-function), and writes the big-endian SVIF spec/mask into the
 * reserved SVIF hdr_field slot.  Only a single source may be matched;
 * a second call for the same flow fails.
 *
 * item_dir may force a direction (PORT_REPRESENTOR/REPRESENTED_PORT
 * items) or be BNXT_ULP_DIR_INVALID to use the computed direction.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask,
			enum bnxt_ulp_direction_type item_dir)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	/* Reject a second source match on the same flow */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set,multiple source not support'd\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* Get the computed direction; the item may force its own */
	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
		    item_dir != BNXT_ULP_DIR_EGRESS)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
			     &svif);
	/* SVIF is stored in network byte order in the hdr_field */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	/* Flag the SVIF as set (host byte order) for later checks */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
452 
453 /* Function to handle the parsing of the RTE port id */
454 int32_t
455 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
456 {
457 	uint16_t port_id = 0;
458 	uint16_t svif_mask = 0xFFFF;
459 	uint32_t ifindex;
460 	int32_t rc = BNXT_TF_RC_ERROR;
461 
462 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
463 	    BNXT_ULP_INVALID_SVIF_VAL)
464 		return BNXT_TF_RC_SUCCESS;
465 
466 	/* SVIF not set. So get the port id */
467 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
468 
469 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
470 					      port_id,
471 					      &ifindex)) {
472 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
473 		return rc;
474 	}
475 
476 	/* Update the SVIF details */
477 	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
478 				     BNXT_ULP_DIR_INVALID);
479 	return rc;
480 }
481 
/*
 * Apply the implicit destination-port action when no action set one.
 *
 * If ACT_PORT_IS_SET is already recorded this is a no-op.  Otherwise a
 * synthetic PORT_ID action is built from the incoming interface and fed
 * through the normal port action handler; the "is set" flag is then
 * cleared again since the port was implied rather than requested.
 * Always returns BNXT_TF_RC_SUCCESS.
 */
int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
{
	struct rte_flow_action action_item = {0};
	struct rte_flow_action_port_id port_id = {0};

	/* Read the action port set bit */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
		/* Already set, so just exit */
		return BNXT_TF_RC_SUCCESS;
	}
	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	action_item.conf = &port_id;

	/* Update the action port based on incoming port */
	ulp_rte_port_act_handler(&action_item, params);

	/* Reset the action port set bit */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
	return BNXT_TF_RC_SUCCESS;
}
505 
/*
 * Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT.
 *
 * Extracts the ethdev port id and mask from the item (spec and mask are
 * both mandatory), chooses the direction implied by the item type
 * (PORT_REPRESENTOR -> ingress, REPRESENTED_PORT -> egress, PORT_ID ->
 * computed), translates the dpdk port to a bnxt ifindex and records the
 * SVIF match.  Returns BNXT_TF_RC_PARSE_ERR on bad input or the result
 * of ulp_rte_parser_svif_set().
 */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type item_dir;
	uint16_t ethdev_id;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	if (!item->spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
		return rc;
	}
	if (!item->mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
		return rc;
	}

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
		const struct rte_flow_item_port_id *port_spec = item->spec;
		const struct rte_flow_item_port_id *port_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INVALID;
		ethdev_id = port_spec->id;
		mask = port_mask->id;

		/*
		 * A zero mask means "any port": flag the SVIF match to be
		 * ignored but still record a non-zero mask placeholder.
		 */
		if (!port_mask->id) {
			ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF_IGNORE);
			mask = 0xff;
		}
		break;
	}
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_EGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
		return rc;
	}

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}
574 
575 /* Function to handle the update of proto header based on field values */
576 static void
577 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
578 			     uint16_t type, uint32_t in_flag)
579 {
580 	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
581 		if (in_flag) {
582 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
583 				       BNXT_ULP_HDR_BIT_I_IPV4);
584 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
585 		} else {
586 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
587 				       BNXT_ULP_HDR_BIT_O_IPV4);
588 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
589 		}
590 	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
591 		if (in_flag) {
592 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
593 				       BNXT_ULP_HDR_BIT_I_IPV6);
594 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
595 		} else {
596 			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
597 				       BNXT_ULP_HDR_BIT_O_IPV6);
598 			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
599 		}
600 	}
601 }
602 
603 /* Internal Function to identify broadcast or multicast packets */
604 static int32_t
605 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
606 {
607 	if (rte_is_multicast_ether_addr(eth_addr) ||
608 	    rte_is_broadcast_ether_addr(eth_addr)) {
609 		BNXT_TF_DBG(DEBUG,
610 			    "No support for bcast or mcast addr offload\n");
611 		return 1;
612 	}
613 	return 0;
614 }
615 
/*
 * Function to handle the parsing of RTE Flow item Ethernet Header.
 *
 * Rejects broadcast/multicast MAC matches, copies dst MAC, src MAC and
 * ethertype into consecutive hdr_field slots, then decides from the
 * already-seen header bits whether this is the outer or an inner L2
 * header and sets the corresponding bitmap bits.  For the outer header
 * the dst-MAC field index is saved for tunnel offload use.
 *
 * Returns BNXT_TF_RC_SUCCESS, BNXT_TF_RC_PARSE_ERR for bcast/mcast
 * addresses, or BNXT_TF_RC_ERROR on hdr_field overflow.
 */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/* Perform validations */
	if (eth_spec) {
		/* Todo: work around to avoid multicast and broadcast addr */
		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.dst_addr))
			return BNXT_TF_RC_PARSE_ERR;

		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.src_addr))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->hdr.ether_type;
	}

	/* Reserve the hdr_field slots for the ethernet fields */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ETH_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	dmac_idx = idx;
	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.dst_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.dst_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.dst_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.src_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.src_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.src_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ethertype is recorded but excluded from the signature match */
	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.ether_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.ether_type),
			      ulp_deference_struct(eth_mask, hdr.ether_type),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		/* An outer header was already seen: this eth is inner */
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
				    dmac_idx);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
691 
/*
 * Function to handle the parsing of RTE Flow item Vlan Header.
 *
 * Splits the TCI into priority and VLAN id fields, widens all-ones
 * partial masks into exact-match masks, copies priority/tag/ethertype
 * into hdr_field slots, and then classifies the tag as one of four
 * positions (outer-outer, outer-inner, inner-outer, inner-inner) based
 * on which ethernet headers were already seen and how many tags have
 * been counted so far; the corresponding vtag computed fields and
 * bitmap bits are updated.  A VLAN item with no preceding eth header
 * is an error.
 */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap	*hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		/* Split the big-endian TCI into PCP and VID parts */
		vlan_tag = ntohs(vlan_spec->hdr.vlan_tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->hdr.eth_proto;
	}

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->hdr.vlan_tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= 0xfff;

		/*
		 * the storage for priority and vlan tag is 2 bytes
		 * The mask of priority which is 3 bits if it is all 1's
		 * then make the rest bits 13 bits as 1's
		 * so that it is matched as exact match.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}

	/* Reserve the hdr_field slots for the vlan fields */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.vlan_tci);
	/*
	 * The priority field is ignored since OVS is setting it as
	 * wild card match and it is not supported. This is a work
	 * around and shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.eth_proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, hdr.eth_proto),
			      ulp_deference_struct(vlan_mask, hdr.eth_proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* First tag after the outer eth header */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* Second tag after the outer eth header (QinQ) */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* First tag after the inner eth header */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* Second tag after the inner eth header (QinQ) */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
	return BNXT_TF_RC_SUCCESS;
}
843 
/*
 * Update the header bitmaps and computed fields from an IP protocol
 * number.  UDP/TCP set the inner or outer L4 bits (per 'in_flag'),
 * GRE sets the tunnel bit, and ICMP is classified as inner or outer
 * depending on whether an L3 tunnel was seen.  Any non-zero protocol
 * is additionally recorded in the L3 protocol-id computed fields.
 */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_GRE) {
		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
	} else if (proto == IPPROTO_ICMP) {
		/* ICMP inside an L3 tunnel counts as the inner ICMP */
		if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_I_ICMP);
		else
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_O_ICMP);
	}
	if (proto) {
		/* Record the raw protocol id for template matching */
		if (in_flag) {
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
					    1);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
					    proto);
		} else {
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
					    1);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
					    proto);
		}
	}
}
897 
/* Function to handle the parsing of RTE Flow item IPV4 Header.
 *
 * Copies the ipv4 spec/mask fields into params->hdr_field in wire-header
 * order (the order must match the template's IPv4 field layout), sets the
 * inner or outer IPv4 header bitmap plus the computed L3 fields, and
 * forwards the masked next-protocol value to the L3 proto bitmap update.
 * Returns BNXT_TF_RC_SUCCESS on success, BNXT_TF_RC_ERROR otherwise.
 */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Validate room for all IPv4 fields and position idx at the base
	 * slot for this header (presumably advances idx — confirm against
	 * ulp_rte_prsr_fld_size_validate()).
	 */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);

	/*
	 * The tos field is ignored since OVS is setting it as wild card
	 * match and it is not supported. This is a work around and
	 * shall be addressed in the future.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.type_of_service),
			      ulp_deference_struct(ipv4_mask,
						   hdr.type_of_service),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.total_length),
			      ulp_deference_struct(ipv4_mask, hdr.total_length),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
			      ULP_PRSR_ACT_DEFAULT);

	/* Fragment offset mask is ignored like tos above. */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.fragment_offset),
			      ulp_deference_struct(ipv4_mask,
						   hdr.fragment_offset),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for matching templates */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.next_proto_id),
			      ulp_deference_struct(ipv4_mask,
						   hdr.next_proto_id),
			      ULP_PRSR_ACT_MATCH_IGNORE);
	if (ipv4_spec)
		proto = ipv4_spec->hdr.next_proto_id;

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Remember the dest-IP field index for tunnel offload use below. */
	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		/* An outer L3 (or a tunnel) is already present: this is the
		 * inner IPv4 header.
		 */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
	if (ipv4_mask)
		proto &= ipv4_mask->hdr.next_proto_id;

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1031 
/* Function to handle the parsing of RTE Flow item IPV6 Header.
 *
 * Splits vtc_flow into version/TC/flow-label sub-fields, copies the ipv6
 * spec/mask fields into params->hdr_field in wire-header order (must match
 * the template's IPv6 field layout), sets the inner or outer IPv6 bitmap
 * plus the computed L3 fields, and forwards the masked next-header value
 * to the L3 proto bitmap update.
 * Returns BNXT_TF_RC_SUCCESS on success, BNXT_TF_RC_ERROR otherwise.
 */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6	*ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6	*ipv6_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint32_t ver_spec = 0, ver_mask = 0;
	uint32_t tc_spec = 0, tc_mask = 0;
	uint32_t lab_spec = 0, lab_mask = 0;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		/* Break the 32-bit vtc_flow into its three sub-fields. */
		ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
		tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
		lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
		proto = ipv6_spec->hdr.proto;
	}

	if (ipv6_mask) {
		ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);

		/* Some of the PMD applications may set the protocol field
		 * in the IPv6 spec but don't set the mask. So, consider
		 * the mask in proto value calculation.
		 */
		proto &= ipv6_mask->hdr.proto;
	}

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
	ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
			      ULP_PRSR_ACT_DEFAULT);
	/*
	 * The TC and flow label field are ignored since OVS is
	 * setting it for match and it is not supported.
	 * This is a work around and
	 * shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);
	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for template matching */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.proto),
			      ulp_deference_struct(ipv6_mask, hdr.proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Remember the dest-IP field index for tunnel offload use below. */
	dip_idx =  idx;
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		/* An outer L3 (or a tunnel) is already present: this is the
		 * inner IPv6 header.
		 */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
1152 
/* Function to handle the update of proto header based on field values.
 *
 * For a UDP/TCP header (inner or outer, selected by hdr_bit) this records
 * the CPU-order port values and masks plus the "full byte" port/proto
 * indicator computed fields, and derives the L3 protocol id (UDP vs TCP).
 * Port arguments are expected in network byte order (they are converted
 * with rte_be_to_cpu_16 before being stored).
 */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
			     uint16_t src_port, uint16_t src_mask,
			     uint16_t dst_port, uint16_t dst_mask,
			     enum bnxt_ulp_hdr_bit hdr_bit)
{
	switch (hdr_bit) {
	case BNXT_ULP_HDR_BIT_I_UDP:
	case BNXT_ULP_HDR_BIT_I_TCP:
		/* Inner L4: set header bit and inner computed fields. */
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		/* Non-zero only when spec AND mask cover the port. */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	case BNXT_ULP_HDR_BIT_O_UDP:
	case BNXT_ULP_HDR_BIT_O_TCP:
		/* Outer L4: same updates on the outer computed fields. */
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	default:
		break;
	}

	/* Implicit VXLAN detection: outer UDP to the well-known VXLAN port
	 * (4789) marks the flow as tunneled.
	 * NOTE(review): dst_mask is not consulted here — confirm a partial
	 * destination-port mask should still trigger tunnel detection.
	 */
	if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
	    tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
		ULP_BITMAP_SET(params->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	}
}
1216 
/* Function to handle the parsing of RTE Flow item UDP Header.
 *
 * Copies the udp spec/mask fields into params->hdr_field in wire-header
 * order, then updates the inner/outer L4 bitmaps and computed port
 * fields via ulp_rte_l4_proto_type_update().
 * Returns BNXT_TF_RC_SUCCESS on success, BNXT_TF_RC_ERROR otherwise.
 */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;

	/* validate there is no 3rd L4 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Ports stay in network byte order here; converted later in
	 * ulp_rte_l4_proto_type_update().
	 */
	if (udp_spec) {
		sport = udp_spec->hdr.src_port;
		dport = udp_spec->hdr.dst_port;
	}
	if (udp_mask) {
		sport_mask = udp_mask->hdr.src_port;
		dport_mask = udp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_UDP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for udp into hdr_field using udp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.src_port),
			      ulp_deference_struct(udp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dst_port),
			      ulp_deference_struct(udp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_len),
			      ulp_deference_struct(udp_mask, hdr.dgram_len),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1291 
/* Function to handle the parsing of RTE Flow item TCP Header.
 *
 * Copies the tcp spec/mask fields into params->hdr_field in wire-header
 * order, then updates the inner/outer L4 bitmaps and computed port
 * fields via ulp_rte_l4_proto_type_update().
 * Returns BNXT_TF_RC_SUCCESS on success, BNXT_TF_RC_ERROR otherwise.
 */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t size;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;

	/* validate there is no 3rd L4 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Ports stay in network byte order here; converted later in
	 * ulp_rte_l4_proto_type_update().
	 */
	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}
	if (tcp_mask) {
		sport_mask = tcp_mask->hdr.src_port;
		dport_mask = tcp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_TCP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.src_port),
			      ulp_deference_struct(tcp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.dst_port),
			      ulp_deference_struct(tcp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.data_off),
			      ulp_deference_struct(tcp_mask, hdr.data_off),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.rx_win),
			      ulp_deference_struct(tcp_mask, hdr.rx_win),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.cksum),
			      ulp_deference_struct(tcp_mask, hdr.cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1396 
/* Function to handle the parsing of RTE Flow item Vxlan Header.
 *
 * Copies the vxlan spec/mask fields into params->hdr_field in wire-header
 * order and marks the flow as VXLAN-tunneled.
 * Returns BNXT_TF_RC_SUCCESS on success, BNXT_TF_RC_ERROR otherwise.
 */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.flags),
			      ulp_deference_struct(vxlan_mask, hdr.flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd0);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.rsvd0),
			      ulp_deference_struct(vxlan_mask, hdr.rsvd0),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.vni);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.vni),
			      ulp_deference_struct(vxlan_mask, hdr.vni),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd1);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vxlan_spec, hdr.rsvd1),
			      ulp_deference_struct(vxlan_mask, hdr.rsvd1),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with vxlan and flag the flow as tunneled */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	return BNXT_TF_RC_SUCCESS;
}
1447 
/* Function to handle the parsing of RTE Flow item GRE Header.
 *
 * Copies the gre spec/mask fields into params->hdr_field and marks the
 * flow as GRE-tunneled.
 * Returns BNXT_TF_RC_SUCCESS on success, BNXT_TF_RC_ERROR otherwise.
 */
int32_t
ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_gre *gre_spec = item->spec;
	const struct rte_flow_item_gre *gre_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_GRE_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Checksum/reserved/version word of the GRE header. */
	size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(gre_spec, c_rsvd0_ver),
			      ulp_deference_struct(gre_mask, c_rsvd0_ver),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(gre_spec, protocol),
			      ulp_deference_struct(gre_mask, protocol),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with GRE and flag the flow as tunneled */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	return BNXT_TF_RC_SUCCESS;
}
1482 
/* Function to handle the parsing of RTE Flow item ANY.
 * The ANY item matches any protocol, so there is nothing to record;
 * it is accepted unconditionally.
 */
int32_t
ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
1490 
/* Function to handle the parsing of RTE Flow item ICMP Header.
 *
 * Copies the icmp spec/mask fields into params->hdr_field in wire-header
 * order and sets the inner or outer ICMP header bit depending on whether
 * a tunnel has already been seen.
 * Returns BNXT_TF_RC_SUCCESS on success, BNXT_TF_RC_ERROR otherwise.
 */
int32_t
ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp *icmp_spec = item->spec;
	const struct rte_flow_item_icmp *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_type),
			      ulp_deference_struct(icmp_mask, hdr.icmp_type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_code),
			      ulp_deference_struct(icmp_mask, hdr.icmp_code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
			      ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_ident),
			      ulp_deference_struct(icmp_mask, hdr.icmp_ident),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
			      ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with ICMP: inner if tunneled, else outer */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
1545 
/* Function to handle the parsing of RTE Flow item ICMP6 Header.
 *
 * Copies the icmp6 spec/mask fields into params->hdr_field, rejects the
 * item when the outer L3 is IPv4 (ICMPv6 over IPv4 is invalid), and sets
 * the inner or outer ICMP header bit depending on tunnel presence.
 * Returns BNXT_TF_RC_SUCCESS on success, BNXT_TF_RC_ERROR otherwise.
 */
int32_t
ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, type),
			      ulp_deference_struct(icmp_mask, type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, code),
			      ulp_deference_struct(icmp_mask, code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, checksum),
			      ulp_deference_struct(icmp_mask, checksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* ICMPv6 cannot follow an outer IPv4 header. */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
		BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the hdr_bitmap with ICMP: inner if tunneled, else outer */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
1593 
/* Function to handle the parsing of RTE Flow item void Header.
 * VOID items carry no match data; nothing to record.
 */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
1601 
/* Function to handle the parsing of RTE Flow action void Header.
 * VOID actions are no-ops; nothing to record.
 */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
1609 
1610 /* Function to handle the parsing of RTE Flow action Mark Header. */
1611 int32_t
1612 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1613 			 struct ulp_rte_parser_params *param)
1614 {
1615 	const struct rte_flow_action_mark *mark;
1616 	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1617 	uint32_t mark_id;
1618 
1619 	mark = action_item->conf;
1620 	if (mark) {
1621 		mark_id = tfp_cpu_to_be_32(mark->id);
1622 		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1623 		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1624 
1625 		/* Update the hdr_bitmap with vxlan */
1626 		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1627 		return BNXT_TF_RC_SUCCESS;
1628 	}
1629 	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1630 	return BNXT_TF_RC_ERROR;
1631 }
1632 
1633 /* Function to handle the parsing of RTE Flow action RSS Header. */
1634 int32_t
1635 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1636 			struct ulp_rte_parser_params *param)
1637 {
1638 	const struct rte_flow_action_rss *rss;
1639 	struct ulp_rte_act_prop *ap = &param->act_prop;
1640 
1641 	if (action_item == NULL || action_item->conf == NULL) {
1642 		BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1643 		return BNXT_TF_RC_ERROR;
1644 	}
1645 
1646 	rss = action_item->conf;
1647 	/* Copy the rss into the specific action properties */
1648 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1649 	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1650 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1651 	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1652 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1653 	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
1654 
1655 	if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1656 		BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1657 		return BNXT_TF_RC_ERROR;
1658 	}
1659 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1660 	       rss->key_len);
1661 
1662 	/* set the RSS action header bit */
1663 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1664 
1665 	return BNXT_TF_RC_SUCCESS;
1666 }
1667 
/* Function to handle the parsing of RTE Flow item eth Header for
 * tunnel encapsulation.
 *
 * Copies dmac, smac and ether_type from the spec into consecutive
 * enc_field slots starting at BNXT_ULP_ENC_FIELD_ETH_DMAC (the field
 * pointer advances on each copy, so the copy order is significant) and
 * sets the outer-ethernet encap header bit.
 */
static void
ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
			    const struct rte_flow_item_eth *eth_spec)
{
	struct ulp_rte_hdr_field *field;
	uint32_t size;

	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
	size = sizeof(eth_spec->hdr.dst_addr.addr_bytes);
	field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.dst_addr.addr_bytes, size);

	size = sizeof(eth_spec->hdr.src_addr.addr_bytes);
	field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.src_addr.addr_bytes, size);

	size = sizeof(eth_spec->hdr.ether_type);
	field = ulp_rte_parser_fld_copy(field, &eth_spec->hdr.ether_type, size);

	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
}
1688 
1689 /* Function to handle the parsing of RTE Flow item vlan Header. */
1690 static void
1691 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1692 			     const struct rte_flow_item_vlan *vlan_spec,
1693 			     uint32_t inner)
1694 {
1695 	struct ulp_rte_hdr_field *field;
1696 	uint32_t size;
1697 
1698 	if (!inner) {
1699 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1700 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1701 			       BNXT_ULP_HDR_BIT_OO_VLAN);
1702 	} else {
1703 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1704 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1705 			       BNXT_ULP_HDR_BIT_OI_VLAN);
1706 	}
1707 
1708 	size = sizeof(vlan_spec->hdr.vlan_tci);
1709 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.vlan_tci, size);
1710 
1711 	size = sizeof(vlan_spec->hdr.eth_proto);
1712 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.eth_proto, size);
1713 }
1714 
1715 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
1716 static void
1717 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1718 			     const struct rte_flow_item_ipv4 *ip)
1719 {
1720 	struct ulp_rte_hdr_field *field;
1721 	uint32_t size;
1722 	uint8_t val8;
1723 
1724 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
1725 	size = sizeof(ip->hdr.version_ihl);
1726 	if (!ip->hdr.version_ihl)
1727 		val8 = RTE_IPV4_VHL_DEF;
1728 	else
1729 		val8 = ip->hdr.version_ihl;
1730 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1731 
1732 	size = sizeof(ip->hdr.type_of_service);
1733 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1734 
1735 	size = sizeof(ip->hdr.packet_id);
1736 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1737 
1738 	size = sizeof(ip->hdr.fragment_offset);
1739 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
1740 
1741 	size = sizeof(ip->hdr.time_to_live);
1742 	if (!ip->hdr.time_to_live)
1743 		val8 = BNXT_ULP_DEFAULT_TTL;
1744 	else
1745 		val8 = ip->hdr.time_to_live;
1746 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1747 
1748 	size = sizeof(ip->hdr.next_proto_id);
1749 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1750 
1751 	size = sizeof(ip->hdr.src_addr);
1752 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1753 
1754 	size = sizeof(ip->hdr.dst_addr);
1755 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1756 
1757 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1758 }
1759 
1760 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
1761 static void
1762 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1763 			     const struct rte_flow_item_ipv6 *ip)
1764 {
1765 	struct ulp_rte_hdr_field *field;
1766 	uint32_t size;
1767 	uint32_t val32;
1768 	uint8_t val8;
1769 
1770 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
1771 	size = sizeof(ip->hdr.vtc_flow);
1772 	if (!ip->hdr.vtc_flow)
1773 		val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
1774 	else
1775 		val32 = ip->hdr.vtc_flow;
1776 	field = ulp_rte_parser_fld_copy(field, &val32, size);
1777 
1778 	size = sizeof(ip->hdr.proto);
1779 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
1780 
1781 	size = sizeof(ip->hdr.hop_limits);
1782 	if (!ip->hdr.hop_limits)
1783 		val8 = BNXT_ULP_DEFAULT_TTL;
1784 	else
1785 		val8 = ip->hdr.hop_limits;
1786 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1787 
1788 	size = sizeof(ip->hdr.src_addr);
1789 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1790 
1791 	size = sizeof(ip->hdr.dst_addr);
1792 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1793 
1794 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
1795 }
1796 
1797 /* Function to handle the parsing of RTE Flow item UDP Header. */
1798 static void
1799 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
1800 			    const struct rte_flow_item_udp *udp_spec)
1801 {
1802 	struct ulp_rte_hdr_field *field;
1803 	uint32_t size;
1804 	uint8_t type = IPPROTO_UDP;
1805 
1806 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
1807 	size = sizeof(udp_spec->hdr.src_port);
1808 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
1809 
1810 	size = sizeof(udp_spec->hdr.dst_port);
1811 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
1812 
1813 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
1814 
1815 	/* Update thhe ip header protocol */
1816 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
1817 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1818 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
1819 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1820 }
1821 
1822 /* Function to handle the parsing of RTE Flow item vxlan Header. */
1823 static void
1824 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
1825 			      struct rte_flow_item_vxlan *vxlan_spec)
1826 {
1827 	struct ulp_rte_hdr_field *field;
1828 	uint32_t size;
1829 
1830 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
1831 	size = sizeof(vxlan_spec->hdr.flags);
1832 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.flags, size);
1833 
1834 	size = sizeof(vxlan_spec->hdr.rsvd0);
1835 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd0, size);
1836 
1837 	size = sizeof(vxlan_spec->hdr.vni);
1838 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.vni, size);
1839 
1840 	size = sizeof(vxlan_spec->hdr.rsvd1);
1841 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd1, size);
1842 
1843 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1844 }
1845 
1846 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1847 int32_t
1848 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1849 				struct ulp_rte_parser_params *params)
1850 {
1851 	const struct rte_flow_action_vxlan_encap *vxlan_encap;
1852 	const struct rte_flow_item *item;
1853 	const struct rte_flow_item_ipv4 *ipv4_spec;
1854 	const struct rte_flow_item_ipv6 *ipv6_spec;
1855 	struct rte_flow_item_vxlan vxlan_spec;
1856 	uint32_t vlan_num = 0, vlan_size = 0;
1857 	uint32_t ip_size = 0, ip_type = 0;
1858 	uint32_t vxlan_size = 0;
1859 	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1860 	struct ulp_rte_act_prop *ap = &params->act_prop;
1861 
1862 	vxlan_encap = action_item->conf;
1863 	if (!vxlan_encap) {
1864 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1865 		return BNXT_TF_RC_ERROR;
1866 	}
1867 
1868 	item = vxlan_encap->definition;
1869 	if (!item) {
1870 		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1871 		return BNXT_TF_RC_ERROR;
1872 	}
1873 
1874 	if (!ulp_rte_item_skip_void(&item, 0))
1875 		return BNXT_TF_RC_ERROR;
1876 
1877 	/* must have ethernet header */
1878 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1879 		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1880 		return BNXT_TF_RC_ERROR;
1881 	}
1882 
1883 	/* Parse the ethernet header */
1884 	if (item->spec)
1885 		ulp_rte_enc_eth_hdr_handler(params, item->spec);
1886 
1887 	/* Goto the next item */
1888 	if (!ulp_rte_item_skip_void(&item, 1))
1889 		return BNXT_TF_RC_ERROR;
1890 
1891 	/* May have vlan header */
1892 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1893 		vlan_num++;
1894 		if (item->spec)
1895 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
1896 
1897 		if (!ulp_rte_item_skip_void(&item, 1))
1898 			return BNXT_TF_RC_ERROR;
1899 	}
1900 
1901 	/* may have two vlan headers */
1902 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1903 		vlan_num++;
1904 		if (item->spec)
1905 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
1906 
1907 		if (!ulp_rte_item_skip_void(&item, 1))
1908 			return BNXT_TF_RC_ERROR;
1909 	}
1910 
1911 	/* Update the vlan count and size of more than one */
1912 	if (vlan_num) {
1913 		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1914 		vlan_num = tfp_cpu_to_be_32(vlan_num);
1915 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1916 		       &vlan_num,
1917 		       sizeof(uint32_t));
1918 		vlan_size = tfp_cpu_to_be_32(vlan_size);
1919 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1920 		       &vlan_size,
1921 		       sizeof(uint32_t));
1922 	}
1923 
1924 	/* L3 must be IPv4, IPv6 */
1925 	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1926 		ipv4_spec = item->spec;
1927 		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1928 
1929 		/* Update the ip size details */
1930 		ip_size = tfp_cpu_to_be_32(ip_size);
1931 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1932 		       &ip_size, sizeof(uint32_t));
1933 
1934 		/* update the ip type */
1935 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1936 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1937 		       &ip_type, sizeof(uint32_t));
1938 
1939 		/* update the computed field to notify it is ipv4 header */
1940 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1941 				    1);
1942 		if (ipv4_spec)
1943 			ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
1944 
1945 		if (!ulp_rte_item_skip_void(&item, 1))
1946 			return BNXT_TF_RC_ERROR;
1947 	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1948 		ipv6_spec = item->spec;
1949 		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1950 
1951 		/* Update the ip size details */
1952 		ip_size = tfp_cpu_to_be_32(ip_size);
1953 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1954 		       &ip_size, sizeof(uint32_t));
1955 
1956 		 /* update the ip type */
1957 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1958 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1959 		       &ip_type, sizeof(uint32_t));
1960 
1961 		/* update the computed field to notify it is ipv6 header */
1962 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1963 				    1);
1964 		if (ipv6_spec)
1965 			ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
1966 
1967 		if (!ulp_rte_item_skip_void(&item, 1))
1968 			return BNXT_TF_RC_ERROR;
1969 	} else {
1970 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1971 		return BNXT_TF_RC_ERROR;
1972 	}
1973 
1974 	/* L4 is UDP */
1975 	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1976 		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1977 		return BNXT_TF_RC_ERROR;
1978 	}
1979 	if (item->spec)
1980 		ulp_rte_enc_udp_hdr_handler(params, item->spec);
1981 
1982 	if (!ulp_rte_item_skip_void(&item, 1))
1983 		return BNXT_TF_RC_ERROR;
1984 
1985 	/* Finally VXLAN */
1986 	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1987 		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1988 		return BNXT_TF_RC_ERROR;
1989 	}
1990 	vxlan_size = sizeof(struct rte_flow_item_vxlan);
1991 	/* copy the vxlan details */
1992 	memcpy(&vxlan_spec, item->spec, vxlan_size);
1993 	vxlan_spec.hdr.flags = 0x08;
1994 	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1995 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1996 	       &vxlan_size, sizeof(uint32_t));
1997 
1998 	ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
1999 
2000 	/* update the hdr_bitmap with vxlan */
2001 	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2002 	return BNXT_TF_RC_SUCCESS;
2003 }
2004 
/* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
	/* Update computational field with tunnel decap info */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
	return BNXT_TF_RC_SUCCESS;
}
2018 
/* Function to handle the parsing of RTE Flow action drop Header.
 * No configuration is needed; the action bit alone drives the templates.
 */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}
2028 
2029 /* Function to handle the parsing of RTE Flow action count. */
2030 int32_t
2031 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2032 			  struct ulp_rte_parser_params *params)
2033 {
2034 	const struct rte_flow_action_count *act_count;
2035 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
2036 
2037 	act_count = action_item->conf;
2038 	if (act_count) {
2039 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2040 		       &act_count->id,
2041 		       BNXT_ULP_ACT_PROP_SZ_COUNT);
2042 	}
2043 
2044 	/* Update the hdr_bitmap with count */
2045 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2046 	return BNXT_TF_RC_SUCCESS;
2047 }
2048 
/* Function to handle the parsing of action ports.
 * Resolves the destination for a port-directing action: a vport for
 * egress flows or a vnic for ingress flows, and writes it big-endian
 * into the action properties.
 */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex,
			    enum bnxt_ulp_direction_type act_dir)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint32_t pid;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction */
	/* If action implicitly specifies direction, use the specification. */
	dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
		ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
		act_dir;
	port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	/* VF reps are handled through the vnic path even for egress. */
	if (dir == BNXT_ULP_DIR_EGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		/* For egress direction, fill vport */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		/* Widen to 32 bits and store big-endian. */
		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	} else {
		/* For ingress direction, fill vnic */
		/*
		 * Action		Destination
		 * ------------------------------------
		 * PORT_REPRESENTOR	Driver Function
		 * ------------------------------------
		 * REPRESENTED_PORT	VF
		 * ------------------------------------
		 * PORT_ID		VF
		 */
		/* Only PORT_REPRESENTOR passes act_dir == INGRESS here, so a
		 * VF rep target with any other action resolves to the VF's
		 * function vnic.
		 */
		if (act_dir != BNXT_ULP_DIR_INGRESS &&
		    port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		/* Widen to 32 bits and store big-endian. */
		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}
2109 
2110 /* Function to handle the parsing of RTE Flow action PF. */
2111 int32_t
2112 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2113 		       struct ulp_rte_parser_params *params)
2114 {
2115 	uint32_t port_id;
2116 	uint32_t ifindex;
2117 	enum bnxt_ulp_intf_type intf_type;
2118 
2119 	/* Get the port id of the current device */
2120 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2121 
2122 	/* Get the port db ifindex */
2123 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2124 					      &ifindex)) {
2125 		BNXT_TF_DBG(ERR, "Invalid port id\n");
2126 		return BNXT_TF_RC_ERROR;
2127 	}
2128 
2129 	/* Check the port is PF port */
2130 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2131 	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2132 		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2133 		return BNXT_TF_RC_ERROR;
2134 	}
2135 	/* Update the action properties */
2136 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2137 	return ulp_rte_parser_act_port_set(params, ifindex,
2138 					   BNXT_ULP_DIR_INVALID);
2139 }
2140 
2141 /* Function to handle the parsing of RTE Flow action VF. */
2142 int32_t
2143 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2144 		       struct ulp_rte_parser_params *params)
2145 {
2146 	const struct rte_flow_action_vf *vf_action;
2147 	enum bnxt_ulp_intf_type intf_type;
2148 	uint32_t ifindex;
2149 	struct bnxt *bp;
2150 
2151 	vf_action = action_item->conf;
2152 	if (!vf_action) {
2153 		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2154 		return BNXT_TF_RC_PARSE_ERR;
2155 	}
2156 
2157 	if (vf_action->original) {
2158 		BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2159 		return BNXT_TF_RC_PARSE_ERR;
2160 	}
2161 
2162 	bp = bnxt_pmd_get_bp(params->port_id);
2163 	if (bp == NULL) {
2164 		BNXT_TF_DBG(ERR, "Invalid bp\n");
2165 		return BNXT_TF_RC_ERROR;
2166 	}
2167 
2168 	/* vf_action->id is a logical number which in this case is an
2169 	 * offset from the first VF. So, to get the absolute VF id, the
2170 	 * offset must be added to the absolute first vf id of that port.
2171 	 */
2172 	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2173 						 bp->first_vf_id +
2174 						 vf_action->id,
2175 						 &ifindex)) {
2176 		BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2177 		return BNXT_TF_RC_ERROR;
2178 	}
2179 	/* Check the port is VF port */
2180 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2181 	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2182 	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2183 		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2184 		return BNXT_TF_RC_ERROR;
2185 	}
2186 
2187 	/* Update the action properties */
2188 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2189 	return ulp_rte_parser_act_port_set(params, ifindex,
2190 					   BNXT_ULP_DIR_INVALID);
2191 }
2192 
2193 /* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
2194 int32_t
2195 ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
2196 			 struct ulp_rte_parser_params *param)
2197 {
2198 	uint32_t ethdev_id;
2199 	uint32_t ifindex;
2200 	enum bnxt_ulp_intf_type intf_type;
2201 	enum bnxt_ulp_direction_type act_dir;
2202 
2203 	if (!act_item->conf) {
2204 		BNXT_TF_DBG(ERR,
2205 			    "ParseErr: Invalid Argument\n");
2206 		return BNXT_TF_RC_PARSE_ERR;
2207 	}
2208 	switch (act_item->type) {
2209 	case RTE_FLOW_ACTION_TYPE_PORT_ID: {
2210 		const struct rte_flow_action_port_id *port_id = act_item->conf;
2211 
2212 		if (port_id->original) {
2213 			BNXT_TF_DBG(ERR,
2214 				    "ParseErr:Portid Original not supported\n");
2215 			return BNXT_TF_RC_PARSE_ERR;
2216 		}
2217 		ethdev_id = port_id->id;
2218 		act_dir = BNXT_ULP_DIR_INVALID;
2219 		break;
2220 	}
2221 	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
2222 		const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2223 
2224 		ethdev_id = ethdev->port_id;
2225 		act_dir = BNXT_ULP_DIR_INGRESS;
2226 		break;
2227 	}
2228 	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
2229 		const struct rte_flow_action_ethdev *ethdev = act_item->conf;
2230 
2231 		ethdev_id = ethdev->port_id;
2232 		act_dir = BNXT_ULP_DIR_EGRESS;
2233 		break;
2234 	}
2235 	default:
2236 		BNXT_TF_DBG(ERR, "Unknown port action\n");
2237 		return BNXT_TF_RC_ERROR;
2238 	}
2239 
2240 	/* Get the port db ifindex */
2241 	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
2242 					      &ifindex)) {
2243 		BNXT_TF_DBG(ERR, "Invalid port id\n");
2244 		return BNXT_TF_RC_ERROR;
2245 	}
2246 
2247 	/* Get the intf type */
2248 	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2249 	if (!intf_type) {
2250 		BNXT_TF_DBG(ERR, "Invalid port type\n");
2251 		return BNXT_TF_RC_ERROR;
2252 	}
2253 
2254 	/* Set the action port */
2255 	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2256 	return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
2257 }
2258 
/* Function to handle the parsing of RTE Flow action pop vlan.
 * No configuration is needed; the action bit alone drives the templates.
 */
int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with pop */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
	return BNXT_TF_RC_SUCCESS;
}
2268 
2269 /* Function to handle the parsing of RTE Flow action push vlan. */
2270 int32_t
2271 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2272 				 struct ulp_rte_parser_params *params)
2273 {
2274 	const struct rte_flow_action_of_push_vlan *push_vlan;
2275 	uint16_t ethertype;
2276 	struct ulp_rte_act_prop *act = &params->act_prop;
2277 
2278 	push_vlan = action_item->conf;
2279 	if (push_vlan) {
2280 		ethertype = push_vlan->ethertype;
2281 		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2282 			BNXT_TF_DBG(ERR,
2283 				    "Parse Err: Ethertype not supported\n");
2284 			return BNXT_TF_RC_PARSE_ERR;
2285 		}
2286 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2287 		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2288 		/* Update the hdr_bitmap with push vlan */
2289 		ULP_BITMAP_SET(params->act_bitmap.bits,
2290 			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
2291 		return BNXT_TF_RC_SUCCESS;
2292 	}
2293 	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2294 	return BNXT_TF_RC_ERROR;
2295 }
2296 
2297 /* Function to handle the parsing of RTE Flow action set vlan id. */
2298 int32_t
2299 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2300 				    struct ulp_rte_parser_params *params)
2301 {
2302 	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2303 	uint32_t vid;
2304 	struct ulp_rte_act_prop *act = &params->act_prop;
2305 
2306 	vlan_vid = action_item->conf;
2307 	if (vlan_vid && vlan_vid->vlan_vid) {
2308 		vid = vlan_vid->vlan_vid;
2309 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2310 		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2311 		/* Update the hdr_bitmap with vlan vid */
2312 		ULP_BITMAP_SET(params->act_bitmap.bits,
2313 			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2314 		return BNXT_TF_RC_SUCCESS;
2315 	}
2316 	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2317 	return BNXT_TF_RC_ERROR;
2318 }
2319 
2320 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2321 int32_t
2322 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2323 				    struct ulp_rte_parser_params *params)
2324 {
2325 	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2326 	uint8_t pcp;
2327 	struct ulp_rte_act_prop *act = &params->act_prop;
2328 
2329 	vlan_pcp = action_item->conf;
2330 	if (vlan_pcp) {
2331 		pcp = vlan_pcp->vlan_pcp;
2332 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2333 		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2334 		/* Update the hdr_bitmap with vlan vid */
2335 		ULP_BITMAP_SET(params->act_bitmap.bits,
2336 			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2337 		return BNXT_TF_RC_SUCCESS;
2338 	}
2339 	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2340 	return BNXT_TF_RC_ERROR;
2341 }
2342 
2343 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2344 int32_t
2345 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2346 				 struct ulp_rte_parser_params *params)
2347 {
2348 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2349 	struct ulp_rte_act_prop *act = &params->act_prop;
2350 
2351 	set_ipv4 = action_item->conf;
2352 	if (set_ipv4) {
2353 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2354 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2355 		/* Update the hdr_bitmap with set ipv4 src */
2356 		ULP_BITMAP_SET(params->act_bitmap.bits,
2357 			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2358 		return BNXT_TF_RC_SUCCESS;
2359 	}
2360 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2361 	return BNXT_TF_RC_ERROR;
2362 }
2363 
2364 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2365 int32_t
2366 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2367 				 struct ulp_rte_parser_params *params)
2368 {
2369 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2370 	struct ulp_rte_act_prop *act = &params->act_prop;
2371 
2372 	set_ipv4 = action_item->conf;
2373 	if (set_ipv4) {
2374 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2375 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2376 		/* Update the hdr_bitmap with set ipv4 dst */
2377 		ULP_BITMAP_SET(params->act_bitmap.bits,
2378 			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2379 		return BNXT_TF_RC_SUCCESS;
2380 	}
2381 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2382 	return BNXT_TF_RC_ERROR;
2383 }
2384 
2385 /* Function to handle the parsing of RTE Flow action set tp src.*/
2386 int32_t
2387 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2388 			       struct ulp_rte_parser_params *params)
2389 {
2390 	const struct rte_flow_action_set_tp *set_tp;
2391 	struct ulp_rte_act_prop *act = &params->act_prop;
2392 
2393 	set_tp = action_item->conf;
2394 	if (set_tp) {
2395 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2396 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2397 		/* Update the hdr_bitmap with set tp src */
2398 		ULP_BITMAP_SET(params->act_bitmap.bits,
2399 			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
2400 		return BNXT_TF_RC_SUCCESS;
2401 	}
2402 
2403 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2404 	return BNXT_TF_RC_ERROR;
2405 }
2406 
2407 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2408 int32_t
2409 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2410 			       struct ulp_rte_parser_params *params)
2411 {
2412 	const struct rte_flow_action_set_tp *set_tp;
2413 	struct ulp_rte_act_prop *act = &params->act_prop;
2414 
2415 	set_tp = action_item->conf;
2416 	if (set_tp) {
2417 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2418 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2419 		/* Update the hdr_bitmap with set tp dst */
2420 		ULP_BITMAP_SET(params->act_bitmap.bits,
2421 			       BNXT_ULP_ACT_BIT_SET_TP_DST);
2422 		return BNXT_TF_RC_SUCCESS;
2423 	}
2424 
2425 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2426 	return BNXT_TF_RC_ERROR;
2427 }
2428 
/* Function to handle the parsing of RTE Flow action dec ttl.
 * No configuration is needed; the action bit alone drives the templates.
 */
int32_t
ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
			    struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with dec ttl */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
	return BNXT_TF_RC_SUCCESS;
}
2438 
/* Function to handle the parsing of RTE Flow action JUMP.
 * No configuration is needed; the action bit alone drives the templates.
 */
int32_t
ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with jump */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
	return BNXT_TF_RC_SUCCESS;
}
2448 
2449 int32_t
2450 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2451 			   struct ulp_rte_parser_params *params)
2452 {
2453 	const struct rte_flow_action_sample *sample;
2454 	int ret;
2455 
2456 	sample = action_item->conf;
2457 
2458 	/* if SAMPLE bit is set it means this sample action is nested within the
2459 	 * actions of another sample action; this is not allowed
2460 	 */
2461 	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2462 			     BNXT_ULP_ACT_BIT_SAMPLE))
2463 		return BNXT_TF_RC_ERROR;
2464 
2465 	/* a sample action is only allowed as a shared action */
2466 	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2467 			      BNXT_ULP_ACT_BIT_SHARED))
2468 		return BNXT_TF_RC_ERROR;
2469 
2470 	/* only a ratio of 1 i.e. 100% is supported */
2471 	if (sample->ratio != 1)
2472 		return BNXT_TF_RC_ERROR;
2473 
2474 	if (!sample->actions)
2475 		return BNXT_TF_RC_ERROR;
2476 
2477 	/* parse the nested actions for a sample action */
2478 	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2479 	if (ret == BNXT_TF_RC_SUCCESS)
2480 		/* Update the act_bitmap with sample */
2481 		ULP_BITMAP_SET(params->act_bitmap.bits,
2482 			       BNXT_ULP_ACT_BIT_SAMPLE);
2483 
2484 	return ret;
2485 }
2486 
/* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
int32_t
ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
				   struct ulp_rte_parser_params *params)
{
	/* Set the F1 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
	/* Delegate the action bitmap updates to the vxlan decap handler. */
	return ulp_rte_vxlan_decap_act_handler(action_item, params);
}
2496 
/* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
int32_t
ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
				       struct ulp_rte_parser_params *params)
{
	RTE_SET_USED(item);
	/* Set the F2 flow header bit */
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
	/* Reuse the vxlan decap action handler to set the decap bits. */
	return ulp_rte_vxlan_decap_act_handler(NULL, params);
}
2507