xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2023 Broadcom
3  * All rights reserved.
4  */
5 
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
15 #include "tfp.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
19 #include "ulp_tun.h"
20 #include "ulp_template_db_tbl.h"
21 
22 /* Local defines for the parsing functions */
23 #define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
24 #define ULP_VLAN_PRIORITY_MASK		0x700
25 #define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits*/
26 #define ULP_UDP_PORT_VXLAN		4789
27 #define ULP_UDP_PORT_VXLAN_MASK	 0XFFFF
28 
29 /* Utility function to skip the void items. */
30 static inline int32_t
31 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
32 {
33 	if (!*item)
34 		return 0;
35 	if (increment)
36 		(*item)++;
37 	while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
38 		(*item)++;
39 	if (*item)
40 		return 1;
41 	return 0;
42 }
43 
44 /* Utility function to copy field spec items */
45 static struct ulp_rte_hdr_field *
46 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
47 			const void *buffer,
48 			uint32_t size)
49 {
50 	field->size = size;
51 	memcpy(field->spec, buffer, field->size);
52 	field++;
53 	return field;
54 }
55 
56 /* Utility function to update the field_bitmap */
57 static void
58 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
59 				   uint32_t idx,
60 				   enum bnxt_ulp_prsr_action prsr_act)
61 {
62 	struct ulp_rte_hdr_field *field;
63 
64 	field = &params->hdr_field[idx];
65 	if (ulp_bitmap_notzero(field->mask, field->size)) {
66 		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
67 		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
68 			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
69 		/* Not exact match */
70 		if (!ulp_bitmap_is_ones(field->mask, field->size))
71 			ULP_COMP_FLD_IDX_WR(params,
72 					    BNXT_ULP_CF_IDX_WC_MATCH, 1);
73 	} else {
74 		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
75 	}
76 }
77 
78 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
79 /* Utility function to copy field spec and masks items */
80 static void
81 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
82 		      uint32_t *idx,
83 		      uint32_t size,
84 		      const void *spec_buff,
85 		      const void *mask_buff,
86 		      enum bnxt_ulp_prsr_action prsr_act)
87 {
88 	struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
89 
90 	/* update the field size */
91 	field->size = size;
92 
93 	/* copy the mask specifications only if mask is not null */
94 	if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
95 		memcpy(field->mask, mask_buff, size);
96 		ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
97 	}
98 
99 	/* copy the protocol specifications only if mask is not null*/
100 	if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
101 		memcpy(field->spec, spec_buff, size);
102 
103 	/* Increment the index */
104 	*idx = *idx + 1;
105 }
106 
107 /* Utility function to copy field spec and masks items */
108 static int32_t
109 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
110 			       uint32_t *idx,
111 			       uint32_t size)
112 {
113 	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
114 		BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
115 		return -EINVAL;
116 	}
117 	*idx = params->field_idx;
118 	params->field_idx += size;
119 	return 0;
120 }
121 
/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 *
 * Walks the pattern until RTE_FLOW_ITEM_TYPE_END, dispatching each item
 * to its registered parser callback: standard RTE item types are looked
 * up in ulp_hdr_info[], bnxt vendor-specific types (those at or above
 * BNXT_RTE_FLOW_ITEM_TYPE_END) in ulp_vendor_hdr_info[].
 *
 * Returns BNXT_TF_RC_SUCCESS, BNXT_TF_RC_ERROR when a callback fails,
 * or BNXT_TF_RC_PARSE_ERR for an unsupported item type.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
			      struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item *item = pattern;
	struct bnxt_ulp_rte_hdr_info *hdr_info;

	/* Start field allocation after the implicit SVIF field slots */
	params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;

	/* Parse all the items in the pattern */
	while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->type >= (typeof(item->type))
		    BNXT_RTE_FLOW_ITEM_TYPE_END) {
			/* Vendor item: must be below the LAST sentinel */
			if (item->type >=
			    (typeof(item->type))BNXT_RTE_FLOW_ITEM_TYPE_LAST)
				goto hdr_parser_error;
			/* get the header information */
			hdr_info = &ulp_vendor_hdr_info[item->type -
				BNXT_RTE_FLOW_ITEM_TYPE_END];
		} else {
			/* Standard item: ECPRI is the highest type known
			 * to the ulp_hdr_info table.
			 */
			if (item->type > RTE_FLOW_ITEM_TYPE_ECPRI)
				goto hdr_parser_error;
			hdr_info = &ulp_hdr_info[item->type];
		}
		if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
			goto hdr_parser_error;
		} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
			/* call the registered callback handler */
			if (hdr_info->proto_hdr_func) {
				if (hdr_info->proto_hdr_func(item, params) !=
				    BNXT_TF_RC_SUCCESS) {
					return BNXT_TF_RC_ERROR;
				}
			}
		}
		item++;
	}
	/* update the implied SVIF */
	return ulp_rte_parser_implicit_match_port_process(params);

hdr_parser_error:
	BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
		    item->type);
	return BNXT_TF_RC_PARSE_ERR;
}
171 
172 /*
173  * Function to handle the parsing of RTE Flows and placing
174  * the RTE flow actions into the ulp structures.
175  */
176 int32_t
177 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
178 			      struct ulp_rte_parser_params *params)
179 {
180 	const struct rte_flow_action *action_item = actions;
181 	struct bnxt_ulp_rte_act_info *hdr_info;
182 
183 	/* Parse all the items in the pattern */
184 	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
185 		if (action_item->type >=
186 		    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_END) {
187 			if (action_item->type >=
188 			    (typeof(action_item->type))BNXT_RTE_FLOW_ACTION_TYPE_LAST)
189 				goto act_parser_error;
190 			/* get the header information from bnxt actinfo table */
191 			hdr_info = &ulp_vendor_act_info[action_item->type -
192 				BNXT_RTE_FLOW_ACTION_TYPE_END];
193 		} else {
194 			if (action_item->type > RTE_FLOW_ACTION_TYPE_INDIRECT)
195 				goto act_parser_error;
196 			/* get the header information from the act info table */
197 			hdr_info = &ulp_act_info[action_item->type];
198 		}
199 		if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
200 			goto act_parser_error;
201 		} else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
202 			/* call the registered callback handler */
203 			if (hdr_info->proto_act_func) {
204 				if (hdr_info->proto_act_func(action_item,
205 							     params) !=
206 				    BNXT_TF_RC_SUCCESS) {
207 					return BNXT_TF_RC_ERROR;
208 				}
209 			}
210 		}
211 		action_item++;
212 	}
213 	/* update the implied port details */
214 	ulp_rte_parser_implicit_act_port_process(params);
215 	return BNXT_TF_RC_SUCCESS;
216 
217 act_parser_error:
218 	BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
219 		    action_item->type);
220 	return BNXT_TF_RC_ERROR;
221 }
222 
/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 *
 * Resolves the incoming port to a port-database ifindex and, based on
 * the computed flow direction, records the relevant PARIF/SVIF values
 * in the computed fields. On any port-db lookup failure the function
 * logs and returns early, leaving the remaining fields untouched.
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif, svif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Ingress: record the physical port PARIF and SVIF */
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
		/* Set port SVIF */
		if (ulp_port_db_svif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_SVIF, &svif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_SVIF,
				    svif);
	} else {
		/* Egress: PARIF source depends on the match port type */
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
		/* Record that the match port is a PF */
		if (mtype == BNXT_ULP_INTF_TYPE_PF) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
					    1);
		}
	}
}
304 
/*
 * Post-process a parsed flow: set the direction bitmaps, the VF-to-VF
 * flag, the decrement-TTL computed fields, merge the fast-path header
 * bits, record the flow id, and update the computed interface fields.
 * Always returns 0.
 */
static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* Evaluate the VF to VF flag: both the match port and the
	 * action port are VF representors.
	 */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_VF_TO_VF);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACT_BIT_DEC_TTL)) {
		/*
		 * Check that vxlan proto is included and vxlan decap
		 * action is not set then decrement tunnel ttl.
		 * Similarly add GRE and NVGRE in future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the comp fld fid */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}
367 
/*
 * Function to handle the post processing of the parsing details.
 * Delegates to the normal-flow post-processing handler.
 */
void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
	ulp_post_process_normal_flow(params);
}
376 
377 /*
378  * Function to compute the flow direction based on the match port details
379  */
380 static void
381 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
382 {
383 	enum bnxt_ulp_intf_type match_port_type;
384 
385 	/* Get the match port type */
386 	match_port_type = ULP_COMP_FLD_IDX_RD(params,
387 					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
388 
389 	/* If ingress flow and matchport is vf rep then dir is egress*/
390 	if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
391 	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
392 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
393 				    BNXT_ULP_DIR_EGRESS);
394 	} else {
395 		/* Assign the input direction */
396 		if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
397 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
398 					    BNXT_ULP_DIR_INGRESS);
399 		else
400 			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
401 					    BNXT_ULP_DIR_EGRESS);
402 	}
403 }
404 
/* Common helper to resolve and record the source VIF (SVIF) match.
 * Determines the match port type and flow direction, selects the
 * appropriate SVIF kind (physical port / VF func / drv func), and
 * stores spec, mask and size in the dedicated SVIF header field slot.
 * Fails if an SVIF has already been recorded for this flow.
 */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
			uint32_t ifindex,
			uint16_t mask,
			enum bnxt_ulp_direction_type item_dir)
{
	uint16_t svif;
	enum bnxt_ulp_direction_type dir;
	struct ulp_rte_hdr_field *hdr_field;
	enum bnxt_ulp_svif_type svif_type;
	enum bnxt_ulp_intf_type port_type;

	/* Only one source of SVIF is allowed per flow */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
	    BNXT_ULP_INVALID_SVIF_VAL) {
		BNXT_TF_DBG(ERR,
			    "SVIF already set,multiple source not support'd\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get port type details */
	port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the match port type */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);

	/* compute the direction */
	bnxt_ulp_rte_parser_direction_compute(params);

	/* An explicit item direction overrides the computed direction */
	dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
		ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	if (dir == BNXT_ULP_DIR_INGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		svif_type = BNXT_ULP_PHY_PORT_SVIF;
	} else {
		/* VF reps (unless explicitly egress) use the VF func
		 * SVIF; everything else uses the drv func SVIF.
		 */
		if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
		    item_dir != BNXT_ULP_DIR_EGRESS)
			svif_type = BNXT_ULP_VF_FUNC_SVIF;
		else
			svif_type = BNXT_ULP_DRV_FUNC_SVIF;
	}
	ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type, &svif);
	/* SVIF is stored big-endian in the header field spec */
	svif = rte_cpu_to_be_16(svif);
	hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
	memcpy(hdr_field->spec, &svif, sizeof(svif));
	memcpy(hdr_field->mask, &mask, sizeof(mask));
	hdr_field->size = sizeof(svif);
	/* Flag the SVIF as set (CPU-endian value in the comp field) */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    rte_be_to_cpu_16(svif));
	return BNXT_TF_RC_SUCCESS;
}
461 
462 /* Function to handle the parsing of the RTE port id */
463 int32_t
464 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
465 {
466 	uint16_t port_id = 0;
467 	uint16_t svif_mask = 0xFFFF;
468 	uint32_t ifindex;
469 	int32_t rc = BNXT_TF_RC_ERROR;
470 
471 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
472 	    BNXT_ULP_INVALID_SVIF_VAL)
473 		return BNXT_TF_RC_SUCCESS;
474 
475 	/* SVIF not set. So get the port id */
476 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
477 
478 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
479 					      port_id,
480 					      &ifindex)) {
481 		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
482 		return rc;
483 	}
484 
485 	/* Update the SVIF details */
486 	rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
487 				     BNXT_ULP_DIR_INVALID);
488 	return rc;
489 }
490 
491 /* Function to handle the implicit action port id */
492 int32_t
493 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
494 {
495 	struct rte_flow_action action_item = {0};
496 	struct rte_flow_action_port_id port_id = {0};
497 
498 	/* Read the action port set bit */
499 	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
500 		/* Already set, so just exit */
501 		return BNXT_TF_RC_SUCCESS;
502 	}
503 	port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
504 	action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
505 	action_item.conf = &port_id;
506 
507 	/* Update the action port based on incoming port */
508 	ulp_rte_port_act_handler(&action_item, params);
509 
510 	/* Reset the action port set bit */
511 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
512 	return BNXT_TF_RC_SUCCESS;
513 }
514 
/* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT.
 * Extracts the ethdev id and mask (PORT_REPRESENTOR forces ingress,
 * REPRESENTED_PORT forces egress, PORT_ID leaves the direction to be
 * computed), converts the DPDK port to a bnxt ifindex, and records the
 * SVIF match. Both spec and mask are required.
 */
int32_t
ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_direction_type item_dir;
	uint16_t ethdev_id;
	uint16_t mask = 0;
	uint32_t ifindex;
	int32_t rc = BNXT_TF_RC_PARSE_ERR;

	if (!item->spec) {
		BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
		return rc;
	}
	if (!item->mask) {
		BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
		return rc;
	}

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_PORT_ID: {
		const struct rte_flow_item_port_id *port_spec = item->spec;
		const struct rte_flow_item_port_id *port_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INVALID;
		ethdev_id = port_spec->id;
		mask = port_mask->id;

		/* A zero mask means "any port": flag the SVIF as ignored
		 * for matching.
		 * NOTE(review): the replacement mask is 0xff rather than
		 * 0xffff here — confirm this asymmetry is intentional.
		 */
		if (!port_mask->id) {
			ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF_IGNORE);
			mask = 0xff;
		}
		break;
	}
	case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_INGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
		const struct rte_flow_item_ethdev *ethdev_mask = item->mask;

		item_dir = BNXT_ULP_DIR_EGRESS;
		ethdev_id = ethdev_spec->port_id;
		mask = ethdev_mask->port_id;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
		return rc;
	}

	/* perform the conversion from dpdk port to bnxt ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return rc;
	}
	/* Update the SVIF details */
	return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}
583 
/* Function to handle the update of proto header based on field values.
 * Sets the inner/outer L3 header bits (IPv4/IPv6), or the eCPRI/RoE
 * bits, according to the (big-endian) ether type, and records the
 * VLAN-tag presence computed fields when a vlan mask is supplied.
 */
static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
			     uint16_t type, uint32_t in_flag,
			     uint32_t has_vlan, uint32_t has_vlan_mask)
{
/* RoE frame ether type; not defined in rte_ether.h */
#define ULP_RTE_ETHER_TYPE_ROE	0xfc3d

	if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV4);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_IPV6);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
		}
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
		/* A VLAN ether type implies a tag is present */
		has_vlan_mask = 1;
		has_vlan = 1;
	} else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_ECPRI)) {
		/* Update the hdr_bitmap with eCPRI */
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				BNXT_ULP_HDR_BIT_O_ECPRI);
	} else if (type == tfp_cpu_to_be_16(ULP_RTE_ETHER_TYPE_ROE)) {
		/* Update the hdr_bitmap with RoE */
		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				BNXT_ULP_HDR_BIT_O_ROE);
	}

	/* Record the VLAN presence fields only when the mask is set */
	if (has_vlan_mask) {
		if (in_flag) {
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_HAS_VTAG,
					    has_vlan);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_I_VLAN_NO_IGNORE,
					    1);
		} else {
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_HAS_VTAG,
					    has_vlan);
			ULP_COMP_FLD_IDX_WR(param,
					    BNXT_ULP_CF_IDX_O_VLAN_NO_IGNORE,
					    1);
		}
	}
}
641 
642 /* Internal Function to identify broadcast or multicast packets */
643 static int32_t
644 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
645 {
646 	if (rte_is_multicast_ether_addr(eth_addr) ||
647 	    rte_is_broadcast_ether_addr(eth_addr)) {
648 		BNXT_TF_DBG(DEBUG,
649 			    "No support for bcast or mcast addr offload\n");
650 		return 1;
651 	}
652 	return 0;
653 }
654 
/* Function to handle the parsing of RTE Flow item Ethernet Header.
 * Rejects broadcast/multicast addresses (unless the app supports them),
 * copies dst/src MAC and ether type into the header field array, marks
 * the item as inner or outer ethernet based on the headers already
 * seen, and updates the L2 protocol bitmaps from the ether type.
 */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	uint32_t idx = 0, dmac_idx = 0;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t has_vlan = 0, has_vlan_mask = 0;

	/* Perform validations */
	if (eth_spec) {
		/* Avoid multicast and broadcast addr */
		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.dst_addr))
			return BNXT_TF_RC_PARSE_ERR;

		if (!ULP_APP_BC_MC_SUPPORT(params->ulp_ctx) &&
		    ulp_rte_parser_is_bcmc_addr(&eth_spec->hdr.src_addr))
			return BNXT_TF_RC_PARSE_ERR;

		eth_type = eth_spec->hdr.ether_type;
		has_vlan = eth_spec->has_vlan;
	}
	if (eth_mask) {
		/* Apply the mask so unmatched ether-type bits are zero */
		eth_type &= eth_mask->hdr.ether_type;
		has_vlan_mask = eth_mask->has_vlan;
	}

	/* Reserve the ethernet header field slots */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ETH_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}
	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	dmac_idx = idx;
	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.dst_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.dst_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.dst_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.src_addr.addr_bytes);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.src_addr.addr_bytes),
			      ulp_deference_struct(eth_mask, hdr.src_addr.addr_bytes),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ether type participates in matching only when the app
	 * supports TOS/proto matching; otherwise it is ignored.
	 */
	size = sizeof(((struct rte_flow_item_eth *)NULL)->hdr.ether_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(eth_spec, hdr.ether_type),
			      ulp_deference_struct(eth_mask, hdr.ether_type),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	/* Update the protocol hdr bitmap: if any outer L2/L3/L4 header
	 * was already parsed, this ethernet header is the inner one.
	 */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
		/* Remember the outer DMAC slot for tunnel offload use */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
				    dmac_idx);
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag,
				     has_vlan, has_vlan_mask);

	return BNXT_TF_RC_SUCCESS;
}
740 
/* Function to handle the parsing of RTE Flow item Vlan Header.
 * Splits the TCI into priority and VLAN id, copies them (priority is
 * mask-ignored) plus the inner ether type into the header field array,
 * and updates the outer/inner VLAN tag counters and header bits.
 * Supports up to two outer and two inner tags; a vlan item seen before
 * any ethernet header is an error.
 */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_vlan *vlan_spec = item->spec;
	const struct rte_flow_item_vlan *vlan_mask = item->mask;
	struct ulp_rte_hdr_bitmap	*hdr_bit;
	uint32_t idx = 0;
	uint16_t vlan_tag = 0, priority = 0;
	uint16_t vlan_tag_mask = 0, priority_mask = 0;
	uint32_t outer_vtag_num;
	uint32_t inner_vtag_num;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;
	uint32_t size;

	if (vlan_spec) {
		/* Split the big-endian TCI into priority and VLAN id */
		vlan_tag = ntohs(vlan_spec->hdr.vlan_tci);
		priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag &= ULP_VLAN_TAG_MASK;
		vlan_tag = htons(vlan_tag);
		eth_type = vlan_spec->hdr.eth_proto;
	}

	if (vlan_mask) {
		vlan_tag_mask = ntohs(vlan_mask->hdr.vlan_tci);
		priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
		vlan_tag_mask &= 0xfff;

		/*
		 * the storage for priority and vlan tag is 2 bytes
		 * The mask of priority which is 3 bits if it is all 1's
		 * then make the rest bits 13 bits as 1's
		 * so that it is matched as exact match.
		 */
		if (priority_mask == ULP_VLAN_PRIORITY_MASK)
			priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
		if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
			vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
		vlan_tag_mask = htons(vlan_tag_mask);
	}

	/* Reserve the vlan header field slots */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for vlan into hdr_field using Vlan
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.vlan_tci);
	/*
	 * The priority field is ignored since OVS is setting it as
	 * wild card match and it is not supported. This is a work
	 * around and shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &priority,
			      (vlan_mask) ? &priority_mask : NULL,
			      ULP_PRSR_ACT_MASK_IGNORE);

	ulp_rte_prsr_fld_mask(params, &idx, size,
			      &vlan_tag,
			      (vlan_mask) ? &vlan_tag_mask : NULL,
			      ULP_PRSR_ACT_DEFAULT);

	/* The encapsulated ether type is excluded from matching */
	size = sizeof(((struct rte_flow_item_vlan *)NULL)->hdr.eth_proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(vlan_spec, hdr.eth_proto),
			      ulp_deference_struct(vlan_mask, hdr.eth_proto),
			      ULP_PRSR_ACT_MATCH_IGNORE);

	/* Get the outer tag and inner tag counts */
	outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_O_VTAG_NUM);
	inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
					     BNXT_ULP_CF_IDX_I_VTAG_NUM);

	/* Update the hdr_bitmap of the vlans */
	hdr_bit = &params->hdr_bitmap;
	if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
	    !outer_vtag_num) {
		/* First tag after the outer ethernet header */
		/* Update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   outer_vtag_num == 1) {
		/* Second tag after the outer ethernet header */
		/* update the vlan tag num */
		outer_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
				    outer_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_OI_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);

	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   !inner_vtag_num) {
		/* First tag after the inner ethernet header */
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_HAS_VTAG, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_IO_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
		   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
		   inner_vtag_num == 1) {
		/* Second tag after the inner ethernet header */
		/* update the vlan tag num */
		inner_vtag_num++;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
				    inner_vtag_num);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_HDR_BIT_II_VLAN);
		if (vlan_mask && vlan_tag_mask)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
		inner_flag = 1;
	} else {
		BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Update the field protocol hdr bitmap */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag, 1, 1);
	return BNXT_TF_RC_SUCCESS;
}
892 
/* Function to handle the update of proto header based on field values.
 * Sets the inner/outer L4 (UDP/TCP), GRE or ICMP header bits for the
 * given IP protocol number, and always records the protocol id and its
 * full-byte flag in the computed fields.
 */
static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
			     uint8_t proto, uint32_t in_flag)
{
	if (proto == IPPROTO_UDP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_UDP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_TCP) {
		if (in_flag) {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_I_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
		} else {
			ULP_BITMAP_SET(param->hdr_fp_bit.bits,
				       BNXT_ULP_HDR_BIT_O_TCP);
			ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
		}
	} else if (proto == IPPROTO_GRE) {
		ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
	} else if (proto == IPPROTO_ICMP) {
		/* ICMP inside an L3 tunnel counts as inner ICMP */
		if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_I_ICMP);
		else
			ULP_BITMAP_SET(param->hdr_bitmap.bits,
				       BNXT_ULP_HDR_BIT_O_ICMP);
	}

	/* Record the protocol id and full-byte proto flag */
	if (in_flag) {
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    proto);
	} else {
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		ULP_COMP_FLD_IDX_WR(param,
				    BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    proto);
	}
}
945 
/* Function to handle the parsing of RTE Flow item IPV4 Header.
 *
 * Copies every IPv4 header field (spec + mask) from the flow item into
 * params->hdr_field in fixed order, classifies the header as inner or
 * outer, and updates the header bitmap and computed fields accordingly.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size;
	uint8_t proto = 0;
	uint8_t proto_mask = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Reserve hdr_field slots; idx now points at this header's base. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields.  NOTE: the calls below must stay in IPv4 field
	 * order because each one advances idx by one field slot.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.version_ihl),
			      ulp_deference_struct(ipv4_mask, hdr.version_ihl),
			      ULP_PRSR_ACT_DEFAULT);

	/*
	 * The tos field is ignored since OVS is setting it as wild card
	 * match and it is not supported. An application can enable tos support.
	 */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.type_of_service),
			      ulp_deference_struct(ipv4_mask,
						   hdr.type_of_service),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.total_length),
			      ulp_deference_struct(ipv4_mask, hdr.total_length),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.packet_id),
			      ulp_deference_struct(ipv4_mask, hdr.packet_id),
			      ULP_PRSR_ACT_DEFAULT);

	/* Fragment offset mask is always ignored for matching. */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.fragment_offset),
			      ulp_deference_struct(ipv4_mask,
						   hdr.fragment_offset),
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.time_to_live),
			      ulp_deference_struct(ipv4_mask, hdr.time_to_live),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for matching templates */
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec,
						   hdr.next_proto_id),
			      ulp_deference_struct(ipv4_mask,
						   hdr.next_proto_id),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	if (ipv4_spec)
		proto = ipv4_spec->hdr.next_proto_id;

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
			      ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.src_addr),
			      ulp_deference_struct(ipv4_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Remember the dest-IP field index for tunnel offload use below. */
	dip_idx = idx;
	size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv4_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv4_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		/* An outer L3 (or tunnel) exists, so this is the inner IPv4. */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
	if (ipv4_mask) {
		proto &= ipv4_mask->hdr.next_proto_id;
		proto_mask = ipv4_mask->hdr.next_proto_id;
	}

	/* Update the field protocol hdr bitmap */
	if (proto_mask)
		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1085 
/* Function to handle the parsing of RTE Flow item IPV6 Header.
 *
 * Splits the big-endian vtc_flow word into version, traffic class and
 * flow label, copies every IPv6 header field (spec + mask) into
 * params->hdr_field in fixed order, classifies the header as inner or
 * outer, and updates the header bitmap and computed fields.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6	*ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6	*ipv6_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, dip_idx = 0;
	uint32_t size, vtc_flow;
	uint32_t ver_spec = 0, ver_mask = 0;
	uint32_t tc_spec = 0, tc_mask = 0;
	uint32_t lab_spec = 0, lab_mask = 0;
	uint8_t proto = 0;
	uint8_t proto_mask = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Reserve hdr_field slots; idx now points at this header's base. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields.  vtc_flow is decomposed into its version,
	 * traffic-class and flow-label parts, each stored back in
	 * network byte order.
	 */
	if (ipv6_spec) {
		vtc_flow = ntohl(ipv6_spec->hdr.vtc_flow);
		ver_spec = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
		tc_spec = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
		lab_spec = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));
		proto = ipv6_spec->hdr.proto;
	}

	if (ipv6_mask) {
		vtc_flow = ntohl(ipv6_mask->hdr.vtc_flow);
		ver_mask = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow));
		tc_mask = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow));
		lab_mask = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow));

		/* Some of the PMD applications may set the protocol field
		 * in the IPv6 spec but don't set the mask. So, consider
		 * the mask in proto value calculation.
		 */
		proto &= ipv6_mask->hdr.proto;
		proto_mask = ipv6_mask->hdr.proto;
	}

	/* NOTE: the calls below must stay in field order; each advances idx. */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
	ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
			      ULP_PRSR_ACT_DEFAULT);
	/*
	 * The TC and flow label field are ignored since OVS is
	 * setting it for match and it is not supported.
	 * This is a work around and
	 * shall be addressed in the future.
	 */
	ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE);
	ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
			      ULP_PRSR_ACT_MASK_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.payload_len),
			      ulp_deference_struct(ipv6_mask, hdr.payload_len),
			      ULP_PRSR_ACT_DEFAULT);

	/* Ignore proto for template matching */
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.proto),
			      ulp_deference_struct(ipv6_mask, hdr.proto),
			      (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ?
			      ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.hop_limits),
			      ulp_deference_struct(ipv6_mask, hdr.hop_limits),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.src_addr),
			      ulp_deference_struct(ipv6_mask, hdr.src_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Remember the dest-IP field index for tunnel offload use below. */
	dip_idx =  idx;
	size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(ipv6_spec, hdr.dst_addr),
			      ulp_deference_struct(ipv6_mask, hdr.dst_addr),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) {
		/* An outer L3 (or tunnel) exists, so this is the inner IPv6. */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
		/* Update the tunnel offload dest ip offset */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
				    dip_idx);
	}

	/* Update the field protocol hdr bitmap */
	if (proto_mask)
		ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
1213 
/* Function to handle the update of proto header based on field values.
 *
 * Records the L4 port spec/mask values (converted from big-endian) and
 * the implied L3 protocol id into the computed fields, selected by
 * whether hdr_bit refers to the inner or outer TCP/UDP header.  An
 * outer UDP destination port of 4789 additionally flags the flow as a
 * VXLAN tunnel.  src_port/dst_port and their masks are expected in
 * network byte order (they are passed through rte_be_to_cpu_16 here).
 */
static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
			     uint16_t src_port, uint16_t src_mask,
			     uint16_t dst_port, uint16_t dst_mask,
			     enum bnxt_ulp_hdr_bit hdr_bit)
{
	switch (hdr_bit) {
	case BNXT_ULP_HDR_BIT_I_UDP:
	case BNXT_ULP_HDR_BIT_I_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
				    1);
		/* FB flags: nonzero only when spec & mask leaves a match. */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		/* The L4 header implies the inner L3 protocol id. */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	case BNXT_ULP_HDR_BIT_O_UDP:
	case BNXT_ULP_HDR_BIT_O_TCP:
		ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
				    (uint64_t)rte_be_to_cpu_16(src_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
				    (uint64_t)rte_be_to_cpu_16(dst_port));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
				    (uint64_t)rte_be_to_cpu_16(dst_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
				    1);
		/* FB flags: nonzero only when spec & mask leaves a match. */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
				    !!(src_port & src_mask));
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
				    !!(dst_port & dst_mask));
		/* The L4 header implies the outer L3 protocol id. */
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
				    (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
				    IPPROTO_UDP : IPPROTO_TCP);
		break;
	default:
		break;
	}

	/* Outer UDP dst port 4789 (VXLAN) marks the flow as a tunnel. */
	if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
	    tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
		ULP_BITMAP_SET(params->hdr_fp_bit.bits,
			       BNXT_ULP_HDR_BIT_T_VXLAN);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
	}
}
1277 
1278 /* Function to handle the parsing of RTE Flow item UDP Header. */
1279 int32_t
1280 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1281 			struct ulp_rte_parser_params *params)
1282 {
1283 	const struct rte_flow_item_udp *udp_spec = item->spec;
1284 	const struct rte_flow_item_udp *udp_mask = item->mask;
1285 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1286 	uint32_t idx = 0;
1287 	uint32_t size;
1288 	uint16_t dport = 0, sport = 0;
1289 	uint16_t dport_mask = 0, sport_mask = 0;
1290 	uint32_t cnt;
1291 	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;
1292 
1293 	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1294 	if (cnt == 2) {
1295 		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1296 		return BNXT_TF_RC_ERROR;
1297 	}
1298 
1299 	if (udp_spec) {
1300 		sport = udp_spec->hdr.src_port;
1301 		dport = udp_spec->hdr.dst_port;
1302 	}
1303 	if (udp_mask) {
1304 		sport_mask = udp_mask->hdr.src_port;
1305 		dport_mask = udp_mask->hdr.dst_port;
1306 	}
1307 
1308 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1309 					   BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1310 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1311 		return BNXT_TF_RC_ERROR;
1312 	}
1313 
1314 	/*
1315 	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1316 	 * header fields
1317 	 */
1318 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1319 	ulp_rte_prsr_fld_mask(params, &idx, size,
1320 			      ulp_deference_struct(udp_spec, hdr.src_port),
1321 			      ulp_deference_struct(udp_mask, hdr.src_port),
1322 			      ULP_PRSR_ACT_DEFAULT);
1323 
1324 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1325 	ulp_rte_prsr_fld_mask(params, &idx, size,
1326 			      ulp_deference_struct(udp_spec, hdr.dst_port),
1327 			      ulp_deference_struct(udp_mask, hdr.dst_port),
1328 			      ULP_PRSR_ACT_DEFAULT);
1329 
1330 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1331 	ulp_rte_prsr_fld_mask(params, &idx, size,
1332 			      ulp_deference_struct(udp_spec, hdr.dgram_len),
1333 			      ulp_deference_struct(udp_mask, hdr.dgram_len),
1334 			      ULP_PRSR_ACT_DEFAULT);
1335 
1336 	size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1337 	ulp_rte_prsr_fld_mask(params, &idx, size,
1338 			      ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1339 			      ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1340 			      ULP_PRSR_ACT_DEFAULT);
1341 
1342 	/* Set the udp header bitmap and computed l4 header bitmaps */
1343 	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1344 	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
1345 	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1346 		out_l4 = BNXT_ULP_HDR_BIT_I_UDP;
1347 
1348 	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
1349 				     dport_mask, out_l4);
1350 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1351 	return BNXT_TF_RC_SUCCESS;
1352 }
1353 
/* Function to handle the parsing of RTE Flow item TCP Header.
 *
 * Copies every TCP header field (spec + mask) into params->hdr_field in
 * fixed order, decides whether this is the inner or outer TCP header,
 * and forwards the port values to ulp_rte_l4_proto_type_update().
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t size;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;

	/* Only two stacked L4 headers (outer + inner) are supported. */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Extract the big-endian port spec/mask values, if present. */
	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}
	if (tcp_mask) {
		sport_mask = tcp_mask->hdr.src_port;
		dport_mask = tcp_mask->hdr.dst_port;
	}

	/* Reserve hdr_field slots; idx now points at this header's base. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_TCP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields.  NOTE: the calls below must stay in TCP field
	 * order because each one advances idx by one field slot.
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.src_port),
			      ulp_deference_struct(tcp_mask, hdr.src_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.dst_port),
			      ulp_deference_struct(tcp_mask, hdr.dst_port),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.sent_seq),
			      ulp_deference_struct(tcp_mask, hdr.sent_seq),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.recv_ack),
			      ulp_deference_struct(tcp_mask, hdr.recv_ack),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.data_off),
			      ulp_deference_struct(tcp_mask, hdr.data_off),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			      ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.rx_win),
			      ulp_deference_struct(tcp_mask, hdr.rx_win),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.cksum),
			      ulp_deference_struct(tcp_mask, hdr.cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			      ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			      ULP_PRSR_ACT_DEFAULT);

	/* Set the tcp header bitmap and computed l4 header bitmaps.
	 * An existing outer L4 or tunnel makes this the inner TCP header.
	 */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1459 
1460 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1461 int32_t
1462 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1463 			  struct ulp_rte_parser_params *params)
1464 {
1465 	const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1466 	const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1467 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1468 	uint32_t idx = 0;
1469 	uint16_t dport;
1470 	uint32_t size;
1471 
1472 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1473 					   BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1474 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1475 		return BNXT_TF_RC_ERROR;
1476 	}
1477 
1478 	/*
1479 	 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1480 	 * header fields
1481 	 */
1482 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.flags);
1483 	ulp_rte_prsr_fld_mask(params, &idx, size,
1484 			      ulp_deference_struct(vxlan_spec, hdr.flags),
1485 			      ulp_deference_struct(vxlan_mask, hdr.flags),
1486 			      ULP_PRSR_ACT_DEFAULT);
1487 
1488 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd0);
1489 	ulp_rte_prsr_fld_mask(params, &idx, size,
1490 			      ulp_deference_struct(vxlan_spec, hdr.rsvd0),
1491 			      ulp_deference_struct(vxlan_mask, hdr.rsvd0),
1492 			      ULP_PRSR_ACT_DEFAULT);
1493 
1494 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.vni);
1495 	ulp_rte_prsr_fld_mask(params, &idx, size,
1496 			      ulp_deference_struct(vxlan_spec, hdr.vni),
1497 			      ulp_deference_struct(vxlan_mask, hdr.vni),
1498 			      ULP_PRSR_ACT_DEFAULT);
1499 
1500 	size = sizeof(((struct rte_flow_item_vxlan *)NULL)->hdr.rsvd1);
1501 	ulp_rte_prsr_fld_mask(params, &idx, size,
1502 			      ulp_deference_struct(vxlan_spec, hdr.rsvd1),
1503 			      ulp_deference_struct(vxlan_mask, hdr.rsvd1),
1504 			      ULP_PRSR_ACT_DEFAULT);
1505 
1506 	/* Update the hdr_bitmap with vxlan */
1507 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1508 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1509 
1510 	dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT);
1511 	if (!dport) {
1512 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1513 				    ULP_UDP_PORT_VXLAN);
1514 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
1515 				    ULP_UDP_PORT_VXLAN_MASK);
1516 	}
1517 
1518 	return BNXT_TF_RC_SUCCESS;
1519 }
1520 
1521 /* Function to handle the parsing of RTE Flow item GRE Header. */
1522 int32_t
1523 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1524 			struct ulp_rte_parser_params *params)
1525 {
1526 	const struct rte_flow_item_gre *gre_spec = item->spec;
1527 	const struct rte_flow_item_gre *gre_mask = item->mask;
1528 	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1529 	uint32_t idx = 0;
1530 	uint32_t size;
1531 
1532 	if (ulp_rte_prsr_fld_size_validate(params, &idx,
1533 					   BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1534 		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1535 		return BNXT_TF_RC_ERROR;
1536 	}
1537 
1538 	size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1539 	ulp_rte_prsr_fld_mask(params, &idx, size,
1540 			      ulp_deference_struct(gre_spec, c_rsvd0_ver),
1541 			      ulp_deference_struct(gre_mask, c_rsvd0_ver),
1542 			      ULP_PRSR_ACT_DEFAULT);
1543 
1544 	size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1545 	ulp_rte_prsr_fld_mask(params, &idx, size,
1546 			      ulp_deference_struct(gre_spec, protocol),
1547 			      ulp_deference_struct(gre_mask, protocol),
1548 			      ULP_PRSR_ACT_DEFAULT);
1549 
1550 	/* Update the hdr_bitmap with GRE */
1551 	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1552 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1553 	return BNXT_TF_RC_SUCCESS;
1554 }
1555 
/* Function to handle the parsing of RTE Flow item ANY.
 *
 * The ANY item carries no fields to copy into hdr_field, so it is
 * accepted without updating any parser state.
 */
int32_t
ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
1563 
/* Function to handle the parsing of RTE Flow item ICMP Header.
 *
 * Copies every ICMP header field (spec + mask) into params->hdr_field
 * in fixed order and sets the inner or outer ICMP header bit depending
 * on whether an L3 tunnel was already seen.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp *icmp_spec = item->spec;
	const struct rte_flow_item_icmp *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	/* Reserve hdr_field slots; idx now points at this header's base. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Copy each ICMP header field (spec + mask) into hdr_field, in order. */
	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_type),
			      ulp_deference_struct(icmp_mask, hdr.icmp_type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_code),
			      ulp_deference_struct(icmp_mask, hdr.icmp_code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
			      ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_ident),
			      ulp_deference_struct(icmp_mask, hdr.icmp_ident),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
			      ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
			      ULP_PRSR_ACT_DEFAULT);

	/* Update the hdr_bitmap with ICMP; inner when inside a tunnel. */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
1618 
/* Function to handle the parsing of RTE Flow item ICMP6 Header.
 *
 * Copies the ICMPv6 type/code/checksum fields (spec + mask) into
 * params->hdr_field in fixed order, rejects the combination with an
 * outer IPv4 header, and sets the inner or outer ICMP header bit
 * depending on whether an L3 tunnel was already seen.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
	const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint32_t size;

	/* Reserve hdr_field slots; idx now points at this header's base. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Copy each ICMPv6 field (spec + mask) into hdr_field, in order. */
	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, type),
			      ulp_deference_struct(icmp_mask, type),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, code),
			      ulp_deference_struct(icmp_mask, code),
			      ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(icmp_spec, checksum),
			      ulp_deference_struct(icmp_mask, checksum),
			      ULP_PRSR_ACT_DEFAULT);

	/* ICMPv6 cannot follow an outer IPv4 header. */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
		BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the hdr_bitmap with ICMP; inner when inside a tunnel. */
	if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
	else
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
	return BNXT_TF_RC_SUCCESS;
}
1666 
/* Function to handle the parsing of RTE Flow item ECPRI Header.
 * Normalizes the spec/mask (fixed revision, per-message-type sub-field
 * masking) and copies the common word plus the PC/RTC/MSR id into the
 * parser field table. Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_ecpri_hdr_handler(const struct rte_flow_item *item,
			  struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ecpri *ecpri_spec = item->spec;
	const struct rte_flow_item_ecpri *ecpri_mask = item->mask;
	/* Local mutable copies: the incoming spec/mask are const and must
	 * be normalized before being handed to the field parser.
	 */
	struct rte_flow_item_ecpri l_ecpri_spec, l_ecpri_mask;
	struct rte_flow_item_ecpri *p_ecpri_spec = &l_ecpri_spec;
	struct rte_flow_item_ecpri *p_ecpri_mask = &l_ecpri_mask;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0, cnt;
	uint32_t size;

	/* Reserve hdr_field slots for the eCPRI protocol fields. */
	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_ECPRI_NUM)) {
		BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Figure out if eCPRI is within L4(UDP), unsupported, for now */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt >= 1) {
		BNXT_TF_DBG(ERR, "Parse Err: L4 header stack >= 2 not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Wildcard match: no fields to program, just set the header bit. */
	if (!ecpri_spec || !ecpri_mask)
		goto parser_set_ecpri_hdr_bit;

	memcpy(p_ecpri_spec, ecpri_spec, sizeof(*ecpri_spec));
	memcpy(p_ecpri_mask, ecpri_mask, sizeof(*ecpri_mask));

	/* Work on the common header word in host byte order; it is swapped
	 * back to big endian below before the copy into the parser fields.
	 */
	p_ecpri_spec->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_spec->hdr.common.u32);
	p_ecpri_mask->hdr.common.u32 = rte_be_to_cpu_32(p_ecpri_mask->hdr.common.u32);

	/*
	 * Init eCPRI spec+mask to correct defaults, also clear masks of fields
	 * we ignore in the TCAM.
	 */

	l_ecpri_spec.hdr.common.size = 0;
	l_ecpri_spec.hdr.common.c = 0;
	l_ecpri_spec.hdr.common.res = 0;
	l_ecpri_spec.hdr.common.revision = 1;
	l_ecpri_mask.hdr.common.size = 0;
	l_ecpri_mask.hdr.common.c = 1;
	l_ecpri_mask.hdr.common.res = 0;
	l_ecpri_mask.hdr.common.revision = 0xf;

	/* Per message type, drop sub-fields that are not matched on.
	 * NOTE(review): types 0-4 clear mask fields while types 5-7 clear
	 * spec fields - confirm the latter asymmetry is intentional.
	 */
	switch (p_ecpri_spec->hdr.common.type) {
	case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		l_ecpri_mask.hdr.type0.seq_id = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_BIT_SEQ:
		l_ecpri_mask.hdr.type1.seq_id = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		l_ecpri_mask.hdr.type2.seq_id = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_GEN_DATA:
		l_ecpri_mask.hdr.type3.seq_id = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_RM_ACC:
		l_ecpri_mask.hdr.type4.rr = 0;
		l_ecpri_mask.hdr.type4.rw = 0;
		l_ecpri_mask.hdr.type4.rma_id = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_DLY_MSR:
		l_ecpri_spec.hdr.type5.act_type = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_RMT_RST:
		l_ecpri_spec.hdr.type6.rst_op = 0;
		break;

	case RTE_ECPRI_MSG_TYPE_EVT_IND:
		l_ecpri_spec.hdr.type7.evt_type = 0;
		l_ecpri_spec.hdr.type7.seq = 0;
		l_ecpri_spec.hdr.type7.number = 0;
		break;

	default:
		break;
	}

	/* Restore network byte order for the field copy. */
	p_ecpri_spec->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_spec->hdr.common.u32);
	p_ecpri_mask->hdr.common.u32 = rte_cpu_to_be_32(p_ecpri_mask->hdr.common.u32);

	/* Type */
	size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.common.u32);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(p_ecpri_spec, hdr.common.u32),
			      ulp_deference_struct(p_ecpri_mask, hdr.common.u32),
			      ULP_PRSR_ACT_DEFAULT);

	/* PC/RTC/MSR_ID */
	size = sizeof(((struct rte_flow_item_ecpri *)NULL)->hdr.dummy[0]);
	ulp_rte_prsr_fld_mask(params, &idx, size,
			      ulp_deference_struct(p_ecpri_spec, hdr.dummy),
			      ulp_deference_struct(p_ecpri_mask, hdr.dummy),
			      ULP_PRSR_ACT_DEFAULT);

parser_set_ecpri_hdr_bit:
	/* Update the hdr_bitmap with eCPRI */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ECPRI);
	return BNXT_TF_RC_SUCCESS;
}
1780 
/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	/* VOID items carry no match data; nothing to parse. */
	return BNXT_TF_RC_SUCCESS;
}
1788 
/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	/* VOID actions have no effect; accept and continue. */
	return BNXT_TF_RC_SUCCESS;
}
1796 
1797 /* Function to handle the parsing of RTE Flow action Mark Header. */
1798 int32_t
1799 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1800 			 struct ulp_rte_parser_params *param)
1801 {
1802 	const struct rte_flow_action_mark *mark;
1803 	struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1804 	uint32_t mark_id;
1805 
1806 	mark = action_item->conf;
1807 	if (mark) {
1808 		mark_id = tfp_cpu_to_be_32(mark->id);
1809 		memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1810 		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1811 
1812 		/* Update the hdr_bitmap with vxlan */
1813 		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1814 		return BNXT_TF_RC_SUCCESS;
1815 	}
1816 	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1817 	return BNXT_TF_RC_ERROR;
1818 }
1819 
1820 /* Function to handle the parsing of RTE Flow action RSS Header. */
1821 int32_t
1822 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1823 			struct ulp_rte_parser_params *param)
1824 {
1825 	const struct rte_flow_action_rss *rss;
1826 	struct ulp_rte_act_prop *ap = &param->act_prop;
1827 	uint64_t queue_list[BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE / sizeof(uint64_t)];
1828 	uint32_t idx = 0, id;
1829 
1830 	if (action_item == NULL || action_item->conf == NULL) {
1831 		BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1832 		return BNXT_TF_RC_ERROR;
1833 	}
1834 
1835 	rss = action_item->conf;
1836 	/* Copy the rss into the specific action properties */
1837 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1838 	       BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1839 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1840 	       BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1841 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1842 	       &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
1843 
1844 	if (rss->key_len != 0 && rss->key_len != BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1845 		BNXT_TF_DBG(ERR, "Parse Err: RSS key length must be 40 bytes\n");
1846 		return BNXT_TF_RC_ERROR;
1847 	}
1848 
1849 	/* User may specify only key length. In that case, rss->key will be NULL.
1850 	 * So, reject the flow if key_length is valid but rss->key is NULL.
1851 	 * Also, copy the RSS hash key only when rss->key is valid.
1852 	 */
1853 	if (rss->key_len != 0 && rss->key == NULL) {
1854 		BNXT_TF_DBG(ERR,
1855 			    "Parse Err: A valid RSS key must be provided with a valid key len.\n");
1856 		return BNXT_TF_RC_ERROR;
1857 	}
1858 	if (rss->key)
1859 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key, rss->key_len);
1860 
1861 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM],
1862 	       &rss->queue_num, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE_NUM);
1863 
1864 	if (rss->queue_num >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) {
1865 		BNXT_TF_DBG(ERR, "Parse Err: RSS queue num too big\n");
1866 		return BNXT_TF_RC_ERROR;
1867 	}
1868 
1869 	/* Queues converted into a bitmap format */
1870 	memset(queue_list, 0, sizeof(queue_list));
1871 	for (idx = 0; idx < rss->queue_num; idx++) {
1872 		id = rss->queue[idx];
1873 		if (id >= ULP_BYTE_2_BITS(BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE)) {
1874 			BNXT_TF_DBG(ERR, "Parse Err: RSS queue id too big\n");
1875 			return BNXT_TF_RC_ERROR;
1876 		}
1877 		if ((queue_list[id / ULP_INDEX_BITMAP_SIZE] >>
1878 		    ((ULP_INDEX_BITMAP_SIZE - 1) -
1879 		     (id % ULP_INDEX_BITMAP_SIZE)) & 1)) {
1880 			BNXT_TF_DBG(ERR, "Parse Err: duplicate queue ids\n");
1881 			return BNXT_TF_RC_ERROR;
1882 		}
1883 		queue_list[id / ULP_INDEX_BITMAP_SIZE] |= (1UL <<
1884 		((ULP_INDEX_BITMAP_SIZE - 1) - (id % ULP_INDEX_BITMAP_SIZE)));
1885 	}
1886 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE],
1887 	       (uint8_t *)queue_list, BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE);
1888 
1889 	/* set the RSS action header bit */
1890 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1891 
1892 	return BNXT_TF_RC_SUCCESS;
1893 }
1894 
1895 /* Function to handle the parsing of RTE Flow item eth Header. */
1896 static void
1897 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1898 			    const struct rte_flow_item_eth *eth_spec)
1899 {
1900 	struct ulp_rte_hdr_field *field;
1901 	uint32_t size;
1902 
1903 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1904 	size = sizeof(eth_spec->hdr.dst_addr.addr_bytes);
1905 	field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.dst_addr.addr_bytes, size);
1906 
1907 	size = sizeof(eth_spec->hdr.src_addr.addr_bytes);
1908 	field = ulp_rte_parser_fld_copy(field, eth_spec->hdr.src_addr.addr_bytes, size);
1909 
1910 	size = sizeof(eth_spec->hdr.ether_type);
1911 	field = ulp_rte_parser_fld_copy(field, &eth_spec->hdr.ether_type, size);
1912 
1913 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
1914 }
1915 
1916 /* Function to handle the parsing of RTE Flow item vlan Header. */
1917 static void
1918 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1919 			     const struct rte_flow_item_vlan *vlan_spec,
1920 			     uint32_t inner)
1921 {
1922 	struct ulp_rte_hdr_field *field;
1923 	uint32_t size;
1924 
1925 	if (!inner) {
1926 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1927 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1928 			       BNXT_ULP_HDR_BIT_OO_VLAN);
1929 	} else {
1930 		field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1931 		ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1932 			       BNXT_ULP_HDR_BIT_OI_VLAN);
1933 	}
1934 
1935 	size = sizeof(vlan_spec->hdr.vlan_tci);
1936 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.vlan_tci, size);
1937 
1938 	size = sizeof(vlan_spec->hdr.eth_proto);
1939 	field = ulp_rte_parser_fld_copy(field, &vlan_spec->hdr.eth_proto, size);
1940 }
1941 
1942 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
1943 static void
1944 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1945 			     const struct rte_flow_item_ipv4 *ip)
1946 {
1947 	struct ulp_rte_hdr_field *field;
1948 	uint32_t size;
1949 	uint8_t val8;
1950 
1951 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
1952 	size = sizeof(ip->hdr.version_ihl);
1953 	if (!ip->hdr.version_ihl)
1954 		val8 = RTE_IPV4_VHL_DEF;
1955 	else
1956 		val8 = ip->hdr.version_ihl;
1957 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1958 
1959 	size = sizeof(ip->hdr.type_of_service);
1960 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1961 
1962 	size = sizeof(ip->hdr.packet_id);
1963 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1964 
1965 	size = sizeof(ip->hdr.fragment_offset);
1966 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
1967 
1968 	size = sizeof(ip->hdr.time_to_live);
1969 	if (!ip->hdr.time_to_live)
1970 		val8 = BNXT_ULP_DEFAULT_TTL;
1971 	else
1972 		val8 = ip->hdr.time_to_live;
1973 	field = ulp_rte_parser_fld_copy(field, &val8, size);
1974 
1975 	size = sizeof(ip->hdr.next_proto_id);
1976 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1977 
1978 	size = sizeof(ip->hdr.src_addr);
1979 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1980 
1981 	size = sizeof(ip->hdr.dst_addr);
1982 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1983 
1984 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1985 }
1986 
1987 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
1988 static void
1989 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1990 			     const struct rte_flow_item_ipv6 *ip)
1991 {
1992 	struct ulp_rte_hdr_field *field;
1993 	uint32_t size;
1994 	uint32_t val32;
1995 	uint8_t val8;
1996 
1997 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
1998 	size = sizeof(ip->hdr.vtc_flow);
1999 	if (!ip->hdr.vtc_flow)
2000 		val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
2001 	else
2002 		val32 = ip->hdr.vtc_flow;
2003 	field = ulp_rte_parser_fld_copy(field, &val32, size);
2004 
2005 	size = sizeof(ip->hdr.proto);
2006 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
2007 
2008 	size = sizeof(ip->hdr.hop_limits);
2009 	if (!ip->hdr.hop_limits)
2010 		val8 = BNXT_ULP_DEFAULT_TTL;
2011 	else
2012 		val8 = ip->hdr.hop_limits;
2013 	field = ulp_rte_parser_fld_copy(field, &val8, size);
2014 
2015 	size = sizeof(ip->hdr.src_addr);
2016 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
2017 
2018 	size = sizeof(ip->hdr.dst_addr);
2019 	field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
2020 
2021 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
2022 }
2023 
2024 /* Function to handle the parsing of RTE Flow item UDP Header. */
2025 static void
2026 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
2027 			    const struct rte_flow_item_udp *udp_spec)
2028 {
2029 	struct ulp_rte_hdr_field *field;
2030 	uint32_t size;
2031 	uint8_t type = IPPROTO_UDP;
2032 
2033 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
2034 	size = sizeof(udp_spec->hdr.src_port);
2035 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
2036 
2037 	size = sizeof(udp_spec->hdr.dst_port);
2038 	field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
2039 
2040 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
2041 
2042 	/* Update thhe ip header protocol */
2043 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
2044 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
2045 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
2046 	ulp_rte_parser_fld_copy(field, &type, sizeof(type));
2047 }
2048 
2049 /* Function to handle the parsing of RTE Flow item vxlan Header. */
2050 static void
2051 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
2052 			      struct rte_flow_item_vxlan *vxlan_spec)
2053 {
2054 	struct ulp_rte_hdr_field *field;
2055 	uint32_t size;
2056 
2057 	field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
2058 	size = sizeof(vxlan_spec->hdr.flags);
2059 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.flags, size);
2060 
2061 	size = sizeof(vxlan_spec->hdr.rsvd0);
2062 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd0, size);
2063 
2064 	size = sizeof(vxlan_spec->hdr.vni);
2065 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.vni, size);
2066 
2067 	size = sizeof(vxlan_spec->hdr.rsvd1);
2068 	field = ulp_rte_parser_fld_copy(field, &vxlan_spec->hdr.rsvd1, size);
2069 
2070 	ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
2071 }
2072 
2073 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
2074 int32_t
2075 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
2076 				struct ulp_rte_parser_params *params)
2077 {
2078 	const struct rte_flow_action_vxlan_encap *vxlan_encap;
2079 	const struct rte_flow_item *item;
2080 	const struct rte_flow_item_ipv4 *ipv4_spec;
2081 	const struct rte_flow_item_ipv6 *ipv6_spec;
2082 	struct rte_flow_item_vxlan vxlan_spec;
2083 	uint32_t vlan_num = 0, vlan_size = 0;
2084 	uint32_t ip_size = 0, ip_type = 0;
2085 	uint32_t vxlan_size = 0;
2086 	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
2087 	struct ulp_rte_act_prop *ap = &params->act_prop;
2088 
2089 	vxlan_encap = action_item->conf;
2090 	if (!vxlan_encap) {
2091 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
2092 		return BNXT_TF_RC_ERROR;
2093 	}
2094 
2095 	item = vxlan_encap->definition;
2096 	if (!item) {
2097 		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
2098 		return BNXT_TF_RC_ERROR;
2099 	}
2100 
2101 	if (!ulp_rte_item_skip_void(&item, 0))
2102 		return BNXT_TF_RC_ERROR;
2103 
2104 	/* must have ethernet header */
2105 	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2106 		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
2107 		return BNXT_TF_RC_ERROR;
2108 	}
2109 
2110 	/* Parse the ethernet header */
2111 	if (item->spec)
2112 		ulp_rte_enc_eth_hdr_handler(params, item->spec);
2113 
2114 	/* Goto the next item */
2115 	if (!ulp_rte_item_skip_void(&item, 1))
2116 		return BNXT_TF_RC_ERROR;
2117 
2118 	/* May have vlan header */
2119 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2120 		vlan_num++;
2121 		if (item->spec)
2122 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
2123 
2124 		if (!ulp_rte_item_skip_void(&item, 1))
2125 			return BNXT_TF_RC_ERROR;
2126 	}
2127 
2128 	/* may have two vlan headers */
2129 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2130 		vlan_num++;
2131 		if (item->spec)
2132 			ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
2133 
2134 		if (!ulp_rte_item_skip_void(&item, 1))
2135 			return BNXT_TF_RC_ERROR;
2136 	}
2137 
2138 	/* Update the vlan count and size of more than one */
2139 	if (vlan_num) {
2140 		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2141 		vlan_num = tfp_cpu_to_be_32(vlan_num);
2142 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2143 		       &vlan_num,
2144 		       sizeof(uint32_t));
2145 		vlan_size = tfp_cpu_to_be_32(vlan_size);
2146 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2147 		       &vlan_size,
2148 		       sizeof(uint32_t));
2149 	}
2150 
2151 	/* L3 must be IPv4, IPv6 */
2152 	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2153 		ipv4_spec = item->spec;
2154 		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2155 
2156 		/* Update the ip size details */
2157 		ip_size = tfp_cpu_to_be_32(ip_size);
2158 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2159 		       &ip_size, sizeof(uint32_t));
2160 
2161 		/* update the ip type */
2162 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2163 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2164 		       &ip_type, sizeof(uint32_t));
2165 
2166 		/* update the computed field to notify it is ipv4 header */
2167 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2168 				    1);
2169 		if (ipv4_spec)
2170 			ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2171 
2172 		if (!ulp_rte_item_skip_void(&item, 1))
2173 			return BNXT_TF_RC_ERROR;
2174 	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2175 		ipv6_spec = item->spec;
2176 		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2177 
2178 		/* Update the ip size details */
2179 		ip_size = tfp_cpu_to_be_32(ip_size);
2180 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2181 		       &ip_size, sizeof(uint32_t));
2182 
2183 		 /* update the ip type */
2184 		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2185 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2186 		       &ip_type, sizeof(uint32_t));
2187 
2188 		/* update the computed field to notify it is ipv6 header */
2189 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2190 				    1);
2191 		if (ipv6_spec)
2192 			ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2193 
2194 		if (!ulp_rte_item_skip_void(&item, 1))
2195 			return BNXT_TF_RC_ERROR;
2196 	} else {
2197 		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2198 		return BNXT_TF_RC_ERROR;
2199 	}
2200 
2201 	/* L4 is UDP */
2202 	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2203 		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
2204 		return BNXT_TF_RC_ERROR;
2205 	}
2206 	if (item->spec)
2207 		ulp_rte_enc_udp_hdr_handler(params, item->spec);
2208 
2209 	if (!ulp_rte_item_skip_void(&item, 1))
2210 		return BNXT_TF_RC_ERROR;
2211 
2212 	/* Finally VXLAN */
2213 	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2214 		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
2215 		return BNXT_TF_RC_ERROR;
2216 	}
2217 	vxlan_size = sizeof(struct rte_flow_item_vxlan);
2218 	/* copy the vxlan details */
2219 	memcpy(&vxlan_spec, item->spec, vxlan_size);
2220 	vxlan_spec.hdr.flags = 0x08;
2221 	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2222 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2223 	       &vxlan_size, sizeof(uint32_t));
2224 
2225 	ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2226 
2227 	/* update the hdr_bitmap with vxlan */
2228 	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2229 	return BNXT_TF_RC_SUCCESS;
2230 }
2231 
/* Function to handle the parsing of RTE Flow action vxlan_decap Header */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
	/* Update computational field with tunnel decap info */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
	return BNXT_TF_RC_SUCCESS;
}
2245 
/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with drop; no action properties needed. */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}
2255 
2256 /* Function to handle the parsing of RTE Flow action count. */
2257 int32_t
2258 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2259 			  struct ulp_rte_parser_params *params)
2260 {
2261 	const struct rte_flow_action_count *act_count;
2262 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
2263 
2264 	act_count = action_item->conf;
2265 	if (act_count) {
2266 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2267 		       &act_count->id,
2268 		       BNXT_ULP_ACT_PROP_SZ_COUNT);
2269 	}
2270 
2271 	/* Update the hdr_bitmap with count */
2272 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2273 	return BNXT_TF_RC_SUCCESS;
2274 }
2275 
/* Function to handle the parsing of action ports.
 * Resolves the destination for a port action: egress flows get the vport,
 * ingress flows get a default vnic (driver-function or VF vnic depending
 * on the action and port type). Returns BNXT_TF_RC_SUCCESS/ERROR.
 */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
			    uint32_t ifindex,
			    enum bnxt_ulp_direction_type act_dir)
{
	enum bnxt_ulp_direction_type dir;
	uint16_t pid_s;
	uint32_t pid;
	struct ulp_rte_act_prop *act = &param->act_prop;
	enum bnxt_ulp_intf_type port_type;
	uint32_t vnic_type;

	/* Get the direction */
	/* If action implicitly specifies direction, use the specification. */
	dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
		ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
		act_dir;
	port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	if (dir == BNXT_ULP_DIR_EGRESS &&
	    port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
		/* For egress direction, fill vport */
		if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
			return BNXT_TF_RC_ERROR;

		/* Store the vport id in network byte order. */
		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
	} else {
		/* For ingress direction, fill vnic */
		/*
		 * Action		Destination
		 * ------------------------------------
		 * PORT_REPRESENTOR	Driver Function
		 * ------------------------------------
		 * REPRESENTED_PORT	VF
		 * ------------------------------------
		 * PORT_ID		VF
		 */
		if (act_dir != BNXT_ULP_DIR_INGRESS &&
		    port_type == BNXT_ULP_INTF_TYPE_VF_REP)
			vnic_type = BNXT_ULP_VF_FUNC_VNIC;
		else
			vnic_type = BNXT_ULP_DRV_FUNC_VNIC;

		if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
						 vnic_type, &pid_s))
			return BNXT_TF_RC_ERROR;

		/* Store the vnic id in network byte order. */
		pid = pid_s;
		pid = rte_cpu_to_be_32(pid);
		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the action port set bit */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
	return BNXT_TF_RC_SUCCESS;
}
2336 
2337 /* Function to handle the parsing of RTE Flow action PF. */
2338 int32_t
2339 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2340 		       struct ulp_rte_parser_params *params)
2341 {
2342 	uint32_t port_id;
2343 	uint32_t ifindex;
2344 	enum bnxt_ulp_intf_type intf_type;
2345 
2346 	/* Get the port id of the current device */
2347 	port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2348 
2349 	/* Get the port db ifindex */
2350 	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2351 					      &ifindex)) {
2352 		BNXT_TF_DBG(ERR, "Invalid port id\n");
2353 		return BNXT_TF_RC_ERROR;
2354 	}
2355 
2356 	/* Check the port is PF port */
2357 	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2358 	if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2359 		BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2360 		return BNXT_TF_RC_ERROR;
2361 	}
2362 	/* Update the action properties */
2363 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2364 	return ulp_rte_parser_act_port_set(params, ifindex,
2365 					   BNXT_ULP_DIR_INVALID);
2366 }
2367 
/* Function to handle the parsing of RTE Flow action VF.
 * Directs the flow to the VF identified by the action's logical VF id
 * (an offset from the port's first VF). Returns a BNXT_TF_RC_* code.
 */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vf *vf_action;
	enum bnxt_ulp_intf_type intf_type;
	uint32_t ifindex;
	struct bnxt *bp;

	vf_action = action_item->conf;
	if (!vf_action) {
		BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	/* 'original' semantics (use the original VF id) is not supported. */
	if (vf_action->original) {
		BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
		return BNXT_TF_RC_PARSE_ERR;
	}

	bp = bnxt_pmd_get_bp(params->port_id);
	if (bp == NULL) {
		BNXT_TF_DBG(ERR, "Invalid bp\n");
		return BNXT_TF_RC_ERROR;
	}

	/* vf_action->id is a logical number which in this case is an
	 * offset from the first VF. So, to get the absolute VF id, the
	 * offset must be added to the absolute first vf id of that port.
	 */
	if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
						 bp->first_vf_id +
						 vf_action->id,
						 &ifindex)) {
		BNXT_TF_DBG(ERR, "VF is not valid interface\n");
		return BNXT_TF_RC_ERROR;
	}
	/* Check the port is VF port */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
	    intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
		BNXT_TF_DBG(ERR, "Port is not a VF port\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Update the action properties */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	return ulp_rte_parser_act_port_set(params, ifindex,
					   BNXT_ULP_DIR_INVALID);
}
2419 
/* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT.
 * Extracts the target ethdev id from the action-specific conf and maps
 * the action type onto an implicit direction:
 *   PORT_ID          -> no implicit direction (derived from the flow)
 *   PORT_REPRESENTOR -> ingress (deliver to the representor's function)
 *   REPRESENTED_PORT -> egress  (deliver to the represented endpoint)
 */
int32_t
ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
			 struct ulp_rte_parser_params *param)
{
	uint32_t ethdev_id;
	uint32_t ifindex;
	enum bnxt_ulp_intf_type intf_type;
	enum bnxt_ulp_direction_type act_dir;

	if (!act_item->conf) {
		BNXT_TF_DBG(ERR,
			    "ParseErr: Invalid Argument\n");
		return BNXT_TF_RC_PARSE_ERR;
	}
	switch (act_item->type) {
	case RTE_FLOW_ACTION_TYPE_PORT_ID: {
		const struct rte_flow_action_port_id *port_id = act_item->conf;

		/* 'original' port semantics is not supported. */
		if (port_id->original) {
			BNXT_TF_DBG(ERR,
				    "ParseErr:Portid Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		ethdev_id = port_id->id;
		act_dir = BNXT_ULP_DIR_INVALID;
		break;
	}
	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
		const struct rte_flow_action_ethdev *ethdev = act_item->conf;

		ethdev_id = ethdev->port_id;
		act_dir = BNXT_ULP_DIR_INGRESS;
		break;
	}
	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
		const struct rte_flow_action_ethdev *ethdev = act_item->conf;

		ethdev_id = ethdev->port_id;
		act_dir = BNXT_ULP_DIR_EGRESS;
		break;
	}
	default:
		BNXT_TF_DBG(ERR, "Unknown port action\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the intf type */
	intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
	if (!intf_type) {
		BNXT_TF_DBG(ERR, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Set the action port */
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID,
			    ethdev_id);
	return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
}
2487 
/* Function to handle the parsing of RTE Flow action pop vlan. */
int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Update the act_bitmap with pop; no action properties needed. */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
	return BNXT_TF_RC_SUCCESS;
}
2497 
2498 /* Function to handle the parsing of RTE Flow action push vlan. */
2499 int32_t
2500 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2501 				 struct ulp_rte_parser_params *params)
2502 {
2503 	const struct rte_flow_action_of_push_vlan *push_vlan;
2504 	uint16_t ethertype;
2505 	struct ulp_rte_act_prop *act = &params->act_prop;
2506 
2507 	push_vlan = action_item->conf;
2508 	if (push_vlan) {
2509 		ethertype = push_vlan->ethertype;
2510 		if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2511 			BNXT_TF_DBG(ERR,
2512 				    "Parse Err: Ethertype not supported\n");
2513 			return BNXT_TF_RC_PARSE_ERR;
2514 		}
2515 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2516 		       &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2517 		/* Update the hdr_bitmap with push vlan */
2518 		ULP_BITMAP_SET(params->act_bitmap.bits,
2519 			       BNXT_ULP_ACT_BIT_PUSH_VLAN);
2520 		return BNXT_TF_RC_SUCCESS;
2521 	}
2522 	BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2523 	return BNXT_TF_RC_ERROR;
2524 }
2525 
2526 /* Function to handle the parsing of RTE Flow action set vlan id. */
2527 int32_t
2528 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2529 				    struct ulp_rte_parser_params *params)
2530 {
2531 	const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2532 	uint32_t vid;
2533 	struct ulp_rte_act_prop *act = &params->act_prop;
2534 
2535 	vlan_vid = action_item->conf;
2536 	if (vlan_vid && vlan_vid->vlan_vid) {
2537 		vid = vlan_vid->vlan_vid;
2538 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2539 		       &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2540 		/* Update the hdr_bitmap with vlan vid */
2541 		ULP_BITMAP_SET(params->act_bitmap.bits,
2542 			       BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2543 		return BNXT_TF_RC_SUCCESS;
2544 	}
2545 	BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2546 	return BNXT_TF_RC_ERROR;
2547 }
2548 
2549 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2550 int32_t
2551 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2552 				    struct ulp_rte_parser_params *params)
2553 {
2554 	const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2555 	uint8_t pcp;
2556 	struct ulp_rte_act_prop *act = &params->act_prop;
2557 
2558 	vlan_pcp = action_item->conf;
2559 	if (vlan_pcp) {
2560 		pcp = vlan_pcp->vlan_pcp;
2561 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2562 		       &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2563 		/* Update the hdr_bitmap with vlan vid */
2564 		ULP_BITMAP_SET(params->act_bitmap.bits,
2565 			       BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2566 		return BNXT_TF_RC_SUCCESS;
2567 	}
2568 	BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2569 	return BNXT_TF_RC_ERROR;
2570 }
2571 
2572 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2573 int32_t
2574 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2575 				 struct ulp_rte_parser_params *params)
2576 {
2577 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2578 	struct ulp_rte_act_prop *act = &params->act_prop;
2579 
2580 	set_ipv4 = action_item->conf;
2581 	if (set_ipv4) {
2582 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2583 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2584 		/* Update the hdr_bitmap with set ipv4 src */
2585 		ULP_BITMAP_SET(params->act_bitmap.bits,
2586 			       BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2587 		return BNXT_TF_RC_SUCCESS;
2588 	}
2589 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2590 	return BNXT_TF_RC_ERROR;
2591 }
2592 
2593 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2594 int32_t
2595 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2596 				 struct ulp_rte_parser_params *params)
2597 {
2598 	const struct rte_flow_action_set_ipv4 *set_ipv4;
2599 	struct ulp_rte_act_prop *act = &params->act_prop;
2600 
2601 	set_ipv4 = action_item->conf;
2602 	if (set_ipv4) {
2603 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2604 		       &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2605 		/* Update the hdr_bitmap with set ipv4 dst */
2606 		ULP_BITMAP_SET(params->act_bitmap.bits,
2607 			       BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2608 		return BNXT_TF_RC_SUCCESS;
2609 	}
2610 	BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2611 	return BNXT_TF_RC_ERROR;
2612 }
2613 
2614 /* Function to handle the parsing of RTE Flow action set tp src.*/
2615 int32_t
2616 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2617 			       struct ulp_rte_parser_params *params)
2618 {
2619 	const struct rte_flow_action_set_tp *set_tp;
2620 	struct ulp_rte_act_prop *act = &params->act_prop;
2621 
2622 	set_tp = action_item->conf;
2623 	if (set_tp) {
2624 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2625 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2626 		/* Update the hdr_bitmap with set tp src */
2627 		ULP_BITMAP_SET(params->act_bitmap.bits,
2628 			       BNXT_ULP_ACT_BIT_SET_TP_SRC);
2629 		return BNXT_TF_RC_SUCCESS;
2630 	}
2631 
2632 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2633 	return BNXT_TF_RC_ERROR;
2634 }
2635 
2636 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2637 int32_t
2638 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2639 			       struct ulp_rte_parser_params *params)
2640 {
2641 	const struct rte_flow_action_set_tp *set_tp;
2642 	struct ulp_rte_act_prop *act = &params->act_prop;
2643 
2644 	set_tp = action_item->conf;
2645 	if (set_tp) {
2646 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2647 		       &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2648 		/* Update the hdr_bitmap with set tp dst */
2649 		ULP_BITMAP_SET(params->act_bitmap.bits,
2650 			       BNXT_ULP_ACT_BIT_SET_TP_DST);
2651 		return BNXT_TF_RC_SUCCESS;
2652 	}
2653 
2654 	BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2655 	return BNXT_TF_RC_ERROR;
2656 }
2657 
2658 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2659 int32_t
2660 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2661 			    struct ulp_rte_parser_params *params)
2662 {
2663 	/* Update the act_bitmap with dec ttl */
2664 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2665 	return BNXT_TF_RC_SUCCESS;
2666 }
2667 
2668 /* Function to handle the parsing of RTE Flow action JUMP */
2669 int32_t
2670 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2671 			 struct ulp_rte_parser_params *params)
2672 {
2673 	/* Update the act_bitmap with dec ttl */
2674 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2675 	return BNXT_TF_RC_SUCCESS;
2676 }
2677 
2678 int32_t
2679 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2680 			   struct ulp_rte_parser_params *params)
2681 {
2682 	const struct rte_flow_action_sample *sample;
2683 	int ret;
2684 
2685 	sample = action_item->conf;
2686 
2687 	/* if SAMPLE bit is set it means this sample action is nested within the
2688 	 * actions of another sample action; this is not allowed
2689 	 */
2690 	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2691 			     BNXT_ULP_ACT_BIT_SAMPLE))
2692 		return BNXT_TF_RC_ERROR;
2693 
2694 	/* a sample action is only allowed as a shared action */
2695 	if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2696 			      BNXT_ULP_ACT_BIT_SHARED))
2697 		return BNXT_TF_RC_ERROR;
2698 
2699 	/* only a ratio of 1 i.e. 100% is supported */
2700 	if (sample->ratio != 1)
2701 		return BNXT_TF_RC_ERROR;
2702 
2703 	if (!sample->actions)
2704 		return BNXT_TF_RC_ERROR;
2705 
2706 	/* parse the nested actions for a sample action */
2707 	ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2708 	if (ret == BNXT_TF_RC_SUCCESS)
2709 		/* Update the act_bitmap with sample */
2710 		ULP_BITMAP_SET(params->act_bitmap.bits,
2711 			       BNXT_ULP_ACT_BIT_SAMPLE);
2712 
2713 	return ret;
2714 }
2715 
2716 int32_t
2717 ulp_rte_action_hdlr_handler(const struct rte_flow_action *action_item,
2718 			   struct ulp_rte_parser_params *params)
2719 {
2720 	const struct rte_flow_action_handle *handle;
2721 	struct bnxt_ulp_shared_act_info *act_info;
2722 	uint64_t action_bitmask;
2723 	uint32_t shared_action_type;
2724 	struct ulp_rte_act_prop *act = &params->act_prop;
2725 	uint64_t tmp64;
2726 	enum bnxt_ulp_direction_type dir, handle_dir;
2727 	uint32_t act_info_entries = 0;
2728 	int32_t ret;
2729 
2730 	handle = action_item->conf;
2731 
2732 	/* Have to use the computed direction since the params->dir_attr
2733 	 * can be different (transfer, ingress, egress)
2734 	 */
2735 	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
2736 
2737 	/* direction of shared action must match direction of flow */
2738 	ret = bnxt_get_action_handle_direction(handle, &handle_dir);
2739 	if (ret || dir != handle_dir) {
2740 		BNXT_TF_DBG(ERR, "Invalid shared handle or direction\n");
2741 		return BNXT_TF_RC_ERROR;
2742 	}
2743 
2744 	if (bnxt_get_action_handle_type(handle, &shared_action_type)) {
2745 		BNXT_TF_DBG(ERR, "Invalid shared handle\n");
2746 		return BNXT_TF_RC_ERROR;
2747 	}
2748 
2749 	act_info = bnxt_ulp_shared_act_info_get(&act_info_entries);
2750 	if (shared_action_type >= act_info_entries || !act_info) {
2751 		BNXT_TF_DBG(ERR, "Invalid shared handle\n");
2752 		return BNXT_TF_RC_ERROR;
2753 	}
2754 
2755 	action_bitmask = act_info[shared_action_type].act_bitmask;
2756 
2757 	/* shared actions of the same type cannot be repeated */
2758 	if (params->act_bitmap.bits & action_bitmask) {
2759 		BNXT_TF_DBG(ERR, "indirect actions cannot be repeated\n");
2760 		return BNXT_TF_RC_ERROR;
2761 	}
2762 
2763 	tmp64 = tfp_cpu_to_be_64((uint64_t)bnxt_get_action_handle_index(handle));
2764 
2765 	memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE],
2766 	       &tmp64, BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE);
2767 
2768 	ULP_BITMAP_SET(params->act_bitmap.bits, action_bitmask);
2769 
2770 	return BNXT_TF_RC_SUCCESS;
2771 }
2772 
2773 /* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
2774 int32_t
2775 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2776 				   struct ulp_rte_parser_params *params)
2777 {
2778 	/* Set the F1 flow header bit */
2779 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2780 	return ulp_rte_vxlan_decap_act_handler(action_item, params);
2781 }
2782 
2783 /* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
2784 int32_t
2785 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
2786 				       struct ulp_rte_parser_params *params)
2787 {
2788 	RTE_SET_USED(item);
2789 	/* Set the F2 flow header bit */
2790 	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
2791 	return ulp_rte_vxlan_decap_act_handler(NULL, params);
2792 }
2793 
2794 /* Function to handle the parsing of RTE Flow action queue. */
2795 int32_t
2796 ulp_rte_queue_act_handler(const struct rte_flow_action *action_item,
2797 			  struct ulp_rte_parser_params *param)
2798 {
2799 	const struct rte_flow_action_queue *q_info;
2800 	struct ulp_rte_act_prop *ap = &param->act_prop;
2801 
2802 	if (action_item == NULL || action_item->conf == NULL) {
2803 		BNXT_TF_DBG(ERR, "Parse Err: invalid queue configuration\n");
2804 		return BNXT_TF_RC_ERROR;
2805 	}
2806 
2807 	q_info = action_item->conf;
2808 	/* Copy the queue into the specific action properties */
2809 	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX],
2810 	       &q_info->index, BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX);
2811 
2812 	/* set the queue action header bit */
2813 	ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE);
2814 
2815 	return BNXT_TF_RC_SUCCESS;
2816 }
2817 
2818 /* Function to handle the parsing of RTE Flow action meter. */
2819 int32_t
2820 ulp_rte_meter_act_handler(const struct rte_flow_action *action_item,
2821 			  struct ulp_rte_parser_params *params)
2822 {
2823 	const struct rte_flow_action_meter *meter;
2824 	struct ulp_rte_act_prop *act_prop = &params->act_prop;
2825 	uint32_t tmp_meter_id;
2826 
2827 	if (action_item == NULL || action_item->conf == NULL) {
2828 		BNXT_TF_DBG(ERR, "Parse Err: invalid meter configuration\n");
2829 		return BNXT_TF_RC_ERROR;
2830 	}
2831 
2832 	meter = action_item->conf;
2833 	if (meter) {
2834 		/* validate the mtr_id and update the reference counter */
2835 		tmp_meter_id = tfp_cpu_to_be_32(meter->mtr_id);
2836 		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER],
2837 		       &tmp_meter_id,
2838 		       BNXT_ULP_ACT_PROP_SZ_METER);
2839 	}
2840 
2841 	/* set the meter action header bit */
2842 	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_METER);
2843 
2844 	return BNXT_TF_RC_SUCCESS;
2845 }
2846 
2847 /* Function to handle the parsing of RTE Flow action set mac src.*/
2848 int32_t
2849 ulp_rte_set_mac_src_act_handler(const struct rte_flow_action *action_item,
2850 				struct ulp_rte_parser_params *params)
2851 {
2852 	const struct rte_flow_action_set_mac *set_mac;
2853 	struct ulp_rte_act_prop *act = &params->act_prop;
2854 
2855 	set_mac = action_item->conf;
2856 	if (set_mac) {
2857 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC],
2858 		       set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC);
2859 		/* Update the hdr_bitmap with set mac src */
2860 		ULP_BITMAP_SET(params->act_bitmap.bits,
2861 			       BNXT_ULP_ACT_BIT_SET_MAC_SRC);
2862 		return BNXT_TF_RC_SUCCESS;
2863 	}
2864 	BNXT_TF_DBG(ERR, "Parse Error: set mac src arg is invalid\n");
2865 	return BNXT_TF_RC_ERROR;
2866 }
2867 
2868 /* Function to handle the parsing of RTE Flow action set mac dst.*/
2869 int32_t
2870 ulp_rte_set_mac_dst_act_handler(const struct rte_flow_action *action_item,
2871 				struct ulp_rte_parser_params *params)
2872 {
2873 	const struct rte_flow_action_set_mac *set_mac;
2874 	struct ulp_rte_act_prop *act = &params->act_prop;
2875 
2876 	set_mac = action_item->conf;
2877 	if (set_mac) {
2878 		memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST],
2879 		       set_mac->mac_addr, BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST);
2880 		/* Update the hdr_bitmap with set ipv4 dst */
2881 		ULP_BITMAP_SET(params->act_bitmap.bits,
2882 			       BNXT_ULP_ACT_BIT_SET_MAC_DST);
2883 		return BNXT_TF_RC_SUCCESS;
2884 	}
2885 	BNXT_TF_DBG(ERR, "Parse Error: set mac dst arg is invalid\n");
2886 	return BNXT_TF_RC_ERROR;
2887 }
2888